Merge branch '3.0' of github.com:taosdata/TDengine into 3.0

dapan1121 2022-07-23 15:14:11 +08:00
commit d3e07abb4e
41 changed files with 4611 additions and 774 deletions

View File

@@ -184,8 +184,8 @@ static FORCE_INLINE void colDataAppendDouble(SColumnInfoData* pColumnInfoData, u
 int32_t getJsonValueLen(const char* data);
 int32_t colDataAppend(SColumnInfoData* pColumnInfoData, uint32_t currentRow, const char* pData, bool isNull);
-int32_t colDataMergeCol(SColumnInfoData* pColumnInfoData, uint32_t numOfRow1, uint32_t* capacity,
-                        const SColumnInfoData* pSource, uint32_t numOfRow2);
+int32_t colDataMergeCol(SColumnInfoData* pColumnInfoData, int32_t numOfRow1, int32_t* capacity,
+                        const SColumnInfoData* pSource, int32_t numOfRow2);
 int32_t colDataAssign(SColumnInfoData* pColumnInfoData, const SColumnInfoData* pSource, int32_t numOfRows,
                       const SDataBlockInfo* pBlockInfo);
 int32_t blockDataUpdateTsWindow(SSDataBlock* pDataBlock, int32_t tsColumnIndex);
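The switch from uint32_t to int32_t matters because colDataMergeCol reports either the merged row count or a TSDB error code (negative when stored in an int32_t) through the same return value, as the implementation hunk below shows. A minimal standalone sketch of that convention; mergeRows and DEMO_ERR_OOM are illustrative names, not TDengine APIs:

/* Why signed row counts are safer here: a negative error code returned through
 * an unsigned channel would wrap to a huge positive "row count". */
#include <stdint.h>
#include <stdio.h>

#define DEMO_ERR_OOM (-1) /* hypothetical error code, mirroring negative TSDB codes */

static int32_t mergeRows(int32_t numOfRow1, int32_t numOfRow2, int32_t* capacity) {
  if (numOfRow2 == 0) {
    return numOfRow1;      /* nothing to merge: the count flows back unchanged */
  }
  int32_t final = numOfRow1 + numOfRow2;
  if (final > *capacity) {
    if (final > INT32_MAX / 2) {
      return DEMO_ERR_OOM; /* negative error travels safely in int32_t */
    }
    *capacity = final;     /* pretend the realloc succeeded */
  }
  return final;
}

int main(void) {
  int32_t cap = 8;
  printf("merged rows: %d (capacity now %d)\n", mergeRows(5, 7, &cap), cap);
  return 0;
}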

View File

@@ -65,7 +65,7 @@ qTaskInfo_t qCreateStreamExecTaskInfo(void* msg, SReadHandle* readers);
  * @return
  */
-qTaskInfo_t qCreateQueueExecTaskInfo(void* msg, SReadHandle* readers, int32_t* numOfCols,
-                                     SSchemaWrapper** pSchemaWrapper);
+qTaskInfo_t qCreateQueueExecTaskInfo(void* msg, SReadHandle* readers, int32_t* numOfCols,
+                                     SSchemaWrapper** pSchema);

 /**
  * Set the input data block for the stream scan.

@@ -196,6 +196,8 @@ int32_t qStreamInput(qTaskInfo_t tinfo, void* pItem);
 int32_t qStreamPrepareRecover(qTaskInfo_t tinfo, int64_t startVer, int64_t endVer);

+STimeWindow getAlignQueryTimeWindow(SInterval* pInterval, int32_t precision, int64_t key);
+
 #ifdef __cplusplus
 }
 #endif

View File

@@ -8,7 +8,7 @@ AUX_SOURCE_DIRECTORY(${CMAKE_CURRENT_SOURCE_DIR} SOURCE_LIST)
 ADD_EXECUTABLE(clientTest clientTests.cpp)
 TARGET_LINK_LIBRARIES(
     clientTest
-    PUBLIC os util common transport parser catalog scheduler function gtest taos_static qcom
+    PUBLIC os util common transport parser catalog scheduler function gtest taos_static qcom executor
 )

 ADD_EXECUTABLE(tmqTest tmqTest.cpp)

View File

@@ -27,6 +27,7 @@
 #pragma GCC diagnostic ignored "-Wsign-compare"
 #include "taos.h"
+#include "executor.h"

 namespace {
 void showDB(TAOS* pConn) {

@@ -823,6 +824,17 @@ TEST(testCase, async_api_test) {

 TEST(testCase, update_test) {
+  SInterval interval = {0};
+  interval.offset = 8000;
+  interval.interval = 10000;
+  interval.sliding = 4000;
+  interval.intervalUnit = 's';
+  interval.offsetUnit = 's';
+  interval.slidingUnit = 's';
+
+  //  STimeWindow w = getAlignQueryTimeWindow(&interval, 0, 1630000000000);
+  STimeWindow w = getAlignQueryTimeWindow(&interval, 0, 1629999999999);
+
   TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
   ASSERT_NE(pConn, nullptr);

View File

@@ -214,8 +214,8 @@ static void doBitmapMerge(SColumnInfoData* pColumnInfoData, int32_t numOfRow1, c
   }
 }

-int32_t colDataMergeCol(SColumnInfoData* pColumnInfoData, uint32_t numOfRow1, uint32_t* capacity,
-                        const SColumnInfoData* pSource, uint32_t numOfRow2) {
+int32_t colDataMergeCol(SColumnInfoData* pColumnInfoData, int32_t numOfRow1, int32_t* capacity,
+                        const SColumnInfoData* pSource, int32_t numOfRow2) {
   ASSERT(pColumnInfoData != NULL && pSource != NULL && pColumnInfoData->info.type == pSource->info.type);
   if (numOfRow2 == 0) {
     return numOfRow1;

@@ -263,7 +263,8 @@ int32_t colDataMergeCol(SColumnInfoData* pColumnInfoData, uint32_t numOfRow1, ui
     pColumnInfoData->varmeta.length = len + oldLen;
   } else {
     if (finalNumOfRows > *capacity || (numOfRow1 == 0 && pColumnInfoData->info.bytes != 0)) {
-      ASSERT(finalNumOfRows * pColumnInfoData->info.bytes);
+      // all data may be null; when pColumnInfoData->info.type == 0, bytes == 0
+      // ASSERT(finalNumOfRows * pColumnInfoData->info.bytes);
       char* tmp = taosMemoryRealloc(pColumnInfoData->pData, finalNumOfRows * pColumnInfoData->info.bytes);
       if (tmp == NULL) {
         return TSDB_CODE_VND_OUT_OF_MEMORY;

View File

@@ -815,20 +815,17 @@ int64_t taosTimeTruncate(int64_t t, const SInterval* pInterval, int32_t precisio
   if (pInterval->offset > 0) {
     start = taosTimeAdd(start, pInterval->offset, pInterval->offsetUnit, precision);
-    if (start > t) {
-      start = taosTimeAdd(start, -pInterval->interval, pInterval->intervalUnit, precision);
-    } else {
-      // try to move current window to the left-hande-side, due to the offset effect.
-      int64_t end = taosTimeAdd(start, pInterval->interval, pInterval->intervalUnit, precision) - 1;
-      int64_t newEnd = end;
-      while (newEnd >= t) {
-        end = newEnd;
-        newEnd = taosTimeAdd(newEnd, -pInterval->sliding, pInterval->slidingUnit, precision);
-      }
-      start = taosTimeAdd(end, -pInterval->interval, pInterval->intervalUnit, precision) + 1;
-    }
+
+    // try to move the current window to the left-hand side, due to the offset effect.
+    int64_t end = taosTimeAdd(start, pInterval->interval, pInterval->intervalUnit, precision) - 1;
+
+    int64_t newEnd = end;
+    while (newEnd >= t) {
+      end = newEnd;
+      newEnd = taosTimeAdd(newEnd, -pInterval->sliding, pInterval->slidingUnit, precision);
+    }
+
+    start = taosTimeAdd(end, -pInterval->interval, pInterval->intervalUnit, precision) + 1;
   }

   return start;
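To make the rewritten loop concrete, here is a standalone sketch with plain int64_t millisecond arithmetic standing in for taosTimeAdd (which also understands calendar units); the function name is illustrative, and the constants mirror the values used in the update_test case above:

/* Slide the candidate window's end leftward by `sliding` while it still covers t,
 * then report the start of the leftmost window that contains t. */
#include <stdint.h>
#include <stdio.h>

int64_t alignedWindowStart(int64_t t, int64_t interval, int64_t sliding, int64_t start) {
  /* start: the interval-truncated timestamp plus the user-specified offset */
  int64_t end = start + interval - 1;  /* end of the candidate window */
  int64_t newEnd = end;
  while (newEnd >= t) {                /* slide left while t is still covered */
    end = newEnd;
    newEnd -= sliding;
  }
  return end - interval + 1;           /* start of the leftmost covering window */
}

int main(void) {
  /* 10s interval, 4s sliding, a truncated+offset start, and t = 1629999999999 */
  printf("%lld\n", (long long)alignedWindowStart(1629999999999LL, 10000, 4000, 1629999998000LL));
  return 0;
}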

View File

@@ -569,8 +569,10 @@ int32_t tqProcessVgChangeReq(STQ* pTq, char* msg, int32_t msgLen) {
       taosArrayDestroy(tbUidList);
     }
     taosHashPut(pTq->handles, req.subKey, strlen(req.subKey), pHandle, sizeof(STqHandle));
+    tqDebug("try to persist handle %s consumer %ld", req.subKey, pHandle->consumerId);
     if (tqMetaSaveHandle(pTq, req.subKey, pHandle) < 0) {
       // TODO
+      ASSERT(0);
     }
   } else {
     /*ASSERT(pExec->consumerId == req.oldConsumerId);*/

View File

@@ -101,6 +101,7 @@ int32_t tqMetaOpen(STQ* pTq) {
       handle.execHandle.execDb.pFilterOutTbUid =
           taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK);
     }
+    tqDebug("tq restore %s consumer %ld", handle.subKey, handle.consumerId);
     taosHashPut(pTq->handles, pKey, kLen, &handle, sizeof(STqHandle));
   }

View File

@@ -112,8 +112,8 @@ int32_t vnodePreProcessWriteMsg(SVnode *pVnode, SRpcMsg *pMsg) {
       tEncodeSize(tEncodeDeleteRes, &res, size, ret);

       pCont = rpcMallocCont(size + sizeof(SMsgHead));
-      ((SMsgHead *)pCont)->contLen = htonl(size + sizeof(SMsgHead));
-      ((SMsgHead *)pCont)->vgId = htonl(TD_VID(pVnode));
+      ((SMsgHead *)pCont)->contLen = size + sizeof(SMsgHead);
+      ((SMsgHead *)pCont)->vgId = TD_VID(pVnode);

       tEncoderInit(pCoder, pCont + sizeof(SMsgHead), size);
       tEncodeDeleteRes(pCoder, &res);
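The two changed lines drop the host-to-network byte-order conversion, presumably because this pre-processed message is consumed on the same host rather than serialized to the wire at this point. As a refresher on what was removed, a tiny POSIX sketch of htonl:

/* htonl() converts a 32-bit value from host byte order to big-endian network
 * order. Header fields kept in host order are only safe while the message
 * stays inside one process or host. */
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
  uint32_t contLen = 0x00000140;   /* a 320-byte payload length, host order */
  uint32_t wire = htonl(contLen);  /* big-endian layout for the wire */
  printf("host: 0x%08x  network: 0x%08x\n", contLen, wire);
  return 0;
}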

View File

@@ -108,6 +108,9 @@ SSDataBlock* createResDataBlock(SDataBlockDescNode* pNode);
 EDealRes doTranslateTagExpr(SNode** pNode, void* pContext);
 int32_t getTableList(void* metaHandle, void* pVnode, SScanPhysiNode* pScanNode, SNode* pTagCond, SNode* pTagIndexCond, STableListInfo* pListInfo);
+int32_t getGroupIdFromTagsVal(void* pMeta, uint64_t uid, SNodeList* pGroupNode, char* keyBuf, uint64_t* pGroupId);
+size_t  getTableTagsBufLen(const SNodeList* pGroups);
 SArray* createSortInfo(SNodeList* pNodeList);
 SArray* extractPartitionColInfo(SNodeList* pNodeList);
 SArray* extractColMatchInfo(SNodeList* pNodeList, SDataBlockDescNode* pOutputNodeList, int32_t* numOfOutputCols,

@@ -129,6 +132,6 @@ int32_t convertFillType(int32_t mode);
 int32_t resultrowComparAsc(const void* p1, const void* p2);

-int32_t isTableOk(STableKeyInfo* info, SNode* pTagCond, void* metaHandle, bool* pQualified);
+int32_t isQualifiedTable(STableKeyInfo* info, SNode* pTagCond, void* metaHandle, bool* pQualified);

 #endif  // TDENGINE_QUERYUTIL_H

View File

@@ -422,6 +422,7 @@ typedef struct SStreamScanInfo {
   // status for tmq
   // SSchemaWrapper schema;
   STqOffset      offset;
+  SNodeList*     pGroupTags;
   SNode*         pTagCond;
   SNode*         pTagIndexCond;
 } SStreamScanInfo;

@@ -544,9 +545,10 @@ typedef struct SProjectOperatorInfo {
   SOptrBasicInfo binfo;
   SAggSupporter  aggSup;
   SNode*         pFilterNode;  // filter info, which is push down by optimizer
-  SSDataBlock*   existDataBlock;
   SArray*        pPseudoColInfo;
   SLimitInfo     limitInfo;
+  bool           mergeDataBlocks;
+  SSDataBlock*   pFinalRes;
 } SProjectOperatorInfo;

 typedef struct SIndefOperatorInfo {

@@ -803,7 +805,7 @@ int32_t getTableScanInfo(SOperatorInfo* pOperator, int32_t *order, int32_t* scan
 int32_t getBufferPgSize(int32_t rowSize, uint32_t* defaultPgsz, uint32_t* defaultBufsz);

 void doSetOperatorCompleted(SOperatorInfo* pOperator);
-void doFilter(const SNode* pFilterNode, SSDataBlock* pBlock);
+void doFilter(const SNode* pFilterNode, SSDataBlock* pBlock, const SArray* pColMatchInfo);
 int32_t addTagPseudoColumnData(SReadHandle* pHandle, SExprInfo* pPseudoExpr, int32_t numOfPseudoExpr,
                                SSDataBlock* pBlock, const char* idStr);

@@ -867,7 +869,7 @@ SOperatorInfo* createDataBlockInfoScanOperator(void* dataReader, SReadHandle* re
                                                SExecTaskInfo* pTaskInfo);

 SOperatorInfo* createStreamScanOperatorInfo(SReadHandle* pHandle, STableScanPhysiNode* pTableScanNode, SNode* pTagCond,
-                                            STimeWindowAggSupp* pTwAggSup, SExecTaskInfo* pTaskInfo);
+                                            SExecTaskInfo* pTaskInfo);

 SOperatorInfo* createFillOperatorInfo(SOperatorInfo* downstream, SFillPhysiNode* pPhyFillNode, SExecTaskInfo* pTaskInfo);

@@ -877,8 +879,7 @@ SOperatorInfo* createStatewindowOperatorInfo(SOperatorInfo* downstream, SExprInf
 SOperatorInfo* createPartitionOperatorInfo(SOperatorInfo* downstream, SPartitionPhysiNode* pPartNode, SExecTaskInfo* pTaskInfo);

-SOperatorInfo* createTimeSliceOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pNode, /*SExprInfo* pExprInfo, int32_t numOfCols,
-                                           SSDataBlock* pResultBlock, const SNodeListNode* pValNode, */SExecTaskInfo* pTaskInfo);
+SOperatorInfo* createTimeSliceOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pNode, SExecTaskInfo* pTaskInfo);

 SOperatorInfo* createMergeJoinOperatorInfo(SOperatorInfo** pDownstream, int32_t numOfDownstream, SJoinPhysiNode* pJoinNode,
                                            SExecTaskInfo* pTaskInfo);

View File

@@ -265,7 +265,7 @@ EDealRes doTranslateTagExpr(SNode** pNode, void* pContext) {
   return DEAL_RES_CONTINUE;
 }

-int32_t isTableOk(STableKeyInfo* info, SNode* pTagCond, void* metaHandle, bool* pQualified) {
+int32_t isQualifiedTable(STableKeyInfo* info, SNode* pTagCond, void* metaHandle, bool* pQualified) {
   int32_t     code = TSDB_CODE_SUCCESS;
   SMetaReader mr = {0};

@@ -356,7 +356,7 @@ int32_t getTableList(void* metaHandle, void* pVnode, SScanPhysiNode* pScanNode,
     STableKeyInfo* info = taosArrayGet(pListInfo->pTableList, i);

     bool qualified = true;
-    code = isTableOk(info, pTagCond, metaHandle, &qualified);
+    code = isQualifiedTable(info, pTagCond, metaHandle, &qualified);
     if (code != TSDB_CODE_SUCCESS) {
       return code;
     }

@@ -379,6 +379,82 @@ int32_t getTableList(void* metaHandle, void* pVnode, SScanPhysiNode* pScanNode,
   return code;
 }

size_t getTableTagsBufLen(const SNodeList* pGroups) {
  size_t keyLen = 0;

  SNode* node;
  FOREACH(node, pGroups) {
    SExprNode* pExpr = (SExprNode*)node;
    keyLen += pExpr->resType.bytes;
  }

  keyLen += sizeof(int8_t) * LIST_LENGTH(pGroups);
  return keyLen;
}

int32_t getGroupIdFromTagsVal(void* pMeta, uint64_t uid, SNodeList* pGroupNode, char* keyBuf, uint64_t* pGroupId) {
  SMetaReader mr = {0};
  metaReaderInit(&mr, pMeta, 0);
  metaGetTableEntryByUid(&mr, uid);

  SNodeList* groupNew = nodesCloneList(pGroupNode);
  nodesRewriteExprsPostOrder(groupNew, doTranslateTagExpr, &mr);

  char* isNull = (char*)keyBuf;
  char* pStart = (char*)keyBuf + sizeof(int8_t) * LIST_LENGTH(pGroupNode);

  SNode*  pNode;
  int32_t index = 0;
  FOREACH(pNode, groupNew) {
    SNode*  pNew = NULL;
    int32_t code = scalarCalculateConstants(pNode, &pNew);
    if (TSDB_CODE_SUCCESS == code) {
      REPLACE_NODE(pNew);
    } else {
      taosMemoryFree(keyBuf);
      nodesDestroyList(groupNew);
      metaReaderClear(&mr);
      return code;
    }

    ASSERT(nodeType(pNew) == QUERY_NODE_VALUE);
    SValueNode* pValue = (SValueNode*)pNew;

    if (pValue->node.resType.type == TSDB_DATA_TYPE_NULL || pValue->isNull) {
      isNull[index++] = 1;
      continue;
    } else {
      isNull[index++] = 0;
      char* data = nodesGetValueFromNode(pValue);
      if (pValue->node.resType.type == TSDB_DATA_TYPE_JSON) {
        if (tTagIsJson(data)) {
          terrno = TSDB_CODE_QRY_JSON_IN_GROUP_ERROR;
          taosMemoryFree(keyBuf);
          nodesDestroyList(groupNew);
          metaReaderClear(&mr);
          return terrno;
        }
        int32_t len = getJsonValueLen(data);
        memcpy(pStart, data, len);
        pStart += len;
      } else if (IS_VAR_DATA_TYPE(pValue->node.resType.type)) {
        memcpy(pStart, data, varDataTLen(data));
        pStart += varDataTLen(data);
      } else {
        memcpy(pStart, data, pValue->node.resType.bytes);
        pStart += pValue->node.resType.bytes;
      }
    }
  }

  int32_t len = (int32_t)(pStart - (char*)keyBuf);
  *pGroupId = calcGroupId(keyBuf, len);

  nodesDestroyList(groupNew);
  metaReaderClear(&mr);
  return TSDB_CODE_SUCCESS;
}
 SArray* extractPartitionColInfo(SNodeList* pNodeList) {
   if (!pNodeList) {
     return NULL;
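The key buffer that getGroupIdFromTagsVal fills has a fixed layout: one int8_t null flag per grouping expression up front (accounted for by getTableTagsBufLen), followed by the packed values. A standalone sketch of that layout, with a trivial FNV-1a hash standing in for calcGroupId and fixed-width values only for brevity:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint64_t fnv1a(const char* buf, int32_t len) {
  uint64_t h = 1469598103934665603ULL;
  for (int32_t i = 0; i < len; ++i) {
    h = (h ^ (uint8_t)buf[i]) * 1099511628211ULL;
  }
  return h;
}

int main(void) {
  char    keyBuf[64];
  int32_t numOfGroupCols = 2;

  char* isNull = keyBuf;                          /* flag area: one byte per column */
  char* pStart = keyBuf + sizeof(int8_t) * numOfGroupCols;

  int32_t tagA = 42;                              /* first grouping value, non-null */
  isNull[0] = 0;
  memcpy(pStart, &tagA, sizeof(tagA));
  pStart += sizeof(tagA);

  isNull[1] = 1;                                  /* second value is NULL: flag only */

  int32_t len = (int32_t)(pStart - keyBuf);       /* flags + packed values */
  printf("groupId = %llu\n", (unsigned long long)fnv1a(keyBuf, len));
  return 0;
}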

View File

@@ -14,10 +14,21 @@
  */

 #include "executor.h"
+#include "tref.h"
 #include "executorimpl.h"
 #include "planner.h"
 #include "tdatablock.h"
 #include "vnode.h"
+#include "tudf.h"
+
+static TdThreadOnce initPoolOnce = PTHREAD_ONCE_INIT;
+int32_t             exchangeObjRefPool = -1;
+
+static void initRefPool() { exchangeObjRefPool = taosOpenRef(1024, doDestroyExchangeOperatorInfo); }
+static void cleanupRefPool() {
+  int32_t ref = atomic_val_compare_exchange_32(&exchangeObjRefPool, exchangeObjRefPool, 0);
+  taosCloseRef(ref);
+}

 static int32_t doSetStreamBlock(SOperatorInfo* pOperator, void* input, size_t numOfBlocks, int32_t type, bool assignUid,
                                 char* id) {

@@ -120,8 +131,7 @@ int32_t qSetMultiStreamInput(qTaskInfo_t tinfo, const void* pBlocks, size_t numO
   return code;
 }

-qTaskInfo_t qCreateQueueExecTaskInfo(void* msg, SReadHandle* readers, int32_t* numOfCols,
-                                     SSchemaWrapper** pSchemaWrapper) {
+qTaskInfo_t qCreateQueueExecTaskInfo(void* msg, SReadHandle* readers, int32_t* numOfCols, SSchemaWrapper** pSchema) {
   if (msg == NULL) {
     // TODO create raw scan
     return NULL;

@@ -155,7 +165,7 @@ qTaskInfo_t qCreateQueueExecTaskInfo(void* msg, SReadHandle* readers, int32_t* n
     }
   }

-  *pSchemaWrapper = tCloneSSchemaWrapper(((SExecTaskInfo*)pTaskInfo)->schemaInfo.qsw);
+  *pSchema = tCloneSSchemaWrapper(((SExecTaskInfo*)pTaskInfo)->schemaInfo.qsw);
   return pTaskInfo;
 }

@@ -185,8 +195,7 @@ qTaskInfo_t qCreateStreamExecTaskInfo(void* msg, SReadHandle* readers) {
   return pTaskInfo;
 }

-static SArray* filterQualifiedChildTables(const SStreamScanInfo* pScanInfo, const SArray* tableIdList,
-                                          const char* idstr) {
+static SArray* filterUnqualifiedTables(const SStreamScanInfo* pScanInfo, const SArray* tableIdList, const char* idstr) {
   SArray* qa = taosArrayInit(4, sizeof(tb_uid_t));

   // let's discard the tables those are not created according to the queried super table.

@@ -209,7 +218,7 @@ static SArray* filterQualifiedChildTables(const SStreamScanInfo* pScanInfo, cons
     if (pScanInfo->pTagCond != NULL) {
       bool          qualified = false;
       STableKeyInfo info = {.groupId = 0, .uid = mr.me.uid};
-      code = isTableOk(&info, pScanInfo->pTagCond, pScanInfo->readHandle.meta, &qualified);
+      code = isQualifiedTable(&info, pScanInfo->pTagCond, pScanInfo->readHandle.meta, &qualified);
       if (code != TSDB_CODE_SUCCESS) {
         qError("failed to filter new table, uid:0x%" PRIx64 ", %s", info.uid, idstr);
         continue;

@@ -240,7 +249,7 @@ int32_t qUpdateQualifiedTableId(qTaskInfo_t tinfo, const SArray* tableIdList, bo
   int32_t          code = 0;
   SStreamScanInfo* pScanInfo = pInfo->info;
   if (isAdd) {  // add new table id
-    SArray* qa = filterQualifiedChildTables(pScanInfo, tableIdList, GET_TASKID(pTaskInfo));
+    SArray* qa = filterUnqualifiedTables(pScanInfo, tableIdList, GET_TASKID(pTaskInfo));
     qDebug(" %d qualified child tables added into stream scanner", (int32_t)taosArrayGetSize(qa));

     code = tqReaderAddTbUidList(pScanInfo->tqReader, qa);

@@ -248,17 +257,35 @@ int32_t qUpdateQualifiedTableId(qTaskInfo_t tinfo, const SArray* tableIdList, bo
       return code;
     }

-    // add to qTaskInfo
     // todo refactor STableList
-    for (int32_t i = 0; i < taosArrayGetSize(qa); ++i) {
-      uint64_t* uid = taosArrayGet(qa, i);
-
-      qDebug("table %ld added to task info", *uid);
+    size_t bufLen = (pScanInfo->pGroupTags != NULL) ? getTableTagsBufLen(pScanInfo->pGroupTags) : 0;
+    char*  keyBuf = NULL;
+    if (bufLen > 0) {
+      keyBuf = taosMemoryMalloc(bufLen);
+      if (keyBuf == NULL) {
+        return TSDB_CODE_OUT_OF_MEMORY;
+      }
+    }
+
+    for (int32_t i = 0; i < taosArrayGetSize(qa); ++i) {
+      uint64_t* uid = taosArrayGet(qa, i);

       STableKeyInfo keyInfo = {.uid = *uid, .groupId = 0};
+      if (bufLen > 0) {
+        code = getGroupIdFromTagsVal(pScanInfo->readHandle.meta, keyInfo.uid, pScanInfo->pGroupTags, keyBuf,
+                                     &keyInfo.groupId);
+        if (code != TSDB_CODE_SUCCESS) {
+          return code;
+        }
+      }
+
       taosArrayPush(pTaskInfo->tableqinfoList.pTableList, &keyInfo);
     }

+    if (keyBuf != NULL) {
+      taosMemoryFree(keyBuf);
+    }
+
     taosArrayDestroy(qa);
   } else {  // remove the table id in current list
     qDebug(" %d remove child tables from the stream scanner", (int32_t)taosArrayGetSize(tableIdList));

@@ -292,3 +319,396 @@ int32_t qGetQueryTableSchemaVersion(qTaskInfo_t tinfo, char* dbName, char* table
   return 0;
 }
int32_t qCreateExecTask(SReadHandle* readHandle, int32_t vgId, uint64_t taskId, SSubplan* pSubplan,
qTaskInfo_t* pTaskInfo, DataSinkHandle* handle, const char* sql, EOPTR_EXEC_MODEL model) {
assert(pSubplan != NULL);
SExecTaskInfo** pTask = (SExecTaskInfo**)pTaskInfo;
taosThreadOnce(&initPoolOnce, initRefPool);
atexit(cleanupRefPool);
int32_t code = createExecTaskInfoImpl(pSubplan, pTask, readHandle, taskId, sql, model);
if (code != TSDB_CODE_SUCCESS) {
goto _error;
}
SDataSinkMgtCfg cfg = {.maxDataBlockNum = 1000, .maxDataBlockNumPerQuery = 100};
code = dsDataSinkMgtInit(&cfg);
if (code != TSDB_CODE_SUCCESS) {
goto _error;
}
if (handle) {
void* pSinkParam = NULL;
code = createDataSinkParam(pSubplan->pDataSink, &pSinkParam, pTaskInfo, readHandle);
if (code != TSDB_CODE_SUCCESS) {
goto _error;
}
code = dsCreateDataSinker(pSubplan->pDataSink, handle, pSinkParam);
}
_error:
// if failed to add ref for all tables in this query, abort current query
return code;
}
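qCreateExecTask guards the exchange ref pool with taosThreadOnce and registers cleanupRefPool via atexit, as shown above. A sketch of the same pattern written against plain pthreads; the resource names are illustrative:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_once_t initOnce = PTHREAD_ONCE_INIT;
static int refPool = -1;                    /* stand-in for exchangeObjRefPool */

static void cleanupRefPool(void) {
  printf("closing ref pool %d\n", refPool); /* taosCloseRef() in the real code */
  refPool = -1;
}

static void initRefPool(void) {
  refPool = 1024;                           /* taosOpenRef() in the real code */
  atexit(cleanupRefPool);                   /* registered exactly once */
}

void onEveryTaskCreate(void) {
  /* safe to call from many threads; the body runs exactly once */
  pthread_once(&initOnce, initRefPool);
}

int main(void) {
  onEveryTaskCreate();
  onEveryTaskCreate();
  return 0;                                 /* cleanup fires at process exit */
}

One design note on the sketch: registering the handler inside the once-function keeps atexit at exactly one entry no matter how many tasks are created, whereas calling atexit on every task creation would stack duplicate handlers.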
#ifdef TEST_IMPL
// wait moment
int waitMoment(SQInfo* pQInfo) {
if (pQInfo->sql) {
int ms = 0;
char* pcnt = strstr(pQInfo->sql, " count(*)");
if (pcnt) return 0;
char* pos = strstr(pQInfo->sql, " t_");
if (pos) {
pos += 3;
ms = atoi(pos);
while (*pos >= '0' && *pos <= '9') {
pos++;
}
char unit_char = *pos;
if (unit_char == 'h') {
ms *= 3600 * 1000;
} else if (unit_char == 'm') {
ms *= 60 * 1000;
} else if (unit_char == 's') {
ms *= 1000;
}
}
if (ms == 0) return 0;
printf("test wait sleep %dms. sql=%s ...\n", ms, pQInfo->sql);
if (ms < 1000) {
taosMsleep(ms);
} else {
int used_ms = 0;
while (used_ms < ms) {
taosMsleep(1000);
used_ms += 1000;
if (isTaskKilled(pQInfo)) {
printf("test check query is canceled, sleep break.%s\n", pQInfo->sql);
break;
}
}
}
}
return 1;
}
#endif
int32_t qExecTask(qTaskInfo_t tinfo, SSDataBlock** pRes, uint64_t* useconds) {
SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;
int64_t threadId = taosGetSelfPthreadId();
*pRes = NULL;
int64_t curOwner = 0;
if ((curOwner = atomic_val_compare_exchange_64(&pTaskInfo->owner, 0, threadId)) != 0) {
qError("%s-%p execTask is now executed by thread:%p", GET_TASKID(pTaskInfo), pTaskInfo, (void*)curOwner);
pTaskInfo->code = TSDB_CODE_QRY_IN_EXEC;
return pTaskInfo->code;
}
if (pTaskInfo->cost.start == 0) {
pTaskInfo->cost.start = taosGetTimestampMs();
}
if (isTaskKilled(pTaskInfo)) {
qDebug("%s already killed, abort", GET_TASKID(pTaskInfo));
return TSDB_CODE_SUCCESS;
}
// error occurs, record the error code and return to client
int32_t ret = setjmp(pTaskInfo->env);
if (ret != TSDB_CODE_SUCCESS) {
pTaskInfo->code = ret;
cleanUpUdfs();
qDebug("%s task abort due to error/cancel occurs, code:%s", GET_TASKID(pTaskInfo), tstrerror(pTaskInfo->code));
atomic_store_64(&pTaskInfo->owner, 0);
return pTaskInfo->code;
}
qDebug("%s execTask is launched", GET_TASKID(pTaskInfo));
int64_t st = taosGetTimestampUs();
*pRes = pTaskInfo->pRoot->fpSet.getNextFn(pTaskInfo->pRoot);
uint64_t el = (taosGetTimestampUs() - st);
pTaskInfo->cost.elapsedTime += el;
if (NULL == *pRes) {
*useconds = pTaskInfo->cost.elapsedTime;
}
cleanUpUdfs();
int32_t current = (*pRes != NULL) ? (*pRes)->info.rows : 0;
uint64_t total = pTaskInfo->pRoot->resultInfo.totalRows;
qDebug("%s task suspended, %d rows returned, total:%" PRId64 " rows, in sinkNode:%d, elapsed:%.2f ms",
GET_TASKID(pTaskInfo), current, total, 0, el / 1000.0);
atomic_store_64(&pTaskInfo->owner, 0);
return pTaskInfo->code;
}
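The top of qExecTask claims exclusive execution with a compare-and-swap on pTaskInfo->owner and releases it by storing 0 on every exit path, so a concurrent caller fails fast instead of running the same task twice. A sketch using C11 atomics in place of the TD atomic helpers; DEMO_QRY_IN_EXEC is a hypothetical stand-in for TSDB_CODE_QRY_IN_EXEC:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_QRY_IN_EXEC 1

static _Atomic int64_t owner = 0;

int execTask(int64_t threadId) {
  int64_t expected = 0;
  /* first thread to swap its id in wins; everyone else sees the current owner */
  if (!atomic_compare_exchange_strong(&owner, &expected, threadId)) {
    printf("task already executed by thread %lld\n", (long long)expected);
    return DEMO_QRY_IN_EXEC;
  }

  /* ... run one scheduling quantum of the task ... */

  atomic_store(&owner, 0);  /* release ownership so a killer can observe it */
  return 0;
}

int main(void) {
  printf("first call: %d\n", execTask(101));
  return 0;
}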
int32_t qKillTask(qTaskInfo_t qinfo) {
SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)qinfo;
if (pTaskInfo == NULL) {
return TSDB_CODE_QRY_INVALID_QHANDLE;
}
qAsyncKillTask(qinfo);
// Wait for the query executing thread to stop.
// Once the query is stopped, the owner of qHandle will be cleared immediately.
while (pTaskInfo->owner != 0) {
taosMsleep(100);
}
return TSDB_CODE_SUCCESS;
}
int32_t qAsyncKillTask(qTaskInfo_t qinfo) {
SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)qinfo;
if (pTaskInfo == NULL) {
return TSDB_CODE_QRY_INVALID_QHANDLE;
}
qDebug("%s execTask async killed", GET_TASKID(pTaskInfo));
setTaskKilled(pTaskInfo);
return TSDB_CODE_SUCCESS;
}
void qDestroyTask(qTaskInfo_t qTaskHandle) {
SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)qTaskHandle;
if (pTaskInfo == NULL) {
return;
}
qDebug("%s execTask completed, numOfRows:%" PRId64, GET_TASKID(pTaskInfo), pTaskInfo->pRoot->resultInfo.totalRows);
queryCostStatis(pTaskInfo); // print the query cost summary
doDestroyTask(pTaskInfo);
}
int32_t qGetExplainExecInfo(qTaskInfo_t tinfo, int32_t* resNum, SExplainExecInfo** pRes) {
SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;
int32_t capacity = 0;
return getOperatorExplainExecInfo(pTaskInfo->pRoot, pRes, &capacity, resNum);
}
int32_t qSerializeTaskStatus(qTaskInfo_t tinfo, char** pOutput, int32_t* len) {
SExecTaskInfo* pTaskInfo = (struct SExecTaskInfo*)tinfo;
if (pTaskInfo->pRoot == NULL) {
return TSDB_CODE_INVALID_PARA;
}
int32_t nOptrWithVal = 0;
int32_t code = encodeOperator(pTaskInfo->pRoot, pOutput, len, &nOptrWithVal);
if ((code == TSDB_CODE_SUCCESS) && (nOptrWithVal == 0)) {
taosMemoryFreeClear(*pOutput);
*len = 0;
}
return code;
}
int32_t qDeserializeTaskStatus(qTaskInfo_t tinfo, const char* pInput, int32_t len) {
SExecTaskInfo* pTaskInfo = (struct SExecTaskInfo*)tinfo;
if (pTaskInfo == NULL || pInput == NULL || len == 0) {
return TSDB_CODE_INVALID_PARA;
}
return decodeOperator(pTaskInfo->pRoot, pInput, len);
}
int32_t qExtractStreamScanner(qTaskInfo_t tinfo, void** scanner) {
SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;
SOperatorInfo* pOperator = pTaskInfo->pRoot;
while (1) {
uint8_t type = pOperator->operatorType;
if (type == QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) {
*scanner = pOperator->info;
return 0;
} else {
ASSERT(pOperator->numOfDownstream == 1);
pOperator = pOperator->pDownstream[0];
}
}
}
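qExtractStreamScanner relies on the plan below the root being a single-child chain, so it can walk pDownstream[0] until it reaches the stream scan node. A generic sketch of that descent; DemoOp is an illustrative type, not the real SOperatorInfo layout:

#include <stddef.h>
#include <stdint.h>

typedef struct DemoOp {
  uint8_t         type;          /* operator type tag */
  int32_t         numOfChildren;
  struct DemoOp** children;
  void*           info;          /* operator-private state */
} DemoOp;

enum { DEMO_OP_PROJECT = 1, DEMO_OP_STREAM_SCAN = 2 };

void* findStreamScan(DemoOp* op) {
  while (op != NULL) {
    if (op->type == DEMO_OP_STREAM_SCAN) {
      return op->info;             /* found the scanner's state */
    }
    if (op->numOfChildren != 1) {  /* the real code asserts this invariant */
      return NULL;
    }
    op = op->children[0];
  }
  return NULL;
}

int main(void) {
  int     scanState = 7;
  DemoOp  scan = {DEMO_OP_STREAM_SCAN, 0, NULL, &scanState};
  DemoOp* kids[] = {&scan};
  DemoOp  root = {DEMO_OP_PROJECT, 1, kids, NULL};
  return findStreamScan(&root) == &scanState ? 0 : 1;
}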
#if 0
int32_t qStreamInput(qTaskInfo_t tinfo, void* pItem) {
SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;
ASSERT(pTaskInfo->execModel == OPTR_EXEC_MODEL_STREAM);
taosWriteQitem(pTaskInfo->streamInfo.inputQueue->queue, pItem);
return 0;
}
#endif
int32_t qStreamPrepareRecover(qTaskInfo_t tinfo, int64_t startVer, int64_t endVer) {
SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;
ASSERT(pTaskInfo->execModel == OPTR_EXEC_MODEL_STREAM);
pTaskInfo->streamInfo.recoverStartVer = startVer;
pTaskInfo->streamInfo.recoverEndVer = endVer;
pTaskInfo->streamInfo.recoverStep = STREAM_RECOVER_STEP__PREPARE;
return 0;
}
void* qExtractReaderFromStreamScanner(void* scanner) {
SStreamScanInfo* pInfo = scanner;
return (void*)pInfo->tqReader;
}
const SSchemaWrapper* qExtractSchemaFromStreamScanner(void* scanner) {
SStreamScanInfo* pInfo = scanner;
return pInfo->tqReader->pSchemaWrapper;
}
const STqOffset* qExtractStatusFromStreamScanner(void* scanner) {
SStreamScanInfo* pInfo = scanner;
return &pInfo->offset;
}
void* qStreamExtractMetaMsg(qTaskInfo_t tinfo) {
SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;
ASSERT(pTaskInfo->execModel == OPTR_EXEC_MODEL_QUEUE);
return pTaskInfo->streamInfo.metaBlk;
}
int32_t qStreamExtractOffset(qTaskInfo_t tinfo, STqOffsetVal* pOffset) {
SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;
ASSERT(pTaskInfo->execModel == OPTR_EXEC_MODEL_QUEUE);
memcpy(pOffset, &pTaskInfo->streamInfo.lastStatus, sizeof(STqOffsetVal));
return 0;
}
int32_t qStreamPrepareScan(qTaskInfo_t tinfo, const STqOffsetVal* pOffset) {
SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;
SOperatorInfo* pOperator = pTaskInfo->pRoot;
ASSERT(pTaskInfo->execModel == OPTR_EXEC_MODEL_QUEUE);
pTaskInfo->streamInfo.prepareStatus = *pOffset;
if (!tOffsetEqual(pOffset, &pTaskInfo->streamInfo.lastStatus)) {
while (1) {
uint8_t type = pOperator->operatorType;
pOperator->status = OP_OPENED;
if (type == QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) {
SStreamScanInfo* pInfo = pOperator->info;
if (pOffset->type == TMQ_OFFSET__LOG) {
STableScanInfo* pTSInfo = pInfo->pTableScanOp->info;
tsdbReaderClose(pTSInfo->dataReader);
pTSInfo->dataReader = NULL;
#if 0
if (tOffsetEqual(pOffset, &pTaskInfo->streamInfo.lastStatus) &&
pInfo->tqReader->pWalReader->curVersion != pOffset->version) {
qError("prepare scan ver %ld actual ver %ld, last %ld", pOffset->version,
pInfo->tqReader->pWalReader->curVersion, pTaskInfo->streamInfo.lastStatus.version);
ASSERT(0);
}
#endif
if (tqSeekVer(pInfo->tqReader, pOffset->version + 1) < 0) {
return -1;
}
ASSERT(pInfo->tqReader->pWalReader->curVersion == pOffset->version + 1);
} else if (pOffset->type == TMQ_OFFSET__SNAPSHOT_DATA) {
/*pInfo->blockType = STREAM_INPUT__TABLE_SCAN;*/
int64_t uid = pOffset->uid;
int64_t ts = pOffset->ts;
if (uid == 0) {
if (taosArrayGetSize(pTaskInfo->tableqinfoList.pTableList) != 0) {
STableKeyInfo* pTableInfo = taosArrayGet(pTaskInfo->tableqinfoList.pTableList, 0);
uid = pTableInfo->uid;
ts = INT64_MIN;
} else {
return -1;
}
}
/*if (pTaskInfo->streamInfo.lastStatus.type != TMQ_OFFSET__SNAPSHOT_DATA ||*/
/*pTaskInfo->streamInfo.lastStatus.uid != uid || pTaskInfo->streamInfo.lastStatus.ts != ts) {*/
STableScanInfo* pTableScanInfo = pInfo->pTableScanOp->info;
int32_t tableSz = taosArrayGetSize(pTaskInfo->tableqinfoList.pTableList);
#ifndef NDEBUG
qDebug("switch to next table %ld (cursor %d), %ld rows returned", uid, pTableScanInfo->currentTable,
pInfo->pTableScanOp->resultInfo.totalRows);
pInfo->pTableScanOp->resultInfo.totalRows = 0;
#endif
bool found = false;
for (int32_t i = 0; i < tableSz; i++) {
STableKeyInfo* pTableInfo = taosArrayGet(pTaskInfo->tableqinfoList.pTableList, i);
if (pTableInfo->uid == uid) {
found = true;
pTableScanInfo->currentTable = i;
break;
}
}
// TODO after dropping table, table may be not found
ASSERT(found);
if (pTableScanInfo->dataReader == NULL) {
if (tsdbReaderOpen(pTableScanInfo->readHandle.vnode, &pTableScanInfo->cond,
pTaskInfo->tableqinfoList.pTableList, &pTableScanInfo->dataReader, NULL) < 0 ||
pTableScanInfo->dataReader == NULL) {
ASSERT(0);
}
}
tsdbSetTableId(pTableScanInfo->dataReader, uid);
int64_t oldSkey = pTableScanInfo->cond.twindows.skey;
pTableScanInfo->cond.twindows.skey = ts + 1;
tsdbReaderReset(pTableScanInfo->dataReader, &pTableScanInfo->cond);
pTableScanInfo->cond.twindows.skey = oldSkey;
pTableScanInfo->scanTimes = 0;
qDebug("tsdb reader offset seek to uid %ld ts %ld, table cur set to %d , all table num %d", uid, ts,
pTableScanInfo->currentTable, tableSz);
/*}*/
} else {
ASSERT(0);
}
return 0;
} else {
ASSERT(pOperator->numOfDownstream == 1);
pOperator = pOperator->pDownstream[0];
}
}
}
return 0;
}
#if 0
int32_t qStreamPrepareTsdbScan(qTaskInfo_t tinfo, uint64_t uid, int64_t ts) {
SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;
if (uid == 0) {
if (taosArrayGetSize(pTaskInfo->tableqinfoList.pTableList) != 0) {
STableKeyInfo* pTableInfo = taosArrayGet(pTaskInfo->tableqinfoList.pTableList, 0);
uid = pTableInfo->uid;
ts = INT64_MIN;
}
}
return doPrepareScan(pTaskInfo->pRoot, uid, ts);
}
int32_t qGetStreamScanStatus(qTaskInfo_t tinfo, uint64_t* uid, int64_t* ts) {
SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;
return doGetScanStatus(pTaskInfo->pRoot, uid, ts);
}
#endif

View File

@@ -1,424 +0,0 @@
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "dataSinkMgt.h"
#include "os.h"
#include "tmsg.h"
#include "tref.h"
#include "tudf.h"
#include "executor.h"
#include "executorimpl.h"
#include "query.h"
static TdThreadOnce initPoolOnce = PTHREAD_ONCE_INIT;
int32_t exchangeObjRefPool = -1;
static void initRefPool() { exchangeObjRefPool = taosOpenRef(1024, doDestroyExchangeOperatorInfo); }
static void cleanupRefPool() {
int32_t ref = atomic_val_compare_exchange_32(&exchangeObjRefPool, exchangeObjRefPool, 0);
taosCloseRef(ref);
}
int32_t qCreateExecTask(SReadHandle* readHandle, int32_t vgId, uint64_t taskId, SSubplan* pSubplan,
qTaskInfo_t* pTaskInfo, DataSinkHandle* handle, const char* sql, EOPTR_EXEC_MODEL model) {
assert(pSubplan != NULL);
SExecTaskInfo** pTask = (SExecTaskInfo**)pTaskInfo;
taosThreadOnce(&initPoolOnce, initRefPool);
atexit(cleanupRefPool);
int32_t code = createExecTaskInfoImpl(pSubplan, pTask, readHandle, taskId, sql, model);
if (code != TSDB_CODE_SUCCESS) {
goto _error;
}
SDataSinkMgtCfg cfg = {.maxDataBlockNum = 1000, .maxDataBlockNumPerQuery = 100};
code = dsDataSinkMgtInit(&cfg);
if (code != TSDB_CODE_SUCCESS) {
goto _error;
}
if (handle) {
void* pSinkParam = NULL;
code = createDataSinkParam(pSubplan->pDataSink, &pSinkParam, pTaskInfo, readHandle);
if (code != TSDB_CODE_SUCCESS) {
goto _error;
}
code = dsCreateDataSinker(pSubplan->pDataSink, handle, pSinkParam);
}
_error:
// if failed to add ref for all tables in this query, abort current query
return code;
}
#ifdef TEST_IMPL
// wait moment
int waitMoment(SQInfo* pQInfo) {
if (pQInfo->sql) {
int ms = 0;
char* pcnt = strstr(pQInfo->sql, " count(*)");
if (pcnt) return 0;
char* pos = strstr(pQInfo->sql, " t_");
if (pos) {
pos += 3;
ms = atoi(pos);
while (*pos >= '0' && *pos <= '9') {
pos++;
}
char unit_char = *pos;
if (unit_char == 'h') {
ms *= 3600 * 1000;
} else if (unit_char == 'm') {
ms *= 60 * 1000;
} else if (unit_char == 's') {
ms *= 1000;
}
}
if (ms == 0) return 0;
printf("test wait sleep %dms. sql=%s ...\n", ms, pQInfo->sql);
if (ms < 1000) {
taosMsleep(ms);
} else {
int used_ms = 0;
while (used_ms < ms) {
taosMsleep(1000);
used_ms += 1000;
if (isTaskKilled(pQInfo)) {
printf("test check query is canceled, sleep break.%s\n", pQInfo->sql);
break;
}
}
}
}
return 1;
}
#endif
int32_t qExecTask(qTaskInfo_t tinfo, SSDataBlock** pRes, uint64_t* useconds) {
SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;
int64_t threadId = taosGetSelfPthreadId();
*pRes = NULL;
int64_t curOwner = 0;
if ((curOwner = atomic_val_compare_exchange_64(&pTaskInfo->owner, 0, threadId)) != 0) {
qError("%s-%p execTask is now executed by thread:%p", GET_TASKID(pTaskInfo), pTaskInfo, (void*)curOwner);
pTaskInfo->code = TSDB_CODE_QRY_IN_EXEC;
return pTaskInfo->code;
}
if (pTaskInfo->cost.start == 0) {
pTaskInfo->cost.start = taosGetTimestampMs();
}
if (isTaskKilled(pTaskInfo)) {
qDebug("%s already killed, abort", GET_TASKID(pTaskInfo));
return TSDB_CODE_SUCCESS;
}
// error occurs, record the error code and return to client
int32_t ret = setjmp(pTaskInfo->env);
if (ret != TSDB_CODE_SUCCESS) {
pTaskInfo->code = ret;
cleanUpUdfs();
qDebug("%s task abort due to error/cancel occurs, code:%s", GET_TASKID(pTaskInfo), tstrerror(pTaskInfo->code));
return pTaskInfo->code;
}
qDebug("%s execTask is launched", GET_TASKID(pTaskInfo));
int64_t st = taosGetTimestampUs();
*pRes = pTaskInfo->pRoot->fpSet.getNextFn(pTaskInfo->pRoot);
uint64_t el = (taosGetTimestampUs() - st);
pTaskInfo->cost.elapsedTime += el;
if (NULL == *pRes) {
*useconds = pTaskInfo->cost.elapsedTime;
}
cleanUpUdfs();
int32_t current = (*pRes != NULL) ? (*pRes)->info.rows : 0;
uint64_t total = pTaskInfo->pRoot->resultInfo.totalRows;
qDebug("%s task suspended, %d rows returned, total:%" PRId64 " rows, in sinkNode:%d, elapsed:%.2f ms",
GET_TASKID(pTaskInfo), current, total, 0, el / 1000.0);
atomic_store_64(&pTaskInfo->owner, 0);
return pTaskInfo->code;
}
int32_t qKillTask(qTaskInfo_t qinfo) {
SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)qinfo;
if (pTaskInfo == NULL) {
return TSDB_CODE_QRY_INVALID_QHANDLE;
}
qDebug("%s execTask killed", GET_TASKID(pTaskInfo));
setTaskKilled(pTaskInfo);
// Wait for the query executing thread being stopped/
// Once the query is stopped, the owner of qHandle will be cleared immediately.
while (pTaskInfo->owner != 0) {
taosMsleep(100);
}
return TSDB_CODE_SUCCESS;
}
int32_t qAsyncKillTask(qTaskInfo_t qinfo) {
SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)qinfo;
if (pTaskInfo == NULL) {
return TSDB_CODE_QRY_INVALID_QHANDLE;
}
qDebug("%s execTask async killed", GET_TASKID(pTaskInfo));
setTaskKilled(pTaskInfo);
return TSDB_CODE_SUCCESS;
}
void qDestroyTask(qTaskInfo_t qTaskHandle) {
SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)qTaskHandle;
if (pTaskInfo == NULL) {
return;
}
qDebug("%s execTask completed, numOfRows:%" PRId64, GET_TASKID(pTaskInfo), pTaskInfo->pRoot->resultInfo.totalRows);
queryCostStatis(pTaskInfo); // print the query cost summary
doDestroyTask(pTaskInfo);
}
int32_t qGetExplainExecInfo(qTaskInfo_t tinfo, int32_t* resNum, SExplainExecInfo** pRes) {
SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;
int32_t capacity = 0;
return getOperatorExplainExecInfo(pTaskInfo->pRoot, pRes, &capacity, resNum);
}
int32_t qSerializeTaskStatus(qTaskInfo_t tinfo, char** pOutput, int32_t* len) {
SExecTaskInfo* pTaskInfo = (struct SExecTaskInfo*)tinfo;
if (pTaskInfo->pRoot == NULL) {
return TSDB_CODE_INVALID_PARA;
}
int32_t nOptrWithVal = 0;
int32_t code = encodeOperator(pTaskInfo->pRoot, pOutput, len, &nOptrWithVal);
if ((code == TSDB_CODE_SUCCESS) && (nOptrWithVal = 0)) {
taosMemoryFreeClear(*pOutput);
*len = 0;
}
return code;
}
int32_t qDeserializeTaskStatus(qTaskInfo_t tinfo, const char* pInput, int32_t len) {
SExecTaskInfo* pTaskInfo = (struct SExecTaskInfo*)tinfo;
if (pTaskInfo == NULL || pInput == NULL || len == 0) {
return TSDB_CODE_INVALID_PARA;
}
return decodeOperator(pTaskInfo->pRoot, pInput, len);
}
int32_t qExtractStreamScanner(qTaskInfo_t tinfo, void** scanner) {
SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;
SOperatorInfo* pOperator = pTaskInfo->pRoot;
while (1) {
uint8_t type = pOperator->operatorType;
if (type == QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) {
*scanner = pOperator->info;
return 0;
} else {
ASSERT(pOperator->numOfDownstream == 1);
pOperator = pOperator->pDownstream[0];
}
}
}
#if 0
int32_t qStreamInput(qTaskInfo_t tinfo, void* pItem) {
SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;
ASSERT(pTaskInfo->execModel == OPTR_EXEC_MODEL_STREAM);
taosWriteQitem(pTaskInfo->streamInfo.inputQueue->queue, pItem);
return 0;
}
#endif
int32_t qStreamPrepareRecover(qTaskInfo_t tinfo, int64_t startVer, int64_t endVer) {
SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;
ASSERT(pTaskInfo->execModel == OPTR_EXEC_MODEL_STREAM);
pTaskInfo->streamInfo.recoverStartVer = startVer;
pTaskInfo->streamInfo.recoverEndVer = endVer;
pTaskInfo->streamInfo.recoverStep = STREAM_RECOVER_STEP__PREPARE;
return 0;
}
void* qExtractReaderFromStreamScanner(void* scanner) {
SStreamScanInfo* pInfo = scanner;
return (void*)pInfo->tqReader;
}
const SSchemaWrapper* qExtractSchemaFromStreamScanner(void* scanner) {
SStreamScanInfo* pInfo = scanner;
return pInfo->tqReader->pSchemaWrapper;
}
const STqOffset* qExtractStatusFromStreamScanner(void* scanner) {
SStreamScanInfo* pInfo = scanner;
return &pInfo->offset;
}
void* qStreamExtractMetaMsg(qTaskInfo_t tinfo) {
SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;
ASSERT(pTaskInfo->execModel == OPTR_EXEC_MODEL_QUEUE);
return pTaskInfo->streamInfo.metaBlk;
}
int32_t qStreamExtractOffset(qTaskInfo_t tinfo, STqOffsetVal* pOffset) {
SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;
ASSERT(pTaskInfo->execModel == OPTR_EXEC_MODEL_QUEUE);
memcpy(pOffset, &pTaskInfo->streamInfo.lastStatus, sizeof(STqOffsetVal));
return 0;
}
int32_t qStreamPrepareScan(qTaskInfo_t tinfo, const STqOffsetVal* pOffset) {
SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;
SOperatorInfo* pOperator = pTaskInfo->pRoot;
ASSERT(pTaskInfo->execModel == OPTR_EXEC_MODEL_QUEUE);
pTaskInfo->streamInfo.prepareStatus = *pOffset;
if (!tOffsetEqual(pOffset, &pTaskInfo->streamInfo.lastStatus)) {
while (1) {
uint8_t type = pOperator->operatorType;
pOperator->status = OP_OPENED;
if (type == QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) {
SStreamScanInfo* pInfo = pOperator->info;
if (pOffset->type == TMQ_OFFSET__LOG) {
STableScanInfo* pTSInfo = pInfo->pTableScanOp->info;
tsdbReaderClose(pTSInfo->dataReader);
pTSInfo->dataReader = NULL;
#if 0
if (tOffsetEqual(pOffset, &pTaskInfo->streamInfo.lastStatus) &&
pInfo->tqReader->pWalReader->curVersion != pOffset->version) {
qError("prepare scan ver %ld actual ver %ld, last %ld", pOffset->version,
pInfo->tqReader->pWalReader->curVersion, pTaskInfo->streamInfo.lastStatus.version);
ASSERT(0);
}
#endif
if (tqSeekVer(pInfo->tqReader, pOffset->version + 1) < 0) {
return -1;
}
ASSERT(pInfo->tqReader->pWalReader->curVersion == pOffset->version + 1);
} else if (pOffset->type == TMQ_OFFSET__SNAPSHOT_DATA) {
/*pInfo->blockType = STREAM_INPUT__TABLE_SCAN;*/
int64_t uid = pOffset->uid;
int64_t ts = pOffset->ts;
if (uid == 0) {
if (taosArrayGetSize(pTaskInfo->tableqinfoList.pTableList) != 0) {
STableKeyInfo* pTableInfo = taosArrayGet(pTaskInfo->tableqinfoList.pTableList, 0);
uid = pTableInfo->uid;
ts = INT64_MIN;
} else {
return -1;
}
}
/*if (pTaskInfo->streamInfo.lastStatus.type != TMQ_OFFSET__SNAPSHOT_DATA ||*/
/*pTaskInfo->streamInfo.lastStatus.uid != uid || pTaskInfo->streamInfo.lastStatus.ts != ts) {*/
STableScanInfo* pTableScanInfo = pInfo->pTableScanOp->info;
int32_t tableSz = taosArrayGetSize(pTaskInfo->tableqinfoList.pTableList);
#ifndef NDEBUG
qDebug("switch to next table %ld (cursor %d), %ld rows returned", uid, pTableScanInfo->currentTable,
pInfo->pTableScanOp->resultInfo.totalRows);
pInfo->pTableScanOp->resultInfo.totalRows = 0;
#endif
bool found = false;
for (int32_t i = 0; i < tableSz; i++) {
STableKeyInfo* pTableInfo = taosArrayGet(pTaskInfo->tableqinfoList.pTableList, i);
if (pTableInfo->uid == uid) {
found = true;
pTableScanInfo->currentTable = i;
break;
}
}
// TODO after dropping table, table may be not found
ASSERT(found);
if (pTableScanInfo->dataReader == NULL) {
if (tsdbReaderOpen(pTableScanInfo->readHandle.vnode, &pTableScanInfo->cond,
pTaskInfo->tableqinfoList.pTableList, &pTableScanInfo->dataReader, NULL) < 0 ||
pTableScanInfo->dataReader == NULL) {
ASSERT(0);
}
}
tsdbSetTableId(pTableScanInfo->dataReader, uid);
int64_t oldSkey = pTableScanInfo->cond.twindows.skey;
pTableScanInfo->cond.twindows.skey = ts + 1;
tsdbReaderReset(pTableScanInfo->dataReader, &pTableScanInfo->cond);
pTableScanInfo->cond.twindows.skey = oldSkey;
pTableScanInfo->scanTimes = 0;
qDebug("tsdb reader offset seek to uid %ld ts %ld, table cur set to %d , all table num %d", uid, ts,
pTableScanInfo->currentTable, tableSz);
/*}*/
} else {
ASSERT(0);
}
return 0;
} else {
ASSERT(pOperator->numOfDownstream == 1);
pOperator = pOperator->pDownstream[0];
}
}
}
return 0;
}
#if 0
int32_t qStreamPrepareTsdbScan(qTaskInfo_t tinfo, uint64_t uid, int64_t ts) {
SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;
if (uid == 0) {
if (taosArrayGetSize(pTaskInfo->tableqinfoList.pTableList) != 0) {
STableKeyInfo* pTableInfo = taosArrayGet(pTaskInfo->tableqinfoList.pTableList, 0);
uid = pTableInfo->uid;
ts = INT64_MIN;
}
}
return doPrepareScan(pTaskInfo->pRoot, uid, ts);
}
int32_t qGetStreamScanStatus(qTaskInfo_t tinfo, uint64_t* uid, int64_t* ts) {
SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;
return doGetScanStatus(pTaskInfo->pRoot, uid, ts);
}
#endif

View File

@@ -1333,7 +1333,7 @@ void setResultRowInitCtx(SResultRow* pResult, SqlFunctionCtx* pCtx, int32_t numO
 static void extractQualifiedTupleByFilterResult(SSDataBlock* pBlock, const int8_t* rowRes, bool keep);

-void doFilter(const SNode* pFilterNode, SSDataBlock* pBlock) {
+void doFilter(const SNode* pFilterNode, SSDataBlock* pBlock, const SArray* pColMatchInfo) {
   if (pFilterNode == NULL || pBlock->info.rows == 0) {
     return;
   }
@@ -1354,6 +1354,20 @@ void doFilter(const SNode* pFilterNode, SSDataBlock* pBlock) {
   filterFreeInfo(filter);

   extractQualifiedTupleByFilterResult(pBlock, rowRes, keep);
+
+  if (pColMatchInfo != NULL) {
+    for (int32_t i = 0; i < taosArrayGetSize(pColMatchInfo); ++i) {
+      SColMatchInfo* pInfo = taosArrayGet(pColMatchInfo, i);
+      if (pInfo->colId == PRIMARYKEY_TIMESTAMP_COL_ID) {
+        SColumnInfoData* pColData = taosArrayGet(pBlock->pDataBlock, pInfo->targetSlotId);
+        if (pColData->info.type == TSDB_DATA_TYPE_TIMESTAMP) {
+          blockDataUpdateTsWindow(pBlock, pInfo->targetSlotId);
+          break;
+        }
+      }
+    }
+  }
+
   taosMemoryFree(rowRes);
 }
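The added block re-derives the data block's cached time window after the filter removes rows, since the previously recorded [skey, ekey] range may no longer be tight. A simplified rescan over a raw timestamp column, standing in for what blockDataUpdateTsWindow does on the primary-key slot:

#include <stdint.h>
#include <stdio.h>

typedef struct { int64_t skey, ekey; } DemoTimeWindow;

DemoTimeWindow recomputeTsWindow(const int64_t* ts, int32_t numOfRows) {
  DemoTimeWindow w = {INT64_MAX, INT64_MIN};
  for (int32_t i = 0; i < numOfRows; ++i) {  /* rows are not necessarily sorted */
    if (ts[i] < w.skey) w.skey = ts[i];
    if (ts[i] > w.ekey) w.ekey = ts[i];
  }
  return w;
}

int main(void) {
  int64_t ts[] = {1000, 2000, 9000};         /* rows that survived the filter */
  DemoTimeWindow w = recomputeTsWindow(ts, 3);
  printf("window: [%lld, %lld]\n", (long long)w.skey, (long long)w.ekey);
  return 0;
}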
@@ -3043,7 +3057,7 @@ static SSDataBlock* getAggregateResult(SOperatorInfo* pOperator) {
   blockDataEnsureCapacity(pInfo->pRes, pOperator->resultInfo.capacity);
   while (1) {
     doBuildResultDatablock(pOperator, pInfo, &pAggInfo->groupResInfo, pAggInfo->aggSup.pResultBuf);
-    doFilter(pAggInfo->pCondition, pInfo->pRes);
+    doFilter(pAggInfo->pCondition, pInfo->pRes, NULL);

     if (!hasDataInGroupInfo(&pAggInfo->groupResInfo)) {
       doSetOperatorCompleted(pOperator);
@@ -3209,6 +3223,7 @@ int32_t handleLimitOffset(SOperatorInfo* pOperator, SLimitInfo* pLimitInfo, SSDa
     pLimitInfo->currentGroupId = pBlock->info.groupId;
   }

+  // check for new group data here; the data of the previous group needs to be handled.
   if (pLimitInfo->currentGroupId != 0 && pLimitInfo->currentGroupId != pBlock->info.groupId) {
     pLimitInfo->numOfOutputGroups += 1;
     if ((pLimitInfo->slimit.limit > 0) && (pLimitInfo->slimit.limit <= pLimitInfo->numOfOutputGroups)) {

@@ -3221,6 +3236,11 @@ int32_t handleLimitOffset(SOperatorInfo* pOperator, SLimitInfo* pLimitInfo, SSDa
     // reset the value for a new group data
     pLimitInfo->numOfOutputRows = 0;
     pLimitInfo->remainOffset = pLimitInfo->limit.offset;
+
+    // there are existing rows that belong to the previous group.
+    if (pBlock->info.rows > 0) {
+      return PROJECT_RETRIEVE_DONE;
+    }
   }

   // here we reach the start position, according to the limit/offset requirements.
@@ -3265,7 +3285,9 @@ static SSDataBlock* doProjectOperation(SOperatorInfo* pOperator) {
   SExprSupp*   pSup = &pOperator->exprSupp;
   SSDataBlock* pRes = pInfo->pRes;
-  blockDataCleanup(pRes);
+  SSDataBlock* pFinalRes = pProjectInfo->pFinalRes;
+  blockDataCleanup(pFinalRes);

   SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
   if (pOperator->status == OP_EXEC_DONE) {

@@ -3276,24 +3298,6 @@ static SSDataBlock* doProjectOperation(SOperatorInfo* pOperator) {
     return NULL;
   }

-#if 0
-  if (pProjectInfo->existDataBlock) {  // TODO refactor
-    SSDataBlock* pBlock = pProjectInfo->existDataBlock;
-    pProjectInfo->existDataBlock = NULL;
-
-    // the pDataBlock are always the same one, no need to call this again
-    setInputDataBlock(pOperator, pInfo->pCtx, pBlock, TSDB_ORDER_ASC);
-
-    blockDataEnsureCapacity(pInfo->pRes, pBlock->info.rows);
-    projectApplyFunctions(pOperator->exprSupp.pExprInfo, pInfo->pRes, pBlock, pInfo->pCtx, pOperator->exprSupp.numOfExprs);
-
-    if (pRes->info.rows >= pProjectInfo->binfo.capacity * 0.8) {
-      copyTsColoum(pRes, pInfo->pCtx, pOperator->exprSupp.numOfExprs);
-      resetResultRowEntryResult(pInfo->pCtx, pOperator->exprSupp.numOfExprs);
-      return pRes;
-    }
-  }
-#endif
-
   int64_t st = 0;
   int32_t order = 0;
   int32_t scanFlag = 0;
@@ -3303,67 +3307,132 @@ static SSDataBlock* doProjectOperation(SOperatorInfo* pOperator) {
   }

   SOperatorInfo* downstream = pOperator->pDownstream[0];
+  SLimitInfo*    pLimitInfo = &pProjectInfo->limitInfo;

-  while (1) {
-    // The downstream exec may change the value of the newgroup, so use a local variable instead.
-    qDebug("projection call next");
-    SSDataBlock* pBlock = downstream->fpSet.getNextFn(downstream);
-    if (pBlock == NULL) {
-      qDebug("projection get null");
-
-      /*if (pTaskInfo->execModel == OPTR_EXEC_MODEL_BATCH) {*/
-      doSetOperatorCompleted(pOperator);
-      /*} else if (pTaskInfo->execModel == OPTR_EXEC_MODEL_QUEUE) {*/
-      /*pOperator->status = OP_RES_TO_RETURN;*/
-      /*}*/
-      break;
-    }
-
-    if (pBlock->info.type == STREAM_RETRIEVE) {
-      // for stream interval
-      return pBlock;
-    }
-
-    // the pDataBlock are always the same one, no need to call this again
-    int32_t code = getTableScanInfo(pOperator->pDownstream[0], &order, &scanFlag);
-    if (code != TSDB_CODE_SUCCESS) {
-      longjmp(pTaskInfo->env, code);
-    }
-
-    setInputDataBlock(pOperator, pSup->pCtx, pBlock, order, scanFlag, false);
-    blockDataEnsureCapacity(pInfo->pRes, pInfo->pRes->info.rows + pBlock->info.rows);
-
-    code = projectApplyFunctions(pSup->pExprInfo, pInfo->pRes, pBlock, pSup->pCtx, pSup->numOfExprs,
-                                 pProjectInfo->pPseudoColInfo);
-    if (code != TSDB_CODE_SUCCESS) {
-      longjmp(pTaskInfo->env, code);
-    }
-
-    int32_t status = handleLimitOffset(pOperator, &pProjectInfo->limitInfo, pInfo->pRes, true);
-
-    // filter shall be applied after apply functions and limit/offset on the result
-    doFilter(pProjectInfo->pFilterNode, pInfo->pRes);
-
-    if (pTaskInfo->execModel == OPTR_EXEC_MODEL_STREAM) {
-      break;
-    }
-
-    if (status == PROJECT_RETRIEVE_CONTINUE || pInfo->pRes->info.rows == 0) {
-      continue;
-    } else if (status == PROJECT_RETRIEVE_DONE) {
-      break;
-    }
-  }
-
-  size_t rows = pInfo->pRes->info.rows;
-  pProjectInfo->limitInfo.numOfOutputRows += rows;
-  pOperator->resultInfo.totalRows += rows;
-
-  if (pOperator->cost.openCost == 0) {
-    pOperator->cost.openCost = (taosGetTimestampUs() - st) / 1000.0;
-  }
-
-  return (rows > 0) ? pInfo->pRes : NULL;
+  while (1) {
+    while (1) {
+      blockDataCleanup(pRes);
+
+      // The downstream exec may change the value of the newgroup, so use a local variable instead.
+      SSDataBlock* pBlock = downstream->fpSet.getNextFn(downstream);
+      if (pBlock == NULL) {
+        doSetOperatorCompleted(pOperator);
+        break;
+      }
+
+      if (pBlock->info.type == STREAM_RETRIEVE) {
+        // for stream interval
+        return pBlock;
+      }
+
+      if (pLimitInfo->remainGroupOffset > 0) {
+        if (pLimitInfo->currentGroupId == 0 || pLimitInfo->currentGroupId == pBlock->info.groupId) {  // it is the first group
+          pLimitInfo->currentGroupId = pBlock->info.groupId;
+          continue;
+        } else if (pLimitInfo->currentGroupId != pBlock->info.groupId) {
+          // now it is the data from a new group
+          pLimitInfo->remainGroupOffset -= 1;
+          pLimitInfo->currentGroupId = pBlock->info.groupId;
+
+          // ignore data block in current group
+          if (pLimitInfo->remainGroupOffset > 0) {
+            continue;
+          }
+        }
+
+        // set current group id of the project operator
+        pLimitInfo->currentGroupId = pBlock->info.groupId;
+      }
+
+      // remainGroupOffset == 0
+      // check for new group data here; the data of the previous group needs to be handled.
+      if (pLimitInfo->currentGroupId != 0 && pLimitInfo->currentGroupId != pBlock->info.groupId) {
+        pLimitInfo->numOfOutputGroups += 1;
+        if ((pLimitInfo->slimit.limit > 0) && (pLimitInfo->slimit.limit <= pLimitInfo->numOfOutputGroups)) {
+          doSetOperatorCompleted(pOperator);
+          break;
+        }
+
+        // reset the value for a new group data
+        // there are existing rows that belong to the previous group.
+        pLimitInfo->numOfOutputRows = 0;
+        pLimitInfo->remainOffset = pLimitInfo->limit.offset;
+      }
+
+      // the pDataBlock are always the same one, no need to call this again
+      int32_t code = getTableScanInfo(pOperator->pDownstream[0], &order, &scanFlag);
+      if (code != TSDB_CODE_SUCCESS) {
+        longjmp(pTaskInfo->env, code);
+      }
+
+      setInputDataBlock(pOperator, pSup->pCtx, pBlock, order, scanFlag, false);
+      blockDataEnsureCapacity(pInfo->pRes, pInfo->pRes->info.rows + pBlock->info.rows);
+
+      code = projectApplyFunctions(pSup->pExprInfo, pInfo->pRes, pBlock, pSup->pCtx, pSup->numOfExprs,
+                                   pProjectInfo->pPseudoColInfo);
+      if (code != TSDB_CODE_SUCCESS) {
+        longjmp(pTaskInfo->env, code);
+      }
+
+      // set current group id
+      pLimitInfo->currentGroupId = pBlock->info.groupId;
+
+      if (pLimitInfo->remainOffset >= pInfo->pRes->info.rows) {
+        pLimitInfo->remainOffset -= pInfo->pRes->info.rows;
+        blockDataCleanup(pInfo->pRes);
+        continue;
+      } else if (pLimitInfo->remainOffset < pInfo->pRes->info.rows && pLimitInfo->remainOffset > 0) {
+        blockDataTrimFirstNRows(pInfo->pRes, pLimitInfo->remainOffset);
+        pLimitInfo->remainOffset = 0;
+      }
+
+      // check for the limitation in each group
+      if (pLimitInfo->limit.limit >= 0 &&
+          pLimitInfo->numOfOutputRows + pInfo->pRes->info.rows >= pLimitInfo->limit.limit) {
+        int32_t keepRows = (int32_t)(pLimitInfo->limit.limit - pLimitInfo->numOfOutputRows);
+        blockDataKeepFirstNRows(pInfo->pRes, keepRows);
+        if (pLimitInfo->slimit.limit > 0 && pLimitInfo->slimit.limit <= pLimitInfo->numOfOutputGroups) {
+          pOperator->status = OP_EXEC_DONE;
+        }
+      }
+
+      pLimitInfo->numOfOutputRows += pInfo->pRes->info.rows;
+      break;
+    }
+
+    // no results generated
+    if (pInfo->pRes->info.rows == 0 || (!pProjectInfo->mergeDataBlocks)) {
+      break;
+    }
+
+    if (pProjectInfo->mergeDataBlocks) {
+      pFinalRes->info.groupId = pInfo->pRes->info.groupId;
+      pFinalRes->info.version = pInfo->pRes->info.version;
+
+      // continue merge data, ignore the group id
+      blockDataMerge(pFinalRes, pInfo->pRes);
+      if (pFinalRes->info.rows + pInfo->pRes->info.rows <= pOperator->resultInfo.threshold) {
+        continue;
+      }
+    }
+
+    // do apply filter
+    SSDataBlock* p = pProjectInfo->mergeDataBlocks ? pFinalRes : pRes;
+    doFilter(pProjectInfo->pFilterNode, p, NULL);
+    if (p->info.rows > 0) {
+      break;
+    }
+  }
+
+  SSDataBlock* p = pProjectInfo->mergeDataBlocks ? pFinalRes : pRes;
+  pOperator->resultInfo.totalRows += p->info.rows;
+
+  if (pOperator->cost.openCost == 0) {
+    pOperator->cost.openCost = (taosGetTimestampUs() - st) / 1000.0;
+  }
+
+  return (p->info.rows > 0) ? p : NULL;
 }
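The rewritten loop implements an accumulate-then-emit policy when mergeDataBlocks is set: partial result blocks are appended into pFinalRes and only returned once resultInfo.threshold rows have been collected or the input is exhausted. A standalone sketch of that policy; THRESHOLD, DemoBlock, and demoMerge are illustrative stand-ins:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define THRESHOLD 8
#define CAP       64

typedef struct {
  int64_t rows[CAP];
  int32_t numOfRows;
} DemoBlock;

/* append src into dst, like blockDataMerge for a one-column block */
static void demoMerge(DemoBlock* dst, const DemoBlock* src) {
  memcpy(dst->rows + dst->numOfRows, src->rows, sizeof(int64_t) * src->numOfRows);
  dst->numOfRows += src->numOfRows;
}

int main(void) {
  DemoBlock final = {{0}, 0};
  DemoBlock chunk = {{1, 2, 3}, 3};       /* pretend each downstream call yields 3 rows */

  for (int32_t i = 0; i < 4; ++i) {
    demoMerge(&final, &chunk);
    if (final.numOfRows > THRESHOLD) {    /* threshold crossed: emit the merged block */
      printf("emit %d rows\n", final.numOfRows);
      final.numOfRows = 0;
    }
  }
  if (final.numOfRows > 0) {
    printf("emit tail of %d rows\n", final.numOfRows);
  }
  return 0;
}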
static void doHandleRemainBlockForNewGroupImpl(SFillOperatorInfo* pInfo, SResultInfo* pResultInfo, static void doHandleRemainBlockForNewGroupImpl(SFillOperatorInfo* pInfo, SResultInfo* pResultInfo,
@@ -3492,7 +3561,7 @@ static SSDataBlock* doFill(SOperatorInfo* pOperator) {
       break;
     }

-    doFilter(pInfo->pCondition, fillResult);
+    doFilter(pInfo->pCondition, fillResult, pInfo->pColMatchColInfo);
     if (fillResult->info.rows > 0) {
       break;
     }
@@ -3755,6 +3824,7 @@ static void destroyProjectOperatorInfo(void* param, int32_t numOfOutput) {
   cleanupAggSup(&pInfo->aggSup);
   taosArrayDestroy(pInfo->pPseudoColInfo);
+  blockDataDestroy(pInfo->pFinalRes);
   taosMemoryFreeClear(param);
 }

@@ -3814,7 +3884,10 @@ SOperatorInfo* createProjectOperatorInfo(SOperatorInfo* downstream, SProjectPhys
   initLimitInfo(pProjPhyNode->node.pLimit, pProjPhyNode->node.pSlimit, &pInfo->limitInfo);

   pInfo->binfo.pRes = pResBlock;
+  pInfo->pFinalRes = createOneDataBlock(pResBlock, false);
   pInfo->pFilterNode = pProjPhyNode->node.pConditions;
+  pInfo->mergeDataBlocks = pProjPhyNode->mergeDataBlock;

   int32_t numOfRows = 4096;
   size_t  keyBufSize = sizeof(int64_t) + sizeof(int64_t) + POINTER_BYTES;
@ -3950,7 +4023,7 @@ static SSDataBlock* doApplyIndefinitFunction(SOperatorInfo* pOperator) {
} }
} }
doFilter(pIndefInfo->pCondition, pInfo->pRes); doFilter(pIndefInfo->pCondition, pInfo->pRes, NULL);
size_t rows = pInfo->pRes->info.rows; size_t rows = pInfo->pRes->info.rows;
if (rows > 0 || pOperator->status == OP_EXEC_DONE) { if (rows > 0 || pOperator->status == OP_EXEC_DONE) {
break; break;
@ -4134,9 +4207,6 @@ static SExecTaskInfo* createExecTaskInfo(uint64_t queryId, uint64_t taskId, EOPT
return pTaskInfo; return pTaskInfo;
} }
static STsdbReader* doCreateDataReader(STableScanPhysiNode* pTableScanNode, SReadHandle* pHandle,
STableListInfo* pTableListInfo, const char* idstr);
static SArray* extractColumnInfo(SNodeList* pNodeList); static SArray* extractColumnInfo(SNodeList* pNodeList);
SSchemaWrapper* extractQueriedColumnSchema(SScanPhysiNode* pScanNode); SSchemaWrapper* extractQueriedColumnSchema(SScanPhysiNode* pScanNode);
@ -4177,9 +4247,11 @@ int32_t extractTableSchemaInfo(SReadHandle* pHandle, SScanPhysiNode* pScanNode,
} }
SSchemaWrapper* extractQueriedColumnSchema(SScanPhysiNode* pScanNode) { SSchemaWrapper* extractQueriedColumnSchema(SScanPhysiNode* pScanNode) {
int32_t numOfCols = LIST_LENGTH(pScanNode->pScanCols); int32_t numOfCols = LIST_LENGTH(pScanNode->pScanCols);
int32_t numOfTags = LIST_LENGTH(pScanNode->pScanPseudoCols);
SSchemaWrapper* pqSw = taosMemoryCalloc(1, sizeof(SSchemaWrapper)); SSchemaWrapper* pqSw = taosMemoryCalloc(1, sizeof(SSchemaWrapper));
pqSw->pSchema = taosMemoryCalloc(numOfCols, sizeof(SSchema)); pqSw->pSchema = taosMemoryCalloc(numOfCols + numOfTags, sizeof(SSchema));
for (int32_t i = 0; i < numOfCols; ++i) { for (int32_t i = 0; i < numOfCols; ++i) {
STargetNode* pNode = (STargetNode*)nodesListGetNode(pScanNode->pScanCols, i); STargetNode* pNode = (STargetNode*)nodesListGetNode(pScanNode->pScanCols, i);
@ -4192,6 +4264,22 @@ SSchemaWrapper* extractQueriedColumnSchema(SScanPhysiNode* pScanNode) {
strncpy(pSchema->name, pColNode->colName, tListLen(pSchema->name)); strncpy(pSchema->name, pColNode->colName, tListLen(pSchema->name));
} }
// these are the tag and pseudo-function columns; only the tag columns are kept
for(int32_t i = 0; i < numOfTags; ++i) {
STargetNode* pNode = (STargetNode*)nodesListGetNode(pScanNode->pScanPseudoCols, i);
int32_t type = nodeType(pNode->pExpr);
if (type == QUERY_NODE_COLUMN) {
SColumnNode* pColNode = (SColumnNode*)pNode->pExpr;
SSchema* pSchema = &pqSw->pSchema[pqSw->nCols++];
pSchema->colId = pColNode->colId;
pSchema->type = pColNode->node.resType.type;
pSchema->bytes = pColNode->node.resType.bytes;
strncpy(pSchema->name, pColNode->colName, tListLen(pSchema->name));
}
}
return pqSw; return pqSw;
} }
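extractQueriedColumnSchema now sizes the schema for data columns plus tag columns and copies only plain tag columns out of the pseudo-column list; other pseudo expressions (for example tbname) are skipped. A hedged Python sketch of that selection, with made-up dict-shaped nodes standing in for the real parse nodes:

# Hypothetical node shapes; only pseudo columns that are plain tag
# columns contribute to the queried schema.
def build_queried_schema(scan_cols, pseudo_cols):
    schema = [dict(col) for col in scan_cols]          # the regular data columns
    for node in pseudo_cols:
        if node.get("kind") != "column":               # skip tbname() and friends
            continue
        schema.append({"id": node["id"], "type": node["type"],
                       "bytes": node["bytes"], "name": node["name"]})
    return schema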
@ -4293,69 +4381,15 @@ int32_t generateGroupIdMap(STableListInfo* pTableListInfo, SReadHandle* pHandle,
int32_t groupNum = 0; int32_t groupNum = 0;
for (int32_t i = 0; i < taosArrayGetSize(pTableListInfo->pTableList); i++) { for (int32_t i = 0; i < taosArrayGetSize(pTableListInfo->pTableList); i++) {
STableKeyInfo* info = taosArrayGet(pTableListInfo->pTableList, i); STableKeyInfo* info = taosArrayGet(pTableListInfo->pTableList, i);
SMetaReader mr = {0}; int32_t code = getGroupIdFromTagsVal(pHandle->meta, info->uid, group, keyBuf, &info->groupId);
metaReaderInit(&mr, pHandle->meta, 0); if (code != TSDB_CODE_SUCCESS) {
metaGetTableEntryByUid(&mr, info->uid); return code;
SNodeList* groupNew = nodesCloneList(group);
nodesRewriteExprsPostOrder(groupNew, doTranslateTagExpr, &mr);
char* isNull = (char*)keyBuf;
char* pStart = (char*)keyBuf + nullFlagSize;
SNode* pNode;
int32_t index = 0;
FOREACH(pNode, groupNew) {
SNode* pNew = NULL;
int32_t code = scalarCalculateConstants(pNode, &pNew);
if (TSDB_CODE_SUCCESS == code) {
REPLACE_NODE(pNew);
} else {
taosMemoryFree(keyBuf);
nodesDestroyList(groupNew);
metaReaderClear(&mr);
return code;
}
ASSERT(nodeType(pNew) == QUERY_NODE_VALUE);
SValueNode* pValue = (SValueNode*)pNew;
if (pValue->node.resType.type == TSDB_DATA_TYPE_NULL || pValue->isNull) {
isNull[index++] = 1;
continue;
} else {
isNull[index++] = 0;
char* data = nodesGetValueFromNode(pValue);
if (pValue->node.resType.type == TSDB_DATA_TYPE_JSON) {
if (tTagIsJson(data)) {
terrno = TSDB_CODE_QRY_JSON_IN_GROUP_ERROR;
taosMemoryFree(keyBuf);
nodesDestroyList(groupNew);
metaReaderClear(&mr);
return terrno;
}
int32_t len = getJsonValueLen(data);
memcpy(pStart, data, len);
pStart += len;
} else if (IS_VAR_DATA_TYPE(pValue->node.resType.type)) {
memcpy(pStart, data, varDataTLen(data));
pStart += varDataTLen(data);
} else {
memcpy(pStart, data, pValue->node.resType.bytes);
pStart += pValue->node.resType.bytes;
}
}
} }
int32_t len = (int32_t)(pStart - (char*)keyBuf); taosHashPut(pTableListInfo->map, &(info->uid), sizeof(uint64_t), &info->groupId, sizeof(uint64_t));
uint64_t groupId = calcGroupId(keyBuf, len);
taosHashPut(pTableListInfo->map, &(info->uid), sizeof(uint64_t), &groupId, sizeof(uint64_t));
info->groupId = groupId;
groupNum++; groupNum++;
nodesDestroyList(groupNew);
metaReaderClear(&mr);
} }
taosMemoryFree(keyBuf); taosMemoryFree(keyBuf);
if (pTableListInfo->needSortTableByGroupId) { if (pTableListInfo->needSortTableByGroupId) {
@ -4443,12 +4477,6 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
return createExchangeOperatorInfo(pHandle->pMsgCb->clientRpc, (SExchangePhysiNode*)pPhyNode, pTaskInfo); return createExchangeOperatorInfo(pHandle->pMsgCb->clientRpc, (SExchangePhysiNode*)pPhyNode, pTaskInfo);
} else if (QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN == type) { } else if (QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN == type) {
STableScanPhysiNode* pTableScanNode = (STableScanPhysiNode*)pPhyNode; STableScanPhysiNode* pTableScanNode = (STableScanPhysiNode*)pPhyNode;
STimeWindowAggSupp aggSup = (STimeWindowAggSupp){
.waterMark = pTableScanNode->watermark,
.calTrigger = pTableScanNode->triggerType,
.maxTs = INT64_MIN,
};
if (pHandle->vnode) { if (pHandle->vnode) {
int32_t code = int32_t code =
createScanTableListInfo(&pTableScanNode->scan, pTableScanNode->pGroupTags, pTableScanNode->groupSort, createScanTableListInfo(&pTableScanNode->scan, pTableScanNode->pGroupTags, pTableScanNode->groupSort,
@ -4468,7 +4496,7 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
#endif #endif
pTaskInfo->schemaInfo.qsw = extractQueriedColumnSchema(&pTableScanNode->scan); pTaskInfo->schemaInfo.qsw = extractQueriedColumnSchema(&pTableScanNode->scan);
SOperatorInfo* pOperator = createStreamScanOperatorInfo(pHandle, pTableScanNode, pTagCond, &aggSup, pTaskInfo); SOperatorInfo* pOperator = createStreamScanOperatorInfo(pHandle, pTableScanNode, pTagCond, pTaskInfo);
return pOperator; return pOperator;
} else if (QUERY_NODE_PHYSICAL_PLAN_SYSTABLE_SCAN == type) { } else if (QUERY_NODE_PHYSICAL_PLAN_SYSTABLE_SCAN == type) {
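The tag-serialization loop removed above now lives behind getGroupIdFromTagsVal: the tag expressions are evaluated once per table, hashed into a group id, and the uid-to-groupId mapping is cached in a hash map for later lookups. A rough Python sketch of that flow, assuming any stable 64-bit hash (md5 here) in place of calcGroupId:

import hashlib

# Hypothetical stand-in for calcGroupId: a stable 64-bit hash over the
# serialized tag values of one table.
def calc_group_id(tag_values):
    buf = b"\x1f".join(str(v).encode() for v in tag_values)
    return int.from_bytes(hashlib.md5(buf).digest()[:8], "little")

# eval_tags(uid) plays the role of evaluating the group-by tag expressions;
# the returned dict mirrors the uid -> groupId hash map kept by the executor.
def build_group_map(table_uids, eval_tags):
    return {uid: calc_group_id(eval_tags(uid)) for uid in table_uids}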

View File

@ -299,7 +299,7 @@ static SSDataBlock* buildGroupResultDataBlock(SOperatorInfo* pOperator) {
SSDataBlock* pRes = pInfo->binfo.pRes; SSDataBlock* pRes = pInfo->binfo.pRes;
while(1) { while(1) {
doBuildResultDatablock(pOperator, &pInfo->binfo, &pInfo->groupResInfo, pInfo->aggSup.pResultBuf); doBuildResultDatablock(pOperator, &pInfo->binfo, &pInfo->groupResInfo, pInfo->aggSup.pResultBuf);
doFilter(pInfo->pCondition, pRes); doFilter(pInfo->pCondition, pRes, NULL);
bool hasRemain = hasDataInGroupInfo(&pInfo->groupResInfo); bool hasRemain = hasDataInGroupInfo(&pInfo->groupResInfo);
if (!hasRemain) { if (!hasRemain) {

View File

@ -211,7 +211,7 @@ SSDataBlock* doMergeJoin(struct SOperatorInfo* pOperator) {
break; break;
} }
if (pJoinInfo->pCondAfterMerge != NULL) { if (pJoinInfo->pCondAfterMerge != NULL) {
doFilter(pJoinInfo->pCondAfterMerge, pRes); doFilter(pJoinInfo->pCondAfterMerge, pRes, NULL);
} }
if (pRes->info.rows >= pOperator->resultInfo.threshold) { if (pRes->info.rows >= pOperator->resultInfo.threshold) {
break; break;

View File

@ -264,7 +264,7 @@ static int32_t loadDataBlock(SOperatorInfo* pOperator, STableScanInfo* pTableSca
} }
int64_t st = taosGetTimestampMs(); int64_t st = taosGetTimestampMs();
doFilter(pTableScanInfo->pFilterNode, pBlock); doFilter(pTableScanInfo->pFilterNode, pBlock, pTableScanInfo->pColMatchInfo);
int64_t et = taosGetTimestampMs(); int64_t et = taosGetTimestampMs();
pTableScanInfo->readRecorder.filterTime += (et - st); pTableScanInfo->readRecorder.filterTime += (et - st);
@ -273,6 +273,8 @@ static int32_t loadDataBlock(SOperatorInfo* pOperator, STableScanInfo* pTableSca
pCost->filterOutBlocks += 1; pCost->filterOutBlocks += 1;
qDebug("%s data block filter out, brange:%" PRId64 "-%" PRId64 ", rows:%d", GET_TASKID(pTaskInfo), qDebug("%s data block filter out, brange:%" PRId64 "-%" PRId64 ", rows:%d", GET_TASKID(pTaskInfo),
pBlockInfo->window.skey, pBlockInfo->window.ekey, pBlockInfo->rows); pBlockInfo->window.skey, pBlockInfo->window.ekey, pBlockInfo->rows);
} else {
qDebug("%s data block filter out, elapsed time:%"PRId64, GET_TASKID(pTaskInfo), (et - st));
} }
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
@ -1134,7 +1136,7 @@ static int32_t setBlockIntoRes(SStreamScanInfo* pInfo, const SSDataBlock* pBlock
} }
} }
doFilter(pInfo->pCondition, pInfo->pRes); doFilter(pInfo->pCondition, pInfo->pRes, NULL);
blockDataUpdateTsWindow(pInfo->pRes, pInfo->primaryTsIndex); blockDataUpdateTsWindow(pInfo->pRes, pInfo->primaryTsIndex);
blockDataFreeRes((SSDataBlock*)pBlock); blockDataFreeRes((SSDataBlock*)pBlock);
return 0; return 0;
@ -1415,7 +1417,7 @@ static void destroyStreamScanOperatorInfo(void* param, int32_t numOfOutput) {
} }
SOperatorInfo* createStreamScanOperatorInfo(SReadHandle* pHandle, STableScanPhysiNode* pTableScanNode, SNode* pTagCond, SOperatorInfo* createStreamScanOperatorInfo(SReadHandle* pHandle, STableScanPhysiNode* pTableScanNode, SNode* pTagCond,
STimeWindowAggSupp* pTwSup, SExecTaskInfo* pTaskInfo) { SExecTaskInfo* pTaskInfo) {
SStreamScanInfo* pInfo = taosMemoryCalloc(1, sizeof(SStreamScanInfo)); SStreamScanInfo* pInfo = taosMemoryCalloc(1, sizeof(SStreamScanInfo));
SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo)); SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo));
@ -1428,8 +1430,12 @@ SOperatorInfo* createStreamScanOperatorInfo(SReadHandle* pHandle, STableScanPhys
SDataBlockDescNode* pDescNode = pScanPhyNode->node.pOutputDataBlockDesc; SDataBlockDescNode* pDescNode = pScanPhyNode->node.pOutputDataBlockDesc;
pInfo->pTagCond = pTagCond; pInfo->pTagCond = pTagCond;
pInfo->pGroupTags = pTableScanNode->pGroupTags;
pInfo->twAggSup = *pTwSup; pInfo->twAggSup = (STimeWindowAggSupp){
.waterMark = pTableScanNode->watermark,
.calTrigger = pTableScanNode->triggerType,
.maxTs = INT64_MIN,
};
int32_t numOfCols = 0; int32_t numOfCols = 0;
pInfo->pColMatchInfo = extractColMatchInfo(pScanPhyNode->pScanCols, pDescNode, &numOfCols, COL_MATCH_FROM_COL_ID); pInfo->pColMatchInfo = extractColMatchInfo(pScanPhyNode->pScanCols, pDescNode, &numOfCols, COL_MATCH_FROM_COL_ID);
@ -1641,55 +1647,7 @@ static SSDataBlock* doFilterResult(SSysTableScanInfo* pInfo) {
return pInfo->pRes->info.rows == 0 ? NULL : pInfo->pRes; return pInfo->pRes->info.rows == 0 ? NULL : pInfo->pRes;
} }
doFilter(pInfo->pCondition, pInfo->pRes); doFilter(pInfo->pCondition, pInfo->pRes, NULL);
#if 0
SFilterInfo* filter = NULL;
int32_t code = filterInitFromNode(pInfo->pCondition, &filter, 0);
SFilterColumnParam param1 = {.numOfCols = pInfo->pRes->info.numOfCols, .pDataBlock = pInfo->pRes->pDataBlock};
code = filterSetDataFromSlotId(filter, &param1);
int8_t* rowRes = NULL;
bool keep = filterExecute(filter, pInfo->pRes, &rowRes, NULL, param1.numOfCols);
filterFreeInfo(filter);
SSDataBlock* px = createOneDataBlock(pInfo->pRes, false);
blockDataEnsureCapacity(px, pInfo->pRes->info.rows);
// TODO refactor
int32_t numOfRow = 0;
for (int32_t i = 0; i < pInfo->pRes->info.numOfCols; ++i) {
SColumnInfoData* pDest = taosArrayGet(px->pDataBlock, i);
SColumnInfoData* pSrc = taosArrayGet(pInfo->pRes->pDataBlock, i);
if (keep) {
colDataAssign(pDest, pSrc, pInfo->pRes->info.rows, &px->info);
numOfRow = pInfo->pRes->info.rows;
} else if (NULL != rowRes) {
numOfRow = 0;
for (int32_t j = 0; j < pInfo->pRes->info.rows; ++j) {
if (rowRes[j] == 0) {
continue;
}
if (colDataIsNull_s(pSrc, j)) {
colDataAppendNULL(pDest, numOfRow);
} else {
colDataAppend(pDest, numOfRow, colDataGetData(pSrc, j), false);
}
numOfRow += 1;
}
} else {
numOfRow = 0;
}
}
px->info.rows = numOfRow;
pInfo->pRes = px;
#endif
return pInfo->pRes->info.rows == 0 ? NULL : pInfo->pRes; return pInfo->pRes->info.rows == 0 ? NULL : pInfo->pRes;
} }
@ -2657,7 +2615,7 @@ static int32_t loadDataBlockFromOneTable(SOperatorInfo* pOperator, STableMergeSc
} }
int64_t st = taosGetTimestampMs(); int64_t st = taosGetTimestampMs();
doFilter(pTableScanInfo->pFilterNode, pBlock); doFilter(pTableScanInfo->pFilterNode, pBlock, pTableScanInfo->pColMatchInfo);
int64_t et = taosGetTimestampMs(); int64_t et = taosGetTimestampMs();
pTableScanInfo->readRecorder.filterTime += (et - st); pTableScanInfo->readRecorder.filterTime += (et - st);
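The scan paths above wrap doFilter with millisecond timestamps and fold the delta into readRecorder.filterTime, and now log the elapsed time even when the block survives the filter. A tiny hedged sketch of that instrumentation (recorder is an invented dict, not the real struct):

import time

# Accumulate per-operator filter time, mirroring readRecorder.filterTime.
def timed_filter(apply_filter, block, recorder):
    st = time.monotonic()
    kept = apply_filter(block)
    recorder["filter_ms"] = recorder.get("filter_ms", 0.0) + (time.monotonic() - st) * 1000.0
    return kept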

View File

@ -216,7 +216,7 @@ SSDataBlock* doSort(SOperatorInfo* pOperator) {
return NULL; return NULL;
} }
doFilter(pInfo->pCondition, pBlock); doFilter(pInfo->pCondition, pBlock, pInfo->pColMatchInfo);
if (blockDataGetNumOfRows(pBlock) == 0) { if (blockDataGetNumOfRows(pBlock) == 0) {
continue; continue;
} }

View File

@ -1178,7 +1178,7 @@ static SSDataBlock* doStateWindowAgg(SOperatorInfo* pOperator) {
if (pOperator->status == OP_RES_TO_RETURN) { if (pOperator->status == OP_RES_TO_RETURN) {
while (1) { while (1) {
doBuildResultDatablock(pOperator, &pInfo->binfo, &pInfo->groupResInfo, pInfo->aggSup.pResultBuf); doBuildResultDatablock(pOperator, &pInfo->binfo, &pInfo->groupResInfo, pInfo->aggSup.pResultBuf);
doFilter(pInfo->pCondition, pBInfo->pRes); doFilter(pInfo->pCondition, pBInfo->pRes, NULL);
bool hasRemain = hasDataInGroupInfo(&pInfo->groupResInfo); bool hasRemain = hasDataInGroupInfo(&pInfo->groupResInfo);
if (!hasRemain) { if (!hasRemain) {
@ -1219,7 +1219,7 @@ static SSDataBlock* doStateWindowAgg(SOperatorInfo* pOperator) {
blockDataEnsureCapacity(pBInfo->pRes, pOperator->resultInfo.capacity); blockDataEnsureCapacity(pBInfo->pRes, pOperator->resultInfo.capacity);
while (1) { while (1) {
doBuildResultDatablock(pOperator, &pInfo->binfo, &pInfo->groupResInfo, pInfo->aggSup.pResultBuf); doBuildResultDatablock(pOperator, &pInfo->binfo, &pInfo->groupResInfo, pInfo->aggSup.pResultBuf);
doFilter(pInfo->pCondition, pBInfo->pRes); doFilter(pInfo->pCondition, pBInfo->pRes, NULL);
bool hasRemain = hasDataInGroupInfo(&pInfo->groupResInfo); bool hasRemain = hasDataInGroupInfo(&pInfo->groupResInfo);
if (!hasRemain) { if (!hasRemain) {
@ -1256,7 +1256,7 @@ static SSDataBlock* doBuildIntervalResult(SOperatorInfo* pOperator) {
blockDataEnsureCapacity(pBlock, pOperator->resultInfo.capacity); blockDataEnsureCapacity(pBlock, pOperator->resultInfo.capacity);
while (1) { while (1) {
doBuildResultDatablock(pOperator, &pInfo->binfo, &pInfo->groupResInfo, pInfo->aggSup.pResultBuf); doBuildResultDatablock(pOperator, &pInfo->binfo, &pInfo->groupResInfo, pInfo->aggSup.pResultBuf);
doFilter(pInfo->pCondition, pBlock); doFilter(pInfo->pCondition, pBlock, NULL);
bool hasRemain = hasDataInGroupInfo(&pInfo->groupResInfo); bool hasRemain = hasDataInGroupInfo(&pInfo->groupResInfo);
if (!hasRemain) { if (!hasRemain) {
@ -1970,7 +1970,7 @@ static SSDataBlock* doSessionWindowAgg(SOperatorInfo* pOperator) {
if (pOperator->status == OP_RES_TO_RETURN) { if (pOperator->status == OP_RES_TO_RETURN) {
while (1) { while (1) {
doBuildResultDatablock(pOperator, &pInfo->binfo, &pInfo->groupResInfo, pInfo->aggSup.pResultBuf); doBuildResultDatablock(pOperator, &pInfo->binfo, &pInfo->groupResInfo, pInfo->aggSup.pResultBuf);
doFilter(pInfo->pCondition, pBInfo->pRes); doFilter(pInfo->pCondition, pBInfo->pRes, NULL);
bool hasRemain = hasDataInGroupInfo(&pInfo->groupResInfo); bool hasRemain = hasDataInGroupInfo(&pInfo->groupResInfo);
if (!hasRemain) { if (!hasRemain) {
@ -2014,7 +2014,7 @@ static SSDataBlock* doSessionWindowAgg(SOperatorInfo* pOperator) {
blockDataEnsureCapacity(pBInfo->pRes, pOperator->resultInfo.capacity); blockDataEnsureCapacity(pBInfo->pRes, pOperator->resultInfo.capacity);
while (1) { while (1) {
doBuildResultDatablock(pOperator, &pInfo->binfo, &pInfo->groupResInfo, pInfo->aggSup.pResultBuf); doBuildResultDatablock(pOperator, &pInfo->binfo, &pInfo->groupResInfo, pInfo->aggSup.pResultBuf);
doFilter(pInfo->pCondition, pBInfo->pRes); doFilter(pInfo->pCondition, pBInfo->pRes, NULL);
bool hasRemain = hasDataInGroupInfo(&pInfo->groupResInfo); bool hasRemain = hasDataInGroupInfo(&pInfo->groupResInfo);
if (!hasRemain) { if (!hasRemain) {
@ -4638,7 +4638,7 @@ static SSDataBlock* doMergeAlignedIntervalAgg(SOperatorInfo* pOperator) {
getTableScanInfo(pOperator, &iaInfo->order, &scanFlag); getTableScanInfo(pOperator, &iaInfo->order, &scanFlag);
setInputDataBlock(pOperator, pSup->pCtx, pBlock, iaInfo->order, scanFlag, true); setInputDataBlock(pOperator, pSup->pCtx, pBlock, iaInfo->order, scanFlag, true);
doMergeAlignedIntervalAggImpl(pOperator, &iaInfo->binfo.resultRowInfo, pBlock, scanFlag, pRes); doMergeAlignedIntervalAggImpl(pOperator, &iaInfo->binfo.resultRowInfo, pBlock, scanFlag, pRes);
doFilter(miaInfo->pCondition, pRes); doFilter(miaInfo->pCondition, pRes, NULL);
if (pRes->info.rows >= pOperator->resultInfo.capacity) { if (pRes->info.rows >= pOperator->resultInfo.capacity) {
break; break;
} }

View File

@ -708,7 +708,7 @@ static int32_t sifCalculate(SNode *pNode, SIFParam *pDst) {
taosHashRemove(ctx.pRes, (void *)&pNode, POINTER_BYTES); taosHashRemove(ctx.pRes, (void *)&pNode, POINTER_BYTES);
} }
sifFreeRes(ctx.pRes); sifFreeRes(ctx.pRes);
SIF_RET(code); SIF_RET(code);
} }

Binary file not shown.

View File

@ -196,6 +196,10 @@ static void cliReleaseUnfinishedMsg(SCliConn* conn) {
CONN_GET_MSGCTX_BY_AHANDLE(conn, ahandle); \ CONN_GET_MSGCTX_BY_AHANDLE(conn, ahandle); \
transClearBuffer(&conn->readBuf); \ transClearBuffer(&conn->readBuf); \
transFreeMsg(transContFromHead((char*)head)); \ transFreeMsg(transContFromHead((char*)head)); \
if (transQueueSize(&conn->cliMsgs) > 0 && ahandle == 0) { \
SCliMsg* cliMsg = transQueueGet(&conn->cliMsgs, 0); \
if (cliMsg->type == Release) return; \
} \
tDebug("%s conn %p receive release request, ref:%d", CONN_GET_INST_LABEL(conn), conn, T_REF_VAL_GET(conn)); \ tDebug("%s conn %p receive release request, ref:%d", CONN_GET_INST_LABEL(conn), conn, T_REF_VAL_GET(conn)); \
if (T_REF_VAL_GET(conn) > 1) { \ if (T_REF_VAL_GET(conn) > 1) { \
transUnrefCliHandle(conn); \ transUnrefCliHandle(conn); \

View File

@ -149,34 +149,35 @@ static void* transAcceptThread(void* arg);
static bool addHandleToWorkloop(SWorkThrd* pThrd, char* pipeName); static bool addHandleToWorkloop(SWorkThrd* pThrd, char* pipeName);
static bool addHandleToAcceptloop(void* arg); static bool addHandleToAcceptloop(void* arg);
#define CONN_SHOULD_RELEASE(conn, head) \ #define CONN_SHOULD_RELEASE(conn, head) \
do { \ do { \
if ((head)->release == 1 && (head->msgLen) == sizeof(*head)) { \ if ((head)->release == 1 && (head->msgLen) == sizeof(*head)) { \
reallocConnRef(conn); \ reallocConnRef(conn); \
tTrace("conn %p received release request", conn); \ tTrace("conn %p received release request", conn); \
\ \
STraceId traceId = head->traceId; \ STraceId traceId = head->traceId; \
conn->status = ConnRelease; \ conn->status = ConnRelease; \
transClearBuffer(&conn->readBuf); \ transClearBuffer(&conn->readBuf); \
transFreeMsg(transContFromHead((char*)head)); \ transFreeMsg(transContFromHead((char*)head)); \
\ \
STransMsg tmsg = {.code = 0, .info.handle = (void*)conn, .info.traceId = traceId, .info.ahandle = NULL}; \ STransMsg tmsg = { \
SSvrMsg* srvMsg = taosMemoryCalloc(1, sizeof(SSvrMsg)); \ .code = 0, .info.handle = (void*)conn, .info.traceId = traceId, .info.ahandle = (void*)0x9527}; \
srvMsg->msg = tmsg; \ SSvrMsg* srvMsg = taosMemoryCalloc(1, sizeof(SSvrMsg)); \
srvMsg->type = Release; \ srvMsg->msg = tmsg; \
srvMsg->pConn = conn; \ srvMsg->type = Release; \
if (!transQueuePush(&conn->srvMsgs, srvMsg)) { \ srvMsg->pConn = conn; \
return; \ if (!transQueuePush(&conn->srvMsgs, srvMsg)) { \
} \ return; \
if (conn->regArg.init) { \ } \
tTrace("conn %p release, notify server app", conn); \ if (conn->regArg.init) { \
STrans* pTransInst = conn->pTransInst; \ tTrace("conn %p release, notify server app", conn); \
(*pTransInst->cfp)(pTransInst->parent, &(conn->regArg.msg), NULL); \ STrans* pTransInst = conn->pTransInst; \
memset(&conn->regArg, 0, sizeof(conn->regArg)); \ (*pTransInst->cfp)(pTransInst->parent, &(conn->regArg.msg), NULL); \
} \ memset(&conn->regArg, 0, sizeof(conn->regArg)); \
uvStartSendRespInternal(srvMsg); \ } \
return; \ uvStartSendRespInternal(srvMsg); \
} \ return; \
} \
} while (0) } while (0)
#define SRV_RELEASE_UV(loop) \ #define SRV_RELEASE_UV(loop) \
@ -396,11 +397,11 @@ static void uvPrepareSendData(SSvrMsg* smsg, uv_buf_t* wb) {
if (pConn->status == ConnNormal) { if (pConn->status == ConnNormal) {
pHead->msgType = (0 == pMsg->msgType ? pConn->inType + 1 : pMsg->msgType); pHead->msgType = (0 == pMsg->msgType ? pConn->inType + 1 : pMsg->msgType);
if (smsg->type == Release) pHead->msgType = 0;
} else { } else {
if (smsg->type == Release) { if (smsg->type == Release) {
pHead->msgType = 0; pHead->msgType = 0;
pConn->status = ConnNormal; pConn->status = ConnNormal;
destroyConnRegArg(pConn); destroyConnRegArg(pConn);
transUnrefSrvHandle(pConn); transUnrefSrvHandle(pConn);
} else { } else {
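Two sides of the release handshake changed here: the server now stamps Release responses with a sentinel ahandle (0x9527) instead of NULL, and the client defers conn teardown while an explicit Release message is still queued. A hedged Python sketch of the client-side guard, with invented message shapes:

# Invented message shapes; mirrors the guard added to the client macro:
# a response carrying no ahandle must not tear the conn down while a
# queued Release message is waiting to do so.
def should_defer_release(pending_msgs, ahandle):
    return bool(pending_msgs) and ahandle == 0 and pending_msgs[0]["type"] == "Release"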

View File

@ -457,14 +457,14 @@ class TDCom:
def newcon(self,host='localhost',port=6030,user='root',password='taosdata'): def newcon(self,host='localhost',port=6030,user='root',password='taosdata'):
con=taos.connect(host=host, user=user, password=password, port=port) con=taos.connect(host=host, user=user, password=password, port=port)
print(con) # print(con)
return con return con
def newcur(self,host='localhost',port=6030,user='root',password='taosdata'): def newcur(self,host='localhost',port=6030,user='root',password='taosdata'):
cfgPath = self.getClientCfgPath() cfgPath = self.getClientCfgPath()
con=taos.connect(host=host, user=user, password=password, config=cfgPath, port=port) con=taos.connect(host=host, user=user, password=password, config=cfgPath, port=port)
cur=con.cursor() cur=con.cursor()
print(cur) # print(cur)
return cur return cur
def newTdSql(self, host='localhost',port=6030,user='root',password='taosdata'): def newTdSql(self, host='localhost',port=6030,user='root',password='taosdata'):

View File

@ -99,7 +99,7 @@
./test.sh -f tsim/parser/commit.sim ./test.sh -f tsim/parser/commit.sim
# TD-17661 ./test.sh -f tsim/parser/condition.sim # TD-17661 ./test.sh -f tsim/parser/condition.sim
./test.sh -f tsim/parser/constCol.sim ./test.sh -f tsim/parser/constCol.sim
./test.sh -f tsim/parser/create_db.sim #./test.sh -f tsim/parser/create_db.sim
./test.sh -f tsim/parser/create_mt.sim ./test.sh -f tsim/parser/create_mt.sim
# TD-17653 ./test.sh -f tsim/parser/create_tb_with_tag_name.sim # TD-17653 ./test.sh -f tsim/parser/create_tb_with_tag_name.sim
./test.sh -f tsim/parser/create_tb.sim ./test.sh -f tsim/parser/create_tb.sim
@ -223,7 +223,7 @@
# ---- stream # ---- stream
./test.sh -f tsim/stream/basic0.sim ./test.sh -f tsim/stream/basic0.sim
./test.sh -f tsim/stream/basic1.sim #./test.sh -f tsim/stream/basic1.sim
./test.sh -f tsim/stream/basic2.sim ./test.sh -f tsim/stream/basic2.sim
./test.sh -f tsim/stream/drop_stream.sim ./test.sh -f tsim/stream/drop_stream.sim
./test.sh -f tsim/stream/distributeInterval0.sim ./test.sh -f tsim/stream/distributeInterval0.sim

View File

@ -960,6 +960,7 @@ endi
sql select _wstart, max(k)-min(k),last(k)-first(k),0-spread(k) from tm0 where ts>='2020-1-1 1:1:1' and ts<='2020-1-1 4:2:15' interval(500a) fill(value, 99,91,90,89,88,87,86,85) ; sql select _wstart, max(k)-min(k),last(k)-first(k),0-spread(k) from tm0 where ts>='2020-1-1 1:1:1' and ts<='2020-1-1 4:2:15' interval(500a) fill(value, 99,91,90,89,88,87,86,85) ;
if $rows != 21749 then if $rows != 21749 then
print expect 21749, actual: $rows
return -1 return -1
endi endi

View File

@ -0,0 +1,138 @@
# author : wenzhouwww
from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
import taos
import sys
import time
import os
from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import TDDnodes
from util.dnodes import TDDnode
from util.cluster import *
import time
import socket
import subprocess
sys.path.append(os.path.dirname(__file__))
class TDTestCase:
def init(self,conn ,logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
self.host = socket.gethostname()
self.mnode_list = {}
self.dnode_list = {}
def getBuildPath(self):
selfPath = os.path.dirname(os.path.realpath(__file__))
if ("community" in selfPath):
projPath = selfPath[:selfPath.find("community")]
else:
projPath = selfPath[:selfPath.find("tests")]
for root, dirs, files in os.walk(projPath):
if ("taosd" in files):
rootRealPath = os.path.dirname(os.path.realpath(root))
if ("packaging" not in rootRealPath):
buildPath = root[:len(root) - len("/build/bin")]
break
return buildPath
def check_setup_cluster_status(self):
tdSql.query("show mnodes")
for mnode in tdSql.queryResult:
name = mnode[1]
info = mnode
self.mnode_list[name] = info
tdSql.query("show dnodes")
for dnode in tdSql.queryResult:
name = dnode[1]
info = dnode
self.dnode_list[name] = info
count = 0
is_leader = False
mnode_name = ''
for k,v in self.mnode_list.items():
count +=1
# only for 1 mnode
mnode_name = k
if v[2] =='leader':
is_leader=True
if count==1 and is_leader:
tdLog.info("===== depoly cluster success with 1 mnode as leader =====")
else:
tdLog.exit("===== depoly cluster fail with 1 mnode as leader =====")
for k ,v in self.dnode_list.items():
if k == mnode_name:
if v[3]==0:
tdLog.info("===== depoly cluster mnode only success at {} , support_vnodes is {} ".format(mnode_name,v[3]))
else:
tdLog.exit("===== depoly cluster mnode only fail at {} , support_vnodes is {} ".format(mnode_name,v[3]))
else:
continue
def create_db_check_vgroups(self):
tdSql.execute("drop database if exists test")
tdSql.execute("create database if not exists test replica 1 duration 300")
tdSql.execute("use test")
tdSql.execute(
'''create table stb1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
tags (t1 int)
'''
)
tdSql.execute(
'''
create table t1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
'''
)
for i in range(5):
tdSql.execute("create table sub_tb_{} using stb1 tags({})".format(i,i))
tdSql.query("show stables")
tdSql.checkRows(1)
tdSql.query("show tables")
tdSql.checkRows(6)
tdSql.query("show test.vgroups;")
vgroups_infos = {} # key is id: value is info list
for vgroup_info in tdSql.queryResult:
vgroup_id = vgroup_info[0]
tmp_list = []
for role in vgroup_info[3:-4]:
if role in ['leader','follower']:
tmp_list.append(role)
vgroups_infos[vgroup_id]=tmp_list
for k , v in vgroups_infos.items():
if len(v) ==1 and v[0]=="leader":
tdLog.info(" === create database replica only 1 role leader check success of vgroup_id {} ======".format(k))
else:
tdLog.exit(" === create database replica only 1 role leader check fail of vgroup_id {} ======".format(k))
def getConnection(self, dnode):
host = dnode.cfgDict["fqdn"]
port = dnode.cfgDict["serverPort"]
config_dir = dnode.cfgDir
return taos.connect(host=host, port=int(port), config=config_dir)
def run(self):
self.check_setup_cluster_status()
self.create_db_check_vgroups()
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
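create_db_check_vgroups slices each `show test.vgroups` row down to its role columns and requires exactly one leader per vgroup for a replica-1 database. The same check, condensed into a hedged standalone sketch (the 3:-4 slice is what these tests assume about the row layout, not documented output):

# Row layout assumption taken from the test: columns 3..-4 interleave
# dnode ids with their roles for each replica.
def vgroup_roles(rows):
    return {row[0]: [r for r in row[3:-4] if r in ("leader", "follower")]
            for row in rows}

def assert_single_leader(rows):
    for vgid, roles in vgroup_roles(rows).items():
        assert roles == ["leader"], f"vgroup {vgid} has roles {roles}"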

View File

@ -0,0 +1,179 @@
# author : wenzhouwww
from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
import taos
import sys
import time
import os
from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import TDDnodes
from util.dnodes import TDDnode
from util.cluster import *
import time
import socket
import subprocess
sys.path.append(os.path.dirname(__file__))
class TDTestCase:
def init(self,conn ,logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
self.host = socket.gethostname()
self.mnode_list = {}
self.dnode_list = {}
self.ts = 1483200000000
self.db_name ='testdb'
self.replica = 1
self.vgroups = 2
self.tb_nums = 10
self.row_nums = 100
def getBuildPath(self):
selfPath = os.path.dirname(os.path.realpath(__file__))
if ("community" in selfPath):
projPath = selfPath[:selfPath.find("community")]
else:
projPath = selfPath[:selfPath.find("tests")]
for root, dirs, files in os.walk(projPath):
if ("taosd" in files):
rootRealPath = os.path.dirname(os.path.realpath(root))
if ("packaging" not in rootRealPath):
buildPath = root[:len(root) - len("/build/bin")]
break
return buildPath
def check_setup_cluster_status(self):
tdSql.query("show mnodes")
for mnode in tdSql.queryResult:
name = mnode[1]
info = mnode
self.mnode_list[name] = info
tdSql.query("show dnodes")
for dnode in tdSql.queryResult:
name = dnode[1]
info = dnode
self.dnode_list[name] = info
count = 0
is_leader = False
mnode_name = ''
for k,v in self.mnode_list.items():
count +=1
# only for 1 mnode
mnode_name = k
if v[2] =='leader':
is_leader=True
if count==1 and is_leader:
tdLog.info("===== depoly cluster success with 1 mnode as leader =====")
else:
tdLog.exit("===== depoly cluster fail with 1 mnode as leader =====")
for k ,v in self.dnode_list.items():
if k == mnode_name:
if v[3]==0:
tdLog.info("===== depoly cluster mnode only success at {} , support_vnodes is {} ".format(mnode_name,v[3]))
else:
tdLog.exit("===== depoly cluster mnode only fail at {} , support_vnodes is {} ".format(mnode_name,v[3]))
else:
continue
def create_db_check_vgroups(self):
tdSql.execute("drop database if exists test")
tdSql.execute("create database if not exists test replica 1 duration 300")
tdSql.execute("use test")
tdSql.execute(
'''create table stb1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
tags (t1 int)
'''
)
tdSql.execute(
'''
create table t1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
'''
)
for i in range(5):
tdSql.execute("create table sub_tb_{} using stb1 tags({})".format(i,i))
tdSql.query("show stables")
tdSql.checkRows(1)
tdSql.query("show tables")
tdSql.checkRows(6)
tdSql.query("show test.vgroups;")
vgroups_infos = {} # key is id: value is info list
for vgroup_info in tdSql.queryResult:
vgroup_id = vgroup_info[0]
tmp_list = []
for role in vgroup_info[3:-4]:
if role in ['leader','follower']:
tmp_list.append(role)
vgroups_infos[vgroup_id]=tmp_list
for k , v in vgroups_infos.items():
if len(v) ==1 and v[0]=="leader":
tdLog.info(" === create database replica only 1 role leader check success of vgroup_id {} ======".format(k))
else:
tdLog.exit(" === create database replica only 1 role leader check fail of vgroup_id {} ======".format(k))
def create_db_replica_1_insertdatas(self, dbname, replica_num ,vgroup_nums ,tb_nums , row_nums ):
drop_db_sql = "drop database if exists {}".format(dbname)
create_db_sql = "create database {} replica {} vgroups {}".format(dbname,replica_num,vgroup_nums)
tdLog.info(" ==== create database {} and insert rows begin =====".format(dbname))
tdSql.execute(drop_db_sql)
tdSql.execute(create_db_sql)
tdSql.execute("use {}".format(dbname))
tdSql.execute(
'''create table stb1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp)
tags (t1 int)
'''
)
tdSql.execute(
'''
create table t1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp)
'''
)
for i in range(tb_nums):
sub_tbname = "sub_tb_{}".format(i)
tdSql.execute("create table {} using stb1 tags({})".format(sub_tbname,i))
# insert data into the new database
for row_num in range(row_nums):
ts = self.ts + 1000*row_num
tdSql.execute(f"insert into {sub_tbname} values ({ts}, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
tdLog.info(" ==== create database {} and insert rows execute end =====".format(dbname))
def check_insert_status(self, dbname, tb_nums , row_nums):
tdSql.execute("use {}".format(dbname))
tdSql.query("select count(*) from {}.{}".format(dbname,'stb1'))
tdSql.checkData(0 , 0 , tb_nums*row_nums)
tdSql.query("select distinct tbname from {}.{}".format(dbname,'stb1'))
tdSql.checkRows(tb_nums)
def run(self):
self.check_setup_cluster_status()
self.create_db_check_vgroups()
self.create_db_replica_1_insertdatas(self.db_name , self.replica , self.vgroups , self.tb_nums , self.row_nums)
self.check_insert_status(self.db_name , self.tb_nums , self.row_nums)
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
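check_insert_status verifies ingestion twice over: the total row count must equal tb_nums * row_nums and the number of distinct subtables must equal tb_nums. As a hedged standalone sketch (query is an assumed callable returning result rows):

def check_insert_status(query, dbname, tb_nums, row_nums):
    total = query(f"select count(*) from {dbname}.stb1")[0][0]
    assert total == tb_nums * row_nums, f"expected {tb_nums * row_nums} rows, got {total}"
    tables = query(f"select distinct tbname from {dbname}.stb1")
    assert len(tables) == tb_nums, f"expected {tb_nums} subtables, got {len(tables)}"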

View File

@ -0,0 +1,179 @@
# author : wenzhouwww
from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
import taos
import sys
import time
import os
from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import TDDnodes
from util.dnodes import TDDnode
from util.cluster import *
import time
import socket
import subprocess
sys.path.append(os.path.dirname(__file__))
class TDTestCase:
def init(self,conn ,logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
self.host = socket.gethostname()
self.mnode_list = {}
self.dnode_list = {}
self.ts = 1483200000000
self.db_name ='testdb'
self.replica = 3
self.vgroups = 2
self.tb_nums = 10
self.row_nums = 100
def getBuildPath(self):
selfPath = os.path.dirname(os.path.realpath(__file__))
if ("community" in selfPath):
projPath = selfPath[:selfPath.find("community")]
else:
projPath = selfPath[:selfPath.find("tests")]
for root, dirs, files in os.walk(projPath):
if ("taosd" in files):
rootRealPath = os.path.dirname(os.path.realpath(root))
if ("packaging" not in rootRealPath):
buildPath = root[:len(root) - len("/build/bin")]
break
return buildPath
def check_setup_cluster_status(self):
tdSql.query("show mnodes")
for mnode in tdSql.queryResult:
name = mnode[1]
info = mnode
self.mnode_list[name] = info
tdSql.query("show dnodes")
for dnode in tdSql.queryResult:
name = dnode[1]
info = dnode
self.dnode_list[name] = info
count = 0
is_leader = False
mnode_name = ''
for k,v in self.mnode_list.items():
count +=1
# only for 1 mnode
mnode_name = k
if v[2] =='leader':
is_leader=True
if count==1 and is_leader:
tdLog.info("===== depoly cluster success with 1 mnode as leader =====")
else:
tdLog.exit("===== depoly cluster fail with 1 mnode as leader =====")
for k ,v in self.dnode_list.items():
if k == mnode_name:
if v[3]==0:
tdLog.info("===== depoly cluster mnode only success at {} , support_vnodes is {} ".format(mnode_name,v[3]))
else:
tdLog.exit("===== depoly cluster mnode only fail at {} , support_vnodes is {} ".format(mnode_name,v[3]))
else:
continue
def create_db_check_vgroups(self):
tdSql.execute("drop database if exists test")
tdSql.execute("create database if not exists test replica 1 duration 300")
tdSql.execute("use test")
tdSql.execute(
'''create table stb1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
tags (t1 int)
'''
)
tdSql.execute(
'''
create table t1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
'''
)
for i in range(5):
tdSql.execute("create table sub_tb_{} using stb1 tags({})".format(i,i))
tdSql.query("show stables")
tdSql.checkRows(1)
tdSql.query("show tables")
tdSql.checkRows(6)
tdSql.query("show test.vgroups;")
vgroups_infos = {} # key is id: value is info list
for vgroup_info in tdSql.queryResult:
vgroup_id = vgroup_info[0]
tmp_list = []
for role in vgroup_info[3:-4]:
if role in ['leader','follower']:
tmp_list.append(role)
vgroups_infos[vgroup_id]=tmp_list
for k , v in vgroups_infos.items():
if len(v) ==1 and v[0]=="leader":
tdLog.info(" === create database replica only 1 role leader check success of vgroup_id {} ======".format(k))
else:
tdLog.exit(" === create database replica only 1 role leader check fail of vgroup_id {} ======".format(k))
def create_db_replica_3_insertdatas(self, dbname, replica_num ,vgroup_nums ,tb_nums , row_nums ):
drop_db_sql = "drop database if exists {}".format(dbname)
create_db_sql = "create database {} replica {} vgroups {}".format(dbname,replica_num,vgroup_nums)
tdLog.info(" ==== create database {} and insert rows begin =====".format(dbname))
tdSql.execute(drop_db_sql)
tdSql.execute(create_db_sql)
tdSql.execute("use {}".format(dbname))
tdSql.execute(
'''create table stb1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp)
tags (t1 int)
'''
)
tdSql.execute(
'''
create table t1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp)
'''
)
for i in range(tb_nums):
sub_tbname = "sub_tb_{}".format(i)
tdSql.execute("create table {} using stb1 tags({})".format(sub_tbname,i))
# insert data into the new database
for row_num in range(row_nums):
ts = self.ts + 1000*row_num
tdSql.execute(f"insert into {sub_tbname} values ({ts}, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
tdLog.info(" ==== create database {} and insert rows execute end =====".format(dbname))
def check_insert_status(self, dbname, tb_nums , row_nums):
tdSql.execute("use {}".format(dbname))
tdSql.query("select count(*) from {}.{}".format(dbname,'stb1'))
tdSql.checkData(0 , 0 , tb_nums*row_nums)
tdSql.query("select distinct tbname from {}.{}".format(dbname,'stb1'))
tdSql.checkRows(tb_nums)
def run(self):
self.check_setup_cluster_status()
self.create_db_check_vgroups()
self.create_db_replica_3_insertdatas(self.db_name , self.replica , self.vgroups , self.tb_nums , self.row_nums)
self.check_insert_status(self.db_name , self.tb_nums , self.row_nums)
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())

View File

@ -0,0 +1,496 @@
# author : wenzhouwww
from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
import taos
import sys
import time
import os
from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import TDDnodes
from util.dnodes import TDDnode
from util.cluster import *
import datetime
import inspect
import time
import socket
import subprocess
import threading
from util.common import *  # assumed framework path: provides the tdCom helper (newTdSql) used below
sys.path.append(os.path.dirname(__file__))
class TDTestCase:
def init(self,conn ,logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
self.host = socket.gethostname()
self.mnode_list = {}
self.dnode_list = {}
self.ts = 1483200000000
self.ts_step =1000
self.db_name ='testdb'
self.replica = 3
self.vgroups = 1
self.tb_nums = 10
self.row_nums = 100
self.stop_dnode_id = None
self.loop_restart_times = 5
self.current_thread = None
self.max_restart_time = 10
self.try_check_times = 10
def getBuildPath(self):
selfPath = os.path.dirname(os.path.realpath(__file__))
if ("community" in selfPath):
projPath = selfPath[:selfPath.find("community")]
else:
projPath = selfPath[:selfPath.find("tests")]
for root, dirs, files in os.walk(projPath):
if ("taosd" in files):
rootRealPath = os.path.dirname(os.path.realpath(root))
if ("packaging" not in rootRealPath):
buildPath = root[:len(root) - len("/build/bin")]
break
return buildPath
def check_setup_cluster_status(self):
tdSql.query("show mnodes")
for mnode in tdSql.queryResult:
name = mnode[1]
info = mnode
self.mnode_list[name] = info
tdSql.query("show dnodes")
for dnode in tdSql.queryResult:
name = dnode[1]
info = dnode
self.dnode_list[name] = info
count = 0
is_leader = False
mnode_name = ''
for k,v in self.mnode_list.items():
count +=1
# only for 1 mnode
mnode_name = k
if v[2] =='leader':
is_leader=True
if count==1 and is_leader:
tdLog.info("===== depoly cluster success with 1 mnode as leader =====")
else:
tdLog.exit("===== depoly cluster fail with 1 mnode as leader =====")
for k ,v in self.dnode_list.items():
if k == mnode_name:
if v[3]==0:
tdLog.info("===== depoly cluster mnode only success at {} , support_vnodes is {} ".format(mnode_name,v[3]))
else:
tdLog.exit("===== depoly cluster mnode only fail at {} , support_vnodes is {} ".format(mnode_name,v[3]))
else:
continue
def create_db_check_vgroups(self):
tdSql.execute("drop database if exists test")
tdSql.execute("create database if not exists test replica 1 duration 300")
tdSql.execute("use test")
tdSql.execute(
'''create table stb1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
tags (t1 int)
'''
)
tdSql.execute(
'''
create table t1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
'''
)
for i in range(5):
tdSql.execute("create table sub_tb_{} using stb1 tags({})".format(i,i))
tdSql.query("show stables")
tdSql.checkRows(1)
tdSql.query("show tables")
tdSql.checkRows(6)
tdSql.query("show test.vgroups;")
vgroups_infos = {} # key is id: value is info list
for vgroup_info in tdSql.queryResult:
vgroup_id = vgroup_info[0]
tmp_list = []
for role in vgroup_info[3:-4]:
if role in ['leader','follower']:
tmp_list.append(role)
vgroups_infos[vgroup_id]=tmp_list
for k , v in vgroups_infos.items():
if len(v) ==1 and v[0]=="leader":
tdLog.info(" === create database replica only 1 role leader check success of vgroup_id {} ======".format(k))
else:
tdLog.exit(" === create database replica only 1 role leader check fail of vgroup_id {} ======".format(k))
def create_database(self, dbname, replica_num ,vgroup_nums ):
drop_db_sql = "drop database if exists {}".format(dbname)
create_db_sql = "create database {} replica {} vgroups {}".format(dbname,replica_num,vgroup_nums)
tdLog.info(" ==== create database {} and insert rows begin =====".format(dbname))
tdSql.execute(drop_db_sql)
tdSql.execute(create_db_sql)
tdSql.execute("use {}".format(dbname))
def create_stable_insert_datas(self,dbname ,stablename , tb_nums , row_nums):
tdSql.execute("use {}".format(dbname))
tdSql.execute(
'''create table {}
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp)
tags (t1 int)
'''.format(stablename)
)
for i in range(tb_nums):
sub_tbname = "sub_{}_{}".format(stablename,i)
tdSql.execute("create table {} using {} tags({})".format(sub_tbname, stablename ,i))
# insert data into the new database
for row_num in range(row_nums):
ts = self.ts + self.ts_step*row_num
tdSql.execute(f"insert into {sub_tbname} values ({ts}, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
tdLog.info(" ==== stable {} insert rows execute end =====".format(stablename))
def append_rows_of_exists_tables(self,dbname ,stablename , tbname , append_nums ):
tdSql.execute("use {}".format(dbname))
for row_num in range(append_nums):
tdSql.execute(f"insert into {tbname} values (now, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
# print(f"insert into {tbname} values (now, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
tdLog.info(" ==== append new rows of table {} belongs to stable {} execute end =====".format(tbname,stablename))
os.system("taos -s 'select count(*) from {}.{}';".format(dbname,stablename))
def check_insert_rows(self, dbname, stablename , tb_nums , row_nums, append_rows):
tdSql.execute("use {}".format(dbname))
tdSql.query("select count(*) from {}.{}".format(dbname,stablename))
while not tdSql.queryResult:
time.sleep(0.1)
tdSql.query("select count(*) from {}.{}".format(dbname,stablename))
status_OK = self.mycheckData("select count(*) from {}.{}".format(dbname,stablename) ,0 , 0 , tb_nums*row_nums+append_rows)
count = 0
while not status_OK :
if count > self.try_check_times:
os.system("taos -s ' show {}.vgroups; '".format(dbname))
tdLog.exit(" ==== check insert rows failed after {} try check {} times of database {}".format(count , self.try_check_times ,dbname))
break
time.sleep(0.1)
tdSql.query("select count(*) from {}.{}".format(dbname,stablename))
while not tdSql.queryResult:
time.sleep(0.1)
tdSql.query("select count(*) from {}.{}".format(dbname,stablename))
status_OK = self.mycheckData("select count(*) from {}.{}".format(dbname,stablename) ,0 , 0 , tb_nums*row_nums+append_rows)
tdLog.info(" ==== check insert rows first failed , this is {}_th retry check rows of database {}".format(count , dbname))
count += 1
tdSql.query("select distinct tbname from {}.{}".format(dbname,stablename))
while not tdSql.queryResult:
time.sleep(0.1)
tdSql.query("select distinct tbname from {}.{}".format(dbname,stablename))
status_OK = self.mycheckRows("select distinct tbname from {}.{}".format(dbname,stablename) ,tb_nums)
count = 0
while not status_OK :
if count > self.try_check_times:
os.system("taos -s ' show {}.vgroups;'".format(dbname))
tdLog.exit(" ==== check insert rows failed after {} try check {} times of database {}".format(count , self.try_check_times ,dbname))
break
time.sleep(0.1)
tdSql.query("select distinct tbname from {}.{}".format(dbname,stablename))
while not tdSql.queryResult:
time.sleep(0.1)
tdSql.query("select distinct tbname from {}.{}".format(dbname,stablename))
status_OK = self.mycheckRows("select distinct tbname from {}.{}".format(dbname,stablename) ,tb_nums)
tdLog.info(" ==== check insert tbnames first failed , this is {}_th retry check tbnames of database {}".format(count , dbname))
count += 1
def _get_stop_dnode_id(self,dbname):
tdSql.query("show {}.vgroups".format(dbname))
vgroup_infos = tdSql.queryResult
for vgroup_info in vgroup_infos:
leader_infos = vgroup_info[3:-4]
# print(vgroup_info)
for ind ,role in enumerate(leader_infos):
if role =='follower':
# print(ind,leader_infos)
self.stop_dnode_id = leader_infos[ind-1]
break
return self.stop_dnode_id
def wait_stop_dnode_OK(self):
def _get_status():
newTdSql=tdCom.newTdSql()
status = ""
newTdSql.query("show dnodes")
dnode_infos = newTdSql.queryResult
for dnode_info in dnode_infos:
id = dnode_info[0]
dnode_status = dnode_info[4]
if id == self.stop_dnode_id:
status = dnode_status
break
return status
status = _get_status()
while status !="offline":
time.sleep(0.1)
status = _get_status()
# tdLog.info("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode))
tdLog.info("==== stop_dnode has stopped , id is {}".format(self.stop_dnode_id))
def wait_start_dnode_OK(self):
def _get_status():
newTdSql=tdCom.newTdSql()
status = ""
newTdSql.query("show dnodes")
dnode_infos = newTdSql.queryResult
for dnode_info in dnode_infos:
id = dnode_info[0]
dnode_status = dnode_info[4]
if id == self.stop_dnode_id:
status = dnode_status
break
return status
status = _get_status()
while status !="ready":
time.sleep(0.1)
status = _get_status()
# tdLog.info("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode))
tdLog.info("==== stop_dnode has restart , id is {}".format(self.stop_dnode_id))
def _parse_datetime(self,timestr):
try:
return datetime.datetime.strptime(timestr, '%Y-%m-%d %H:%M:%S.%f')
except ValueError:
pass
try:
return datetime.datetime.strptime(timestr, '%Y-%m-%d %H:%M:%S')
except ValueError:
pass
def mycheckRowCol(self, sql, row, col):
caller = inspect.getframeinfo(inspect.stack()[2][0])
if row < 0:
args = (caller.filename, caller.lineno, sql, row)
tdLog.exit("%s(%d) failed: sql:%s, row:%d is smaller than zero" % args)
if col < 0:
args = (caller.filename, caller.lineno, sql, row)
tdLog.exit("%s(%d) failed: sql:%s, col:%d is smaller than zero" % args)
if row > tdSql.queryRows:
args = (caller.filename, caller.lineno, sql, row, tdSql.queryRows)
tdLog.exit("%s(%d) failed: sql:%s, row:%d is larger than queryRows:%d" % args)
if col > tdSql.queryCols:
args = (caller.filename, caller.lineno, sql, col, tdSql.queryCols)
tdLog.exit("%s(%d) failed: sql:%s, col:%d is larger than queryCols:%d" % args)
def mycheckData(self, sql ,row, col, data):
check_status = True
self.mycheckRowCol(sql ,row, col)
if tdSql.queryResult[row][col] != data:
if tdSql.cursor.istype(col, "TIMESTAMP"):
# assume the user wants to check a nanosecond timestamp when longer data is passed
if (len(data) >= 28):
if pd.to_datetime(tdSql.queryResult[row][col]) == pd.to_datetime(data):
tdLog.info("sql:%s, row:%d col:%d data:%d == expect:%s" %
(sql, row, col, tdSql.queryResult[row][col], data))
else:
if tdSql.queryResult[row][col] == self._parse_datetime(data):
tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
(sql, row, col, tdSql.queryResult[row][col], data))
return
if str(tdSql.queryResult[row][col]) == str(data):
tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
(sql, row, col, tdSql.queryResult[row][col], data))
return
elif isinstance(data, float) and abs(tdSql.queryResult[row][col] - data) <= 0.000001:
tdLog.info("sql:%s, row:%d col:%d data:%f == expect:%f" %
(sql, row, col, tdSql.queryResult[row][col], data))
return
else:
caller = inspect.getframeinfo(inspect.stack()[1][0])
args = (caller.filename, caller.lineno, sql, row, col, tdSql.queryResult[row][col], data)
tdLog.info("%s(%d) failed: sql:%s row:%d col:%d data:%s != expect:%s" % args)
check_status = False
if data is None:
tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
(sql, row, col, tdSql.queryResult[row][col], data))
elif isinstance(data, str):
tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
(sql, row, col, tdSql.queryResult[row][col], data))
elif isinstance(data, datetime.date):
tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
(sql, row, col, tdSql.queryResult[row][col], data))
elif isinstance(data, float):
tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
(sql, row, col, tdSql.queryResult[row][col], data))
else:
tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%d" %
(sql, row, col, tdSql.queryResult[row][col], data))
return check_status
def mycheckRows(self, sql, expectRows):
check_status = True
if len(tdSql.queryResult) == expectRows:
tdLog.info("sql:%s, queryRows:%d == expect:%d" % (sql, len(tdSql.queryResult), expectRows))
return True
else:
caller = inspect.getframeinfo(inspect.stack()[1][0])
args = (caller.filename, caller.lineno, sql, len(tdSql.queryResult), expectRows)
tdLog.info("%s(%d) failed: sql:%s, queryRows:%d != expect:%d" % args)
check_status = False
return check_status
def sync_run_case(self):
# stop the follower, then insert data, update tables and create new stables
tdDnodes=cluster.dnodes
for loop in range(self.loop_restart_times):
db_name = "sync_db_{}".format(loop)
stablename = 'stable_{}'.format(loop)
self.create_database(dbname = db_name ,replica_num= self.replica , vgroup_nums= 1)
self.create_stable_insert_datas(dbname = db_name , stablename = stablename , tb_nums= 10 ,row_nums= 10 )
self.stop_dnode_id = self._get_stop_dnode_id(db_name)
# check rows of data
self.check_insert_rows(db_name ,stablename ,tb_nums=10 , row_nums= 10 ,append_rows=0)
# begin stop dnode
start = time.time()
tdDnodes[self.stop_dnode_id-1].stoptaosd()
self.wait_stop_dnode_OK()
# append rows to an existing subtable while the dnode is stopped
tbname = "sub_{}_{}".format(stablename , 0)
tdLog.info(" ==== begin append rows of exists table {} when dnode {} offline ====".format(tbname , self.stop_dnode_id))
self.append_rows_of_exists_tables(db_name ,stablename , tbname , 100 )
tdLog.info(" ==== check append rows of exists table {} when dnode {} offline ====".format(tbname , self.stop_dnode_id))
self.check_insert_rows(db_name ,stablename ,tb_nums=10 , row_nums= 10 ,append_rows=100)
# create new stables
tdLog.info(" ==== create new stable {} when dnode {} offline ====".format('new_stb1' , self.stop_dnode_id))
self.create_stable_insert_datas(dbname = db_name , stablename = 'new_stb1' , tb_nums= 10 ,row_nums= 10 )
tdLog.info(" ==== check new stable {} when dnode {} offline ====".format('new_stb1' , self.stop_dnode_id))
self.check_insert_rows(db_name ,'new_stb1' ,tb_nums=10 , row_nums= 10 ,append_rows=0)
# begin start dnode
tdDnodes[self.stop_dnode_id-1].starttaosd()
self.wait_start_dnode_OK()
end = time.time()
time_cost = int(end -start)
if time_cost > self.max_restart_time:
tdLog.exit(" ==== restart dnode {} cost too much time , please check ====".format(self.stop_dnode_id))
# create new stables again
tdLog.info(" ==== create new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id))
self.create_stable_insert_datas(dbname = db_name , stablename = 'new_stb2' , tb_nums= 10 ,row_nums= 10 )
tdLog.info(" ==== check new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id))
self.check_insert_rows(db_name ,'new_stb2' ,tb_nums=10 , row_nums= 10 ,append_rows=0)
def unsync_run_case(self):
def _restart_dnode_of_db_unsync(dbname):
start = time.time()
tdDnodes=cluster.dnodes
self.stop_dnode_id = self._get_stop_dnode_id(dbname)
# begin restart dnode
tdDnodes[self.stop_dnode_id-1].stoptaosd()
self.wait_stop_dnode_OK()
tdDnodes[self.stop_dnode_id-1].starttaosd()
self.wait_start_dnode_OK()
end = time.time()
time_cost = int(end-start)
if time_cost > self.max_restart_time:
tdLog.exit(" ==== restart dnode {} cost too much time , please check ====".format(self.stop_dnode_id))
def _create_threading(dbname):
self.current_thread = threading.Thread(target=_restart_dnode_of_db_unsync, args=(dbname,))
return self.current_thread
'''
in this mode, a separate thread stops and restarts the dnode while inserts keep running, regardless of whether the follower is online
'''
tdDnodes=cluster.dnodes
for loop in range(self.loop_restart_times):
db_name = "unsync_db_{}".format(loop)
stablename = 'stable_{}'.format(loop)
self.create_database(dbname = db_name ,replica_num= self.replica , vgroup_nums= 1)
self.create_stable_insert_datas(dbname = db_name , stablename = stablename , tb_nums= 10 ,row_nums= 10 )
tdLog.info(" ===== restart dnode of database {} in an unsync threading ===== ".format(db_name))
# create the restart thread and start it
self.current_thread = _create_threading(db_name)
self.current_thread.start()
# check rows of data
self.check_insert_rows(db_name ,stablename ,tb_nums=10 , row_nums= 10 ,append_rows=0)
tbname = "sub_{}_{}".format(stablename , 0)
tdLog.info(" ==== begin append rows of exists table {} when dnode {} offline ====".format(tbname , self.stop_dnode_id))
self.append_rows_of_exists_tables(db_name ,stablename , tbname , 100 )
tdLog.info(" ==== check append rows of exists table {} when dnode {} offline ====".format(tbname , self.stop_dnode_id))
self.check_insert_rows(db_name ,stablename ,tb_nums=10 , row_nums= 10 ,append_rows=100)
# create new stables
tdLog.info(" ==== create new stable {} when dnode {} offline ====".format('new_stb1' , self.stop_dnode_id))
self.create_stable_insert_datas(dbname = db_name , stablename = 'new_stb1' , tb_nums= 10 ,row_nums= 10 )
tdLog.info(" ==== check new stable {} when dnode {} offline ====".format('new_stb1' , self.stop_dnode_id))
self.check_insert_rows(db_name ,'new_stb1' ,tb_nums=10 , row_nums= 10 ,append_rows=0)
# create new stables again
tdLog.info(" ==== create new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id))
self.create_stable_insert_datas(dbname = db_name , stablename = 'new_stb2' , tb_nums= 10 ,row_nums= 10 )
tdLog.info(" ==== check new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id))
self.check_insert_rows(db_name ,'new_stb2' ,tb_nums=10 , row_nums= 10 ,append_rows=0)
self.current_thread.join()
def run(self):
# basic insert and check of cluster
self.check_setup_cluster_status()
self.create_db_check_vgroups()
self.sync_run_case()
# self.unsync_run_case()
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
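sync_run_case brackets the follower stop/restart with wall-clock timestamps and fails the case when the cycle exceeds max_restart_time. The restart-budget pattern, reduced to a hedged sketch (dnode, wait_offline, wait_ready are assumed callables):

import time

# stop -> wait offline -> start -> wait ready, with a hard time budget,
# mirroring the stoptaosd/starttaosd flow in sync_run_case.
def restart_within_budget(dnode, wait_offline, wait_ready, budget_s):
    start = time.monotonic()
    dnode.stop()
    wait_offline()
    dnode.start()
    wait_ready()
    cost = time.monotonic() - start
    if cost > budget_s:
        raise RuntimeError(f"restart took {cost:.1f}s, budget {budget_s}s")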

View File

@ -0,0 +1,497 @@
# author : wenzhouwww
from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
import taos
import sys
import time
import os
from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import TDDnodes
from util.dnodes import TDDnode
from util.cluster import *
import datetime
import inspect
import socket
import subprocess
import threading
sys.path.append(os.path.dirname(__file__))
class TDTestCase:
def init(self,conn ,logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
self.host = socket.gethostname()
self.mnode_list = {}
self.dnode_list = {}
self.ts = 1483200000000
self.ts_step =1000
self.db_name ='testdb'
self.replica = 3
self.vgroups = 1
self.tb_nums = 10
self.row_nums = 100
self.stop_dnode_id = None
self.loop_restart_times = 5
self.current_thread = None
self.max_restart_time = 10
self.try_check_times = 10
def getBuildPath(self):
selfPath = os.path.dirname(os.path.realpath(__file__))
if ("community" in selfPath):
projPath = selfPath[:selfPath.find("community")]
else:
projPath = selfPath[:selfPath.find("tests")]
for root, dirs, files in os.walk(projPath):
if ("taosd" in files):
rootRealPath = os.path.dirname(os.path.realpath(root))
if ("packaging" not in rootRealPath):
buildPath = root[:len(root) - len("/build/bin")]
break
return buildPath
def check_setup_cluster_status(self):
tdSql.query("show mnodes")
for mnode in tdSql.queryResult:
name = mnode[1]
info = mnode
self.mnode_list[name] = info
tdSql.query("show dnodes")
for dnode in tdSql.queryResult:
name = dnode[1]
info = dnode
self.dnode_list[name] = info
count = 0
is_leader = False
mnode_name = ''
for k,v in self.mnode_list.items():
count +=1
# only for 1 mnode
mnode_name = k
if v[2] =='leader':
is_leader=True
if count==1 and is_leader:
tdLog.info("===== depoly cluster success with 1 mnode as leader =====")
else:
tdLog.exit("===== depoly cluster fail with 1 mnode as leader =====")
for k ,v in self.dnode_list.items():
if k == mnode_name:
if v[3]==0:
tdLog.info("===== depoly cluster mnode only success at {} , support_vnodes is {} ".format(mnode_name,v[3]))
else:
tdLog.exit("===== depoly cluster mnode only fail at {} , support_vnodes is {} ".format(mnode_name,v[3]))
else:
continue
def create_db_check_vgroups(self):
tdSql.execute("drop database if exists test")
tdSql.execute("create database if not exists test replica 1 duration 300")
tdSql.execute("use test")
tdSql.execute(
'''create table stb1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
tags (t1 int)
'''
)
tdSql.execute(
'''
create table t1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
'''
)
for i in range(5):
tdSql.execute("create table sub_tb_{} using stb1 tags({})".format(i,i))
tdSql.query("show stables")
tdSql.checkRows(1)
tdSql.query("show tables")
tdSql.checkRows(6)
tdSql.query("show test.vgroups;")
vgroups_infos = {} # key is id: value is info list
for vgroup_info in tdSql.queryResult:
vgroup_id = vgroup_info[0]
tmp_list = []
for role in vgroup_info[3:-4]:
if role in ['leader','follower']:
tmp_list.append(role)
vgroups_infos[vgroup_id]=tmp_list
for k , v in vgroups_infos.items():
if len(v) ==1 and v[0]=="leader":
tdLog.info(" === create database replica only 1 role leader check success of vgroup_id {} ======".format(k))
else:
tdLog.exit(" === create database replica only 1 role leader check fail of vgroup_id {} ======".format(k))
def create_database(self, dbname, replica_num ,vgroup_nums ):
drop_db_sql = "drop database if exists {}".format(dbname)
create_db_sql = "create database {} replica {} vgroups {}".format(dbname,replica_num,vgroup_nums)
tdLog.info(" ==== create database {} and insert rows begin =====".format(dbname))
tdSql.execute(drop_db_sql)
tdSql.execute(create_db_sql)
tdSql.execute("use {}".format(dbname))
def create_stable_insert_datas(self,dbname ,stablename , tb_nums , row_nums):
tdSql.execute("use {}".format(dbname))
tdSql.execute(
'''create table {}
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp)
tags (t1 int)
'''.format(stablename)
)
for i in range(tb_nums):
sub_tbname = "sub_{}_{}".format(stablename,i)
tdSql.execute("create table {} using {} tags({})".format(sub_tbname, stablename ,i))
# insert data into the new database
for row_num in range(row_nums):
ts = self.ts + self.ts_step*row_num
tdSql.execute(f"insert into {sub_tbname} values ({ts}, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
tdLog.info(" ==== stable {} insert rows execute end =====".format(stablename))
def append_rows_of_exists_tables(self,dbname ,stablename , tbname , append_nums ):
tdSql.execute("use {}".format(dbname))
for row_num in range(append_nums):
tdSql.execute(f"insert into {tbname} values (now, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
# print(f"insert into {tbname} values (now, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
tdLog.info(" ==== append new rows of table {} belongs to stable {} execute end =====".format(tbname,stablename))
os.system("taos -s 'select count(*) from {}.{}';".format(dbname,stablename))
def check_insert_rows(self, dbname, stablename , tb_nums , row_nums, append_rows):
tdSql.execute("use {}".format(dbname))
tdSql.query("select count(*) from {}.{}".format(dbname,stablename))
while not tdSql.queryResult:
time.sleep(0.1)
tdSql.query("select count(*) from {}.{}".format(dbname,stablename))
status_OK = self.mycheckData("select count(*) from {}.{}".format(dbname,stablename) ,0 , 0 , tb_nums*row_nums+append_rows)
count = 0
while not status_OK :
if count > self.try_check_times:
os.system("taos -s ' show {}.vgroups; '".format(dbname))
tdLog.exit(" ==== check insert rows failed after {} try check {} times of database {}".format(count , self.try_check_times ,dbname))
break
time.sleep(0.1)
tdSql.query("select count(*) from {}.{}".format(dbname,stablename))
while not tdSql.queryResult:
time.sleep(0.1)
tdSql.query("select count(*) from {}.{}".format(dbname,stablename))
status_OK = self.mycheckData("select count(*) from {}.{}".format(dbname,stablename) ,0 , 0 , tb_nums*row_nums+append_rows)
tdLog.info(" ==== check insert rows first failed , this is {}_th retry check rows of database {}".format(count , dbname))
count += 1
tdSql.query("select distinct tbname from {}.{}".format(dbname,stablename))
while not tdSql.queryResult:
time.sleep(0.1)
tdSql.query("select distinct tbname from {}.{}".format(dbname,stablename))
status_OK = self.mycheckRows("select distinct tbname from {}.{}".format(dbname,stablename) ,tb_nums)
count = 0
while not status_OK :
if count > self.try_check_times:
os.system("taos -s ' show {}.vgroups;'".format(dbname))
tdLog.exit(" ==== check insert rows failed after {} try check {} times of database {}".format(count , self.try_check_times ,dbname))
break
time.sleep(0.1)
tdSql.query("select distinct tbname from {}.{}".format(dbname,stablename))
while not tdSql.queryResult:
time.sleep(0.1)
tdSql.query("select distinct tbname from {}.{}".format(dbname,stablename))
status_OK = self.mycheckRows("select distinct tbname from {}.{}".format(dbname,stablename) ,tb_nums)
tdLog.info(" ==== check insert tbnames first failed , this is {}_th retry check tbnames of database {}".format(count , dbname))
count += 1
def _get_stop_dnode_id(self,dbname):
tdSql.query("show {}.vgroups".format(dbname))
vgroup_infos = tdSql.queryResult
for vgroup_info in vgroup_infos:
leader_infos = vgroup_info[3:-4]
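# assumed layout of a `show <db>.vgroups` row: each dnode id column sits
# immediately before its role column, so leader_infos[ind-1] is the dnode id
# of the follower that this case will stop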
# print(vgroup_info)
for ind ,role in enumerate(leader_infos):
if role =='follower':
# print(ind,leader_infos)
self.stop_dnode_id = leader_infos[ind-1]
break
return self.stop_dnode_id
def wait_stop_dnode_OK(self):
def _get_status():
newTdSql=tdCom.newTdSql()
status = ""
newTdSql.query("show dnodes")
dnode_infos = newTdSql.queryResult
for dnode_info in dnode_infos:
id = dnode_info[0]
dnode_status = dnode_info[4]
if id == self.stop_dnode_id:
status = dnode_status
break
return status
status = _get_status()
while status !="offline":
time.sleep(0.1)
status = _get_status()
# tdLog.info("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode))
tdLog.info("==== stop_dnode has stopped , id is {}".format(self.stop_dnode_id))
def wait_start_dnode_OK(self):
def _get_status():
newTdSql=tdCom.newTdSql()
status = ""
newTdSql.query("show dnodes")
dnode_infos = newTdSql.queryResult
for dnode_info in dnode_infos:
id = dnode_info[0]
dnode_status = dnode_info[4]
if id == self.stop_dnode_id:
status = dnode_status
break
return status
status = _get_status()
while status !="ready":
time.sleep(0.1)
status = _get_status()
# tdLog.info("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode))
tdLog.info("==== stop_dnode has restart , id is {}".format(self.stop_dnode_id))
def _parse_datetime(self,timestr):
try:
return datetime.datetime.strptime(timestr, '%Y-%m-%d %H:%M:%S.%f')
except ValueError:
pass
try:
return datetime.datetime.strptime(timestr, '%Y-%m-%d %H:%M:%S')
except ValueError:
pass
def mycheckRowCol(self, sql, row, col):
caller = inspect.getframeinfo(inspect.stack()[2][0])
if row < 0:
args = (caller.filename, caller.lineno, sql, row)
tdLog.exit("%s(%d) failed: sql:%s, row:%d is smaller than zero" % args)
if col < 0:
args = (caller.filename, caller.lineno, sql, row)
tdLog.exit("%s(%d) failed: sql:%s, col:%d is smaller than zero" % args)
if row > tdSql.queryRows:
args = (caller.filename, caller.lineno, sql, row, tdSql.queryRows)
tdLog.exit("%s(%d) failed: sql:%s, row:%d is larger than queryRows:%d" % args)
if col > tdSql.queryCols:
args = (caller.filename, caller.lineno, sql, col, tdSql.queryCols)
tdLog.exit("%s(%d) failed: sql:%s, col:%d is larger than queryCols:%d" % args)
def mycheckData(self, sql ,row, col, data):
check_status = True
self.mycheckRowCol(sql ,row, col)
if tdSql.queryResult[row][col] != data:
if tdSql.cursor.istype(col, "TIMESTAMP"):
# suppose user want to check nanosecond timestamp if a longer data passed
if (len(data) >= 28):
if pd.to_datetime(tdSql.queryResult[row][col]) == pd.to_datetime(data):
tdLog.info("sql:%s, row:%d col:%d data:%d == expect:%s" %
(sql, row, col, tdSql.queryResult[row][col], data))
else:
if tdSql.queryResult[row][col] == self._parse_datetime(data):
tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
(sql, row, col, tdSql.queryResult[row][col], data))
return
if str(tdSql.queryResult[row][col]) == str(data):
tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
(sql, row, col, tdSql.queryResult[row][col], data))
return
elif isinstance(data, float) and abs(tdSql.queryResult[row][col] - data) <= 0.000001:
tdLog.info("sql:%s, row:%d col:%d data:%f == expect:%f" %
(sql, row, col, tdSql.queryResult[row][col], data))
return
else:
caller = inspect.getframeinfo(inspect.stack()[1][0])
args = (caller.filename, caller.lineno, sql, row, col, tdSql.queryResult[row][col], data)
tdLog.info("%s(%d) failed: sql:%s row:%d col:%d data:%s != expect:%s" % args)
check_status = False
if data is None:
tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
(sql, row, col, tdSql.queryResult[row][col], data))
elif isinstance(data, str):
tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
(sql, row, col, tdSql.queryResult[row][col], data))
elif isinstance(data, datetime.date):
tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
(sql, row, col, tdSql.queryResult[row][col], data))
elif isinstance(data, float):
tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
(sql, row, col, tdSql.queryResult[row][col], data))
else:
tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%d" %
(sql, row, col, tdSql.queryResult[row][col], data))
return check_status
def mycheckRows(self, sql, expectRows):
check_status = True
if len(tdSql.queryResult) == expectRows:
tdLog.info("sql:%s, queryRows:%d == expect:%d" % (sql, len(tdSql.queryResult), expectRows))
return True
else:
caller = inspect.getframeinfo(inspect.stack()[1][0])
args = (caller.filename, caller.lineno, sql, len(tdSql.queryResult), expectRows)
tdLog.info("%s(%d) failed: sql:%s, queryRows:%d != expect:%d" % args)
check_status = False
return check_status
def sync_run_case(self):
# stop the follower, insert data, update tables and create new stables
tdDnodes=cluster.dnodes
for loop in range(self.loop_restart_times):
db_name = "sync_db_{}".format(loop)
stablename = 'stable_{}'.format(loop)
self.create_database(dbname = db_name ,replica_num= self.replica , vgroup_nums= 1)
self.create_stable_insert_datas(dbname = db_name , stablename = stablename , tb_nums= 10 ,row_nums= 10 )
self.stop_dnode_id = self._get_stop_dnode_id(db_name)
# check inserted row counts
self.check_insert_rows(db_name ,stablename ,tb_nums=10 , row_nums= 10 ,append_rows=0)
# begin stop dnode
start = time.time()
tdDnodes[self.stop_dnode_id-1].stoptaosd()
self.wait_stop_dnode_OK()
# append rows to the stable's sub-table while the dnode is stopped
tbname = "sub_{}_{}".format(stablename , 0)
tdLog.info(" ==== begin append rows of exists table {} when dnode {} offline ====".format(tbname , self.stop_dnode_id))
self.append_rows_of_exists_tables(db_name ,stablename , tbname , 100 )
tdLog.info(" ==== check append rows of exists table {} when dnode {} offline ====".format(tbname , self.stop_dnode_id))
self.check_insert_rows(db_name ,stablename ,tb_nums=10 , row_nums= 10 ,append_rows=100)
# create new stables
tdLog.info(" ==== create new stable {} when dnode {} offline ====".format('new_stb1' , self.stop_dnode_id))
self.create_stable_insert_datas(dbname = db_name , stablename = 'new_stb1' , tb_nums= 10 ,row_nums= 10 )
tdLog.info(" ==== check new stable {} when dnode {} offline ====".format('new_stb1' , self.stop_dnode_id))
self.check_insert_rows(db_name ,'new_stb1' ,tb_nums=10 , row_nums= 10 ,append_rows=0)
# begin start dnode
tdDnodes[self.stop_dnode_id-1].starttaosd()
self.wait_start_dnode_OK()
end = time.time()
time_cost = int(end -start)
if time_cost > self.max_restart_time:
tdLog.exit(" ==== restart dnode {} cost too much time , please check ====".format(self.stop_dnode_id))
# create new stables again
tdLog.info(" ==== create new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id))
self.create_stable_insert_datas(dbname = db_name , stablename = 'new_stb2' , tb_nums= 10 ,row_nums= 10 )
tdLog.info(" ==== check new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id))
self.check_insert_rows(db_name ,'new_stb2' ,tb_nums=10 , row_nums= 10 ,append_rows=0)
def unsync_run_case(self):
def _restart_dnode_of_db_unsync(dbname):
start = time.time()
tdDnodes=cluster.dnodes
self.stop_dnode_id = self._get_stop_dnode_id(dbname)
# begin restart dnode
tdDnodes[self.stop_dnode_id-1].stoptaosd()
self.wait_stop_dnode_OK()
tdDnodes[self.stop_dnode_id-1].starttaosd()
self.wait_start_dnode_OK()
end = time.time()
time_cost = int(end-start)
if time_cost > self.max_restart_time:
tdLog.exit(" ==== restart dnode {} cost too much time , please check ====".format(self.stop_dnode_id))
def _create_threading(dbname):
self.current_thread = threading.Thread(target=_restart_dnode_of_db_unsync, args=(dbname,))
return self.current_thread
'''
in this mode, an extra thread stops and restarts the dnode while inserts keep running, with no regard for whether the follower is online or alive
'''
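# note: the restart thread below deliberately races with the inserts issued
# from the main thread; the row checks afterwards verify that writes survive
# the restart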
tdDnodes=cluster.dnodes
for loop in range(self.loop_restart_times):
db_name = "unsync_db_{}".format(loop)
stablename = 'stable_{}'.format(loop)
self.create_database(dbname = db_name ,replica_num= self.replica , vgroup_nums= 1)
self.create_stable_insert_datas(dbname = db_name , stablename = stablename , tb_nums= 10 ,row_nums= 10 )
tdLog.info(" ===== restart dnode of database {} in an unsync threading ===== ".format(db_name))
# create sync threading and start it
self.current_thread = _create_threading(db_name)
self.current_thread.start()
# check inserted row counts
self.check_insert_rows(db_name ,stablename ,tb_nums=10 , row_nums= 10 ,append_rows=0)
tbname = "sub_{}_{}".format(stablename , 0)
tdLog.info(" ==== begin append rows of exists table {} when dnode {} offline ====".format(tbname , self.stop_dnode_id))
self.append_rows_of_exists_tables(db_name ,stablename , tbname , 100 )
tdLog.info(" ==== check append rows of exists table {} when dnode {} offline ====".format(tbname , self.stop_dnode_id))
self.check_insert_rows(db_name ,stablename ,tb_nums=10 , row_nums= 10 ,append_rows=100)
# create new stables
tdLog.info(" ==== create new stable {} when dnode {} offline ====".format('new_stb1' , self.stop_dnode_id))
self.create_stable_insert_datas(dbname = db_name , stablename = 'new_stb1' , tb_nums= 10 ,row_nums= 10 )
tdLog.info(" ==== check new stable {} when dnode {} offline ====".format('new_stb1' , self.stop_dnode_id))
self.check_insert_rows(db_name ,'new_stb1' ,tb_nums=10 , row_nums= 10 ,append_rows=0)
# create new stables again
tdLog.info(" ==== create new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id))
self.create_stable_insert_datas(dbname = db_name , stablename = 'new_stb2' , tb_nums= 10 ,row_nums= 10 )
tdLog.info(" ==== check new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id))
self.check_insert_rows(db_name ,'new_stb2' ,tb_nums=10 , row_nums= 10 ,append_rows=0)
self.current_thread.join()
def run(self):
# basic insert and check of cluster
self.check_setup_cluster_status()
self.create_db_check_vgroups()
# self.sync_run_case()
self.unsync_run_case()
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())

View File

@ -0,0 +1,521 @@
# author : wenzhouwww
from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
import taos
import sys
import time
import os
from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import TDDnodes
from util.dnodes import TDDnode
from util.cluster import *
import datetime
import inspect
import socket
import subprocess
import threading
sys.path.append(os.path.dirname(__file__))
class TDTestCase:
def init(self,conn ,logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
self.host = socket.gethostname()
self.mnode_list = {}
self.dnode_list = {}
self.ts = 1483200000000
self.ts_step =1000
self.db_name ='testdb'
self.replica = 3
self.vgroups = 1
self.tb_nums = 10
self.row_nums = 100
self.stop_dnode_id = None
self.loop_restart_times = 5
self.current_thread = None
self.max_restart_time = 10
self.try_check_times = 10
def getBuildPath(self):
selfPath = os.path.dirname(os.path.realpath(__file__))
if ("community" in selfPath):
projPath = selfPath[:selfPath.find("community")]
else:
projPath = selfPath[:selfPath.find("tests")]
for root, dirs, files in os.walk(projPath):
if ("taosd" in files):
rootRealPath = os.path.dirname(os.path.realpath(root))
if ("packaging" not in rootRealPath):
buildPath = root[:len(root) - len("/build/bin")]
break
return buildPath
def check_setup_cluster_status(self):
tdSql.query("show mnodes")
for mnode in tdSql.queryResult:
name = mnode[1]
info = mnode
self.mnode_list[name] = info
tdSql.query("show dnodes")
for dnode in tdSql.queryResult:
name = dnode[1]
info = dnode
self.dnode_list[name] = info
count = 0
is_leader = False
mnode_name = ''
for k,v in self.mnode_list.items():
count +=1
# only for 1 mnode
mnode_name = k
if v[2] =='leader':
is_leader=True
if count==1 and is_leader:
tdLog.info("===== depoly cluster success with 1 mnode as leader =====")
else:
tdLog.exit("===== depoly cluster fail with 1 mnode as leader =====")
for k ,v in self.dnode_list.items():
if k == mnode_name:
if v[3]==0:
tdLog.info("===== depoly cluster mnode only success at {} , support_vnodes is {} ".format(mnode_name,v[3]))
else:
tdLog.exit("===== depoly cluster mnode only fail at {} , support_vnodes is {} ".format(mnode_name,v[3]))
else:
continue
def create_db_check_vgroups(self):
tdSql.execute("drop database if exists test")
tdSql.execute("create database if not exists test replica 1 duration 300")
tdSql.execute("use test")
tdSql.execute(
'''create table stb1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
tags (t1 int)
'''
)
tdSql.execute(
'''
create table t1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
'''
)
for i in range(5):
tdSql.execute("create table sub_tb_{} using stb1 tags({})".format(i,i))
tdSql.query("show stables")
tdSql.checkRows(1)
tdSql.query("show tables")
tdSql.checkRows(6)
tdSql.query("show test.vgroups;")
vgroups_infos = {} # key is id: value is info list
for vgroup_info in tdSql.queryResult:
vgroup_id = vgroup_info[0]
tmp_list = []
for role in vgroup_info[3:-4]:
if role in ['leader','follower']:
tmp_list.append(role)
vgroups_infos[vgroup_id]=tmp_list
for k , v in vgroups_infos.items():
if len(v) ==1 and v[0]=="leader":
tdLog.info(" === create database replica only 1 role leader check success of vgroup_id {} ======".format(k))
else:
tdLog.exit(" === create database replica only 1 role leader check fail of vgroup_id {} ======".format(k))
def create_database(self, dbname, replica_num ,vgroup_nums ):
drop_db_sql = "drop database if exists {}".format(dbname)
create_db_sql = "create database {} replica {} vgroups {}".format(dbname,replica_num,vgroup_nums)
tdLog.info(" ==== create database {} and insert rows begin =====".format(dbname))
tdSql.execute(drop_db_sql)
tdSql.execute(create_db_sql)
tdSql.execute("use {}".format(dbname))
def create_stable_insert_datas(self,dbname ,stablename , tb_nums , row_nums):
tdSql.execute("use {}".format(dbname))
tdSql.execute(
'''create table {}
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp)
tags (t1 int)
'''.format(stablename)
)
for i in range(tb_nums):
sub_tbname = "sub_{}_{}".format(stablename,i)
tdSql.execute("create table {} using {} tags({})".format(sub_tbname, stablename ,i))
# insert data into the new database
for row_num in range(row_nums):
ts = self.ts + self.ts_step*row_num
tdSql.execute(f"insert into {sub_tbname} values ({ts}, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
tdLog.info(" ==== stable {} insert rows execute end =====".format(stablename))
def append_rows_of_exists_tables(self,dbname ,stablename , tbname , append_nums ):
tdSql.execute("use {}".format(dbname))
for row_num in range(append_nums):
tdSql.execute(f"insert into {tbname} values (now, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
# print(f"insert into {tbname} values (now, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
tdLog.info(" ==== append new rows of table {} belongs to stable {} execute end =====".format(tbname,stablename))
os.system("taos -s 'select count(*) from {}.{}';".format(dbname,stablename))
def check_insert_rows(self, dbname, stablename , tb_nums , row_nums, append_rows):
tdSql.execute("use {}".format(dbname))
tdSql.query("select count(*) from {}.{}".format(dbname,stablename))
while not tdSql.queryResult:
time.sleep(0.1)
tdSql.query("select count(*) from {}.{}".format(dbname,stablename))
status_OK = self.mycheckData("select count(*) from {}.{}".format(dbname,stablename) ,0 , 0 , tb_nums*row_nums+append_rows)
count = 0
while not status_OK :
if count > self.try_check_times:
os.system("taos -s ' show {}.vgroups; '".format(dbname))
tdLog.exit(" ==== check insert rows failed after {} try check {} times of database {}".format(count , self.try_check_times ,dbname))
break
time.sleep(0.1)
tdSql.query("select count(*) from {}.{}".format(dbname,stablename))
while not tdSql.queryResult:
time.sleep(0.1)
tdSql.query("select count(*) from {}.{}".format(dbname,stablename))
status_OK = self.mycheckData("select count(*) from {}.{}".format(dbname,stablename) ,0 , 0 , tb_nums*row_nums+append_rows)
tdLog.info(" ==== check insert rows first failed , this is {}_th retry check rows of database {}".format(count , dbname))
count += 1
tdSql.query("select distinct tbname from {}.{}".format(dbname,stablename))
while not tdSql.queryResult:
time.sleep(0.1)
tdSql.query("select distinct tbname from {}.{}".format(dbname,stablename))
status_OK = self.mycheckRows("select distinct tbname from {}.{}".format(dbname,stablename) ,tb_nums)
count = 0
while not status_OK :
if count > self.try_check_times:
os.system("taos -s ' show {}.vgroups;'".format(dbname))
tdLog.exit(" ==== check insert rows failed after {} try check {} times of database {}".format(count , self.try_check_times ,dbname))
break
time.sleep(0.1)
tdSql.query("select distinct tbname from {}.{}".format(dbname,stablename))
while not tdSql.queryResult:
time.sleep(0.1)
tdSql.query("select distinct tbname from {}.{}".format(dbname,stablename))
status_OK = self.mycheckRows("select distinct tbname from {}.{}".format(dbname,stablename) ,tb_nums)
tdLog.info(" ==== check insert tbnames first failed , this is {}_th retry check tbnames of database {}".format(count , dbname))
count += 1
def _get_stop_dnode_id(self,dbname):
tdSql.query("show {}.vgroups".format(dbname))
vgroup_infos = tdSql.queryResult
for vgroup_info in vgroup_infos:
leader_infos = vgroup_info[3:-4]
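# assumed layout of a `show <db>.vgroups` row: each dnode id column sits
# immediately before its role column, so leader_infos[ind-1] is the dnode id
# of the follower that this case will stop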
# print(vgroup_info)
for ind ,role in enumerate(leader_infos):
if role =='follower':
# print(ind,leader_infos)
self.stop_dnode_id = leader_infos[ind-1]
break
return self.stop_dnode_id
def wait_stop_dnode_OK(self):
def _get_status():
newTdSql=tdCom.newTdSql()
status = ""
newTdSql.query("show dnodes")
dnode_infos = newTdSql.queryResult
for dnode_info in dnode_infos:
id = dnode_info[0]
dnode_status = dnode_info[4]
if id == self.stop_dnode_id:
status = dnode_status
break
return status
status = _get_status()
while status !="offline":
time.sleep(0.1)
status = _get_status()
# tdLog.info("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode))
tdLog.info("==== stop_dnode has stopped , id is {}".format(self.stop_dnode_id))
def wait_start_dnode_OK(self):
def _get_status():
newTdSql=tdCom.newTdSql()
status = ""
newTdSql.query("show dnodes")
dnode_infos = newTdSql.queryResult
for dnode_info in dnode_infos:
id = dnode_info[0]
dnode_status = dnode_info[4]
if id == self.stop_dnode_id:
status = dnode_status
break
return status
status = _get_status()
while status !="ready":
time.sleep(0.1)
status = _get_status()
# tdLog.info("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode))
tdLog.info("==== stop_dnode has restart , id is {}".format(self.stop_dnode_id))
def _parse_datetime(self,timestr):
try:
return datetime.datetime.strptime(timestr, '%Y-%m-%d %H:%M:%S.%f')
except ValueError:
pass
try:
return datetime.datetime.strptime(timestr, '%Y-%m-%d %H:%M:%S')
except ValueError:
pass
def mycheckRowCol(self, sql, row, col):
caller = inspect.getframeinfo(inspect.stack()[2][0])
if row < 0:
args = (caller.filename, caller.lineno, sql, row)
tdLog.exit("%s(%d) failed: sql:%s, row:%d is smaller than zero" % args)
if col < 0:
args = (caller.filename, caller.lineno, sql, row)
tdLog.exit("%s(%d) failed: sql:%s, col:%d is smaller than zero" % args)
if row > tdSql.queryRows:
args = (caller.filename, caller.lineno, sql, row, tdSql.queryRows)
tdLog.exit("%s(%d) failed: sql:%s, row:%d is larger than queryRows:%d" % args)
if col > tdSql.queryCols:
args = (caller.filename, caller.lineno, sql, col, tdSql.queryCols)
tdLog.exit("%s(%d) failed: sql:%s, col:%d is larger than queryCols:%d" % args)
def mycheckData(self, sql ,row, col, data):
check_status = True
self.mycheckRowCol(sql ,row, col)
if tdSql.queryResult[row][col] != data:
if tdSql.cursor.istype(col, "TIMESTAMP"):
# suppose user want to check nanosecond timestamp if a longer data passed
if (len(data) >= 28):
if pd.to_datetime(tdSql.queryResult[row][col]) == pd.to_datetime(data):
tdLog.info("sql:%s, row:%d col:%d data:%d == expect:%s" %
(sql, row, col, tdSql.queryResult[row][col], data))
else:
if tdSql.queryResult[row][col] == self._parse_datetime(data):
tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
(sql, row, col, tdSql.queryResult[row][col], data))
return
if str(tdSql.queryResult[row][col]) == str(data):
tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
(sql, row, col, tdSql.queryResult[row][col], data))
return
elif isinstance(data, float) and abs(tdSql.queryResult[row][col] - data) <= 0.000001:
tdLog.info("sql:%s, row:%d col:%d data:%f == expect:%f" %
(sql, row, col, tdSql.queryResult[row][col], data))
return
else:
caller = inspect.getframeinfo(inspect.stack()[1][0])
args = (caller.filename, caller.lineno, sql, row, col, tdSql.queryResult[row][col], data)
tdLog.info("%s(%d) failed: sql:%s row:%d col:%d data:%s != expect:%s" % args)
check_status = False
if data is None:
tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
(sql, row, col, tdSql.queryResult[row][col], data))
elif isinstance(data, str):
tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
(sql, row, col, tdSql.queryResult[row][col], data))
elif isinstance(data, datetime.date):
tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
(sql, row, col, tdSql.queryResult[row][col], data))
elif isinstance(data, float):
tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
(sql, row, col, tdSql.queryResult[row][col], data))
else:
tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%d" %
(sql, row, col, tdSql.queryResult[row][col], data))
return check_status
def mycheckRows(self, sql, expectRows):
check_status = True
if len(tdSql.queryResult) == expectRows:
tdLog.info("sql:%s, queryRows:%d == expect:%d" % (sql, len(tdSql.queryResult), expectRows))
return True
else:
caller = inspect.getframeinfo(inspect.stack()[1][0])
args = (caller.filename, caller.lineno, sql, len(tdSql.queryResult), expectRows)
tdLog.info("%s(%d) failed: sql:%s, queryRows:%d != expect:%d" % args)
check_status = False
return check_status
def sync_run_case(self):
# stop the follower, insert data, update tables and create new stables
tdDnodes=cluster.dnodes
for loop in range(self.loop_restart_times):
db_name = "sync_db_{}".format(loop)
stablename = 'stable_{}'.format(loop)
self.create_database(dbname = db_name ,replica_num= self.replica , vgroup_nums= 1)
self.create_stable_insert_datas(dbname = db_name , stablename = stablename , tb_nums= 10 ,row_nums= 10 )
self.stop_dnode_id = self._get_stop_dnode_id(db_name)
# check inserted row counts
self.check_insert_rows(db_name ,stablename ,tb_nums=10 , row_nums= 10 ,append_rows=0)
# begin stop dnode
start = time.time()
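# unlike the graceful stoptaosd used in the sibling cases, forcestop is
# presumably an ungraceful kill, so this loop also exercises crash recovery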
tdDnodes[self.stop_dnode_id-1].forcestop()
self.wait_stop_dnode_OK()
# append rows to the stable's sub-table while the dnode is stopped
tbname = "sub_{}_{}".format(stablename , 0)
tdLog.info(" ==== begin append rows of exists table {} when dnode {} offline ====".format(tbname , self.stop_dnode_id))
self.append_rows_of_exists_tables(db_name ,stablename , tbname , 100 )
tdLog.info(" ==== check append rows of exists table {} when dnode {} offline ====".format(tbname , self.stop_dnode_id))
self.check_insert_rows(db_name ,stablename ,tb_nums=10 , row_nums= 10 ,append_rows=100)
# create new stables
tdLog.info(" ==== create new stable {} when dnode {} offline ====".format('new_stb1' , self.stop_dnode_id))
self.create_stable_insert_datas(dbname = db_name , stablename = 'new_stb1' , tb_nums= 10 ,row_nums= 10 )
tdLog.info(" ==== check new stable {} when dnode {} offline ====".format('new_stb1' , self.stop_dnode_id))
self.check_insert_rows(db_name ,'new_stb1' ,tb_nums=10 , row_nums= 10 ,append_rows=0)
# begin start dnode
tdDnodes[self.stop_dnode_id-1].starttaosd()
self.wait_start_dnode_OK()
end = time.time()
time_cost = int(end -start)
if time_cost > self.max_restart_time:
tdLog.exit(" ==== restart dnode {} cost too much time , please check ====".format(self.stop_dnode_id))
# create new stables again
tdLog.info(" ==== create new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id))
self.create_stable_insert_datas(dbname = db_name , stablename = 'new_stb2' , tb_nums= 10 ,row_nums= 10 )
tdLog.info(" ==== check new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id))
self.check_insert_rows(db_name ,'new_stb2' ,tb_nums=10 , row_nums= 10 ,append_rows=0)
def unsync_run_case(self):
def _restart_dnode_of_db_unsync(dbname):
start = time.time()
tdDnodes=cluster.dnodes
self.stop_dnode_id = self._get_stop_dnode_id(dbname)
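# _get_stop_dnode_id may return None before the vgroup metadata is
# visible, so poll until a dnode id shows up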
while not self.stop_dnode_id:
time.sleep(0.5)
self.stop_dnode_id = self._get_stop_dnode_id(dbname)
# begin restart dnode
# force stop taosd by kill -9
self.force_stop_dnode(self.stop_dnode_id)
self.wait_stop_dnode_OK()
tdDnodes[self.stop_dnode_id-1].starttaosd()
self.wait_start_dnode_OK()
end = time.time()
time_cost = int(end-start)
if time_cost > self.max_restart_time:
tdLog.exit(" ==== restart dnode {} cost too much time , please check ====".format(self.stop_dnode_id))
def _create_threading(dbname):
self.current_thread = threading.Thread(target=_restart_dnode_of_db_unsync, args=(dbname,))
return self.current_thread
'''
in this mode, an extra thread stops and restarts the dnode while inserts keep running, with no regard for whether the follower is online or alive
'''
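# note: the restart thread below deliberately races with the inserts issued
# from the main thread; the row checks afterwards verify that writes survive
# the restart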
tdDnodes=cluster.dnodes
for loop in range(self.loop_restart_times):
db_name = "unsync_db_{}".format(loop)
stablename = 'stable_{}'.format(loop)
self.create_database(dbname = db_name ,replica_num= self.replica , vgroup_nums= 1)
self.create_stable_insert_datas(dbname = db_name , stablename = stablename , tb_nums= 10 ,row_nums= 10 )
tdLog.info(" ===== restart dnode of database {} in an unsync threading ===== ".format(db_name))
# create sync threading and start it
self.current_thread = _create_threading(db_name)
self.current_thread.start()
# check inserted row counts
self.check_insert_rows(db_name ,stablename ,tb_nums=10 , row_nums= 10 ,append_rows=0)
tbname = "sub_{}_{}".format(stablename , 0)
tdLog.info(" ==== begin append rows of exists table {} when dnode {} offline ====".format(tbname , self.stop_dnode_id))
self.append_rows_of_exists_tables(db_name ,stablename , tbname , 100 )
tdLog.info(" ==== check append rows of exists table {} when dnode {} offline ====".format(tbname , self.stop_dnode_id))
self.check_insert_rows(db_name ,stablename ,tb_nums=10 , row_nums= 10 ,append_rows=100)
# create new stables
tdLog.info(" ==== create new stable {} when dnode {} offline ====".format('new_stb1' , self.stop_dnode_id))
self.create_stable_insert_datas(dbname = db_name , stablename = 'new_stb1' , tb_nums= 10 ,row_nums= 10 )
tdLog.info(" ==== check new stable {} when dnode {} offline ====".format('new_stb1' , self.stop_dnode_id))
self.check_insert_rows(db_name ,'new_stb1' ,tb_nums=10 , row_nums= 10 ,append_rows=0)
# create new stables again
tdLog.info(" ==== create new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id))
self.create_stable_insert_datas(dbname = db_name , stablename = 'new_stb2' , tb_nums= 10 ,row_nums= 10 )
tdLog.info(" ==== check new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id))
self.check_insert_rows(db_name ,'new_stb2' ,tb_nums=10 , row_nums= 10 ,append_rows=0)
self.current_thread.join()
def force_stop_dnode(self, dnode_id ):
tdSql.query("show dnodes")
port = None
for dnode_info in tdSql.queryResult:
if dnode_id == dnode_info[0]:
port = dnode_info[1].split(":")[-1]
break
else:
continue
if port:
tdLog.info(" ==== dnode {} will be force stop by kill -9 ====".format(dnode_id))
psCmd = '''netstat -anp|grep -w LISTEN|grep -w %s |grep -o "LISTEN.*"|awk '{print $2}'|cut -d/ -f1|head -n1''' %(port)
processID = subprocess.check_output(
psCmd, shell=True).decode("utf-8")
ps_kill_taosd = ''' kill -9 {} '''.format(processID)
# print(ps_kill_taosd)
os.system(ps_kill_taosd)
def run(self):
# basic insert and check of cluster
self.check_setup_cluster_status()
self.create_db_check_vgroups()
# self.sync_run_case()
self.unsync_run_case()
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())

View File

@ -0,0 +1,558 @@
# author : wenzhouwww
from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
import taos
import sys
import time
import os
from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import TDDnodes
from util.dnodes import TDDnode
from util.cluster import *
import datetime
import inspect
import socket
import subprocess
import threading
sys.path.append(os.path.dirname(__file__))
class TDTestCase:
def init(self,conn ,logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
self.host = socket.gethostname()
self.mnode_list = {}
self.dnode_list = {}
self.ts = 1483200000000
self.ts_step =1000
self.db_name ='testdb'
self.replica = 3
self.vgroups = 1
self.tb_nums = 10
self.row_nums = 100
self.stop_dnode_id = None
self.loop_restart_times = 10
self.current_thread = None
self.max_restart_time = 5
self.try_check_times = 10
def getBuildPath(self):
selfPath = os.path.dirname(os.path.realpath(__file__))
if ("community" in selfPath):
projPath = selfPath[:selfPath.find("community")]
else:
projPath = selfPath[:selfPath.find("tests")]
for root, dirs, files in os.walk(projPath):
if ("taosd" in files):
rootRealPath = os.path.dirname(os.path.realpath(root))
if ("packaging" not in rootRealPath):
buildPath = root[:len(root) - len("/build/bin")]
break
return buildPath
def _parse_datetime(self,timestr):
try:
return datetime.datetime.strptime(timestr, '%Y-%m-%d %H:%M:%S.%f')
except ValueError:
pass
try:
return datetime.datetime.strptime(timestr, '%Y-%m-%d %H:%M:%S')
except ValueError:
pass
def mycheckRowCol(self, sql, row, col):
caller = inspect.getframeinfo(inspect.stack()[2][0])
if row < 0:
args = (caller.filename, caller.lineno, sql, row)
tdLog.exit("%s(%d) failed: sql:%s, row:%d is smaller than zero" % args)
if col < 0:
args = (caller.filename, caller.lineno, sql, row)
tdLog.exit("%s(%d) failed: sql:%s, col:%d is smaller than zero" % args)
if row > tdSql.queryRows:
args = (caller.filename, caller.lineno, sql, row, tdSql.queryRows)
tdLog.exit("%s(%d) failed: sql:%s, row:%d is larger than queryRows:%d" % args)
if col > tdSql.queryCols:
args = (caller.filename, caller.lineno, sql, col, tdSql.queryCols)
tdLog.exit("%s(%d) failed: sql:%s, col:%d is larger than queryCols:%d" % args)
def mycheckData(self, sql ,row, col, data):
check_status = True
self.mycheckRowCol(sql ,row, col)
if tdSql.queryResult[row][col] != data:
if tdSql.cursor.istype(col, "TIMESTAMP"):
# suppose user want to check nanosecond timestamp if a longer data passed
if (len(data) >= 28):
if pd.to_datetime(tdSql.queryResult[row][col]) == pd.to_datetime(data):
tdLog.info("sql:%s, row:%d col:%d data:%d == expect:%s" %
(sql, row, col, tdSql.queryResult[row][col], data))
else:
if tdSql.queryResult[row][col] == self._parse_datetime(data):
tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
(sql, row, col, tdSql.queryResult[row][col], data))
return
if str(tdSql.queryResult[row][col]) == str(data):
tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
(sql, row, col, tdSql.queryResult[row][col], data))
return
elif isinstance(data, float) and abs(tdSql.queryResult[row][col] - data) <= 0.000001:
tdLog.info("sql:%s, row:%d col:%d data:%f == expect:%f" %
(sql, row, col, tdSql.queryResult[row][col], data))
return
else:
caller = inspect.getframeinfo(inspect.stack()[1][0])
args = (caller.filename, caller.lineno, sql, row, col, tdSql.queryResult[row][col], data)
tdLog.info("%s(%d) failed: sql:%s row:%d col:%d data:%s != expect:%s" % args)
check_status = False
if data is None:
tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
(sql, row, col, tdSql.queryResult[row][col], data))
elif isinstance(data, str):
tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
(sql, row, col, tdSql.queryResult[row][col], data))
elif isinstance(data, datetime.date):
tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
(sql, row, col, tdSql.queryResult[row][col], data))
elif isinstance(data, float):
tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
(sql, row, col, tdSql.queryResult[row][col], data))
else:
tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%d" %
(sql, row, col, tdSql.queryResult[row][col], data))
return check_status
def mycheckRows(self, sql, expectRows):
check_status = True
if len(tdSql.queryResult) == expectRows:
tdLog.info("sql:%s, queryRows:%d == expect:%d" % (sql, len(tdSql.queryResult), expectRows))
return True
else:
caller = inspect.getframeinfo(inspect.stack()[1][0])
args = (caller.filename, caller.lineno, sql, len(tdSql.queryResult), expectRows)
tdLog.info("%s(%d) failed: sql:%s, queryRows:%d != expect:%d" % args)
check_status = False
return check_status
def check_setup_cluster_status(self):
tdSql.query("show mnodes")
for mnode in tdSql.queryResult:
name = mnode[1]
info = mnode
self.mnode_list[name] = info
tdSql.query("show dnodes")
for dnode in tdSql.queryResult:
name = dnode[1]
info = dnode
self.dnode_list[name] = info
count = 0
is_leader = False
mnode_name = ''
for k,v in self.mnode_list.items():
count +=1
# only for 1 mnode
mnode_name = k
if v[2] =='leader':
is_leader=True
if count==1 and is_leader:
tdLog.info("===== depoly cluster success with 1 mnode as leader =====")
else:
tdLog.exit("===== depoly cluster fail with 1 mnode as leader =====")
for k ,v in self.dnode_list.items():
if k == mnode_name:
if v[3]==0:
tdLog.info("===== depoly cluster mnode only success at {} , support_vnodes is {} ".format(mnode_name,v[3]))
else:
tdLog.exit("===== depoly cluster mnode only fail at {} , support_vnodes is {} ".format(mnode_name,v[3]))
else:
continue
def create_db_check_vgroups(self):
tdSql.execute("drop database if exists test")
tdSql.execute("create database if not exists test replica 1 duration 300")
tdSql.execute("use test")
tdSql.execute(
'''create table stb1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
tags (t1 int)
'''
)
tdSql.execute(
'''
create table t1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
'''
)
for i in range(5):
tdSql.execute("create table sub_tb_{} using stb1 tags({})".format(i,i))
tdSql.query("show stables")
tdSql.checkRows(1)
tdSql.query("show tables")
tdSql.checkRows(6)
tdSql.query("show test.vgroups;")
vgroups_infos = {} # key is id: value is info list
for vgroup_info in tdSql.queryResult:
vgroup_id = vgroup_info[0]
tmp_list = []
for role in vgroup_info[3:-4]:
if role in ['leader','follower']:
tmp_list.append(role)
vgroups_infos[vgroup_id]=tmp_list
for k , v in vgroups_infos.items():
if len(v) ==1 and v[0]=="leader":
tdLog.info(" === create database replica only 1 role leader check success of vgroup_id {} ======".format(k))
else:
tdLog.exit(" === create database replica only 1 role leader check fail of vgroup_id {} ======".format(k))
def create_database(self, dbname, replica_num ,vgroup_nums ):
drop_db_sql = "drop database if exists {}".format(dbname)
create_db_sql = "create database {} replica {} vgroups {}".format(dbname,replica_num,vgroup_nums)
tdLog.info(" ==== create database {} and insert rows begin =====".format(dbname))
tdSql.execute(drop_db_sql)
tdSql.execute(create_db_sql)
tdSql.execute("use {}".format(dbname))
def create_stable_insert_datas(self,dbname ,stablename , tb_nums , row_nums):
tdSql.execute("use {}".format(dbname))
tdSql.execute(
'''create table {}
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp)
tags (t1 int)
'''.format(stablename)
)
for i in range(tb_nums):
sub_tbname = "sub_{}_{}".format(stablename,i)
tdSql.execute("create table {} using {} tags({})".format(sub_tbname, stablename ,i))
# insert data into the new database
for row_num in range(row_nums):
ts = self.ts + self.ts_step*row_num
tdSql.execute(f"insert into {sub_tbname} values ({ts}, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
tdLog.info(" ==== stable {} insert rows execute end =====".format(stablename))
def append_rows_of_exists_tables(self,dbname ,stablename , tbname , append_nums ):
tdSql.execute("use {}".format(dbname))
for row_num in range(append_nums):
tdSql.execute(f"insert into {tbname} values (now, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
# print(f"insert into {tbname} values (now, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
tdLog.info(" ==== append new rows of table {} belongs to stable {} execute end =====".format(tbname,stablename))
os.system("taos -s 'select count(*) from {}.{}';".format(dbname,stablename))
def check_insert_rows(self, dbname, stablename , tb_nums , row_nums, append_rows):
tdSql.execute("use {}".format(dbname))
tdSql.query("select count(*) from {}.{}".format(dbname,stablename))
while not tdSql.queryResult:
time.sleep(0.1)
tdSql.query("select count(*) from {}.{}".format(dbname,stablename))
status_OK = self.mycheckData("select count(*) from {}.{}".format(dbname,stablename) ,0 , 0 , tb_nums*row_nums+append_rows)
count = 0
while not status_OK :
if count > self.try_check_times:
os.system("taos -s ' show {}.vgroups; '".format(dbname))
tdLog.exit(" ==== check insert rows failed after {} try check {} times of database {}".format(count , self.try_check_times ,dbname))
break
time.sleep(0.1)
tdSql.query("select count(*) from {}.{}".format(dbname,stablename))
while not tdSql.queryResult:
time.sleep(0.1)
tdSql.query("select count(*) from {}.{}".format(dbname,stablename))
status_OK = self.mycheckData("select count(*) from {}.{}".format(dbname,stablename) ,0 , 0 , tb_nums*row_nums+append_rows)
tdLog.info(" ==== check insert rows first failed , this is {}_th retry check rows of database {}".format(count , dbname))
count += 1
tdSql.query("select distinct tbname from {}.{}".format(dbname,stablename))
while not tdSql.queryResult:
time.sleep(0.1)
tdSql.query("select distinct tbname from {}.{}".format(dbname,stablename))
status_OK = self.mycheckRows("select distinct tbname from {}.{}".format(dbname,stablename) ,tb_nums)
count = 0
while not status_OK :
if count > self.try_check_times:
os.system("taos -s ' show {}.vgroups;'".format(dbname))
tdLog.exit(" ==== check insert rows failed after {} try check {} times of database {}".format(count , self.try_check_times ,dbname))
break
time.sleep(0.1)
tdSql.query("select distinct tbname from {}.{}".format(dbname,stablename))
while not tdSql.queryResult:
time.sleep(0.1)
tdSql.query("select distinct tbname from {}.{}".format(dbname,stablename))
status_OK = self.mycheckRows("select distinct tbname from {}.{}".format(dbname,stablename) ,tb_nums)
tdLog.info(" ==== check insert tbnames first failed , this is {}_th retry check tbnames of database {}".format(count , dbname))
count += 1
def _get_stop_dnode_id(self,dbname):
newTdSql=tdCom.newTdSql()
newTdSql.query("show {}.vgroups".format(dbname))
vgroup_infos = newTdSql.queryResult
for vgroup_info in vgroup_infos:
leader_infos = vgroup_info[3:-4]
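# assumed layout of a `show <db>.vgroups` row: each dnode id column sits
# immediately before its role column, so leader_infos[ind-1] is the dnode id
# of the current leader that this case will stop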
# print(vgroup_info)
for ind ,role in enumerate(leader_infos):
if role =='leader':
# print(ind,leader_infos)
self.stop_dnode_id = leader_infos[ind-1]
break
return self.stop_dnode_id
def wait_stop_dnode_OK(self):
def _get_status():
newTdSql=tdCom.newTdSql()
status = ""
newTdSql.query("show dnodes")
dnode_infos = newTdSql.queryResult
for dnode_info in dnode_infos:
id = dnode_info[0]
dnode_status = dnode_info[4]
if id == self.stop_dnode_id:
status = dnode_status
break
return status
status = _get_status()
while status !="offline":
time.sleep(0.1)
status = _get_status()
# tdLog.info("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode))
tdLog.info("==== stop_dnode has stopped , id is {}".format(self.stop_dnode_id))
def wait_start_dnode_OK(self):
def _get_status():
newTdSql=tdCom.newTdSql()
status = ""
newTdSql.query("show dnodes")
dnode_infos = newTdSql.queryResult
for dnode_info in dnode_infos:
id = dnode_info[0]
dnode_status = dnode_info[4]
if id == self.stop_dnode_id:
status = dnode_status
break
return status
status = _get_status()
while status !="ready":
time.sleep(0.1)
status = _get_status()
# tdLog.info("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode))
tdLog.info("==== stop_dnode has restart , id is {}".format(self.stop_dnode_id))
def get_leader_infos(self ,dbname):
newTdSql=tdCom.newTdSql()
newTdSql.query("show {}.vgroups".format(dbname))
vgroup_infos = newTdSql.queryResult
leader_infos = set()
for vgroup_info in vgroup_infos:
leader_infos.add(vgroup_info[3:-4])
return leader_infos
def check_revote_leader_success(self, dbname, before_leader_infos , after_leader_infos):
check_status = False
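# any vgroup tuple present after the stop but absent before it means the
# leader/follower roles of that vgroup changed, i.e. a re-vote happened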
vote_act = set(set(after_leader_infos)-set(before_leader_infos))
if not vote_act:
print("=======before_revote_leader_infos ======\n" , before_leader_infos)
print("=======after_revote_leader_infos ======\n" , after_leader_infos)
tdLog.exit(" ===maybe revote not occured , there is no dnode offline ====")
else:
for vgroup_info in vote_act:
for ind , role in enumerate(vgroup_info):
if role==self.stop_dnode_id:
if vgroup_info[ind+1] =="offline" and "leader" in vgroup_info:
tdLog.info(" === revote leader ok , leader is {} now ====".format(vgroup_info[list(vgroup_info).index("leader")-1]))
check_status = True
elif vgroup_info[ind+1] !="offline":
tdLog.info(" === dnode {} should be offline ".format(self.stop_dnode_id))
else:
continue
break
return check_status
def sync_run_case(self):
# stop the follower, insert data, update tables and create new stables
tdDnodes=cluster.dnodes
for loop in range(self.loop_restart_times):
db_name = "sync_db_{}".format(loop)
stablename = 'stable_{}'.format(loop)
self.create_database(dbname = db_name ,replica_num= self.replica , vgroup_nums= 1)
self.create_stable_insert_datas(dbname = db_name , stablename = stablename , tb_nums= 10 ,row_nums= 10 )
self.stop_dnode_id = self._get_stop_dnode_id(db_name)
# check inserted row counts
self.check_insert_rows(db_name ,stablename ,tb_nums=10 , row_nums= 10 ,append_rows=0)
# get leader info before stop
before_leader_infos = self.get_leader_infos(db_name)
# begin stop dnode
tdDnodes[self.stop_dnode_id-1].stoptaosd()
self.wait_stop_dnode_OK()
# vote leaders check
# get leader info after stop
after_leader_infos = self.get_leader_infos(db_name)
revote_status = self.check_revote_leader_success(db_name ,before_leader_infos , after_leader_infos)
# before appending rows while the dnode is stopped, make sure leaders have been re-voted
while not revote_status:
after_leader_infos = self.get_leader_infos(db_name)
revote_status = self.check_revote_leader_success(db_name ,before_leader_infos , after_leader_infos)
if revote_status:
tbname = "sub_{}_{}".format(stablename , 0)
tdLog.info(" ==== begin append rows of exists table {} when dnode {} offline ====".format(tbname , self.stop_dnode_id))
self.append_rows_of_exists_tables(db_name ,stablename , tbname , 100 )
tdLog.info(" ==== check append rows of exists table {} when dnode {} offline ====".format(tbname , self.stop_dnode_id))
self.check_insert_rows(db_name ,stablename ,tb_nums=10 , row_nums= 10 ,append_rows=100)
# create new stables
tdLog.info(" ==== create new stable {} when dnode {} offline ====".format('new_stb1' , self.stop_dnode_id))
self.create_stable_insert_datas(dbname = db_name , stablename = 'new_stb1' , tb_nums= 10 ,row_nums= 10 )
tdLog.info(" ==== check new stable {} when dnode {} offline ====".format('new_stb1' , self.stop_dnode_id))
self.check_insert_rows(db_name ,'new_stb1' ,tb_nums=10 , row_nums= 10 ,append_rows=0)
else:
tdLog.info("===== leader of database {} is not ok , append rows fail =====".format(db_name))
# begin start dnode
start = time.time()
tdDnodes[self.stop_dnode_id-1].starttaosd()
self.wait_start_dnode_OK()
end = time.time()
time_cost = int(end -start)
if time_cost > self.max_restart_time:
tdLog.exit(" ==== restart dnode {} cost too much time , please check ====".format(self.stop_dnode_id))
# create new stables again
tdLog.info(" ==== create new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id))
self.create_stable_insert_datas(dbname = db_name , stablename = 'new_stb2' , tb_nums= 10 ,row_nums= 10 )
tdLog.info(" ==== check new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id))
self.check_insert_rows(db_name ,'new_stb2' ,tb_nums=10 , row_nums= 10 ,append_rows=0)
def unsync_run_case(self):
def _restart_dnode_of_db_unsync(dbname):
tdDnodes=cluster.dnodes
self.stop_dnode_id = self._get_stop_dnode_id(dbname)
# begin restart dnode
tdDnodes[self.stop_dnode_id-1].stoptaosd()
tbname = "sub_{}_{}".format(stablename , 0)
tdLog.info(" ==== begin append rows of exists table {} when dnode {} offline ====".format(tbname , self.stop_dnode_id))
self.append_rows_of_exists_tables(db_name ,stablename , tbname , 100 )
tdLog.info(" ==== check append rows of exists table {} when dnode {} offline ====".format(tbname , self.stop_dnode_id))
self.check_insert_rows(db_name ,stablename ,tb_nums=10 , row_nums= 10 ,append_rows=100)
# create new stables
tdLog.info(" ==== create new stable {} when dnode {} offline ====".format('new_stb1' , self.stop_dnode_id))
self.create_stable_insert_datas(dbname = db_name , stablename = 'new_stb1' , tb_nums= 10 ,row_nums= 10 )
tdLog.info(" ==== check new stable {} when dnode {} offline ====".format('new_stb1' , self.stop_dnode_id))
self.check_insert_rows(db_name ,'new_stb1' ,tb_nums=10 , row_nums= 10 ,append_rows=0)
# create new stables again
tdLog.info(" ==== create new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id))
self.create_stable_insert_datas(dbname = db_name , stablename = 'new_stb2' , tb_nums= 10 ,row_nums= 10 )
tdLog.info(" ==== check new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id))
self.check_insert_rows(db_name ,'new_stb2' ,tb_nums=10 , row_nums= 10 ,append_rows=0)
# # get leader info before stop
# before_leader_infos = self.get_leader_infos(db_name)
# self.wait_stop_dnode_OK()
# check revote leader when restart servers
# # get leader info after stop
# after_leader_infos = self.get_leader_infos(db_name)
# revote_status = self.check_revote_leader_success(db_name ,before_leader_infos , after_leader_infos)
# # append rows of stablename when dnode stop make sure revote leaders
# while not revote_status:
# after_leader_infos = self.get_leader_infos(db_name)
# revote_status = self.check_revote_leader_success(db_name ,before_leader_infos , after_leader_infos)
tdDnodes[self.stop_dnode_id-1].starttaosd()
start = time.time()
self.wait_start_dnode_OK()
end = time.time()
time_cost = int(end-start)
if time_cost > self.max_restart_time:
tdLog.exit(" ==== restart dnode {} cost too much time , please check ====".format(self.stop_dnode_id))
def _create_threading(dbname):
self.current_thread = threading.Thread(target=_restart_dnode_of_db_unsync, args=(dbname,))
return self.current_thread
'''
in this mode, an extra thread stops and restarts the dnode while inserts keep running, with no regard for whether the follower is online or alive
'''
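# note: the restart thread below deliberately races with the inserts issued
# from the main thread; the row checks afterwards verify that writes survive
# the restart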
for loop in range(self.loop_restart_times):
db_name = "unsync_db_{}".format(loop)
stablename = 'stable_{}'.format(loop)
self.create_database(dbname = db_name ,replica_num= self.replica , vgroup_nums= 1)
self.create_stable_insert_datas(dbname = db_name , stablename = stablename , tb_nums= 10 ,row_nums= 10 )
tdLog.info(" ===== restart dnode of database {} in an unsync threading ===== ".format(db_name))
# create sync threading and start it
self.current_thread = _create_threading(db_name)
self.current_thread.start()
# check inserted row counts
self.check_insert_rows(db_name ,stablename ,tb_nums=10 , row_nums= 10 ,append_rows=0)
self.current_thread.join()
def run(self):
# basic insert and check of cluster
self.check_setup_cluster_status()
self.create_db_check_vgroups()
self.sync_run_case()
# self.unsync_run_case()
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())

View File

@ -0,0 +1,580 @@
# author : wenzhouwww
from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
import taos
import sys
import time
import os
from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import TDDnodes
from util.dnodes import TDDnode
from util.cluster import *
import datetime
import inspect
import socket
import subprocess
import threading
sys.path.append(os.path.dirname(__file__))
class TDTestCase:
def init(self,conn ,logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
self.host = socket.gethostname()
self.mnode_list = {}
self.dnode_list = {}
self.ts = 1483200000000
self.ts_step =1000
self.db_name ='testdb'
self.replica = 3
self.vgroups = 1
self.tb_nums = 10
self.row_nums = 100
self.stop_dnode_id = None
self.loop_restart_times = 10
self.current_thread = None
self.max_restart_time = 5
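# retry budget referenced by check_insert_rows below; the original init never set this attribute, so a default is assumed here
self.try_check_times = 35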
def getBuildPath(self):
selfPath = os.path.dirname(os.path.realpath(__file__))
if ("community" in selfPath):
projPath = selfPath[:selfPath.find("community")]
else:
projPath = selfPath[:selfPath.find("tests")]
for root, dirs, files in os.walk(projPath):
if ("taosd" in files):
rootRealPath = os.path.dirname(os.path.realpath(root))
if ("packaging" not in rootRealPath):
buildPath = root[:len(root) - len("/build/bin")]
break
return buildPath
def _parse_datetime(self,timestr):
try:
return datetime.datetime.strptime(timestr, '%Y-%m-%d %H:%M:%S.%f')
except ValueError:
pass
try:
return datetime.datetime.strptime(timestr, '%Y-%m-%d %H:%M:%S')
except ValueError:
pass
def mycheckRowCol(self, sql, row, col):
caller = inspect.getframeinfo(inspect.stack()[2][0])
if row < 0:
args = (caller.filename, caller.lineno, sql, row)
tdLog.exit("%s(%d) failed: sql:%s, row:%d is smaller than zero" % args)
if col < 0:
args = (caller.filename, caller.lineno, sql, col)
tdLog.exit("%s(%d) failed: sql:%s, col:%d is smaller than zero" % args)
if row > tdSql.queryRows:
args = (caller.filename, caller.lineno, sql, row, tdSql.queryRows)
tdLog.exit("%s(%d) failed: sql:%s, row:%d is larger than queryRows:%d" % args)
if col > tdSql.queryCols:
args = (caller.filename, caller.lineno, sql, col, tdSql.queryCols)
tdLog.exit("%s(%d) failed: sql:%s, col:%d is larger than queryCols:%d" % args)
def mycheckData(self, sql ,row, col, data):
check_status = True
self.mycheckRowCol(sql ,row, col)
if tdSql.queryResult[row][col] != data:
if tdSql.cursor.istype(col, "TIMESTAMP"):
# suppose user want to check nanosecond timestamp if a longer data passed
if (len(data) >= 28):
if pd.to_datetime(tdSql.queryResult[row][col]) == pd.to_datetime(data):
tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
(sql, row, col, tdSql.queryResult[row][col], data))
return
else:
if tdSql.queryResult[row][col] == self._parse_datetime(data):
tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
(sql, row, col, tdSql.queryResult[row][col], data))
return
if str(tdSql.queryResult[row][col]) == str(data):
tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
(sql, row, col, tdSql.queryResult[row][col], data))
return
elif isinstance(data, float) and abs(tdSql.queryResult[row][col] - data) <= 0.000001:
tdLog.info("sql:%s, row:%d col:%d data:%f == expect:%f" %
(sql, row, col, tdSql.queryResult[row][col], data))
return
else:
caller = inspect.getframeinfo(inspect.stack()[1][0])
args = (caller.filename, caller.lineno, sql, row, col, tdSql.queryResult[row][col], data)
tdLog.info("%s(%d) failed: sql:%s row:%d col:%d data:%s != expect:%s" % args)
check_status = False
if data is None:
tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
(sql, row, col, tdSql.queryResult[row][col], data))
elif isinstance(data, str):
tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
(sql, row, col, tdSql.queryResult[row][col], data))
elif isinstance(data, datetime.date):
tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
(sql, row, col, tdSql.queryResult[row][col], data))
elif isinstance(data, float):
tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
(sql, row, col, tdSql.queryResult[row][col], data))
else:
tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%d" %
(sql, row, col, tdSql.queryResult[row][col], data))
return check_status
def mycheckRows(self, sql, expectRows):
check_status = True
if len(tdSql.queryResult) == expectRows:
tdLog.info("sql:%s, queryRows:%d == expect:%d" % (sql, len(tdSql.queryResult), expectRows))
return True
else:
caller = inspect.getframeinfo(inspect.stack()[1][0])
args = (caller.filename, caller.lineno, sql, len(tdSql.queryResult), expectRows)
tdLog.info("%s(%d) failed: sql:%s, queryRows:%d != expect:%d" % args)
check_status = False
return check_status
def check_setup_cluster_status(self):
tdSql.query("show mnodes")
for mnode in tdSql.queryResult:
name = mnode[1]
info = mnode
self.mnode_list[name] = info
tdSql.query("show dnodes")
for dnode in tdSql.queryResult:
name = dnode[1]
info = dnode
self.dnode_list[name] = info
count = 0
is_leader = False
mnode_name = ''
for k,v in self.mnode_list.items():
count +=1
# only for 1 mnode
mnode_name = k
if v[2] =='leader':
is_leader=True
if count==1 and is_leader:
tdLog.info("===== depoly cluster success with 1 mnode as leader =====")
else:
tdLog.exit("===== depoly cluster fail with 1 mnode as leader =====")
for k ,v in self.dnode_list.items():
if k == mnode_name:
if v[3]==0:
tdLog.info("===== depoly cluster mnode only success at {} , support_vnodes is {} ".format(mnode_name,v[3]))
else:
tdLog.exit("===== depoly cluster mnode only fail at {} , support_vnodes is {} ".format(mnode_name,v[3]))
else:
continue
def create_db_check_vgroups(self):
tdSql.execute("drop database if exists test")
tdSql.execute("create database if not exists test replica 1 duration 300")
tdSql.execute("use test")
tdSql.execute(
'''create table stb1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
tags (t1 int)
'''
)
tdSql.execute(
'''
create table t1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
'''
)
for i in range(5):
tdSql.execute("create table sub_tb_{} using stb1 tags({})".format(i,i))
tdSql.query("show stables")
tdSql.checkRows(1)
tdSql.query("show tables")
tdSql.checkRows(6)
tdSql.query("show test.vgroups;")
vgroups_infos = {} # key is id: value is info list
for vgroup_info in tdSql.queryResult:
vgroup_id = vgroup_info[0]
tmp_list = []
for role in vgroup_info[3:-4]:
if role in ['leader','follower']:
tmp_list.append(role)
vgroups_infos[vgroup_id]=tmp_list
for k , v in vgroups_infos.items():
if len(v) ==1 and v[0]=="leader":
tdLog.info(" === create database replica only 1 role leader check success of vgroup_id {} ======".format(k))
else:
tdLog.exit(" === create database replica only 1 role leader check fail of vgroup_id {} ======".format(k))
def create_database(self, dbname, replica_num ,vgroup_nums ):
drop_db_sql = "drop database if exists {}".format(dbname)
create_db_sql = "create database {} replica {} vgroups {}".format(dbname,replica_num,vgroup_nums)
tdLog.info(" ==== create database {} and insert rows begin =====".format(dbname))
tdSql.execute(drop_db_sql)
tdSql.execute(create_db_sql)
tdSql.execute("use {}".format(dbname))
def create_stable_insert_datas(self,dbname ,stablename , tb_nums , row_nums):
tdSql.execute("use {}".format(dbname))
tdSql.execute(
'''create table {}
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp)
tags (t1 int)
'''.format(stablename)
)
for i in range(tb_nums):
sub_tbname = "sub_{}_{}".format(stablename,i)
tdSql.execute("create table {} using {} tags({})".format(sub_tbname, stablename ,i))
# insert datas about new database
for row_num in range(row_nums):
ts = self.ts + self.ts_step*row_num
tdSql.execute(f"insert into {sub_tbname} values ({ts}, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
tdLog.info(" ==== stable {} insert rows execute end =====".format(stablename))
def append_rows_of_exists_tables(self,dbname ,stablename , tbname , append_nums ):
tdSql.execute("use {}".format(dbname))
for row_num in range(append_nums):
tdSql.execute(f"insert into {tbname} values (now, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
# print(f"insert into {tbname} values (now, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
tdLog.info(" ==== append new rows of table {} belongs to stable {} execute end =====".format(tbname,stablename))
os.system("taos -s 'select count(*) from {}.{}';".format(dbname,stablename))
def check_insert_rows(self, dbname, stablename , tb_nums , row_nums, append_rows):
tdSql.execute("use {}".format(dbname))
tdSql.query("select count(*) from {}.{}".format(dbname,stablename))
while not tdSql.queryResult:
time.sleep(0.1)
tdSql.query("select count(*) from {}.{}".format(dbname,stablename))
status_OK = self.mycheckData("select count(*) from {}.{}".format(dbname,stablename) ,0 , 0 , tb_nums*row_nums+append_rows)
count = 0
while not status_OK :
if count > self.try_check_times:
os.system("taos -s ' show {}.vgroups; '".format(dbname))
tdLog.exit(" ==== check insert rows failed after {} try check {} times of database {}".format(count , self.try_check_times ,dbname))
break
time.sleep(0.1)
tdSql.query("select count(*) from {}.{}".format(dbname,stablename))
while not tdSql.queryResult:
time.sleep(0.1)
tdSql.query("select count(*) from {}.{}".format(dbname,stablename))
status_OK = self.mycheckData("select count(*) from {}.{}".format(dbname,stablename) ,0 , 0 , tb_nums*row_nums+append_rows)
tdLog.info(" ==== check insert rows first failed , this is {}_th retry check rows of database {}".format(count , dbname))
count += 1
tdSql.query("select distinct tbname from {}.{}".format(dbname,stablename))
while not tdSql.queryResult:
time.sleep(0.1)
tdSql.query("select distinct tbname from {}.{}".format(dbname,stablename))
status_OK = self.mycheckRows("select distinct tbname from {}.{}".format(dbname,stablename) ,tb_nums)
count = 0
while not status_OK :
if count > self.try_check_times:
os.system("taos -s ' show {}.vgroups;'".format(dbname))
tdLog.exit(" ==== check insert rows failed after {} try check {} times of database {}".format(count , self.try_check_times ,dbname))
break
time.sleep(0.1)
tdSql.query("select distinct tbname from {}.{}".format(dbname,stablename))
while not tdSql.queryResult:
time.sleep(0.1)
tdSql.query("select distinct tbname from {}.{}".format(dbname,stablename))
status_OK = self.mycheckRows("select distinct tbname from {}.{}".format(dbname,stablename) ,tb_nums)
tdLog.info(" ==== check insert tbnames first failed , this is {}_th retry check tbnames of database {}".format(count , dbname))
count += 1
def _get_stop_dnode_id(self,dbname):
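# rows of 'show <db>.vgroups' interleave (dnode_id, role) pairs inside the slice [3:-4]; the stop target is the dnode id sitting just before a 'leader' role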
newTdSql=tdCom.newTdSql()
newTdSql.query("show {}.vgroups".format(dbname))
vgroup_infos = newTdSql.queryResult
for vgroup_info in vgroup_infos:
leader_infos = vgroup_info[3:-4]
# print(vgroup_info)
for ind ,role in enumerate(leader_infos):
if role =='leader':
# print(ind,leader_infos)
self.stop_dnode_id = leader_infos[ind-1]
break
return self.stop_dnode_id
def wait_stop_dnode_OK(self):
def _get_status():
newTdSql=tdCom.newTdSql()
status = ""
newTdSql.query("show dnodes")
dnode_infos = newTdSql.queryResult
for dnode_info in dnode_infos:
id = dnode_info[0]
dnode_status = dnode_info[4]
if id == self.stop_dnode_id:
status = dnode_status
break
return status
status = _get_status()
while status !="offline":
time.sleep(0.1)
status = _get_status()
# tdLog.info("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode))
tdLog.info("==== stop_dnode has stopped , id is {}".format(self.stop_dnode_id))
def wait_start_dnode_OK(self):
def _get_status():
newTdSql=tdCom.newTdSql()
status = ""
newTdSql.query("show dnodes")
dnode_infos = newTdSql.queryResult
for dnode_info in dnode_infos:
id = dnode_info[0]
dnode_status = dnode_info[4]
if id == self.stop_dnode_id:
status = dnode_status
break
return status
status = _get_status()
while status !="ready":
time.sleep(0.1)
status = _get_status()
# tdLog.info("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode))
tdLog.info("==== stop_dnode has restart , id is {}".format(self.stop_dnode_id))
def get_leader_infos(self ,dbname):
newTdSql=tdCom.newTdSql()
newTdSql.query("show {}.vgroups".format(dbname))
vgroup_infos = newTdSql.queryResult
leader_infos = set()
for vgroup_info in vgroup_infos:
leader_infos.add(vgroup_info[3:-4])
return leader_infos
def check_revote_leader_success(self, dbname, before_leader_infos , after_leader_infos):
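# a vgroup whose (dnode_id, role, ...) tuple differs between the before/after snapshots has re-voted; confirm the stopped dnode shows offline and the vgroup still has a leader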
check_status = False
vote_act = set(set(after_leader_infos)-set(before_leader_infos))
if not vote_act:
print("=======before_revote_leader_infos ======\n" , before_leader_infos)
print("=======after_revote_leader_infos ======\n" , after_leader_infos)
tdLog.exit(" ===maybe revote not occured , there is no dnode offline ====")
else:
for vgroup_info in vote_act:
for ind , role in enumerate(vgroup_info):
if role==self.stop_dnode_id:
if vgroup_info[ind+1] =="offline" and "leader" in vgroup_info:
tdLog.info(" === revote leader ok , leader is {} now ====".format(vgroup_info[list(vgroup_info).index("leader")-1]))
check_status = True
elif vgroup_info[ind+1] !="offline":
tdLog.info(" === dnode {} should be offline ".format(self.stop_dnode_id))
else:
continue
break
return check_status
def force_stop_dnode(self, dnode_id ):
tdSql.query("show dnodes")
port = None
for dnode_info in tdSql.queryResult:
if dnode_id == dnode_info[0]:
port = dnode_info[1].split(":")[-1]
break
else:
continue
if port:
tdLog.info(" ==== dnode {} will be force stop by kill -9 ====".format(dnode_id))
psCmd = '''netstat -anp|grep -w LISTEN|grep -w %s |grep -o "LISTEN.*"|awk '{print $2}'|cut -d/ -f1|head -n1''' %(port)
processID = subprocess.check_output(
psCmd, shell=True).decode("utf-8")
ps_kill_taosd = ''' kill -9 {} '''.format(processID)
# print(ps_kill_taosd)
os.system(ps_kill_taosd)
def sync_run_case(self):
# stop the dnode that hosts a leader , then insert data , append rows and create new stables
tdDnodes=cluster.dnodes
for loop in range(self.loop_restart_times):
db_name = "sync_db_{}".format(loop)
stablename = 'stable_{}'.format(loop)
self.create_database(dbname = db_name ,replica_num= self.replica , vgroup_nums= 1)
self.create_stable_insert_datas(dbname = db_name , stablename = stablename , tb_nums= 10 ,row_nums= 10 )
self.stop_dnode_id = self._get_stop_dnode_id(db_name)
# check rows of datas
self.check_insert_rows(db_name ,stablename ,tb_nums=10 , row_nums= 10 ,append_rows=0)
# get leader info before stop
before_leader_infos = self.get_leader_infos(db_name)
# begin stop dnode
# force stop taosd by kill -9
self.force_stop_dnode(self.stop_dnode_id)
self.wait_stop_dnode_OK()
# vote leaders check
# get leader info after stop
after_leader_infos = self.get_leader_infos(db_name)
revote_status = self.check_revote_leader_success(db_name ,before_leader_infos , after_leader_infos)
# append rows of stablename when dnode stop make sure revote leaders
while not revote_status:
after_leader_infos = self.get_leader_infos(db_name)
revote_status = self.check_revote_leader_success(db_name ,before_leader_infos , after_leader_infos)
if revote_status:
tbname = "sub_{}_{}".format(stablename , 0)
tdLog.info(" ==== begin append rows of exists table {} when dnode {} offline ====".format(tbname , self.stop_dnode_id))
self.append_rows_of_exists_tables(db_name ,stablename , tbname , 100 )
tdLog.info(" ==== check append rows of exists table {} when dnode {} offline ====".format(tbname , self.stop_dnode_id))
self.check_insert_rows(db_name ,stablename ,tb_nums=10 , row_nums= 10 ,append_rows=100)
# create new stables
tdLog.info(" ==== create new stable {} when dnode {} offline ====".format('new_stb1' , self.stop_dnode_id))
self.create_stable_insert_datas(dbname = db_name , stablename = 'new_stb1' , tb_nums= 10 ,row_nums= 10 )
tdLog.info(" ==== check new stable {} when dnode {} offline ====".format('new_stb1' , self.stop_dnode_id))
self.check_insert_rows(db_name ,'new_stb1' ,tb_nums=10 , row_nums= 10 ,append_rows=0)
else:
tdLog.info("===== leader of database {} is not ok , append rows fail =====".format(db_name))
# begin start dnode
start = time.time()
tdDnodes[self.stop_dnode_id-1].starttaosd()
self.wait_start_dnode_OK()
end = time.time()
time_cost = int(end -start)
if time_cost > self.max_restart_time:
tdLog.exit(" ==== restart dnode {} cost too much time , please check ====".format(self.stop_dnode_id))
# create new stables again
tdLog.info(" ==== create new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id))
self.create_stable_insert_datas(dbname = db_name , stablename = 'new_stb2' , tb_nums= 10 ,row_nums= 10 )
tdLog.info(" ==== check new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id))
self.check_insert_rows(db_name ,'new_stb2' ,tb_nums=10 , row_nums= 10 ,append_rows=0)
def unsync_run_case(self):
def _restart_dnode_of_db_unsync(dbname):
tdDnodes=cluster.dnodes
self.stop_dnode_id = self._get_stop_dnode_id(dbname)
# begin restart dnode
# force stop taosd by kill -9
# get leader info before stop
before_leader_infos = self.get_leader_infos(db_name)
self.force_stop_dnode(self.stop_dnode_id)
self.wait_stop_dnode_OK()
# check revote leader when restart servers
# get leader info after stop
after_leader_infos = self.get_leader_infos(db_name)
revote_status = self.check_revote_leader_success(db_name ,before_leader_infos , after_leader_infos)
# append rows of stablename when dnode stop make sure revote leaders
while not revote_status:
after_leader_infos = self.get_leader_infos(db_name)
revote_status = self.check_revote_leader_success(db_name ,before_leader_infos , after_leader_infos)
tbname = "sub_{}_{}".format(stablename , 0)
tdLog.info(" ==== begin append rows of exists table {} when dnode {} offline ====".format(tbname , self.stop_dnode_id))
self.append_rows_of_exists_tables(db_name ,stablename , tbname , 100 )
tdLog.info(" ==== check append rows of exists table {} when dnode {} offline ====".format(tbname , self.stop_dnode_id))
self.check_insert_rows(db_name ,stablename ,tb_nums=10 , row_nums= 10 ,append_rows=100)
# create new stables
tdLog.info(" ==== create new stable {} when dnode {} offline ====".format('new_stb1' , self.stop_dnode_id))
self.create_stable_insert_datas(dbname = db_name , stablename = 'new_stb1' , tb_nums= 10 ,row_nums= 10 )
tdLog.info(" ==== check new stable {} when dnode {} offline ====".format('new_stb1' , self.stop_dnode_id))
self.check_insert_rows(db_name ,'new_stb1' ,tb_nums=10 , row_nums= 10 ,append_rows=0)
# create new stables again
tdLog.info(" ==== create new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id))
self.create_stable_insert_datas(dbname = db_name , stablename = 'new_stb2' , tb_nums= 10 ,row_nums= 10 )
tdLog.info(" ==== check new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id))
self.check_insert_rows(db_name ,'new_stb2' ,tb_nums=10 , row_nums= 10 ,append_rows=0)
tdDnodes[self.stop_dnode_id-1].starttaosd()
start = time.time()
self.wait_start_dnode_OK()
end = time.time()
time_cost = int(end-start)
if time_cost > self.max_restart_time:
tdLog.exit(" ==== restart dnode {} cost too much time , please check ====".format(self.stop_dnode_id))
def _create_threading(dbname):
self.current_thread = threading.Thread(target=_restart_dnode_of_db_unsync, args=(dbname,))
return self.current_thread
'''
in this mode, a separate thread stops and restarts the dnode while inserts keep running, regardless of whether the follower is online or alive
'''
for loop in range(self.loop_restart_times):
db_name = "unsync_db_{}".format(loop)
stablename = 'stable_{}'.format(loop)
self.create_database(dbname = db_name ,replica_num= self.replica , vgroup_nums= 1)
self.create_stable_insert_datas(dbname = db_name , stablename = stablename , tb_nums= 10 ,row_nums= 10 )
tdLog.info(" ===== restart dnode of database {} in an unsync threading ===== ".format(db_name))
# create sync threading and start it
self.current_thread = _create_threading(db_name)
self.current_thread.start()
# check rows of datas
self.check_insert_rows(db_name ,stablename ,tb_nums=10 , row_nums= 10 ,append_rows=0)
self.current_thread.join()
def run(self):
# basic insert and check of cluster
self.check_setup_cluster_status()
self.create_db_check_vgroups()
# self.sync_run_case()
self.unsync_run_case()
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())

View File

@ -0,0 +1,206 @@
# author : wenzhouwww
from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
import taos
import sys
import time
import os
from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import TDDnodes
from util.dnodes import TDDnode
from util.cluster import *
import time
import socket
import subprocess
class TDTestCase:
def init(self,conn ,logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
self.host = socket.gethostname()
self.mnode_list = {}
self.dnode_list = {}
self.ts = 1483200000000
self.db_name ='testdb'
self.replica = 1
self.vgroups = 2
self.tb_nums = 10
self.row_nums = 100
self.max_vote_time_cost = 10 # seconds
def getBuildPath(self):
selfPath = os.path.dirname(os.path.realpath(__file__))
if ("community" in selfPath):
projPath = selfPath[:selfPath.find("community")]
else:
projPath = selfPath[:selfPath.find("tests")]
for root, dirs, files in os.walk(projPath):
if ("taosd" in files):
rootRealPath = os.path.dirname(os.path.realpath(root))
if ("packaging" not in rootRealPath):
buildPath = root[:len(root) - len("/build/bin")]
break
return buildPath
def check_setup_cluster_status(self):
tdSql.query("show mnodes")
for mnode in tdSql.queryResult:
name = mnode[1]
info = mnode
self.mnode_list[name] = info
tdSql.query("show dnodes")
for dnode in tdSql.queryResult:
name = dnode[1]
info = dnode
self.dnode_list[name] = info
count = 0
is_leader = False
mnode_name = ''
for k,v in self.mnode_list.items():
count +=1
# only for 1 mnode
mnode_name = k
if v[2] =='leader':
is_leader=True
if count==1 and is_leader:
tdLog.info("===== depoly cluster success with 1 mnode as leader =====")
else:
tdLog.exit("===== depoly cluster fail with 1 mnode as leader =====")
for k ,v in self.dnode_list.items():
if k == mnode_name:
if v[3]==0:
tdLog.info("===== depoly cluster mnode only success at {} , support_vnodes is {} ".format(mnode_name,v[3]))
else:
tdLog.exit("===== depoly cluster mnode only fail at {} , support_vnodes is {} ".format(mnode_name,v[3]))
else:
continue
def create_db_check_vgroups(self):
tdSql.execute("drop database if exists test")
tdSql.execute("create database if not exists test replica 1 duration 300")
tdSql.execute("use test")
tdSql.execute(
'''create table stb1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
tags (t1 int)
'''
)
tdSql.execute(
'''
create table t1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
'''
)
for i in range(5):
tdSql.execute("create table sub_tb_{} using stb1 tags({})".format(i,i))
tdSql.query("show stables")
tdSql.checkRows(1)
tdSql.query("show tables")
tdSql.checkRows(6)
tdSql.query("show test.vgroups;")
vgroups_infos = {} # key is id: value is info list
for vgroup_info in tdSql.queryResult:
vgroup_id = vgroup_info[0]
tmp_list = []
for role in vgroup_info[3:-4]:
if role in ['leader','follower']:
tmp_list.append(role)
vgroups_infos[vgroup_id]=tmp_list
for k , v in vgroups_infos.items():
if len(v) ==1 and v[0]=="leader":
tdLog.info(" === create database replica only 1 role leader check success of vgroup_id {} ======".format(k))
else:
tdLog.exit(" === create database replica only 1 role leader check fail of vgroup_id {} ======".format(k))
def check_vgroups_init_done(self,dbname):
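# a replica-3 vgroup is initialized once it shows exactly 1 leader and 2 followers; roles sit at odd offsets of vgroup_info[3:-4], dnode ids at even offsets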
status = True
tdSql.query("show {}.vgroups".format(dbname))
for vgroup_info in tdSql.queryResult:
vgroup_id = vgroup_info[0]
vgroup_status = []
for ind , role in enumerate(vgroup_info[3:-4]):
if ind%2==0:
continue
else:
vgroup_status.append(role)
if vgroup_status.count("leader")!=1 or vgroup_status.count("follower")!=2:
status = False
return status
return status
def vote_leader_time_costs(self,dbname):
start = time.time()
status = self.check_vgroups_init_done(dbname)
while not status:
time.sleep(0.1)
status = self.check_vgroups_init_done(dbname)
# tdLog.info("=== database {} show vgroups vote the leader is in progress ===".format(dbname))
end = time.time()
cost_time = end - start
tdLog.info(" ==== database %s vote the leaders success , cost time is %.3f second ====="%(dbname,cost_time) )
# os.system("taos -s 'show {}.vgroups;'".format(dbname))
if cost_time >= self.max_vote_time_cost:
tdLog.exit(" ==== database %s vote the leaders cost too large time , cost time is %.3f second ===="%(dbname,cost_time) )
return cost_time
def test_init_vgroups_time_costs(self):
tdLog.info(" ====start check time cost about vgroups vote leaders ==== ")
tdLog.info(" ==== current max time cost is set value : {} =======".format(self.max_vote_time_cost))
# create database replica 3 vgroups 1
db1 = 'db_1'
create_db_replica_3_vgroups_1 = "create database {} replica 3 vgroups 1".format(db1)
tdLog.info('=======database {} replica 3 vgroups 1 ======'.format(db1))
tdSql.execute(create_db_replica_3_vgroups_1)
self.vote_leader_time_costs(db1)
# create database replica 3 vgroups 10
db2 = 'db_2'
create_db_replica_3_vgroups_10 = "create database {} replica 3 vgroups 10".format(db2)
tdLog.info('=======database {} replica 3 vgroups 10 ======'.format(db2))
tdSql.execute(create_db_replica_3_vgroups_10)
self.vote_leader_time_costs(db2)
# create database replica 3 vgroups 100
db3 = 'db_3'
create_db_replica_3_vgroups_100 = "create database {} replica 3 vgroups 100".format(db3)
tdLog.info('=======database {} replica 3 vgroups 100 ======'.format(db3))
tdSql.execute(create_db_replica_3_vgroups_100)
self.vote_leader_time_costs(db3)
def run(self):
self.check_setup_cluster_status()
self.test_init_vgroups_time_costs()
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())

View File

@ -0,0 +1,364 @@
# author : wenzhouwww
from errno import ESOCKTNOSUPPORT
from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
import taos
import sys
import time
import os
from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import TDDnodes
from util.dnodes import TDDnode
from util.cluster import *
import time
import random
import socket
import subprocess
class TDTestCase:
def init(self,conn ,logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
self.host = socket.gethostname()
self.mnode_list = {}
self.dnode_list = {}
self.ts = 1483200000000
self.db_name ='testdb'
self.replica = 1
self.vgroups = 2
self.tb_nums = 10
self.row_nums = 100
self.max_vote_time_cost = 10 # seconds
self.stop_dnode = None
def getBuildPath(self):
selfPath = os.path.dirname(os.path.realpath(__file__))
if ("community" in selfPath):
projPath = selfPath[:selfPath.find("community")]
else:
projPath = selfPath[:selfPath.find("tests")]
for root, dirs, files in os.walk(projPath):
if ("taosd" in files):
rootRealPath = os.path.dirname(os.path.realpath(root))
if ("packaging" not in rootRealPath):
buildPath = root[:len(root) - len("/build/bin")]
break
return buildPath
def check_setup_cluster_status(self):
tdSql.query("show mnodes")
for mnode in tdSql.queryResult:
name = mnode[1]
info = mnode
self.mnode_list[name] = info
tdSql.query("show dnodes")
for dnode in tdSql.queryResult:
name = dnode[1]
info = dnode
self.dnode_list[name] = info
count = 0
is_leader = False
mnode_name = ''
for k,v in self.mnode_list.items():
count +=1
# only for 1 mnode
mnode_name = k
if v[2] =='leader':
is_leader=True
if count==1 and is_leader:
tdLog.info("===== depoly cluster success with 1 mnode as leader =====")
else:
tdLog.exit("===== depoly cluster fail with 1 mnode as leader =====")
for k ,v in self.dnode_list.items():
if k == mnode_name:
if v[3]==0:
tdLog.info("===== depoly cluster mnode only success at {} , support_vnodes is {} ".format(mnode_name,v[3]))
else:
tdLog.exit("===== depoly cluster mnode only fail at {} , support_vnodes is {} ".format(mnode_name,v[3]))
else:
continue
def create_db_check_vgroups(self):
tdSql.execute("drop database if exists test")
tdSql.execute("create database if not exists test replica 1 duration 300")
tdSql.execute("use test")
tdSql.execute(
'''create table stb1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
tags (t1 int)
'''
)
tdSql.execute(
'''
create table t1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
'''
)
for i in range(5):
tdSql.execute("create table sub_tb_{} using stb1 tags({})".format(i,i))
tdSql.query("show stables")
tdSql.checkRows(1)
tdSql.query("show tables")
tdSql.checkRows(6)
tdSql.query("show test.vgroups;")
vgroups_infos = {} # key is id: value is info list
for vgroup_info in tdSql.queryResult:
vgroup_id = vgroup_info[0]
tmp_list = []
for role in vgroup_info[3:-4]:
if role in ['leader','follower']:
tmp_list.append(role)
vgroups_infos[vgroup_id]=tmp_list
for k , v in vgroups_infos.items():
if len(v) ==1 and v[0]=="leader":
tdLog.info(" === create database replica only 1 role leader check success of vgroup_id {} ======".format(k))
else:
tdLog.exit(" === create database replica only 1 role leader check fail of vgroup_id {} ======".format(k))
def _get_stop_dnode(self):
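# pick a random dnode that hosts no mnode, so stopping it only affects vnodes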
only_dnode_list = self.dnode_list.keys() - self.mnode_list.keys()
self.stop_dnode = random.sample(only_dnode_list , 1 )[0]
return self.stop_dnode
def check_vgroups_revote_leader(self,dbname):
status = True
stop_dnode_id = self.dnode_list[self.stop_dnode][0]
tdSql.query("show {}.vgroups".format(dbname))
for vgroup_info in tdSql.queryResult:
vgroup_id = vgroup_info[0]
vgroup_status = []
vgroups_leader_follower = vgroup_info[3:-4]
for ind , role in enumerate(vgroups_leader_follower):
if ind%2==0:
if role == stop_dnode_id and vgroups_leader_follower[ind+1]=="offline":
tdLog.info("====== dnode {} has offline , endpoint is {}".format(stop_dnode_id , self.stop_dnode))
elif role == stop_dnode_id :
tdLog.exit("====== dnode {} has not offline , endpoint is {}".format(stop_dnode_id , self.stop_dnode))
else:
continue
else:
vgroup_status.append(role)
if vgroup_status.count("leader")!=1 or vgroup_status.count("follower")!=1 or vgroup_status.count("offline")!=1:
status = False
return status
return status
def wait_stop_dnode_OK(self):
def _get_status():
status = ""
tdSql.query("show dnodes")
dnode_infos = tdSql.queryResult
for dnode_info in dnode_infos:
endpoint = dnode_info[1]
dnode_status = dnode_info[4]
if endpoint == self.stop_dnode:
status = dnode_status
break
return status
status = _get_status()
while status !="offline":
time.sleep(0.1)
status = _get_status()
# tdLog.info("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode))
tdLog.info("==== stop_dnode has stopped , endpoint is {}".format(self.stop_dnode))
def wait_start_dnode_OK(self):
def _get_status():
status = ""
tdSql.query("show dnodes")
dnode_infos = tdSql.queryResult
for dnode_info in dnode_infos:
endpoint = dnode_info[1]
dnode_status = dnode_info[4]
if endpoint == self.stop_dnode:
status = dnode_status
break
return status
status = _get_status()
while status !="ready":
time.sleep(0.1)
status = _get_status()
# tdLog.info("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode))
tdLog.info("==== stop_dnode has restart , endpoint is {}".format(self.stop_dnode))
def random_stop_One_dnode(self):
self.stop_dnode = self._get_stop_dnode()
stop_dnode_id = self.dnode_list[self.stop_dnode][0]
tdLog.info(" ==== dnode {} will offline ,endpoints is {} ====".format(stop_dnode_id , self.stop_dnode))
tdDnodes=cluster.dnodes
tdDnodes[stop_dnode_id-1].stoptaosd()
self.wait_stop_dnode_OK()
# os.system("taos -s 'show dnodes;'")
def Restart_stop_dnode(self):
tdDnodes=cluster.dnodes
stop_dnode_id = self.dnode_list[self.stop_dnode][0]
tdDnodes[stop_dnode_id-1].starttaosd()
self.wait_start_dnode_OK()
# os.system("taos -s 'show dnodes;'")
def check_vgroups_init_done(self,dbname):
status = True
tdSql.query("show {}.vgroups".format(dbname))
for vgroup_info in tdSql.queryResult:
vgroup_id = vgroup_info[0]
vgroup_status = []
for ind , role in enumerate(vgroup_info[3:-4]):
if ind%2==0:
continue
else:
vgroup_status.append(role)
if vgroup_status.count("leader")!=1 or vgroup_status.count("follower")!=2:
status = False
return status
return status
def vote_leader_time_costs(self,dbname):
start = time.time()
status = self.check_vgroups_init_done(dbname)
while not status:
time.sleep(0.1)
status = self.check_vgroups_init_done(dbname)
# tdLog.info("=== database {} show vgroups vote the leader is in progress ===".format(dbname))
end = time.time()
cost_time = end - start
tdLog.info(" ==== database %s vote the leaders success , cost time is %.3f second ====="%(dbname,cost_time) )
# os.system("taos -s 'show {}.vgroups;'".format(dbname))
if cost_time >= self.max_vote_time_cost:
tdLog.exit(" ==== database %s vote the leaders cost too large time , cost time is %.3f second ===="%(dbname,cost_time) )
return cost_time
def revote_leader_time_costs(self,dbname):
start = time.time()
status = self.check_vgroups_revote_leader(dbname)
while not status:
time.sleep(0.1)
status = self.check_vgroups_revote_leader(dbname)
# tdLog.info("=== database {} show vgroups vote the leader is in progress ===".format(dbname))
end = time.time()
cost_time = end - start
tdLog.info(" ==== database %s revote the leaders success , cost time is %.3f second ====="%(dbname,cost_time) )
# os.system("taos -s 'show {}.vgroups;'".format(dbname))
if cost_time >= self.max_vote_time_cost:
tdLog.exit(" ==== database %s revote the leaders cost too large time , cost time is %.3f second ===="%(dbname,cost_time) )
return cost_time
def exec_revote_action(self,dbname):
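# snapshot every vgroup's (dnode_id, role, ...) tuple, stop one dnode, then diff the snapshots to find the vgroups that re-voted a leader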
tdSql.query("show {}.vgroups".format(dbname))
before_revote = tdSql.queryResult
before_vgroups = set()
for vgroup_info in before_revote:
before_vgroups.add(vgroup_info[3:-4])
self.random_stop_One_dnode()
tdSql.query("show {}.vgroups".format(dbname))
after_revote = tdSql.queryResult
after_vgroups = set()
for vgroup_info in after_revote:
after_vgroups.add(vgroup_info[3:-4])
vote_act = set(set(after_vgroups)-set(before_vgroups))
if not vote_act:
tdLog.exit(" ===maybe revote not occured , there is no dnode offline ====")
else:
for vgroup_info in vote_act:
for ind , role in enumerate(vgroup_info):
if role==self.dnode_list[self.stop_dnode][0]:
if vgroup_info[ind+1] =="offline" and "leader" in vgroup_info:
tdLog.info(" === revote leader ok , leader is {} now ====".format(list(vgroup_info).index("leader")-1))
elif vgroup_info[ind+1] !="offline":
tdLog.exit(" === dnode {} should be offline ".format(self.stop_dnode))
else:
continue
break
self.revote_leader_time_costs(dbname)
self.Restart_stop_dnode()
def test_init_vgroups_time_costs(self):
tdLog.info(" ====start check time cost about vgroups vote leaders ==== ")
tdLog.info(" ==== current max time cost is set value : {} =======".format(self.max_vote_time_cost))
# create database replica 3 vgroups 1
db1 = 'db_1'
create_db_replica_3_vgroups_1 = "create database {} replica 3 vgroups 1".format(db1)
tdLog.info('=======database {} replica 3 vgroups 1 ======'.format(db1))
tdSql.execute(create_db_replica_3_vgroups_1)
self.vote_leader_time_costs(db1)
self.exec_revote_action(db1)
# create database replica 3 vgroups 10
db2 = 'db_2'
create_db_replica_3_vgroups_10 = "create database {} replica 3 vgroups 10".format(db2)
tdLog.info('=======database {} replica 3 vgroups 10 ======'.format(db2))
tdSql.execute(create_db_replica_3_vgroups_10)
self.vote_leader_time_costs(db2)
self.exec_revote_action(db2)
# create database replica 3 vgroups 100
db3 = 'db_3'
create_db_replica_3_vgroups_100 = "create database {} replica 3 vgroups 100".format(db3)
tdLog.info('=======database {} replica 3 vgroups 100 ======'.format(db3))
tdSql.execute(create_db_replica_3_vgroups_100)
self.vote_leader_time_costs(db3)
self.exec_revote_action(db3)
def run(self):
self.check_setup_cluster_status()
self.test_init_vgroups_time_costs()
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())

View File

@ -18,7 +18,7 @@ class TDTestCase:
def __init__(self):
self.snapshot = 0
self.vgroups = 2
-self.ctbNum = 1000
+self.ctbNum = 100
self.rowsPerTbl = 1000
def init(self, conn, logSql):
@ -38,9 +38,9 @@ class TDTestCase:
'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
'ctbPrefix': 'ctb',
'ctbStartIdx': 0,
-'ctbNum': 1000,
+'ctbNum': 100,
'rowsPerTbl': 1000,
-'batchNum': 100,
+'batchNum': 10,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 3,
'showMsg': 1,
@ -131,13 +131,13 @@ class TDTestCase:
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
# time.sleep(3)
-tmqCom.getStartCommitNotifyFromTmqsim()
+tmqCom.getStartConsumeNotifyFromTmqsim()
tdLog.info("================= restart dnode ===========================")
-tdDnodes.stop(1)
-tdDnodes.start(1)
-time.sleep(3)
-tdLog.info("insert process end, and start to check consume result")
+tdDnodes.stoptaosd(1)
+tdDnodes.starttaosd(1)
+# time.sleep(3)
+tdLog.info(" restart taosd end and wait to check consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
totalConsumeRows = 0
@ -186,7 +186,7 @@ class TDTestCase:
# tdLog.info("****************************************************************************")
-tmqCom.waitSubscriptionExit(tdSql)
+tmqCom.waitSubscriptionExit(tdSql, topicFromStb1)
tdSql.query("drop topic %s"%topicFromStb1)
tdLog.printNoPrefix("======== test case 1 end ...... ")
@ -204,11 +204,11 @@ class TDTestCase:
'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
'ctbPrefix': 'ctb',
'ctbStartIdx': 0,
-'ctbNum': 1000,
+'ctbNum': 100,
'rowsPerTbl': 1000,
-'batchNum': 3000,
+'batchNum': 100,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
-'pollDelay': 5,
+'pollDelay': 20,
'showMsg': 1,
'showRow': 1,
'snapshot': 0}
@ -250,15 +250,15 @@ class TDTestCase:
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
-tmqCom.getStartCommitNotifyFromTmqsim()
+tmqCom.getStartConsumeNotifyFromTmqsim()
tdLog.info("================= restart dnode ===========================")
-tdDnodes.stop(1)
-tdDnodes.start(1)
-time.sleep(3)
-# tdLog.info("create some new child table and insert data ")
-# paraDict["batchNum"] = 1000
-# paraDict["ctbPrefix"] = 'newCtb'
+tdDnodes.stoptaosd(1)
+tdDnodes.starttaosd(1)
+# time.sleep(3)
+tdLog.info("create some new child table and insert data ")
+paraDict["batchNum"] = 100
+paraDict["ctbPrefix"] = 'newCtb'
# tmqCom.insert_data_with_autoCreateTbl(tdSql,paraDict["dbName"],paraDict["stbName"],paraDict["ctbPrefix"],paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"])
tdLog.info("insert process end, and start to check consume result")
@ -275,6 +275,7 @@ class TDTestCase:
if totalConsumeRows != totalRowsFromQuery:
tdLog.exit("tmq consume rows error!")
+tmqCom.waitSubscriptionExit(tdSql, topicFromStb1)
tdSql.query("drop topic %s"%topicFromStb1)
tdLog.printNoPrefix("======== test case 2 end ...... ")

View File

@ -221,7 +221,7 @@ python3 ./test.py -f 7-tmq/tmqDropStb.py
python3 ./test.py -f 7-tmq/tmqDropStbCtb.py
python3 ./test.py -f 7-tmq/tmqDropNtb.py
python3 ./test.py -f 7-tmq/tmqUdf.py
-# python3 ./test.py -f 7-tmq/tmqUdf-multCtb-snapshot0.py
+python3 ./test.py -f 7-tmq/tmqUdf-multCtb-snapshot0.py
python3 ./test.py -f 7-tmq/tmqUdf-multCtb-snapshot1.py
python3 ./test.py -f 7-tmq/stbTagFilter-1ctb.py

View File

@ -352,6 +352,29 @@ void ltrim(char* str) {
// return str;
}
+// retry a query up to 10 times with a 1s pause; the result set is freed on every path, so no extra free after the loop
+int queryDB(TAOS* taos, char* command) {
+int retryCnt = 10;
+int code;
+TAOS_RES* pRes;
+while (retryCnt--) {
+pRes = taos_query(taos, command);
+code = taos_errno(pRes);
+if (code != 0) {
+taosSsleep(1);
+taos_free_result(pRes);
+continue;
+}
+taos_free_result(pRes);
+return 0;
+}
+pError("failed to execute, reason:%s, sql: %s", tstrerror(code), command);
+return -1;
+}
void addRowsToVgroupId(SThreadInfo* pInfo, int32_t vgroupId, int32_t rows) {
int32_t i;
for (i = 0; i < pInfo->numOfVgroups; i++) {
@ -374,30 +397,49 @@ void addRowsToVgroupId(SThreadInfo* pInfo, int32_t vgroupId, int32_t rows) {
}
}
+// connect with retries; assigns the outer variable instead of shadowing it inside the loop
+TAOS* createNewTaosConnect() {
+TAOS* taos = NULL;
+int32_t retryCnt = 10;
+while (retryCnt--) {
+taos = taos_connect(NULL, "root", "taosdata", NULL, 0);
+if (NULL != taos) {
+return taos;
+}
+taosSsleep(1);
+}
+taosFprintfFile(g_fp, "taos_connect() fail\n");
+return NULL;
+}
int32_t saveConsumeContentToTbl(SThreadInfo* pInfo, char* buf) {
char sqlStr[1100] = {0};
if (strlen(buf) > 1024) {
taosFprintfFile(g_fp, "The length of one row[%d] is overflow 1024\n", strlen(buf));
taosCloseFile(&g_fp);
-exit(-1);
+return -1;
}
TAOS* pConn = taos_connect(NULL, "root", "taosdata", NULL, 0);
-assert(pConn != NULL);
+if (pConn == NULL) {
+taosFprintfFile(g_fp, "taos_connect() fail, can not save consume result to main script\n");
+return -1;
+}
sprintf(sqlStr, "insert into %s.content_%d values (%" PRId64 ", \'%s\')", g_stConfInfo.cdbName, pInfo->consumerId,
pInfo->ts++, buf);
-TAOS_RES* pRes = taos_query(pConn, sqlStr);
-if (taos_errno(pRes) != 0) {
-pError("error in insert consume result, reason:%s\n", taos_errstr(pRes));
-taosFprintfFile(g_fp, "error in insert consume result, reason:%s\n", taos_errstr(pRes));
+int retCode = queryDB(pConn, sqlStr);
+if (retCode != 0) {
+taosFprintfFile(g_fp, "error in save consume content\n");
taosCloseFile(&g_fp);
-taos_free_result(pRes);
+taos_close(pConn);
exit(-1);
}
-taos_free_result(pRes);
+taos_close(pConn);
return 0;
}
@ -591,15 +633,12 @@ static int32_t meta_msg_process(TAOS_RES* msg, SThreadInfo* pInfo, int32_t msgIn
int32_t code = tmq_get_raw_meta(msg, &raw);
if(code == TSDB_CODE_SUCCESS){
-TAOS_RES* pRes = taos_query(pInfo->taos, "use metadb");
-if (taos_errno(pRes) != 0) {
-pError("error when use metadb, reason:%s\n", taos_errstr(pRes));
-taosFprintfFile(g_fp, "error when use metadb, reason:%s\n", taos_errstr(pRes));
+int retCode = queryDB(pInfo->taos, "use metadb");
+if (retCode != 0) {
+taosFprintfFile(g_fp, "error when use metadb\n");
taosCloseFile(&g_fp);
-taos_free_result(pRes);
exit(-1);
}
-taos_free_result(pRes);
taosFprintfFile(g_fp, "raw:%p\n", &raw);
taos_write_raw_meta(pInfo->taos, raw);
@ -618,19 +657,6 @@ static int32_t meta_msg_process(TAOS_RES* msg, SThreadInfo* pInfo, int32_t msgIn
return totalRows;
}
-int queryDB(TAOS* taos, char* command) {
-TAOS_RES* pRes = taos_query(taos, command);
-int code = taos_errno(pRes);
-if (code != 0) {
-pError("failed to reason:%s, sql: %s", tstrerror(code), command);
-taos_free_result(pRes);
-return -1;
-}
-taos_free_result(pRes);
-return 0;
-}
static void appNothing(void* param, TAOS_RES* res, int32_t numOfRows) {}
int32_t notifyMainScript(SThreadInfo* pInfo, int32_t cmdId) {
@ -720,15 +746,12 @@ int32_t saveConsumeResult(SThreadInfo* pInfo) {
char tmpString[128];
taosFprintfFile(g_fp, "%s, consume id %d result: %s\n", getCurrentTimeString(tmpString), pInfo->consumerId, sqlStr);
-TAOS_RES* pRes = taos_query(pInfo->taos, sqlStr);
-if (taos_errno(pRes) != 0) {
-pError("error in save consumeinfo, reason:%s\n", taos_errstr(pRes));
-taos_free_result(pRes);
-exit(-1);
+int retCode = queryDB(pInfo->taos, sqlStr);
+if (retCode != 0) {
+taosFprintfFile(g_fp, "consume id %d error in save consume result\n", pInfo->consumerId);
+return -1;
}
-taos_free_result(pRes);
return 0;
}
@ -823,18 +846,18 @@ void loop_consume(SThreadInfo* pInfo) {
void* consumeThreadFunc(void* param) {
SThreadInfo* pInfo = (SThreadInfo*)param;
-pInfo->taos = taos_connect(NULL, "root", "taosdata", NULL, 0);
+pInfo->taos = createNewTaosConnect();
if (pInfo->taos == NULL) {
taosFprintfFile(g_fp, "taos_connect() fail, can not notify and save consume result to main script\n");
-ASSERT(0);
return NULL;
}
build_consumer(pInfo);
build_topic_list(pInfo);
if ((NULL == pInfo->tmq) || (NULL == pInfo->topicList)) {
taosFprintfFile(g_fp, "create consumer fail! tmq is null or topicList is null\n");
-assert(0);
+taos_close(pInfo->taos);
+pInfo->taos = NULL;
return NULL;
}
@ -842,7 +865,8 @@ void* consumeThreadFunc(void* param) {
if (err != 0) {
pError("tmq_subscribe() fail, reason: %s\n", tmq_err2str(err));
taosFprintfFile(g_fp, "tmq_subscribe() fail! reason: %s\n", tmq_err2str(err));
-assert(0);
+taos_close(pInfo->taos);
+pInfo->taos = NULL;
return NULL;
}
@ -926,17 +950,20 @@ void parseConsumeInfo() {
int32_t getConsumeInfo() {
char sqlStr[1024] = {0};
-TAOS* pConn = taos_connect(NULL, "root", "taosdata", NULL, 0);
-assert(pConn != NULL);
+TAOS* pConn = createNewTaosConnect();
+if (pConn == NULL) {
+taosFprintfFile(g_fp, "taos_connect() fail, can not get consume info for start consumer\n");
+return -1;
+}
sprintf(sqlStr, "select * from %s.consumeinfo", g_stConfInfo.cdbName);
TAOS_RES* pRes = taos_query(pConn, sqlStr);
if (taos_errno(pRes) != 0) {
-pError("error in get consumeinfo, reason:%s\n", taos_errstr(pRes));
-taosFprintfFile(g_fp, "error in get consumeinfo, reason:%s\n", taos_errstr(pRes));
+taosFprintfFile(g_fp, "error in get consumeinfo for %s\n", taos_errstr(pRes));
taosCloseFile(&g_fp);
taos_free_result(pRes);
-exit(-1);
+taos_close(pConn);
+return -1;
}
TAOS_ROW row = NULL;
@ -981,6 +1008,7 @@ int32_t getConsumeInfo() {
taos_free_result(pRes);
parseConsumeInfo();
+taos_close(pConn);
return 0;
}
@ -1123,7 +1151,6 @@ void* ombConsumeThreadFunc(void* param) {
if ((NULL == pInfo->tmq) || (NULL == pInfo->topicList)) {
taosFprintfFile(g_fp, "create consumer fail! tmq is null or topicList is null\n");
-assert(0);
return NULL;
}
@ -1131,7 +1158,6 @@ void* ombConsumeThreadFunc(void* param) {
if (err != 0) {
pError("tmq_subscribe() fail, reason: %s\n", tmq_err2str(err));
taosFprintfFile(g_fp, "tmq_subscribe() fail! reason: %s\n", tmq_err2str(err));
-assert(0);
return NULL;
}
@ -1181,9 +1207,9 @@ static int queryDbExec(TAOS *taos, char *command, QUERY_TYPE type) {
void* ombProduceThreadFunc(void* param) {
SThreadInfo* pInfo = (SThreadInfo*)param;
-pInfo->taos = taos_connect(NULL, "root", "taosdata", NULL, 0);
+pInfo->taos = createNewTaosConnect();
if (pInfo->taos == NULL) {
-printf("taos_connect() fail\n");
+taosFprintfFile(g_fp, "taos_connect() fail, can not start producers!\n");
return NULL;
}
@ -1200,6 +1226,8 @@ void* ombProduceThreadFunc(void* param) {
char* sqlBuf = taosMemoryMalloc(MAX_SQL_LEN);
if (NULL == sqlBuf) {
printf("malloc fail for sqlBuf\n");
+taos_close(pInfo->taos);
+pInfo->taos = NULL;
return NULL;
}
@ -1232,6 +1260,8 @@ void* ombProduceThreadFunc(void* param) {
int64_t affectedRows = queryDbExec(pInfo->taos, sqlBuf, INSERT_TYPE);
if (affectedRows < 0) {
+taos_close(pInfo->taos);
+pInfo->taos = NULL;
return NULL;
}
@ -1266,6 +1296,8 @@ void* ombProduceThreadFunc(void* param) {
}
printf("affectedRowsTotal: %"PRId64"\n", affectedRowsTotal);
+taos_close(pInfo->taos);
+pInfo->taos = NULL;
return NULL;
}
@ -1301,10 +1333,9 @@ void startOmbConsume() {
taosThreadAttrSetDetachState(&thattr, PTHREAD_CREATE_JOINABLE);
if (0 != g_stConfInfo.producers) {
-TAOS* taos = taos_connect(NULL, "root", "taosdata", NULL, 0);
+TAOS* taos = createNewTaosConnect();
if (taos == NULL) {
-taosFprintfFile(g_fp, "taos_connect() fail, can not notify and save consume result to main script\n");
-ASSERT(0);
+taosFprintfFile(g_fp, "taos_connect() fail, can not create db, stbl, ctbl, topic!\n");
return ;
}
@ -1357,9 +1388,11 @@ void startOmbConsume() {
taosFprintfFile(g_fp, "==== close tmqlog ====\n");
taosCloseFile(&g_fp);
+taos_close(taos);
return;
}
+taos_close(taos);
}
// pthread_create one thread to consume
@ -1418,7 +1451,11 @@ int main(int32_t argc, char* argv[]) {
return 0;
}
-getConsumeInfo();
+int32_t retCode = getConsumeInfo();
+if (0 != retCode) {
+return -1;
+}
saveConfigToLogFile();
tmqSetSignalHandle();

View File

@ -524,10 +524,8 @@ int32_t shellReadCommand(char *command) {
c = taosGetConsoleChar();
switch (c) {
case 'A': // Up arrow
-if (hist_counter != pHistory->hstart) {
-hist_counter = (hist_counter + SHELL_MAX_HISTORY_SIZE - 1) % SHELL_MAX_HISTORY_SIZE;
-shellResetCommand(&cmd, (pHistory->hist[hist_counter] == NULL) ? "" : pHistory->hist[hist_counter]);
-}
+hist_counter = (hist_counter + SHELL_MAX_HISTORY_SIZE - 1) % SHELL_MAX_HISTORY_SIZE;
+shellResetCommand(&cmd, (pHistory->hist[hist_counter] == NULL) ? "" : pHistory->hist[hist_counter]);
break;
case 'B': // Down arrow
if (hist_counter != pHistory->hend) {