Merge remote-tracking branch 'origin/3.0' into feature/3.0_wxy

Xiaoyu Wang 2022-07-30 15:30:33 +08:00
commit 67c0c7e473
151 changed files with 3118 additions and 3062 deletions

.gitmodules vendored (12 lines changed)
View File

@ -1,12 +0,0 @@
[submodule "src/connector/go"]
path = src/connector/go
url = git@github.com:taosdata/driver-go.git
[submodule "src/connector/hivemq-tdengine-extension"]
path = src/connector/hivemq-tdengine-extension
url = git@github.com:taosdata/hivemq-tdengine-extension.git
[submodule "deps/TSZ"]
path = deps/TSZ
url = https://github.com/taosdata/TSZ.git
[submodule "examples/rust"]
path = examples/rust
url = https://github.com/songtianyi/tdengine-rust-bindings.git

View File

@ -118,6 +118,7 @@ def pre_test(){
git rm --cached tools/taos-tools 2>/dev/null || :
git rm --cached tools/taosadapter 2>/dev/null || :
git rm --cached tools/taosws-rs 2>/dev/null || :
git rm --cached examples/rust 2>/dev/null || :
'''
sh '''
cd ${WKC}
@ -269,6 +270,7 @@ def pre_test_win(){
git rm --cached tools/taos-tools 2>nul
git rm --cached tools/taosadapter 2>nul
git rm --cached tools/taosws-rs 2>nul
git rm --cached examples/rust 2>nul
exit 0
'''
bat '''

View File

@ -90,6 +90,12 @@ ELSE ()
ENDIF ()
ENDIF ()
option(
RUST_BINDINGS
"If build with rust-bindings"
ON
)
option(
JEMALLOC_ENABLED
"If build with jemalloc"

View File

@ -0,0 +1,12 @@
# rust-bindings
ExternalProject_Add(rust-bindings
GIT_REPOSITORY https://github.com/songtianyi/tdengine-rust-bindings.git
GIT_TAG 7ed7a97
SOURCE_DIR "${TD_SOURCE_DIR}/examples/rust"
BINARY_DIR "${TD_SOURCE_DIR}/examples/rust"
CONFIGURE_COMMAND ""
BUILD_COMMAND ""
INSTALL_COMMAND ""
TEST_COMMAND ""
)

View File

@ -1,5 +1,5 @@
# zlib
# taosadapter
ExternalProject_Add(taosadapter
GIT_REPOSITORY https://github.com/taosdata/taosadapter.git
GIT_TAG df8678f

View File

@ -1,8 +1,8 @@
# zlib
# taos-tools
ExternalProject_Add(taos-tools
GIT_REPOSITORY https://github.com/taosdata/taos-tools.git
GIT_TAG c529299
GIT_TAG 9dc2fec
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools"
BINARY_DIR ""
#BUILD_IN_SOURCE TRUE

View File

@ -1,5 +1,5 @@
# zlib
# taosws-rs
ExternalProject_Add(taosws-rs
GIT_REPOSITORY https://github.com/taosdata/taosws-rs.git
GIT_TAG 9de599d

View File

@ -105,6 +105,11 @@ if(${BUILD_WITH_SQLITE})
cat("${TD_SUPPORT_DIR}/sqlite_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
endif(${BUILD_WITH_SQLITE})
# rust-bindings
if(${RUST_BINDINGS})
cat("${TD_SUPPORT_DIR}/rust-bindings_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
endif(${RUST_BINDINGS})
# lucene
if(${BUILD_WITH_LUCENE})
cat("${TD_SUPPORT_DIR}/lucene_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
@ -135,6 +140,24 @@ execute_process(COMMAND "${CMAKE_COMMAND}" -G "${CMAKE_GENERATOR}" .
WORKING_DIRECTORY "${TD_CONTRIB_DIR}/deps-download")
execute_process(COMMAND "${CMAKE_COMMAND}" --build .
WORKING_DIRECTORY "${TD_CONTRIB_DIR}/deps-download")
# clear submodule
execute_process(COMMAND git submodule deinit -f tools/taos-tools
WORKING_DIRECTORY "${TD_SOURCE_DIR}")
execute_process(COMMAND git rm --cached tools/taos-tools
WORKING_DIRECTORY "${TD_SOURCE_DIR}")
execute_process(COMMAND git submodule deinit -f tools/taosadapter
WORKING_DIRECTORY "${TD_SOURCE_DIR}")
execute_process(COMMAND git rm --cached tools/taosadapter
WORKING_DIRECTORY "${TD_SOURCE_DIR}")
execute_process(COMMAND git submodule deinit -f tools/taosws-rs
WORKING_DIRECTORY "${TD_SOURCE_DIR}")
execute_process(COMMAND git rm --cached tools/taosws-rs
WORKING_DIRECTORY "${TD_SOURCE_DIR}")
execute_process(COMMAND git submodule deinit -f examples/rust
WORKING_DIRECTORY "${TD_SOURCE_DIR}")
execute_process(COMMAND git rm --cached examples/rust
WORKING_DIRECTORY "${TD_SOURCE_DIR}")
# ================================================================================================
# Build

@ -1 +0,0 @@
Subproject commit 7ed7a97715388fa144718764d6bf20f9bfc29a12

View File

@ -155,7 +155,7 @@ int32_t qGetQualifiedTableIdList(void* pTableList, const char* tagCond, int32_t
void qProcessRspMsg(void* parent, struct SRpcMsg* pMsg, struct SEpSet* pEpSet);
int32_t qGetExplainExecInfo(qTaskInfo_t tinfo, int32_t* resNum, SExplainExecInfo** pRes);
int32_t qGetExplainExecInfo(qTaskInfo_t tinfo, SArray* pExecInfoList/*,int32_t* resNum, SExplainExecInfo** pRes*/);
int32_t qSerializeTaskStatus(qTaskInfo_t tinfo, char** pOutput, int32_t* len);

View File

@ -16,65 +16,9 @@
#define _DEFAULT_SOURCE
#include "tdatablock.h"
#include "tcompare.h"
#include "tglobal.h"
#include "tlog.h"
#include "tname.h"
int32_t taosGetFqdnPortFromEp(const char* ep, SEp* pEp) {
pEp->port = 0;
strcpy(pEp->fqdn, ep);
char* temp = strchr(pEp->fqdn, ':');
if (temp) {
*temp = 0;
pEp->port = atoi(temp + 1);
}
if (pEp->port == 0) {
pEp->port = tsServerPort;
}
return 0;
}
void addEpIntoEpSet(SEpSet* pEpSet, const char* fqdn, uint16_t port) {
if (pEpSet == NULL || fqdn == NULL || strlen(fqdn) == 0) {
return;
}
int32_t index = pEpSet->numOfEps;
tstrncpy(pEpSet->eps[index].fqdn, fqdn, tListLen(pEpSet->eps[index].fqdn));
pEpSet->eps[index].port = port;
pEpSet->numOfEps += 1;
}
bool isEpsetEqual(const SEpSet* s1, const SEpSet* s2) {
if (s1->numOfEps != s2->numOfEps || s1->inUse != s2->inUse) {
return false;
}
for (int32_t i = 0; i < s1->numOfEps; i++) {
if (s1->eps[i].port != s2->eps[i].port || strncmp(s1->eps[i].fqdn, s2->eps[i].fqdn, TSDB_FQDN_LEN) != 0)
return false;
}
return true;
}
void updateEpSet_s(SCorEpSet* pEpSet, SEpSet* pNewEpSet) {
taosCorBeginWrite(&pEpSet->version);
pEpSet->epSet = *pNewEpSet;
taosCorEndWrite(&pEpSet->version);
}
SEpSet getEpSet_s(SCorEpSet* pEpSet) {
SEpSet ep = {0};
taosCorBeginRead(&pEpSet->version);
ep = pEpSet->epSet;
taosCorEndRead(&pEpSet->version);
return ep;
}
int32_t colDataGetLength(const SColumnInfoData* pColumnInfoData, int32_t numOfRows) {
ASSERT(pColumnInfoData != NULL);
if (IS_VAR_DATA_TYPE(pColumnInfoData->info.type)) {
@ -1713,8 +1657,9 @@ void blockDebugShowDataBlocks(const SArray* dataBlocks, const char* flag) {
size_t numOfCols = taosArrayGetSize(pDataBlock->pDataBlock);
int32_t rows = pDataBlock->info.rows;
printf("%s |block type %d |child id %d|group id %" PRIu64 "\n", flag, (int32_t)pDataBlock->info.type,
pDataBlock->info.childId, pDataBlock->info.groupId);
printf("%s |block ver %" PRIi64 " |block type %d |child id %d|group id %" PRIu64 "\n", flag,
pDataBlock->info.version, (int32_t)pDataBlock->info.type, pDataBlock->info.childId,
pDataBlock->info.groupId);
for (int32_t j = 0; j < rows; j++) {
printf("%s |", flag);
for (int32_t k = 0; k < numOfCols; k++) {

View File

@ -0,0 +1,77 @@
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#define _DEFAULT_SOURCE
#include "tdatablock.h"
#include "tglobal.h"
#include "tlog.h"
#include "tname.h"
int32_t taosGetFqdnPortFromEp(const char* ep, SEp* pEp) {
pEp->port = 0;
strcpy(pEp->fqdn, ep);
char* temp = strchr(pEp->fqdn, ':');
if (temp) {
*temp = 0;
pEp->port = atoi(temp + 1);
}
if (pEp->port == 0) {
pEp->port = tsServerPort;
}
return 0;
}
void addEpIntoEpSet(SEpSet* pEpSet, const char* fqdn, uint16_t port) {
if (pEpSet == NULL || fqdn == NULL || strlen(fqdn) == 0) {
return;
}
int32_t index = pEpSet->numOfEps;
tstrncpy(pEpSet->eps[index].fqdn, fqdn, tListLen(pEpSet->eps[index].fqdn));
pEpSet->eps[index].port = port;
pEpSet->numOfEps += 1;
}
bool isEpsetEqual(const SEpSet* s1, const SEpSet* s2) {
if (s1->numOfEps != s2->numOfEps || s1->inUse != s2->inUse) {
return false;
}
for (int32_t i = 0; i < s1->numOfEps; i++) {
if (s1->eps[i].port != s2->eps[i].port || strncmp(s1->eps[i].fqdn, s2->eps[i].fqdn, TSDB_FQDN_LEN) != 0)
return false;
}
return true;
}
void updateEpSet_s(SCorEpSet* pEpSet, SEpSet* pNewEpSet) {
taosCorBeginWrite(&pEpSet->version);
pEpSet->epSet = *pNewEpSet;
taosCorEndWrite(&pEpSet->version);
}
SEpSet getEpSet_s(SCorEpSet* pEpSet) {
SEpSet ep = {0};
taosCorBeginRead(&pEpSet->version);
ep = pEpSet->epSet;
taosCorEndRead(&pEpSet->version);
return ep;
}

View File

@ -20,34 +20,6 @@
#define VALID_NAME_TYPE(x) ((x) == TSDB_DB_NAME_T || (x) == TSDB_TABLE_NAME_T)
bool tscValidateTableNameLength(size_t len) { return len < TSDB_TABLE_NAME_LEN; }
#if 0
// TODO refactor
SColumnFilterInfo* tFilterInfoDup(const SColumnFilterInfo* src, int32_t numOfFilters) {
if (numOfFilters == 0 || src == NULL) {
assert(src == NULL);
return NULL;
}
SColumnFilterInfo* pFilter = taosMemoryCalloc(1, numOfFilters * sizeof(SColumnFilterInfo));
memcpy(pFilter, src, sizeof(SColumnFilterInfo) * numOfFilters);
for (int32_t j = 0; j < numOfFilters; ++j) {
if (pFilter[j].filterstr) {
size_t len = (size_t) pFilter[j].len + 1 * TSDB_NCHAR_SIZE;
pFilter[j].pz = (int64_t) taosMemoryCalloc(1, len);
memcpy((char*)pFilter[j].pz, (char*)src[j].pz, (size_t) pFilter[j].len);
}
}
assert(src->filterstr == 0 || src->filterstr == 1);
assert(!(src->lowerRelOptr == 0 && src->upperRelOptr == 0));
return pFilter;
}
#endif
#if 0
int64_t taosGetIntervalStartTimestamp(int64_t startTime, int64_t slidingTime, int64_t intervalTime, char timeUnit, int16_t precision) {
if (slidingTime == 0) {

View File

@ -31,6 +31,7 @@ target_sources(
"src/sma/smaOpen.c"
"src/sma/smaCommit.c"
"src/sma/smaRollup.c"
"src/sma/smaSnapshot.c"
"src/sma/smaTimeRange.c"
# tsdb

View File

@ -209,6 +209,9 @@ int32_t tdProcessTSmaGetDaysImpl(SVnodeCfg *pCfg, void *pCont, uint32_t contLen,
// smaFileUtil ================
typedef struct SQTaskFReader SQTaskFReader;
typedef struct SQTaskFWriter SQTaskFWriter;
#define TD_FILE_HEAD_SIZE 512
typedef struct STFInfo STFInfo;

View File

@ -97,7 +97,6 @@ int32_t tRowMergerGetRow(SRowMerger *pMerger, STSRow **ppRow);
// TABLEID
int32_t tTABLEIDCmprFn(const void *p1, const void *p2);
// TSDBKEY
int32_t tsdbKeyCmprFn(const void *p1, const void *p2);
#define MIN_TSDBKEY(KEY1, KEY2) ((tsdbKeyCmprFn(&(KEY1), &(KEY2)) < 0) ? (KEY1) : (KEY2))
#define MAX_TSDBKEY(KEY1, KEY2) ((tsdbKeyCmprFn(&(KEY1), &(KEY2)) > 0) ? (KEY1) : (KEY2))
// SBlockCol
@ -558,6 +557,26 @@ struct STsdbReadSnap {
STsdbFS fs;
};
// ========== inline functions ==========
static FORCE_INLINE int32_t tsdbKeyCmprFn(const void *p1, const void *p2) {
TSDBKEY *pKey1 = (TSDBKEY *)p1;
TSDBKEY *pKey2 = (TSDBKEY *)p2;
if (pKey1->ts < pKey2->ts) {
return -1;
} else if (pKey1->ts > pKey2->ts) {
return 1;
}
if (pKey1->version < pKey2->version) {
return -1;
} else if (pKey1->version > pKey2->version) {
return 1;
}
return 0;
}
#ifdef __cplusplus
}
#endif

View File

@ -62,6 +62,8 @@ typedef struct SMetaSnapReader SMetaSnapReader;
typedef struct SMetaSnapWriter SMetaSnapWriter;
typedef struct STsdbSnapReader STsdbSnapReader;
typedef struct STsdbSnapWriter STsdbSnapWriter;
typedef struct SRsmaSnapReader SRsmaSnapReader;
typedef struct SRsmaSnapWriter SRsmaSnapWriter;
typedef struct SSnapDataHdr SSnapDataHdr;
#define VNODE_META_DIR "meta"
@ -196,13 +198,21 @@ int32_t metaSnapWriterOpen(SMeta* pMeta, int64_t sver, int64_t ever, SMetaSnapWr
int32_t metaSnapWrite(SMetaSnapWriter* pWriter, uint8_t* pData, uint32_t nData);
int32_t metaSnapWriterClose(SMetaSnapWriter** ppWriter, int8_t rollback);
// STsdbSnapReader ========================================
int32_t tsdbSnapReaderOpen(STsdb* pTsdb, int64_t sver, int64_t ever, STsdbSnapReader** ppReader);
int32_t tsdbSnapReaderOpen(STsdb* pTsdb, int64_t sver, int64_t ever, int8_t type, STsdbSnapReader** ppReader);
int32_t tsdbSnapReaderClose(STsdbSnapReader** ppReader);
int32_t tsdbSnapRead(STsdbSnapReader* pReader, uint8_t** ppData);
// STsdbSnapWriter ========================================
int32_t tsdbSnapWriterOpen(STsdb* pTsdb, int64_t sver, int64_t ever, STsdbSnapWriter** ppWriter);
int32_t tsdbSnapWrite(STsdbSnapWriter* pWriter, uint8_t* pData, uint32_t nData);
int32_t tsdbSnapWriterClose(STsdbSnapWriter** ppWriter, int8_t rollback);
// SRsmaSnapReader ========================================
int32_t rsmaSnapReaderOpen(SSma* pSma, int64_t sver, int64_t ever, SRsmaSnapReader** ppReader);
int32_t rsmaSnapReaderClose(SRsmaSnapReader** ppReader);
int32_t rsmaSnapRead(SRsmaSnapReader* pReader, uint8_t** ppData);
// SRsmaSnapWriter ========================================
int32_t rsmaSnapWriterOpen(SSma* pSma, int64_t sver, int64_t ever, SRsmaSnapWriter** ppWriter);
int32_t rsmaSnapWrite(SRsmaSnapWriter* pWriter, uint8_t* pData, uint32_t nData);
int32_t rsmaSnapWriterClose(SRsmaSnapWriter** ppWriter, int8_t rollback);
typedef struct {
int8_t streamType; // sma or other
@ -314,6 +324,15 @@ struct SSma {
// sma
void smaHandleRes(void* pVnode, int64_t smaId, const SArray* data);
enum {
SNAP_DATA_META = 0,
SNAP_DATA_TSDB = 1,
SNAP_DATA_DEL = 2,
SNAP_DATA_RSMA1 = 3,
SNAP_DATA_RSMA2 = 4,
SNAP_DATA_QTASK = 5,
};
struct SSnapDataHdr {
int8_t type;
int64_t index;

View File

@ -183,11 +183,11 @@ int metaClose(SMeta *pMeta) {
int32_t metaRLock(SMeta *pMeta) {
int32_t ret = 0;
metaDebug("meta rlock %p B", &pMeta->lock);
metaTrace("meta rlock %p B", &pMeta->lock);
ret = taosThreadRwlockRdlock(&pMeta->lock);
metaDebug("meta rlock %p E", &pMeta->lock);
metaTrace("meta rlock %p E", &pMeta->lock);
return ret;
}
@ -195,11 +195,11 @@ int32_t metaRLock(SMeta *pMeta) {
int32_t metaWLock(SMeta *pMeta) {
int32_t ret = 0;
metaDebug("meta wlock %p B", &pMeta->lock);
metaTrace("meta wlock %p B", &pMeta->lock);
ret = taosThreadRwlockWrlock(&pMeta->lock);
metaDebug("meta wlock %p E", &pMeta->lock);
metaTrace("meta wlock %p E", &pMeta->lock);
return ret;
}
@ -207,11 +207,11 @@ int32_t metaWLock(SMeta *pMeta) {
int32_t metaULock(SMeta *pMeta) {
int32_t ret = 0;
metaDebug("meta ulock %p B", &pMeta->lock);
metaTrace("meta ulock %p B", &pMeta->lock);
ret = taosThreadRwlockUnlock(&pMeta->lock);
metaDebug("meta ulock %p E", &pMeta->lock);
metaTrace("meta ulock %p E", &pMeta->lock);
return ret;
}

View File

@ -109,7 +109,7 @@ int32_t metaSnapRead(SMetaSnapReader* pReader, uint8_t** ppData) {
}
SSnapDataHdr* pHdr = (SSnapDataHdr*)(*ppData);
pHdr->type = 0; // TODO: use macro
pHdr->type = SNAP_DATA_META;
pHdr->size = nData;
memcpy(pHdr->data, pData, nData);

View File

@ -49,7 +49,8 @@ int32_t rsmaSnapReaderOpen(SSma* pSma, int64_t sver, int64_t ever, SRsmaSnapRead
for (int32_t i = 0; i < TSDB_RETENTION_L2; ++i) {
if (pSma->pRSmaTsdb[i]) {
code = tsdbSnapReaderOpen(pSma->pRSmaTsdb[i], sver, ever, &pReader->pDataReader[i]);
code = tsdbSnapReaderOpen(pSma->pRSmaTsdb[i], sver, ever, i == 0 ? SNAP_DATA_RSMA1 : SNAP_DATA_RSMA2,
&pReader->pDataReader[i]);
if (code < 0) {
goto _err;
}
@ -221,10 +222,9 @@ int32_t rsmaSnapWriterClose(SRsmaSnapWriter** ppWriter, int8_t rollback) {
}
}
smaInfo("vgId:%d vnode snapshot rsma writer close succeed", SMA_VID(pWriter->pSma));
taosMemoryFree(pWriter);
*ppWriter = NULL;
smaInfo("vgId:%d vnode snapshot rsma writer close succeed", SMA_VID(pWriter->pSma));
return code;
_err:
@ -245,15 +245,17 @@ int32_t rsmaSnapWrite(SRsmaSnapWriter* pWriter, uint8_t* pData, uint32_t nData)
code = tsdbSnapWrite(pWriter->pDataWriter[1], pData, nData);
} else if (pHdr->type == SNAP_DATA_QTASK) {
code = rsmaSnapWriteQTaskInfo(pWriter, pData, nData);
} else {
ASSERT(0);
}
if (code < 0) goto _err;
_exit:
smaInfo("vgId:%d rsma snapshot write for data %" PRIi8 " succeed", SMA_VID(pWriter->pSma), pHdr->type);
smaInfo("vgId:%d rsma snapshot write for data type %" PRIi8 " succeed", SMA_VID(pWriter->pSma), pHdr->type);
return code;
_err:
smaError("vgId:%d rsma snapshot write for data %" PRIi8 " failed since %s", SMA_VID(pWriter->pSma), pHdr->type,
smaError("vgId:%d rsma snapshot write for data type %" PRIi8 " failed since %s", SMA_VID(pWriter->pSma), pHdr->type,
tstrerror(code));
return code;
}

View File

@ -307,7 +307,11 @@ static int32_t tsdbCommitFileDataStart(SCommitter *pCommitter) {
fLast = (SLastFile){.commitID = pCommitter->commitID, .size = 0};
fSma = *pRSet->pSmaF;
} else {
wSet.diskId = (SDiskID){.level = 0, .id = 0};
SDiskID did = {0};
tfsAllocDisk(pTsdb->pVnode->pTfs, 0, &did);
wSet.diskId = did;
wSet.fid = pCommitter->commitFid;
fHead = (SHeadFile){.commitID = pCommitter->commitID, .offset = 0, .size = 0};
fData = (SDataFile){.commitID = pCommitter->commitID, .size = 0};

View File

@ -21,6 +21,7 @@ struct STsdbSnapReader {
int64_t sver;
int64_t ever;
STsdbFS fs;
int8_t type;
// for data file
int8_t dataDone;
int32_t fid;
@ -62,7 +63,8 @@ static int32_t tsdbSnapReadData(STsdbSnapReader* pReader, uint8_t** ppData) {
pReader->iBlockIdx = 0;
pReader->pBlockIdx = NULL;
tsdbInfo("vgId:%d vnode snapshot tsdb open data file to read, fid:%d", TD_VID(pTsdb->pVnode), pReader->fid);
tsdbInfo("vgId:%d vnode snapshot tsdb open data file to read for %s, fid:%d", TD_VID(pTsdb->pVnode), pTsdb->path,
pReader->fid);
}
while (true) {
@ -130,7 +132,7 @@ static int32_t tsdbSnapReadData(STsdbSnapReader* pReader, uint8_t** ppData) {
}
SSnapDataHdr* pHdr = (SSnapDataHdr*)(*ppData);
pHdr->type = 1;
pHdr->type = pReader->type;
pHdr->size = size;
TABLEID* pId = (TABLEID*)(&pHdr[1]);
@ -139,9 +141,9 @@ static int32_t tsdbSnapReadData(STsdbSnapReader* pReader, uint8_t** ppData) {
tPutBlockData((uint8_t*)(&pId[1]), &pReader->nBlockData);
tsdbInfo("vgId:%d vnode snapshot read data, fid:%d suid:%" PRId64 " uid:%" PRId64
tsdbInfo("vgId:%d vnode snapshot read data for %s, fid:%d suid:%" PRId64 " uid:%" PRId64
" iBlock:%d minVersion:%d maxVersion:%d nRow:%d out of %d size:%d",
TD_VID(pTsdb->pVnode), pReader->fid, pReader->pBlockIdx->suid, pReader->pBlockIdx->uid,
TD_VID(pTsdb->pVnode), pTsdb->path, pReader->fid, pReader->pBlockIdx->suid, pReader->pBlockIdx->uid,
pReader->iBlock - 1, pBlock->minVersion, pBlock->maxVersion, pReader->nBlockData.nRow, pBlock->nRow,
size);
@ -154,7 +156,8 @@ _exit:
return code;
_err:
tsdbError("vgId:%d vnode snapshot tsdb read data failed since %s", TD_VID(pTsdb->pVnode), tstrerror(code));
tsdbError("vgId:%d vnode snapshot tsdb read data for %s failed since %s", TD_VID(pTsdb->pVnode), pTsdb->path,
tstrerror(code));
return code;
}
@ -212,7 +215,7 @@ static int32_t tsdbSnapReadDel(STsdbSnapReader* pReader, uint8_t** ppData) {
}
SSnapDataHdr* pHdr = (SSnapDataHdr*)(*ppData);
pHdr->type = 2;
pHdr->type = SNAP_DATA_DEL;
pHdr->size = size;
TABLEID* pId = (TABLEID*)(&pHdr[1]);
@ -228,8 +231,8 @@ static int32_t tsdbSnapReadDel(STsdbSnapReader* pReader, uint8_t** ppData) {
n += tPutDelData((*ppData) + n, pDelData);
}
tsdbInfo("vgId:%d vnode snapshot tsdb read del data, suid:%" PRId64 " uid:%d" PRId64 " size:%d",
TD_VID(pTsdb->pVnode), pDelIdx->suid, pDelIdx->uid, size);
tsdbInfo("vgId:%d vnode snapshot tsdb read del data for %s, suid:%" PRId64 " uid:%d" PRId64 " size:%d",
TD_VID(pTsdb->pVnode), pTsdb->path, pDelIdx->suid, pDelIdx->uid, size);
break;
}
@ -238,11 +241,12 @@ _exit:
return code;
_err:
tsdbError("vgId:%d vnode snapshot tsdb read del failed since %s", TD_VID(pTsdb->pVnode), tstrerror(code));
tsdbError("vgId:%d vnode snapshot tsdb read del for %s failed since %s", TD_VID(pTsdb->pVnode), pTsdb->pVnode,
tstrerror(code));
return code;
}
int32_t tsdbSnapReaderOpen(STsdb* pTsdb, int64_t sver, int64_t ever, STsdbSnapReader** ppReader) {
int32_t tsdbSnapReaderOpen(STsdb* pTsdb, int64_t sver, int64_t ever, int8_t type, STsdbSnapReader** ppReader) {
int32_t code = 0;
STsdbSnapReader* pReader = NULL;
@ -255,6 +259,7 @@ int32_t tsdbSnapReaderOpen(STsdb* pTsdb, int64_t sver, int64_t ever, STsdbSnapRe
pReader->pTsdb = pTsdb;
pReader->sver = sver;
pReader->ever = ever;
pReader->type = type;
code = taosThreadRwlockRdlock(&pTsdb->rwLock);
if (code) {
@ -297,12 +302,13 @@ int32_t tsdbSnapReaderOpen(STsdb* pTsdb, int64_t sver, int64_t ever, STsdbSnapRe
goto _err;
}
tsdbInfo("vgId:%d vnode snapshot tsdb reader opened", TD_VID(pTsdb->pVnode));
tsdbInfo("vgId:%d vnode snapshot tsdb reader opened for %s", TD_VID(pTsdb->pVnode), pTsdb->path);
*ppReader = pReader;
return code;
_err:
tsdbError("vgId:%d vnode snapshot tsdb reader open failed since %s", TD_VID(pTsdb->pVnode), tstrerror(code));
tsdbError("vgId:%d vnode snapshot tsdb reader open for %s failed since %s", TD_VID(pTsdb->pVnode), pTsdb->path,
tstrerror(code));
*ppReader = NULL;
return code;
}
@ -327,7 +333,7 @@ int32_t tsdbSnapReaderClose(STsdbSnapReader** ppReader) {
tsdbFSUnref(pReader->pTsdb, &pReader->fs);
tsdbInfo("vgId:%d vnode snapshot tsdb reader closed", TD_VID(pReader->pTsdb->pVnode));
tsdbInfo("vgId:%d vnode snapshot tsdb reader closed for %s", TD_VID(pReader->pTsdb->pVnode), pReader->pTsdb->path);
taosMemoryFree(pReader);
*ppReader = NULL;
@ -368,10 +374,12 @@ int32_t tsdbSnapRead(STsdbSnapReader* pReader, uint8_t** ppData) {
}
_exit:
tsdbDebug("vgId:%d vnode snapshot tsdb read for %s", TD_VID(pReader->pTsdb->pVnode), pReader->pTsdb->path);
return code;
_err:
tsdbError("vgId:%d vnode snapshot tsdb read failed since %s", TD_VID(pReader->pTsdb->pVnode), tstrerror(code));
tsdbError("vgId:%d vnode snapshot tsdb read for %s failed since %s", TD_VID(pReader->pTsdb->pVnode),
pReader->pTsdb->path, tstrerror(code));
return code;
}
@ -436,7 +444,8 @@ static int32_t tsdbSnapWriteAppendData(STsdbSnapWriter* pWriter, uint8_t* pData,
return code;
_err:
tsdbError("vgId:%d tsdb snapshot write append data failed since %s", TD_VID(pWriter->pTsdb->pVnode), tstrerror(code));
tsdbError("vgId:%d tsdb snapshot write append data for %s failed since %s", TD_VID(pWriter->pTsdb->pVnode),
pWriter->pTsdb->path, tstrerror(code));
return code;
}
@ -522,9 +531,12 @@ static int32_t tsdbSnapWriteTableDataEnd(STsdbSnapWriter* pWriter) {
}
_exit:
tsdbInfo("vgId:%d tsdb snapshot write table data end for %s", TD_VID(pWriter->pTsdb->pVnode), pWriter->pTsdb->path);
return code;
_err:
tsdbError("vgId:%d tsdb snapshot write table data end for %s failed since %s", TD_VID(pWriter->pTsdb->pVnode),
pWriter->pTsdb->path, tstrerror(code));
return code;
}
@ -570,6 +582,8 @@ _exit:
return code;
_err:
tsdbError("vgId:%d tsdb snapshot move write table data for %s failed since %s", TD_VID(pWriter->pTsdb->pVnode),
pWriter->pTsdb->path, tstrerror(code));
return code;
}
@ -708,8 +722,8 @@ static int32_t tsdbSnapWriteTableDataImpl(STsdbSnapWriter* pWriter) {
return code;
_err:
tsdbError("vgId:%d vnode snapshot tsdb write table data impl failed since %s", TD_VID(pWriter->pTsdb->pVnode),
tstrerror(code));
tsdbError("vgId:%d vnode snapshot tsdb write table data impl for %s failed since %s", TD_VID(pWriter->pTsdb->pVnode),
pWriter->pTsdb->path, tstrerror(code));
return code;
}
@ -794,11 +808,12 @@ static int32_t tsdbSnapWriteTableData(STsdbSnapWriter* pWriter, TABLEID id) {
if (code) goto _err;
_exit:
tsdbDebug("vgId:%d vnode snapshot tsdb write data impl for %s", TD_VID(pWriter->pTsdb->pVnode), pWriter->pTsdb->path);
return code;
_err:
tsdbError("vgId:%d vnode snapshot tsdb write data impl failed since %s", TD_VID(pWriter->pTsdb->pVnode),
tstrerror(code));
tsdbError("vgId:%d vnode snapshot tsdb write data impl for %s failed since %s", TD_VID(pWriter->pTsdb->pVnode),
pWriter->pTsdb->path, tstrerror(code));
return code;
}
@ -833,11 +848,12 @@ static int32_t tsdbSnapWriteDataEnd(STsdbSnapWriter* pWriter) {
}
_exit:
tsdbInfo("vgId:%d vnode snapshot tsdb writer data end", TD_VID(pTsdb->pVnode));
tsdbInfo("vgId:%d vnode snapshot tsdb writer data end for %s", TD_VID(pTsdb->pVnode), pTsdb->path);
return code;
_err:
tsdbError("vgId:%d vnode snapshot tsdb writer data end failed since %s", TD_VID(pTsdb->pVnode), tstrerror(code));
tsdbError("vgId:%d vnode snapshot tsdb writer data end for %s failed since %s", TD_VID(pTsdb->pVnode), pTsdb->path,
tstrerror(code));
return code;
}
@ -920,12 +936,13 @@ static int32_t tsdbSnapWriteData(STsdbSnapWriter* pWriter, uint8_t* pData, uint3
code = tsdbSnapWriteTableData(pWriter, id);
if (code) goto _err;
tsdbInfo("vgId:%d vnode snapshot tsdb write data, fid:%d suid:%" PRId64 " uid:%" PRId64 " nRow:%d",
TD_VID(pTsdb->pVnode), fid, id.suid, id.suid, pBlockData->nRow);
tsdbInfo("vgId:%d vnode snapshot tsdb write data for %s, fid:%d suid:%" PRId64 " uid:%" PRId64 " nRow:%d",
TD_VID(pTsdb->pVnode), pTsdb->path, fid, id.suid, id.uid, pBlockData->nRow);
return code;
_err:
tsdbError("vgId:%d vnode snapshot tsdb write data failed since %s", TD_VID(pTsdb->pVnode), tstrerror(code));
tsdbError("vgId:%d vnode snapshot tsdb write data for %s failed since %s", TD_VID(pTsdb->pVnode), pTsdb->path,
tstrerror(code));
return code;
}
@ -1015,7 +1032,8 @@ _exit:
return code;
_err:
tsdbError("vgId:%d vnode snapshot tsdb write del failed since %s", TD_VID(pTsdb->pVnode), tstrerror(code));
tsdbError("vgId:%d vnode snapshot tsdb write del for %s failed since %s", TD_VID(pTsdb->pVnode), pTsdb->path,
tstrerror(code));
return code;
}
@ -1056,11 +1074,12 @@ static int32_t tsdbSnapWriteDelEnd(STsdbSnapWriter* pWriter) {
}
_exit:
tsdbInfo("vgId:%d vnode snapshot tsdb write del end", TD_VID(pTsdb->pVnode));
tsdbInfo("vgId:%d vnode snapshot tsdb write del for %s end", TD_VID(pTsdb->pVnode), pTsdb->path);
return code;
_err:
tsdbError("vgId:%d vnode snapshot tsdb write del end failed since %s", TD_VID(pTsdb->pVnode), tstrerror(code));
tsdbError("vgId:%d vnode snapshot tsdb write del end for %s failed since %s", TD_VID(pTsdb->pVnode), pTsdb->path,
tstrerror(code));
return code;
}
@ -1127,10 +1146,12 @@ int32_t tsdbSnapWriterOpen(STsdb* pTsdb, int64_t sver, int64_t ever, STsdbSnapWr
}
*ppWriter = pWriter;
return code;
tsdbInfo("vgId:%d tsdb snapshot writer open for %s succeed", TD_VID(pTsdb->pVnode), pTsdb->path);
return code;
_err:
tsdbError("vgId:%d tsdb snapshot writer open failed since %s", TD_VID(pTsdb->pVnode), tstrerror(code));
tsdbError("vgId:%d tsdb snapshot writer open for %s failed since %s", TD_VID(pTsdb->pVnode), pTsdb->path,
tstrerror(code));
*ppWriter = NULL;
return code;
}
@ -1157,14 +1178,16 @@ int32_t tsdbSnapWriterClose(STsdbSnapWriter** ppWriter, int8_t rollback) {
if (code) goto _err;
}
tsdbInfo("vgId:%d vnode snapshot tsdb writer close for %s", TD_VID(pWriter->pTsdb->pVnode), pWriter->pTsdb->path);
taosMemoryFree(pWriter);
*ppWriter = NULL;
return code;
_err:
tsdbError("vgId:%d vnode snapshot tsdb writer close failed since %s", TD_VID(pWriter->pTsdb->pVnode),
tstrerror(code));
tsdbError("vgId:%d vnode snapshot tsdb writer close for %s failed since %s", TD_VID(pWriter->pTsdb->pVnode),
pWriter->pTsdb->path, tstrerror(code));
taosMemoryFree(pWriter);
*ppWriter = NULL;
return code;
}
@ -1173,7 +1196,7 @@ int32_t tsdbSnapWrite(STsdbSnapWriter* pWriter, uint8_t* pData, uint32_t nData)
SSnapDataHdr* pHdr = (SSnapDataHdr*)pData;
// ts data
if (pHdr->type == 1) {
if (pHdr->type == SNAP_DATA_TSDB) {
code = tsdbSnapWriteData(pWriter, pData, nData);
if (code) goto _err;
@ -1186,15 +1209,17 @@ int32_t tsdbSnapWrite(STsdbSnapWriter* pWriter, uint8_t* pData, uint32_t nData)
}
// del data
if (pHdr->type == 2) {
if (pHdr->type == SNAP_DATA_DEL) {
code = tsdbSnapWriteDel(pWriter, pData, nData);
if (code) goto _err;
}
_exit:
tsdbDebug("vgId:%d tsdb snapshow write for %s succeed", TD_VID(pWriter->pTsdb->pVnode), pWriter->pTsdb->path);
return code;
_err:
tsdbError("vgId:%d tsdb snapshow write failed since %s", TD_VID(pWriter->pTsdb->pVnode), tstrerror(code));
tsdbError("vgId:%d tsdb snapshow write for %s failed since %s", TD_VID(pWriter->pTsdb->pVnode), pWriter->pTsdb->path,
tstrerror(code));
return code;
}

View File

@ -151,26 +151,6 @@ int32_t tTABLEIDCmprFn(const void *p1, const void *p2) {
return 0;
}
// TSDBKEY =======================================================================
int32_t tsdbKeyCmprFn(const void *p1, const void *p2) {
TSDBKEY *pKey1 = (TSDBKEY *)p1;
TSDBKEY *pKey2 = (TSDBKEY *)p2;
if (pKey1->ts < pKey2->ts) {
return -1;
} else if (pKey1->ts > pKey2->ts) {
return 1;
}
if (pKey1->version < pKey2->version) {
return -1;
} else if (pKey1->version > pKey2->version) {
return 1;
}
return 0;
}
// TSDBKEY ======================================================
static FORCE_INLINE int32_t tPutTSDBKEY(uint8_t *p, TSDBKEY *pKey) {
int32_t n = 0;
@ -1401,7 +1381,7 @@ void tsdbCalcColDataSMA(SColData *pColData, SColumnDataAgg *pColAgg) {
break;
case TSDB_DATA_TYPE_BOOL:
break;
case TSDB_DATA_TYPE_TINYINT:{
case TSDB_DATA_TYPE_TINYINT: {
pColAgg->sum += colVal.value.i8;
if (pColAgg->min > colVal.value.i8) {
pColAgg->min = colVal.value.i8;
@ -1411,7 +1391,7 @@ void tsdbCalcColDataSMA(SColData *pColData, SColumnDataAgg *pColAgg) {
}
break;
}
case TSDB_DATA_TYPE_SMALLINT:{
case TSDB_DATA_TYPE_SMALLINT: {
pColAgg->sum += colVal.value.i16;
if (pColAgg->min > colVal.value.i16) {
pColAgg->min = colVal.value.i16;
@ -1441,7 +1421,7 @@ void tsdbCalcColDataSMA(SColData *pColData, SColumnDataAgg *pColAgg) {
}
break;
}
case TSDB_DATA_TYPE_FLOAT:{
case TSDB_DATA_TYPE_FLOAT: {
pColAgg->sum += colVal.value.f;
if (pColAgg->min > colVal.value.f) {
pColAgg->min = colVal.value.f;
@ -1451,7 +1431,7 @@ void tsdbCalcColDataSMA(SColData *pColData, SColumnDataAgg *pColAgg) {
}
break;
}
case TSDB_DATA_TYPE_DOUBLE:{
case TSDB_DATA_TYPE_DOUBLE: {
pColAgg->sum += colVal.value.d;
if (pColAgg->min > colVal.value.d) {
pColAgg->min = colVal.value.d;
@ -1463,7 +1443,7 @@ void tsdbCalcColDataSMA(SColData *pColData, SColumnDataAgg *pColAgg) {
}
case TSDB_DATA_TYPE_VARCHAR:
break;
case TSDB_DATA_TYPE_TIMESTAMP:{
case TSDB_DATA_TYPE_TIMESTAMP: {
if (pColAgg->min > colVal.value.i64) {
pColAgg->min = colVal.value.i64;
}
@ -1474,7 +1454,7 @@ void tsdbCalcColDataSMA(SColData *pColData, SColumnDataAgg *pColAgg) {
}
case TSDB_DATA_TYPE_NCHAR:
break;
case TSDB_DATA_TYPE_UTINYINT:{
case TSDB_DATA_TYPE_UTINYINT: {
pColAgg->sum += colVal.value.u8;
if (pColAgg->min > colVal.value.u8) {
pColAgg->min = colVal.value.u8;
@ -1484,7 +1464,7 @@ void tsdbCalcColDataSMA(SColData *pColData, SColumnDataAgg *pColAgg) {
}
break;
}
case TSDB_DATA_TYPE_USMALLINT:{
case TSDB_DATA_TYPE_USMALLINT: {
pColAgg->sum += colVal.value.u16;
if (pColAgg->min > colVal.value.u16) {
pColAgg->min = colVal.value.u16;
@ -1494,7 +1474,7 @@ void tsdbCalcColDataSMA(SColData *pColData, SColumnDataAgg *pColAgg) {
}
break;
}
case TSDB_DATA_TYPE_UINT:{
case TSDB_DATA_TYPE_UINT: {
pColAgg->sum += colVal.value.u32;
if (pColAgg->min > colVal.value.u32) {
pColAgg->min = colVal.value.u32;
@ -1504,7 +1484,7 @@ void tsdbCalcColDataSMA(SColData *pColData, SColumnDataAgg *pColAgg) {
}
break;
}
case TSDB_DATA_TYPE_UBIGINT:{
case TSDB_DATA_TYPE_UBIGINT: {
pColAgg->sum += colVal.value.u64;
if (pColAgg->min > colVal.value.u64) {
pColAgg->min = colVal.value.u64;

View File

@ -28,7 +28,8 @@ struct SVSnapReader {
int8_t tsdbDone;
STsdbSnapReader *pTsdbReader;
// rsma
int8_t rsmaDone[TSDB_RETENTION_L2];
int8_t rsmaDone;
SRsmaSnapReader *pRsmaReader;
};
int32_t vnodeSnapReaderOpen(SVnode *pVnode, int64_t sver, int64_t ever, SVSnapReader **ppReader) {
@ -57,6 +58,10 @@ _err:
int32_t vnodeSnapReaderClose(SVSnapReader *pReader) {
int32_t code = 0;
if (pReader->pRsmaReader) {
rsmaSnapReaderClose(&pReader->pRsmaReader);
}
if (pReader->pTsdbReader) {
tsdbSnapReaderClose(&pReader->pTsdbReader);
}
@ -99,7 +104,7 @@ int32_t vnodeSnapRead(SVSnapReader *pReader, uint8_t **ppData, uint32_t *nData)
if (!pReader->tsdbDone) {
// open if not
if (pReader->pTsdbReader == NULL) {
code = tsdbSnapReaderOpen(pReader->pVnode->pTsdb, pReader->sver, pReader->ever, &pReader->pTsdbReader);
code = tsdbSnapReaderOpen(pReader->pVnode->pTsdb, pReader->sver, pReader->ever, SNAP_DATA_TSDB, &pReader->pTsdbReader);
if (code) goto _err;
}
@ -118,40 +123,26 @@ int32_t vnodeSnapRead(SVSnapReader *pReader, uint8_t **ppData, uint32_t *nData)
}
// RSMA ==============
#if 0
if (VND_IS_RSMA(pReader->pVnode)) {
// RSMA1/RSMA2
for (int32_t i = 0; i < TSDB_RETENTION_L2; ++i) {
if (!pReader->rsmaDone[i]) {
if (!pReader->pVnode->pSma->pRSmaTsdb[i]) {
// no valid tsdb
pReader->rsmaDone[i] = 1;
continue;
}
if (pReader->pTsdbReader == NULL) {
code = tsdbSnapReaderOpen(pReader->pVnode->pSma->pRSmaTsdb[i], pReader->sver, pReader->ever,
&pReader->pTsdbReader);
if (code) goto _err;
}
if (VND_IS_RSMA(pReader->pVnode) && !pReader->rsmaDone) {
// open if not
if (pReader->pRsmaReader == NULL) {
code = rsmaSnapReaderOpen(pReader->pVnode->pSma, pReader->sver, pReader->ever, &pReader->pRsmaReader);
if (code) goto _err;
}
code = tsdbSnapRead(pReader->pTsdbReader, ppData);
if (code) {
goto _err;
} else {
if (*ppData) {
goto _exit;
} else {
pReader->tsdbDone = 1;
code = tsdbSnapReaderClose(&pReader->pTsdbReader);
if (code) goto _err;
}
}
code = rsmaSnapRead(pReader->pRsmaReader, ppData);
if (code) {
goto _err;
} else {
if (*ppData) {
goto _exit;
} else {
pReader->rsmaDone = 1;
code = rsmaSnapReaderClose(&pReader->pRsmaReader);
if (code) goto _err;
}
}
// QTaskInfoFile
// TODO ...
}
#endif
*ppData = NULL;
*nData = 0;
@ -186,6 +177,8 @@ struct SVSnapWriter {
SMetaSnapWriter *pMetaSnapWriter;
// tsdb
STsdbSnapWriter *pTsdbSnapWriter;
// rsma
SRsmaSnapWriter *pRsmaSnapWriter;
};
int32_t vnodeSnapWriterOpen(SVnode *pVnode, int64_t sver, int64_t ever, SVSnapWriter **ppWriter) {
@ -235,6 +228,11 @@ int32_t vnodeSnapWriterClose(SVSnapWriter *pWriter, int8_t rollback, SSnapshot *
if (code) goto _err;
}
if (pWriter->pRsmaSnapWriter) {
code = rsmaSnapWriterClose(&pWriter->pRsmaSnapWriter, rollback);
if (code) goto _err;
}
if (!rollback) {
SVnodeInfo info = {0};
char dir[TSDB_FILENAME_LEN];
@ -282,28 +280,51 @@ int32_t vnodeSnapWrite(SVSnapWriter *pWriter, uint8_t *pData, uint32_t nData) {
vInfo("vgId:%d vnode snapshot write data, index:%" PRId64 " type:%d nData:%d", TD_VID(pVnode), pHdr->index,
pHdr->type, nData);
if (pHdr->type == 0) {
// meta
switch (pHdr->type) {
case SNAP_DATA_META: {
// meta
if (pWriter->pMetaSnapWriter == NULL) {
code = metaSnapWriterOpen(pVnode->pMeta, pWriter->sver, pWriter->ever, &pWriter->pMetaSnapWriter);
if (code) goto _err;
}
if (pWriter->pMetaSnapWriter == NULL) {
code = metaSnapWriterOpen(pVnode->pMeta, pWriter->sver, pWriter->ever, &pWriter->pMetaSnapWriter);
code = metaSnapWrite(pWriter->pMetaSnapWriter, pData, nData);
if (code) goto _err;
}
} break;
case SNAP_DATA_TSDB: {
// tsdb
if (pWriter->pTsdbSnapWriter == NULL) {
code = tsdbSnapWriterOpen(pVnode->pTsdb, pWriter->sver, pWriter->ever, &pWriter->pTsdbSnapWriter);
if (code) goto _err;
}
code = metaSnapWrite(pWriter->pMetaSnapWriter, pData, nData);
if (code) goto _err;
} else {
// tsdb
if (pWriter->pTsdbSnapWriter == NULL) {
code = tsdbSnapWriterOpen(pVnode->pTsdb, pWriter->sver, pWriter->ever, &pWriter->pTsdbSnapWriter);
code = tsdbSnapWrite(pWriter->pTsdbSnapWriter, pData, nData);
if (code) goto _err;
}
} break;
case SNAP_DATA_RSMA1:
case SNAP_DATA_RSMA2: {
// rsma1/rsma2
if (pWriter->pRsmaSnapWriter == NULL) {
code = rsmaSnapWriterOpen(pVnode->pSma, pWriter->sver, pWriter->ever, &pWriter->pRsmaSnapWriter);
if (code) goto _err;
}
code = tsdbSnapWrite(pWriter->pTsdbSnapWriter, pData, nData);
if (code) goto _err;
code = rsmaSnapWrite(pWriter->pRsmaSnapWriter, pData, nData);
if (code) goto _err;
} break;
case SNAP_DATA_QTASK: {
// qtask for rsma
if (pWriter->pRsmaSnapWriter == NULL) {
code = rsmaSnapWriterOpen(pVnode->pSma, pWriter->sver, pWriter->ever, &pWriter->pRsmaSnapWriter);
if (code) goto _err;
}
code = rsmaSnapWrite(pWriter->pRsmaSnapWriter, pData, nData);
if (code) goto _err;
} break;
default:
break;
}
_exit:
return code;

View File

@ -447,6 +447,7 @@ _err:
static int32_t vnodeProcessCreateTbReq(SVnode *pVnode, int64_t version, void *pReq, int32_t len, SRpcMsg *pRsp) {
SDecoder decoder = {0};
SEncoder encoder = {0};
int32_t rcode = 0;
SVCreateTbBatchReq req = {0};
SVCreateTbReq *pCreateReq;
@ -515,7 +516,6 @@ static int32_t vnodeProcessCreateTbReq(SVnode *pVnode, int64_t version, void *pR
tdUidStoreFree(pStore);
// prepare rsp
SEncoder encoder = {0};
int32_t ret = 0;
tEncodeSize(tEncodeSVCreateTbBatchRsp, &rsp, pRsp->contLen, ret);
pRsp->pCont = rpcMallocCont(pRsp->contLen);

View File

@ -679,6 +679,8 @@ void ctgClearSubTaskRes(SCtgSubRes *pRes);
void ctgFreeQNode(SCtgQNode *node);
void ctgClearHandle(SCatalog* pCtg);
void ctgFreeTbCacheImpl(SCtgTbCache *pCache);
int32_t ctgRemoveTbMeta(SCatalog* pCtg, SName* pTableName);
int32_t ctgGetTbHashVgroup(SCatalog *pCtg, SRequestConnInfo *pConn, const SName *pTableName, SVgroupInfo *pVgroup);
extern SCatalogMgmt gCtgMgmt;

View File

@ -92,7 +92,7 @@ int32_t ctgRefreshTbMeta(SCatalog* pCtg, SRequestConnInfo *pConn, SCtgTbMetaCtx*
int32_t code = 0;
if (!CTG_FLAG_IS_SYS_DB(ctx->flag)) {
CTG_ERR_RET(catalogGetTableHashVgroup(pCtg, pConn, ctx->pName, &vgroupInfo));
CTG_ERR_RET(ctgGetTbHashVgroup(pCtg, pConn, ctx->pName, &vgroupInfo));
}
STableMetaOutput moutput = {0};
@ -337,7 +337,10 @@ int32_t ctgGetTbType(SCatalog* pCtg, SRequestConnInfo *pConn, SName* pTableName,
}
STableMeta* pMeta = NULL;
CTG_ERR_RET(catalogGetTableMeta(pCtg, pConn, pTableName, &pMeta));
SCtgTbMetaCtx ctx = {0};
ctx.pName = (SName*)pTableName;
ctx.flag = CTG_FLAG_UNKNOWN_STB;
CTG_ERR_RET(ctgGetTbMeta(pCtg, pConn, &ctx, &pMeta));
*tbType = pMeta->tableType;
taosMemoryFree(pMeta);
@ -391,7 +394,7 @@ int32_t ctgGetTbCfg(SCatalog* pCtg, SRequestConnInfo *pConn, SName* pTableName,
CTG_ERR_RET(ctgGetTableCfgFromMnode(pCtg, pConn, pTableName, pCfg, NULL));
} else {
SVgroupInfo vgroupInfo = {0};
CTG_ERR_RET(catalogGetTableHashVgroup(pCtg, pConn, pTableName, &vgroupInfo));
CTG_ERR_RET(ctgGetTbHashVgroup(pCtg, pConn, pTableName, &vgroupInfo));
CTG_ERR_RET(ctgGetTableCfgFromVnode(pCtg, pConn, pTableName, &vgroupInfo, pCfg, NULL));
}
@ -477,6 +480,57 @@ _return:
CTG_RET(code);
}
int32_t ctgGetTbHashVgroup(SCatalog *pCtg, SRequestConnInfo *pConn, const SName *pTableName, SVgroupInfo *pVgroup) {
if (IS_SYS_DBNAME(pTableName->dbname)) {
ctgError("no valid vgInfo for db, dbname:%s", pTableName->dbname);
CTG_ERR_RET(TSDB_CODE_CTG_INVALID_INPUT);
}
SCtgDBCache* dbCache = NULL;
int32_t code = 0;
char db[TSDB_DB_FNAME_LEN] = {0};
tNameGetFullDbName(pTableName, db);
SDBVgInfo *vgInfo = NULL;
CTG_ERR_JRET(ctgGetDBVgInfo(pCtg, pConn, db, &dbCache, &vgInfo));
CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pCtg, vgInfo ? vgInfo : dbCache->vgCache.vgInfo, pTableName, pVgroup));
_return:
if (dbCache) {
ctgRUnlockVgInfo(dbCache);
ctgReleaseDBCache(pCtg, dbCache);
}
if (vgInfo) {
taosHashCleanup(vgInfo->vgHash);
taosMemoryFreeClear(vgInfo);
}
CTG_RET(code);
}
int32_t ctgRemoveTbMeta(SCatalog* pCtg, SName* pTableName) {
int32_t code = 0;
if (NULL == pCtg || NULL == pTableName) {
CTG_ERR_RET(TSDB_CODE_CTG_INVALID_INPUT);
}
if (NULL == pCtg->dbCache) {
return TSDB_CODE_SUCCESS;
}
CTG_ERR_JRET(ctgRemoveTbMetaFromCache(pCtg, pTableName, true));
_return:
CTG_RET(code);
}
int32_t catalogInit(SCatalogCfg* cfg) {
if (gCtgMgmt.pCluster) {
qError("catalog already initialized");
@ -772,21 +826,7 @@ _return:
int32_t catalogRemoveTableMeta(SCatalog* pCtg, SName* pTableName) {
CTG_API_ENTER();
int32_t code = 0;
if (NULL == pCtg || NULL == pTableName) {
CTG_API_LEAVE(TSDB_CODE_CTG_INVALID_INPUT);
}
if (NULL == pCtg->dbCache) {
CTG_API_LEAVE(TSDB_CODE_SUCCESS);
}
CTG_ERR_JRET(ctgRemoveTbMetaFromCache(pCtg, pTableName, true));
_return:
CTG_API_LEAVE(code);
CTG_API_LEAVE(ctgRemoveTbMeta(pCtg, pTableName));
}
int32_t catalogRemoveStbMeta(SCatalog* pCtg, const char* dbFName, uint64_t dbId, const char* stbName, uint64_t suid) {
@ -878,12 +918,12 @@ int32_t catalogChkTbMetaVersion(SCatalog* pCtg, SRequestConnInfo *pConn, SArray*
case TSDB_CHILD_TABLE: {
SName stb = name;
strcpy(stb.tname, stbName);
catalogRemoveTableMeta(pCtg, &stb);
ctgRemoveTbMeta(pCtg, &stb);
break;
}
case TSDB_SUPER_TABLE:
case TSDB_NORMAL_TABLE:
catalogRemoveTableMeta(pCtg, &name);
ctgRemoveTbMeta(pCtg, &name);
break;
default:
ctgError("ignore table type %d", tbType);
@ -947,34 +987,7 @@ int32_t catalogGetTableDistVgInfo(SCatalog* pCtg, SRequestConnInfo *pConn, const
int32_t catalogGetTableHashVgroup(SCatalog *pCtg, SRequestConnInfo *pConn, const SName *pTableName, SVgroupInfo *pVgroup) {
CTG_API_ENTER();
if (IS_SYS_DBNAME(pTableName->dbname)) {
ctgError("no valid vgInfo for db, dbname:%s", pTableName->dbname);
CTG_API_LEAVE(TSDB_CODE_CTG_INVALID_INPUT);
}
SCtgDBCache* dbCache = NULL;
int32_t code = 0;
char db[TSDB_DB_FNAME_LEN] = {0};
tNameGetFullDbName(pTableName, db);
SDBVgInfo *vgInfo = NULL;
CTG_ERR_JRET(ctgGetDBVgInfo(pCtg, pConn, db, &dbCache, &vgInfo));
CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pCtg, vgInfo ? vgInfo : dbCache->vgCache.vgInfo, pTableName, pVgroup));
_return:
if (dbCache) {
ctgRUnlockVgInfo(dbCache);
ctgReleaseDBCache(pCtg, dbCache);
}
if (vgInfo) {
taosHashCleanup(vgInfo->vgHash);
taosMemoryFreeClear(vgInfo);
}
CTG_API_LEAVE(code);
CTG_API_LEAVE(ctgGetTbHashVgroup(pCtg, pConn, pTableName, pVgroup));
}
int32_t catalogGetAllMeta(SCatalog* pCtg, SRequestConnInfo *pConn, const SCatalogReq* pReq, SMetaData* pRsp) {
@ -1200,7 +1213,7 @@ int32_t catalogRefreshGetTableCfg(SCatalog* pCtg, SRequestConnInfo *pConn, const
}
int32_t code = 0;
CTG_ERR_JRET(catalogRemoveTableMeta(pCtg, (SName*)pTableName));
CTG_ERR_JRET(ctgRemoveTbMeta(pCtg, (SName*)pTableName));
CTG_ERR_JRET(ctgGetTbCfg(pCtg, pConn, (SName*)pTableName, pCfg));

View File

@ -398,7 +398,7 @@ int32_t ctgHandleForceUpdate(SCatalog* pCtg, int32_t taskNum, SCtgJob *pJob, con
SName* name = taosHashIterate(pTb, NULL);
while (name) {
catalogRemoveTableMeta(pCtg, name);
ctgRemoveTbMeta(pCtg, name);
name = taosHashIterate(pTb, name);
}

View File

@ -855,7 +855,6 @@ void doApplyFunctions(SExecTaskInfo* taskInfo, SqlFunctionCtx* pCtx, STimeWin
int32_t extractDataBlockFromFetchRsp(SSDataBlock* pRes, SLoadRemoteDataInfo* pLoadInfo, int32_t numOfRows, char* pData,
int32_t compLen, int32_t numOfOutput, int64_t startTs, uint64_t* total,
SArray* pColList);
STimeWindow getAlignQueryTimeWindow(SInterval* pInterval, int32_t precision, int64_t key);
STimeWindow getFirstQualifiedTimeWindow(int64_t ts, STimeWindow* pWindow, SInterval* pInterval, int32_t order);
int32_t getTableScanInfo(SOperatorInfo* pOperator, int32_t *order, int32_t* scanFlag);
@ -986,9 +985,8 @@ int32_t decodeOperator(SOperatorInfo* ops, const char* data, int32_t length);
void setTaskStatus(SExecTaskInfo* pTaskInfo, int8_t status);
int32_t createExecTaskInfoImpl(SSubplan* pPlan, SExecTaskInfo** pTaskInfo, SReadHandle* pHandle, uint64_t taskId,
const char* sql, EOPTR_EXEC_MODEL model);
int32_t createDataSinkParam(SDataSinkNode *pNode, void **pParam, qTaskInfo_t* pTaskInfo, SReadHandle* readHandle);
int32_t getOperatorExplainExecInfo(SOperatorInfo* operatorInfo, SExplainExecInfo** pRes, int32_t* capacity,
int32_t* resNum);
int32_t createDataSinkParam(SDataSinkNode *pNode, void **pParam, qTaskInfo_t* pTaskInfo, SReadHandle* readHandle);
int32_t getOperatorExplainExecInfo(SOperatorInfo* operatorInfo, SArray* pExecInfoList);
int32_t aggDecodeResultRow(SOperatorInfo* pOperator, char* result);
int32_t aggEncodeResultRow(SOperatorInfo* pOperator, char** result, int32_t* length);

View File

@ -496,11 +496,9 @@ void qDestroyTask(qTaskInfo_t qTaskHandle) {
doDestroyTask(pTaskInfo);
}
int32_t qGetExplainExecInfo(qTaskInfo_t tinfo, int32_t* resNum, SExplainExecInfo** pRes) {
int32_t qGetExplainExecInfo(qTaskInfo_t tinfo, SArray* pExecInfoList) {
SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;
int32_t capacity = 0;
return getOperatorExplainExecInfo(pTaskInfo->pRoot, pRes, &capacity, resNum);
return getOperatorExplainExecInfo(pTaskInfo->pRoot, pExecInfoList);
}
int32_t qSerializeTaskStatus(qTaskInfo_t tinfo, char** pOutput, int32_t* len) {

View File

@ -4613,42 +4613,29 @@ void releaseQueryBuf(size_t numOfTables) {
atomic_add_fetch_64(&tsQueryBufferSizeBytes, t);
}
int32_t getOperatorExplainExecInfo(SOperatorInfo* operatorInfo, SExplainExecInfo** pRes, int32_t* capacity,
int32_t* resNum) {
if (*resNum >= *capacity) {
*capacity += 10;
int32_t getOperatorExplainExecInfo(SOperatorInfo* operatorInfo, SArray* pExecInfoList) {
SExplainExecInfo execInfo = {0};
SExplainExecInfo* pExplainInfo = taosArrayPush(pExecInfoList, &execInfo);
*pRes = taosMemoryRealloc(*pRes, (*capacity) * sizeof(SExplainExecInfo));
if (NULL == *pRes) {
qError("malloc %d failed", (*capacity) * (int32_t)sizeof(SExplainExecInfo));
return TSDB_CODE_QRY_OUT_OF_MEMORY;
}
}
SExplainExecInfo* pInfo = &(*pRes)[*resNum];
pInfo->numOfRows = operatorInfo->resultInfo.totalRows;
pInfo->startupCost = operatorInfo->cost.openCost;
pInfo->totalCost = operatorInfo->cost.totalCost;
pExplainInfo->numOfRows = operatorInfo->resultInfo.totalRows;
pExplainInfo->startupCost = operatorInfo->cost.openCost;
pExplainInfo->totalCost = operatorInfo->cost.totalCost;
pExplainInfo->verboseLen = 0;
pExplainInfo->verboseInfo = NULL;
if (operatorInfo->fpSet.getExplainFn) {
int32_t code = operatorInfo->fpSet.getExplainFn(operatorInfo, &pInfo->verboseInfo, &pInfo->verboseLen);
int32_t code = operatorInfo->fpSet.getExplainFn(operatorInfo, &pExplainInfo->verboseInfo, &pExplainInfo->verboseLen);
if (code) {
qError("%s operator getExplainFn failed, code:%s", GET_TASKID(operatorInfo->pTaskInfo), tstrerror(code));
return code;
}
} else {
pInfo->verboseLen = 0;
pInfo->verboseInfo = NULL;
}
++(*resNum);
int32_t code = 0;
for (int32_t i = 0; i < operatorInfo->numOfDownstream; ++i) {
code = getOperatorExplainExecInfo(operatorInfo->pDownstream[i], pRes, capacity, resNum);
if (code) {
taosMemoryFreeClear(*pRes);
code = getOperatorExplainExecInfo(operatorInfo->pDownstream[i], pExecInfoList);
if (code != TSDB_CODE_SUCCESS) {
// taosMemoryFreeClear(*pRes);
return TSDB_CODE_QRY_OUT_OF_MEMORY;
}
}

View File

@ -31,14 +31,21 @@ static int32_t* setupColumnOffset(const SSDataBlock* pBlock, int32_t rowCapacity
static int32_t setGroupResultOutputBuf(SOperatorInfo* pOperator, SOptrBasicInfo* binfo, int32_t numOfCols, char* pData, int16_t bytes,
uint64_t groupId, SDiskbasedBuf* pBuf, SAggSupporter* pAggSup);
static void freeGroupKey(void* param) {
SGroupKeys* pKey = (SGroupKeys*) param;
taosMemoryFree(pKey->pData);
}
static void destroyGroupOperatorInfo(void* param, int32_t numOfOutput) {
SGroupbyOperatorInfo* pInfo = (SGroupbyOperatorInfo*)param;
cleanupBasicInfo(&pInfo->binfo);
taosMemoryFreeClear(pInfo->keyBuf);
taosArrayDestroy(pInfo->pGroupCols);
taosArrayDestroy(pInfo->pGroupColVals);
taosArrayDestroyEx(pInfo->pGroupColVals, freeGroupKey);
cleanupExprSupp(&pInfo->scalarSup);
cleanupGroupResInfo(&pInfo->groupResInfo);
cleanupAggSup(&pInfo->aggSup);
taosMemoryFreeClear(param);
}
@ -414,8 +421,6 @@ SOperatorInfo* createGroupOperatorInfo(SOperatorInfo* downstream, SExprInfo* pEx
pOperator->blocking = true;
pOperator->status = OP_NOT_OPENED;
// pOperator->operatorType = OP_Groupby;
pOperator->exprSupp.pExprInfo = pExprInfo;
pOperator->exprSupp.numOfExprs = numOfCols;
pOperator->info = pInfo;
pOperator->pTaskInfo = pTaskInfo;

View File

@ -2414,7 +2414,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
{
.name = "hyperloglog",
.type = FUNCTION_TYPE_HYPERLOGLOG,
.classification = FUNC_MGT_AGG_FUNC,
.classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_TIMELINE_FUNC,
.translateFunc = translateHLL,
.getEnvFunc = getHLLFuncEnv,
.initFunc = functionSetup,
@ -2428,7 +2428,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
},
{
.name = "_hyperloglog_partial",
.type = FUNCTION_TYPE_HYPERLOGLOG_PARTIAL,
.type = FUNCTION_TYPE_HYPERLOGLOG_PARTIAL | FUNC_MGT_TIMELINE_FUNC,
.classification = FUNC_MGT_AGG_FUNC,
.translateFunc = translateHLLPartial,
.getEnvFunc = getHLLFuncEnv,
@ -2440,7 +2440,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
},
{
.name = "_hyperloglog_merge",
.type = FUNCTION_TYPE_HYPERLOGLOG_MERGE,
.type = FUNCTION_TYPE_HYPERLOGLOG_MERGE | FUNC_MGT_TIMELINE_FUNC,
.classification = FUNC_MGT_AGG_FUNC,
.translateFunc = translateHLLMerge,
.getEnvFunc = getHLLFuncEnv,

View File

@ -39,7 +39,7 @@ int32_t qwBuildAndSendFetchRsp(int32_t rspType, SRpcHandleInfo *pConn, SRetrieve
void qwBuildFetchRsp(void *msg, SOutputData *input, int32_t len, bool qComplete);
int32_t qwBuildAndSendCQueryMsg(QW_FPARAMS_DEF, SRpcHandleInfo *pConn);
int32_t qwBuildAndSendQueryRsp(int32_t rspType, SRpcHandleInfo *pConn, int32_t code, SQWTaskCtx *ctx);
int32_t qwBuildAndSendExplainRsp(SRpcHandleInfo *pConn, SExplainExecInfo *execInfo, int32_t num);
int32_t qwBuildAndSendExplainRsp(SRpcHandleInfo *pConn, SArray* pExecList);
int32_t qwBuildAndSendErrorRsp(int32_t rspType, SRpcHandleInfo *pConn, int32_t code);
void qwFreeFetchRsp(void *msg);
int32_t qwMallocFetchRsp(int32_t length, SRetrieveTableRsp **rsp);

View File

@ -82,8 +82,9 @@ int32_t qwBuildAndSendQueryRsp(int32_t rspType, SRpcHandleInfo *pConn, int32_t c
return TSDB_CODE_SUCCESS;
}
int32_t qwBuildAndSendExplainRsp(SRpcHandleInfo *pConn, SExplainExecInfo *execInfo, int32_t num) {
SExplainRsp rsp = {.numOfPlans = num, .subplanInfo = execInfo};
int32_t qwBuildAndSendExplainRsp(SRpcHandleInfo *pConn, SArray* pExecList) {
SExplainExecInfo* pInfo = taosArrayGet(pExecList, 0);
SExplainRsp rsp = {.numOfPlans = taosArrayGetSize(pExecList), .subplanInfo = pInfo};
int32_t contLen = tSerializeSExplainRsp(NULL, 0, &rsp);
void * pRsp = rpcMallocCont(contLen);
@ -96,10 +97,9 @@ int32_t qwBuildAndSendExplainRsp(SRpcHandleInfo *pConn, SExplainExecInfo *execIn
.code = 0,
.info = *pConn,
};
rpcRsp.info.ahandle = NULL;
tmsgSendRsp(&rpcRsp);
return TSDB_CODE_SUCCESS;
}

View File

@ -44,18 +44,24 @@ int32_t qwProcessHbLinkBroken(SQWorker *mgmt, SQWMsg *qwMsg, SSchedulerHbReq *re
QW_RET(TSDB_CODE_SUCCESS);
}
static void freeItem(void* param) {
SExplainExecInfo* pInfo = param;
taosMemoryFree(pInfo->verboseInfo);
}
int32_t qwHandleTaskComplete(QW_FPARAMS_DEF, SQWTaskCtx *ctx) {
qTaskInfo_t taskHandle = ctx->taskHandle;
if (TASK_TYPE_TEMP == ctx->taskType && taskHandle) {
if (ctx->explain) {
SExplainExecInfo *execInfo = NULL;
int32_t resNum = 0;
QW_ERR_RET(qGetExplainExecInfo(taskHandle, &resNum, &execInfo));
SArray* execInfoList = taosArrayInit(4, sizeof(SExplainExecInfo));
QW_ERR_RET(qGetExplainExecInfo(taskHandle, execInfoList));
SRpcHandleInfo connInfo = ctx->ctrlConnInfo;
connInfo.ahandle = NULL;
QW_ERR_RET(qwBuildAndSendExplainRsp(&connInfo, execInfo, resNum));
int32_t code = qwBuildAndSendExplainRsp(&connInfo, execInfoList);
taosArrayDestroyEx(execInfoList, freeItem);
QW_ERR_RET(code);
}
if (!ctx->needFetch) {

View File

@ -790,65 +790,6 @@ int32_t syncNodeOnAppendEntriesSnapshotCb(SSyncNode* ths, SyncAppendEntries* pMs
}
} while (0);
#if 0
// fake match
//
// condition1:
// I have snapshot, no log, preIndex > myLastIndex
//
// condition2:
// I have snapshot, have log, log <= snapshot, preIndex > myLastIndex
//
// condition3:
// I have snapshot, preIndex < snapshot.lastApplyIndex
//
// condition4:
// I have snapshot, preIndex == snapshot.lastApplyIndex, no data
//
// operation:
// match snapshot.lastApplyIndex - 1;
// no operation on log
do {
SyncIndex myLastIndex = syncNodeGetLastIndex(ths);
SSnapshot snapshot;
ths->pFsm->FpGetSnapshotInfo(ths->pFsm, &snapshot);
bool condition0 = (pMsg->term == ths->pRaftStore->currentTerm) && (ths->state == TAOS_SYNC_STATE_FOLLOWER) &&
syncNodeHasSnapshot(ths);
bool condition1 =
condition0 && (ths->pLogStore->syncLogEntryCount(ths->pLogStore) == 0) && (pMsg->prevLogIndex > myLastIndex); // donot use syncLogEntryCount!!! use isEmpty
bool condition2 = condition0 && (ths->pLogStore->syncLogLastIndex(ths->pLogStore) <= snapshot.lastApplyIndex) &&
(pMsg->prevLogIndex > myLastIndex);
bool condition3 = condition0 && (pMsg->prevLogIndex < snapshot.lastApplyIndex);
bool condition4 = condition0 && (pMsg->prevLogIndex == snapshot.lastApplyIndex) && (pMsg->dataLen == 0);
bool condition = condition1 || condition2 || condition3 || condition4;
if (condition) {
char logBuf[128];
snprintf(logBuf, sizeof(logBuf), "recv sync-append-entries, fake match, pre-index:%" PRId64 ", pre-term:%" PRIu64,
pMsg->prevLogIndex, pMsg->prevLogTerm);
syncNodeEventLog(ths, logBuf);
// prepare response msg
SyncAppendEntriesReply* pReply = syncAppendEntriesReplyBuild(ths->vgId);
pReply->srcId = ths->myRaftId;
pReply->destId = pMsg->srcId;
pReply->term = ths->pRaftStore->currentTerm;
pReply->privateTerm = ths->pNewNodeReceiver->privateTerm;
pReply->success = true;
pReply->matchIndex = snapshot.lastApplyIndex;
// send response
SRpcMsg rpcMsg;
syncAppendEntriesReply2RpcMsg(pReply, &rpcMsg);
syncNodeSendMsgById(&pReply->destId, ths, &rpcMsg);
syncAppendEntriesReplyDestroy(pReply);
return ret;
}
} while (0);
#endif
// fake match
//
// condition1:

View File

@ -283,12 +283,14 @@ print ================== server restart completed
sql connect
sql use first_db0;
sql select last(*), tbname from m1 group by tbname;
sql select last(*), tbname from m1 group by tbname order by tbname;
if $rows != 2 then
return -1
endi
if $data00 != @20-03-01 01:01:01.000@ then
print data00 $data00 != 20-03-01 01:01:01.000@
return -1
endi

View File

@ -47,7 +47,7 @@ endi
$replica = 3
$vgroups = 1
$retentions = 5s:7d,15s:21d
$retentions = 5s:7d,15s:21d,1m:365d
print ============= create database
sql create database db replica $replica vgroups $vgroups retentions $retentions
@ -114,7 +114,7 @@ endi
vg_ready:
print ====> create stable/child table
sql create table stb (ts timestamp, c1 int, c2 float, c3 double) tags (t1 int) rollup(sum)
sql create table stb (ts timestamp, c1 int, c2 float, c3 double) tags (t1 int) rollup(sum) watermark 3s,3s max_delay 3s,3s
sql show stables
if $rows != 1 then
@ -129,20 +129,28 @@ system sh/exec.sh -n dnode4 -s stop -x SIGINT
sleep 3000
print ===> write 100 records
$N = 100
$count = 0
while $count < $N
$ms = 1659000000000 + $count
sql insert into ct1 values( $ms , $count , 2.1, 3.1)
$count = $count + 1
endw
print ===> write 0-50 records
$ms = 0
$cnt = 0
while $cnt < 50
$ms = $cnt . m
sql insert into ct1 values (now + $ms , $cnt , 2.1, 3.1)
$cnt = $cnt + 1
endw
print ===> flush database db
sql flush database db;
sleep 5000
print ===> write 51-100 records
while $cnt < 100
$ms = $cnt . m
sql insert into ct1 values (now + $ms , $cnt , 2.1, 3.1)
$cnt = $cnt + 1
endw
#sql flush database db;
sleep 3000
print ===> flush database db
sql flush database db;
sleep 5000
print ===> stop dnode1 dnode2 dnode3
@ -150,8 +158,6 @@ system sh/exec.sh -n dnode1 -s stop -x SIGINT
system sh/exec.sh -n dnode2 -s stop -x SIGINT
system sh/exec.sh -n dnode3 -s stop -x SIGINT
sleep 10000
########################################################
print ===> start dnode1 dnode2 dnode3 dnode4
system sh/exec.sh -n dnode1 -s start
@ -164,7 +170,7 @@ sleep 3000
print =============== query data
sql connect
sql use db
sql select * from ct1
sql select * from ct1 where ts > now - 1d
print rows: $rows
print $data00 $data01 $data02
if $rows != 100 then

View File

@ -11,7 +11,7 @@ from util.dnodes import *
class TDTestCase:
updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 ,
updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 ,
"jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143,
"wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143}
def init(self, conn, logSql):
@ -37,7 +37,7 @@ class TDTestCase:
def illegal_params(self):
illegal_params = ["1","0","NULL","False","True" ,"keep","now" ,"*" , "," ,"_" , "abc" ,"keep"]
for value in illegal_params:
tdSql.error("create database testdb replica 1 cachemodel '%s' " %value)
@ -80,9 +80,9 @@ class TDTestCase:
tdSql.execute(" insert into tb2 values(now , %d, %f)" %(k,k*10) )
def check_cachemodel_sets(self):
# check cache_last value for database
# check cache_last value for database
tdSql.query(" show databases ")
databases_infos = tdSql.queryResult
@ -96,10 +96,10 @@ class TDTestCase:
continue
cache_lasts[dbname]=self.getCacheModelNum(cache_last_value)
# cache_last_set value
# cache_last_set value
for k , v in cache_lasts.items():
if k=="testdb_"+str(self.getCacheModelStr(v)):
tdLog.info(" database %s cache_last value check pass, value is %s "%(k,self.getCacheModelStr(v)) )
else:
@ -116,7 +116,7 @@ class TDTestCase:
dataPath = buildPath + "/../sim/dnode1/data"
abs_vnodePath = os.path.abspath(dataPath)+"/vnode/"
tdLog.info("abs_vnodePath: %s" % abs_vnodePath)
tdSql.query(" show dnodes ")
dnode_id = tdSql.queryResult[0][0]
@ -127,7 +127,7 @@ class TDTestCase:
vgroups_infos = tdSql.queryResult
for vgroup_info in vgroups_infos:
vnode_json = abs_vnodePath + "/vnode" +f"{vgroup_info[0]}/" + "vnode.json"
vnode_info_of_db = f"cat {vnode_json}"
vnode_info_of_db = f"cat {vnode_json}"
vnode_info = subprocess.check_output(vnode_info_of_db, shell=True).decode("utf-8")
infoDict = json.loads(vnode_info)
vnode_json_of_dbname = f"{dnode_id}."+ dbname
@ -142,7 +142,7 @@ class TDTestCase:
tdLog.exit("cacheLast not found in vnode.json of vnode%d "%(vgroup_info[0]))
def restart_check_cachemodel_sets(self):
for i in range(3):
tdSql.query("show dnodes")
index = tdSql.getData(0, 0)
@ -157,7 +157,7 @@ class TDTestCase:
self.prepare_datas()
self.check_cachemodel_sets()
self.restart_check_cachemodel_sets()
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")


@ -33,14 +33,14 @@ class TDTestCase:
tdSql.query('select database()')
tdSql.checkData(0,0,self.dbname)
tdSql.execute(f'drop database {self.dbname}')
def check_version(self):
taos_list = ['server','client']
for i in taos_list:
tdSql.query(f'select {i}_version()')
version_info = str(subprocess.run('cat ../../source/util/src/version.c |grep "char version"', shell=True,capture_output=True).stdout.decode('utf8')).split('"')[1]
tdSql.checkData(0,0,version_info)
def get_server_status(self):
sleep(self.delaytime)
tdSql.query('select server_status()')
@ -51,7 +51,7 @@ class TDTestCase:
if platform.system().lower() == 'windows':
sleep(10)
tdSql.error('select server_status()')
def run(self):
self.get_database_info()
self.check_version()
@ -61,4 +61,4 @@ class TDTestCase:
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())


@ -18,7 +18,7 @@ from util.dnodes import *
def taos_command (buildPath, key, value, expectString, cfgDir, sqlString='', key1='', value1=''):
if len(key) == 0:
tdLog.exit("taos test key is null!")
if platform.system().lower() == 'windows':
taosCmd = buildPath + '\\build\\bin\\taos.exe '
taosCmd = taosCmd.replace('\\','\\\\')
@ -214,7 +214,7 @@ class TDTestCase:
retCode, retVal = taos_command(buildPath, "p", keyDict['p'], "taos>", keyDict['c'], '', "A", '')
if retCode != "TAOS_OK":
tdLog.exit("taos -A fail")
sqlString = 'create database ' + newDbName + ';'
retCode = taos_command(buildPath, "u", keyDict['u'], "taos>", keyDict['c'], sqlString, 'a', retVal)
if retCode != "TAOS_OK":
@ -237,7 +237,7 @@ class TDTestCase:
tdLog.exit("taos -s fail")
print ("========== check new db ==========")
tdSql.query("show databases")
tdSql.query("show databases")
for i in range(tdSql.queryRows):
if tdSql.getData(i, 0) == newDbName:
break
@ -259,24 +259,24 @@ class TDTestCase:
if retCode != "TAOS_OK":
tdLog.exit("taos -s insert data fail")
sqlString = "select * from " + newDbName + ".ctb0"
sqlString = "select * from " + newDbName + ".ctb0"
tdSql.query(sqlString)
tdSql.checkData(0, 0, '2021-04-01 08:00:00.000')
tdSql.checkData(0, 1, 10)
tdSql.checkData(1, 0, '2021-04-01 08:00:01.000')
tdSql.checkData(1, 1, 20)
sqlString = "select * from " + newDbName + ".ctb1"
sqlString = "select * from " + newDbName + ".ctb1"
tdSql.query(sqlString)
tdSql.checkData(0, 0, '2021-04-01 08:00:00.000')
tdSql.checkData(0, 1, 11)
tdSql.checkData(1, 0, '2021-04-01 08:00:01.000')
tdSql.checkData(1, 1, 21)
keyDict['s'] = "\"select * from " + newDbName + ".ctb0\""
retCode = taos_command(buildPath, "s", keyDict['s'], "2021-04-01 08:00:01.000", keyDict['c'], '', '', '')
if retCode != "TAOS_OK":
tdLog.exit("taos -r show fail")
tdLog.printNoPrefix("================================ parameter: -r")
keyDict['s'] = "\"select * from " + newDbName + ".ctb0\""
retCode = taos_command(buildPath, "s", keyDict['s'], "1617235200000", keyDict['c'], '', 'r', '')
@ -287,9 +287,9 @@ class TDTestCase:
retCode = taos_command(buildPath, "s", keyDict['s'], "1617235201000", keyDict['c'], '', 'r', '')
if retCode != "TAOS_OK":
tdLog.exit("taos -r show fail")
tdSql.query('drop database %s'%newDbName)
tdLog.printNoPrefix("================================ parameter: -f")
pwd=os.getcwd()
newDbName="dbf"
@ -298,15 +298,15 @@ class TDTestCase:
sql2 = "echo use " + newDbName + " >> " + sqlFile
if platform.system().lower() == 'windows':
sql3 = "echo create table ntbf (ts timestamp, c binary(40)) >> " + sqlFile
sql4 = "echo insert into ntbf values (\"2021-04-01 08:00:00.000\", \"test taos -f1\")(\"2021-04-01 08:00:01.000\", \"test taos -f2\") >> " + sqlFile
sql4 = "echo insert into ntbf values (\"2021-04-01 08:00:00.000\", \"test taos -f1\")(\"2021-04-01 08:00:01.000\", \"test taos -f2\") >> " + sqlFile
else:
sql3 = "echo 'create table ntbf (ts timestamp, c binary(40))' >> " + sqlFile
sql4 = "echo 'insert into ntbf values (\"2021-04-01 08:00:00.000\", \"test taos -f1\")(\"2021-04-01 08:00:01.000\", \"test taos -f2\")' >> " + sqlFile
sql5 = "echo show databases >> " + sqlFile
os.system(sql1)
os.system(sql2)
os.system(sql3)
os.system(sql4)
sql4 = "echo 'insert into ntbf values (\"2021-04-01 08:00:00.000\", \"test taos -f1\")(\"2021-04-01 08:00:01.000\", \"test taos -f2\")' >> " + sqlFile
sql5 = "echo show databases >> " + sqlFile
os.system(sql1)
os.system(sql2)
os.system(sql3)
os.system(sql4)
os.system(sql5)
keyDict['f'] = pwd + "/0-others/sql.txt"
@ -316,7 +316,7 @@ class TDTestCase:
tdLog.exit("taos -f fail")
print ("========== check new db ==========")
tdSql.query("show databases")
tdSql.query("show databases")
for i in range(tdSql.queryRows):
#print ("dbseq: %d, dbname: %s"%(i, tdSql.getData(i, 0)))
if tdSql.getData(i, 0) == newDbName:
@ -324,13 +324,13 @@ class TDTestCase:
else:
tdLog.exit("create db fail after taos -f fail")
sqlString = "select * from " + newDbName + ".ntbf"
sqlString = "select * from " + newDbName + ".ntbf"
tdSql.query(sqlString)
tdSql.checkData(0, 0, '2021-04-01 08:00:00.000')
tdSql.checkData(0, 1, 'test taos -f1')
tdSql.checkData(1, 0, '2021-04-01 08:00:01.000')
tdSql.checkData(1, 1, 'test taos -f2')
shellCmd = "rm -f " + sqlFile
os.system(shellCmd)
tdSql.query('drop database %s'%newDbName)
@ -345,9 +345,9 @@ class TDTestCase:
#print ("-C return content:\n ", retVal)
totalCfgItem = {"firstEp":['', '', ''], }
for line in retVal.splitlines():
strList = line.split()
strList = line.split()
if (len(strList) > 2):
totalCfgItem[strList[1]] = strList
totalCfgItem[strList[1]] = strList
#print ("dict content:\n ", totalCfgItem)
firstEp = keyDict["h"] + ':' + keyDict['P']
@ -356,8 +356,8 @@ class TDTestCase:
if (totalCfgItem["rpcDebugFlag"][2] != self.rpcDebugFlagVal) and (totalCfgItem["rpcDebugFlag"][0] != 'cfg_file'):
tdLog.exit("taos -C return rpcDebugFlag error!")
count = os.cpu_count()
count = os.cpu_count()
if (totalCfgItem["numOfCores"][2] != count) and (totalCfgItem["numOfCores"][0] != 'default'):
tdLog.exit("taos -C return numOfCores error!")


@ -18,7 +18,7 @@ from util.dnodes import *
def taos_command (buildPath, key, value, expectString, cfgDir, sqlString='', key1='', value1=''):
if len(key) == 0:
tdLog.exit("taos test key is null!")
if platform.system().lower() == 'windows':
taosCmd = buildPath + '\\build\\bin\\taos.exe '
taosCmd = taosCmd.replace('\\','\\\\')
@ -231,7 +231,7 @@ class TDTestCase:
tdLog.info("taos -P %s test success"%keyDict['P'])
else:
tdLog.exit("taos -P %s fail"%keyDict['P'])
tdLog.printNoPrefix("================================ parameter: -f with error sql ")
pwd=os.getcwd()
newDbName="dbf"
@ -240,15 +240,15 @@ class TDTestCase:
sql2 = "echo use " + newDbName + " >> " + sqlFile
if platform.system().lower() == 'windows':
sql3 = "echo create table ntbf (ts timestamp, c binary(40)) no this item >> " + sqlFile
sql4 = "echo insert into ntbf values (\"2021-04-01 08:00:00.000\", \"test taos -f1\")(\"2021-04-01 08:00:01.000\", \"test taos -f2\") >> " + sqlFile
sql4 = "echo insert into ntbf values (\"2021-04-01 08:00:00.000\", \"test taos -f1\")(\"2021-04-01 08:00:01.000\", \"test taos -f2\") >> " + sqlFile
else:
sql3 = "echo 'create table ntbf (ts timestamp, c binary(40)) no this item' >> " + sqlFile
sql4 = "echo 'insert into ntbf values (\"2021-04-01 08:00:00.000\", \"test taos -f1\")(\"2021-04-01 08:00:01.000\", \"test taos -f2\")' >> " + sqlFile
sql5 = "echo show databases >> " + sqlFile
os.system(sql1)
os.system(sql2)
os.system(sql3)
os.system(sql4)
sql4 = "echo 'insert into ntbf values (\"2021-04-01 08:00:00.000\", \"test taos -f1\")(\"2021-04-01 08:00:01.000\", \"test taos -f2\")' >> " + sqlFile
sql5 = "echo show databases >> " + sqlFile
os.system(sql1)
os.system(sql2)
os.system(sql3)
os.system(sql4)
os.system(sql5)
keyDict['f'] = pwd + "/0-others/sql.txt"
@ -258,7 +258,7 @@ class TDTestCase:
tdLog.exit("taos -f fail")
print ("========== check new db ==========")
tdSql.query("show databases")
tdSql.query("show databases")
for i in range(tdSql.queryRows):
#print ("dbseq: %d, dbname: %s"%(i, tdSql.getData(i, 0)))
if tdSql.getData(i, 0) == newDbName:
@ -266,9 +266,9 @@ class TDTestCase:
else:
tdLog.exit("create db fail after taos -f fail")
sqlString = "select * from " + newDbName + ".ntbf"
sqlString = "select * from " + newDbName + ".ntbf"
tdSql.error(sqlString)
shellCmd = "rm -f " + sqlFile
os.system(shellCmd)
@ -281,16 +281,16 @@ class TDTestCase:
tdSql.query('drop database %s'%newDbName)
tdLog.printNoPrefix("================================ parameter: -a with error value")
#newDbName="dba"
errorPassword = 'errorPassword'
#newDbName="dba"
errorPassword = 'errorPassword'
sqlString = 'create database ' + newDbName + ';'
retCode, retVal = taos_command(buildPath, "u", keyDict['u'], "taos>", keyDict['c'], sqlString, 'a', errorPassword)
if retCode != "TAOS_FAIL":
tdLog.exit("taos -u %s -a %s"%(keyDict['u'], errorPassword))
tdLog.printNoPrefix("================================ parameter: -p with error value")
#newDbName="dba"
keyDict['p'] = 'errorPassword'
#newDbName="dba"
keyDict['p'] = 'errorPassword'
retCode, retVal = taos_command(buildPath, "u", keyDict['u'], "taos>", keyDict['c'], sqlString, 'p', keyDict['p'])
if retCode == "TAOS_FAIL" and "Authentication failure" in retVal:
tdLog.info("taos -p %s test success"%keyDict['p'])


@ -18,7 +18,7 @@ from util.dnodes import *
def taos_command (buildPath, key, value, expectString, cfgDir, sqlString='', key1='', value1=''):
if len(key) == 0:
tdLog.exit("taos test key is null!")
if platform.system().lower() == 'windows':
taosCmd = buildPath + '\\build\\bin\\taos.exe '
taosCmd = taosCmd.replace('\\','\\\\')
@ -158,34 +158,34 @@ class TDTestCase:
if "2: service ok" in retVal:
tdLog.info("taos -k success")
else:
tdLog.info(retVal)
tdLog.info(retVal)
tdLog.exit("taos -k fail 1")
# stop taosd
tdDnodes.stop(1)
#sleep(10)
#tdDnodes.start(1)
#sleep(5)
#sleep(5)
retCode, retVal = taos_command(buildPath, "k", '', "", keyDict['c'], sqlString)
if "0: unavailable" in retVal:
tdLog.info("taos -k success")
else:
tdLog.info(retVal)
tdLog.info(retVal)
tdLog.exit("taos -k fail 2")
# restart taosd
tdDnodes.start(1)
#sleep(5)
#sleep(5)
retCode, retVal = taos_command(buildPath, "k", '', "", keyDict['c'], sqlString)
if "2: service ok" in retVal:
tdLog.info("taos -k success")
else:
tdLog.info(retVal)
tdLog.info(retVal)
tdLog.exit("taos -k fail 3")
tdLog.printNoPrefix("================================ parameter: -n")
# stop taosd
tdDnodes.stop(1)
tdDnodes.stop(1)
try:
role = 'server'
@ -220,7 +220,7 @@ class TDTestCase:
#print(child.after.decode())
if i == 0:
tdLog.exit('taos -n server fail!')
expectString1 = 'response is received, size:' + pktLen
expectSTring2 = pktNum + '/' + pktNum
if expectString1 in retResult and expectSTring2 in retResult:


@ -51,7 +51,7 @@ class RequestHandlerImpl(http.server.BaseHTTPRequestHandler):
if "version" not in infoDict["cluster_info"] or infoDict["cluster_info"]["version"] == None:
tdLog.exit("first_ep_dnode_id is null!")
if "master_uptime" not in infoDict["cluster_info"] or infoDict["cluster_info"]["master_uptime"] == None:
tdLog.exit("master_uptime is null!")
@ -69,13 +69,13 @@ class RequestHandlerImpl(http.server.BaseHTTPRequestHandler):
if "dnodes" not in infoDict["cluster_info"] or infoDict["cluster_info"]["dnodes"] == None :
tdLog.exit("dnodes is null!")
dnodes_info = { "dnode_id": 1,"dnode_ep": self.hostPort,"status":"ready"}
for k ,v in dnodes_info.items():
if k not in infoDict["cluster_info"]["dnodes"][0] or v != infoDict["cluster_info"]["dnodes"][0][k] :
tdLog.exit("dnodes info is null!")
mnodes_info = { "mnode_id":1, "mnode_ep": self.hostPort,"role": "leader" }
for k ,v in mnodes_info.items():
@ -86,7 +86,7 @@ class RequestHandlerImpl(http.server.BaseHTTPRequestHandler):
if "vgroup_infos" not in infoDict or infoDict["vgroup_infos"]== None:
tdLog.exit("vgroup_infos is null!")
vgroup_infos_nums = len(infoDict["vgroup_infos"])
for index in range(vgroup_infos_nums):
@ -116,14 +116,14 @@ class RequestHandlerImpl(http.server.BaseHTTPRequestHandler):
if "timeseries_total" not in infoDict["grant_info"] or not infoDict["grant_info"]["timeseries_total"] > 0:
tdLog.exit("timeseries_total is null!")
# dnode_info ====================================
if "dnode_info" not in infoDict or infoDict["dnode_info"]== None:
tdLog.exit("dnode_info is null!")
dnode_infos = ['uptime', 'cpu_engine', 'cpu_system', 'cpu_cores', 'mem_engine', 'mem_system', 'mem_total', 'disk_engine',
'disk_used', 'disk_total', 'net_in', 'net_out', 'io_read', 'io_write', 'io_read_disk', 'io_write_disk', 'req_select',
dnode_infos = ['uptime', 'cpu_engine', 'cpu_system', 'cpu_cores', 'mem_engine', 'mem_system', 'mem_total', 'disk_engine',
'disk_used', 'disk_total', 'net_in', 'net_out', 'io_read', 'io_write', 'io_read_disk', 'io_write_disk', 'req_select',
'req_select_rate', 'req_insert', 'req_insert_success', 'req_insert_rate', 'req_insert_batch', 'req_insert_batch_success',
'req_insert_batch_rate', 'errors', 'vnodes_num', 'masters', 'has_mnode', 'has_qnode', 'has_snode', 'has_bnode']
for elem in dnode_infos:
@ -134,7 +134,7 @@ class RequestHandlerImpl(http.server.BaseHTTPRequestHandler):
if "disk_infos" not in infoDict or infoDict["disk_infos"]== None:
tdLog.exit("disk_infos is null!")
# bug for data_dir
if "datadir" not in infoDict["disk_infos"] or len(infoDict["disk_infos"]["datadir"]) <=0 :
tdLog.exit("datadir is null!")
@ -187,7 +187,7 @@ class RequestHandlerImpl(http.server.BaseHTTPRequestHandler):
# log_infos ====================================
if "log_infos" not in infoDict or infoDict["log_infos"]== None:
tdLog.exit("log_infos is null!")
@ -206,13 +206,13 @@ class RequestHandlerImpl(http.server.BaseHTTPRequestHandler):
if "summary" not in infoDict["log_infos"] or len(infoDict["log_infos"]["summary"])!= 4:
tdLog.exit("summary is null!")
if "total" not in infoDict["log_infos"]["summary"][0] or infoDict["log_infos"]["summary"][0]["total"] < 0 :
tdLog.exit("total is null!")
if "level" not in infoDict["log_infos"]["summary"][0] or infoDict["log_infos"]["summary"][0]["level"] not in ["error" ,"info" , "debug" ,"trace"]:
tdLog.exit("level is null!")
def do_GET(self):
"""
process GET request
@ -227,25 +227,25 @@ class RequestHandlerImpl(http.server.BaseHTTPRequestHandler):
if contentEncoding == 'gzip':
req_body = self.rfile.read(int(self.headers["Content-Length"]))
plainText = gzip.decompress(req_body).decode()
else:
else:
plainText = self.rfile.read(int(self.headers["Content-Length"])).decode()
print(plainText)
# 1. send response code and header
self.send_response(200)
self.send_response(200)
self.send_header("Content-Type", "text/html; charset=utf-8")
self.end_headers()
# 2. send response content
#self.wfile.write(("Hello World: " + req_body + "\n").encode("utf-8"))
# 3. check request body info
infoDict = json.loads(plainText)
#print("================")
# print(infoDict)
self.telemetryInfoCheck(infoDict)
# 4. shutdown the server and exit case
# 4. shutdown the server and exit case
assassin = threading.Thread(target=self.server.shutdown)
assassin.daemon = True
assassin.start()
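# shutting the server down from a separate daemon thread avoids a deadlock: HTTPServer.shutdown()
# blocks until serve_forever() returns, so it cannot be called directly from the handler thread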
@ -287,7 +287,7 @@ class TDTestCase:
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
def run(self): # sourcery skip: extract-duplicate-method, remove-redundant-fstring
tdSql.prepare()
# time.sleep(2)


@ -100,9 +100,9 @@ def telemetryInfoCheck(infoDict=''):
if "compStorage" not in infoDict or infoDict["compStorage"] < 0:
tdLog.exit("compStorage is null!")
class RequestHandlerImpl(http.server.BaseHTTPRequestHandler):
class RequestHandlerImpl(http.server.BaseHTTPRequestHandler):
def do_GET(self):
"""
process GET request
@ -117,26 +117,26 @@ class RequestHandlerImpl(http.server.BaseHTTPRequestHandler):
if contentEncoding == 'gzip':
req_body = self.rfile.read(int(self.headers["Content-Length"]))
plainText = gzip.decompress(req_body).decode()
else:
else:
plainText = self.rfile.read(int(self.headers["Content-Length"])).decode()
print("monitor info:\n%s"%plainText)
# 1. send response code and header
self.send_response(200)
self.send_response(200)
self.send_header("Content-Type", "text/html; charset=utf-8")
self.end_headers()
# 2. send response content
#self.wfile.write(("Hello World: " + req_body + "\n").encode("utf-8"))
# 3. check request body info
infoDict = json.loads(plainText)
#print("================")
#print(infoDict)
telemetryInfoCheck(infoDict)
# 4. shutdown the server and exit case
# 4. shutdown the server and exit case
assassin = threading.Thread(target=self.server.shutdown)
assassin.daemon = True
assassin.start()
@ -176,7 +176,7 @@ class TDTestCase:
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
def run(self): # sourcery skip: extract-duplicate-method, remove-redundant-fstring
tdSql.prepare()
# time.sleep(2)


@ -512,7 +512,7 @@ class TDTestCase:
"select c1,c2, udf1(c1,c2) from stb1 group by c1,c2" ,
"select num1,num2,num3,udf1(num1,num2,num3) from tb" ,
"select c1,c6,udf1(c1,c6) from stb1 order by ts" ,
"select abs(udf1(c1,c6,c1,c6)) , abs(ceil(c1)) from stb1 where c1 is not null order by ts;"
"select abs(udf1(c1,c6,c1,c6)) , abs(ceil(c1)) from stb1 where c1 is not null order by ts;"
]
udf2_sqls = ["select udf2(sub1.c1), udf2(sub2.c2) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null" ,
"select udf2(c1) from stb1 group by 1-udf1(c1)" ,


@ -190,7 +190,7 @@ class TDTestCase:
tdSql.execute("use db ")
tdSql.error("select num1 , udf1(num1) ,num2 ,udf1(num2),num3 ,udf1(num3),num4 ,udf1(num4) from tb")
tdSql.error("select c1 , udf1(c1) ,c2 ,udf1(c2), c3 ,udf1(c3), c4 ,udf1(c4) from stb1 order by c1")
# aggregate functions
tdSql.error("select udf2(num1) ,udf2(num2), udf2(num3) from tb")


@ -514,7 +514,7 @@ class TDTestCase:
"select c1,c2, udf1(c1,c2) from stb1 group by c1,c2" ,
"select num1,num2,num3,udf1(num1,num2,num3) from tb" ,
"select c1,c6,udf1(c1,c6) from stb1 order by ts" ,
"select abs(udf1(c1,c6,c1,c6)) , abs(ceil(c1)) from stb1 where c1 is not null order by ts;"
"select abs(udf1(c1,c6,c1,c6)) , abs(ceil(c1)) from stb1 where c1 is not null order by ts;"
]
udf2_sqls = ["select udf2(sub1.c1), udf2(sub2.c2) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null" ,
"select udf2(c1) from stb1 group by 1-udf1(c1)" ,


@ -1,7 +1,7 @@
import taos
import sys
import time
import os
import os
from util.log import *
from util.sql import *
@ -16,7 +16,7 @@ class MyDnodes(TDDnodes):
super(MyDnodes,self).__init__()
self.dnodes = dnodes_lists # dnode must be TDDnode instance
self.simDeployed = False
class TDTestCase:
def init(self,conn ,logSql):
@ -26,7 +26,7 @@ class TDTestCase:
self.master_dnode = self.TDDnodes.dnodes[0]
conn1 = taos.connect(self.master_dnode.cfgDict["fqdn"] , config=self.master_dnode.cfgDir)
tdSql.init(conn1.cursor())
def getBuildPath(self):
selfPath = os.path.dirname(os.path.realpath(__file__))
@ -43,7 +43,7 @@ class TDTestCase:
buildPath = root[:len(root) - len("/build/bin")]
break
return buildPath
def prepare_udf_so(self):
selfPath = os.path.dirname(os.path.realpath(__file__))
@ -61,7 +61,7 @@ class TDTestCase:
def prepare_data(self):
tdSql.execute("drop database if exists db")
tdSql.execute("create database if not exists db replica 1 duration 300")
tdSql.execute("use db")
@ -71,7 +71,7 @@ class TDTestCase:
tags (t1 int)
'''
)
tdSql.execute(
'''
create table t1
@ -142,7 +142,7 @@ class TDTestCase:
# create aggregate functions
tdSql.execute("create aggregate function udf2 as '/tmp/udf/libudf2.so' outputtype double bufSize 8;")
# functions = tdSql.getResult("show functions")
# function_nums = len(functions)
# if function_nums == 2:
@ -167,14 +167,14 @@ class TDTestCase:
# create aggregate functions
tdSql.execute("create aggregate function udf2 as '/tmp/udf/libudf2.so' outputtype double bufSize 8;")
functions = tdSql.getResult("show functions")
function_nums = len(functions)
if function_nums == 2:
tdLog.info("create two udf functions success ")
def basic_udf_query(self , dnode):
mytdSql = self.getConnection(dnode)
# scalar functions
@ -229,7 +229,7 @@ class TDTestCase:
else:
tdLog.info(" UDF query check failed at :dnode_index %s" %dnode.index)
tdLog.exit("query check failed at :dnode_index %s" %dnode.index )
def check_UDF_query(self):
@ -238,10 +238,10 @@ class TDTestCase:
self.basic_udf_query(dnode)
def depoly_cluster(self ,dnodes_nums):
def depoly_cluster(self ,dnodes_nums):
testCluster = False
valgrind = 0
valgrind = 0
hostname = socket.gethostname()
dnodes = []
start_port = 6030
@ -253,7 +253,7 @@ class TDTestCase:
dnode.addExtraCfg("monitorFqdn", hostname)
dnode.addExtraCfg("monitorPort", 7043)
dnodes.append(dnode)
self.TDDnodes = MyDnodes(dnodes)
self.TDDnodes.init("")
self.TDDnodes.setTestCluster(testCluster)
@ -261,11 +261,11 @@ class TDTestCase:
self.TDDnodes.stopAll()
for dnode in self.TDDnodes.dnodes:
self.TDDnodes.deploy(dnode.index,{})
for dnode in self.TDDnodes.dnodes:
self.TDDnodes.start(dnode.index)
# create cluster
# create cluster
for dnode in self.TDDnodes.dnodes:
print(dnode.cfgDict)
@ -275,12 +275,12 @@ class TDTestCase:
cmd = f" taos -h {dnode_first_host} -P {dnode_first_port} -s ' create dnode \"{dnode_id} \" ' ;"
print(cmd)
os.system(cmd)
time.sleep(2)
tdLog.info(" create cluster done! ")
def getConnection(self, dnode):
host = dnode.cfgDict["fqdn"]
port = dnode.cfgDict["serverPort"]
@ -288,23 +288,23 @@ class TDTestCase:
return taos.connect(host=host, port=int(port), config=config_dir)
def restart_udfd(self, dnode):
buildPath = self.getBuildPath()
if (buildPath == ""):
tdLog.exit("taosd not found!")
else:
tdLog.info("taosd found in %s" % buildPath)
cfgPath = dnode.cfgDir
udfdPath = buildPath +'/build/bin/udfd'
for i in range(5):
tdLog.info(" loop restart udfd %d_th at dnode_index : %s" % (i ,dnode.index))
self.basic_udf_query(dnode)
# stop udfd cmds
# stop udfd cmds
get_processID = "ps -ef | grep -w udfd | grep %s | grep 'root' | grep -v grep| grep -v defunct | awk '{print $2}'"%cfgPath
processID = subprocess.check_output(get_processID, shell=True).decode("utf-8")
stop_udfd = " kill -9 %s" % processID
@ -327,12 +327,12 @@ class TDTestCase:
# self.check_UDF_query()
self.restart_udfd(self.master_dnode)
# self.test_restart_udfd_All_dnodes()
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())


@ -514,7 +514,7 @@ class TDTestCase:
"select c1,c2, udf1(c1,c2) from stb1 group by c1,c2" ,
"select num1,num2,num3,udf1(num1,num2,num3) from tb" ,
"select c1,c6,udf1(c1,c6) from stb1 order by ts" ,
"select abs(udf1(c1,c6,c1,c6)) , abs(ceil(c1)) from stb1 where c1 is not null order by ts;"
"select abs(udf1(c1,c6,c1,c6)) , abs(ceil(c1)) from stb1 where c1 is not null order by ts;"
]
udf2_sqls = ["select udf2(sub1.c1), udf2(sub2.c2) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null" ,
"select udf2(c1) from stb1 group by 1-udf1(c1)" ,


@ -213,7 +213,7 @@ class TDTestCase:
tdSql.error("select irate(c1), abs(c1) from ct4 ")
# agg functions mix with agg functions
tdSql.query("select irate(c1), count(c5) from stb1 partition by tbname ")
tdSql.query("select irate(c1), count(c5) from stb1 partition by tbname order by tbname")
tdSql.checkData(0, 0, 0.000000000)
tdSql.checkData(1, 0, 0.000000000)
tdSql.checkData(0, 1, 13)
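# adding "order by tbname" makes the partition-by-tbname result order deterministic, which keeps
# the fixed-index checkData assertions above stable across runs (intent inferred from the change)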


@ -2,7 +2,7 @@ from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
import taos
import sys
import time
import os
import os
from util.log import *
from util.sql import *
@ -18,7 +18,7 @@ class MyDnodes(TDDnodes):
super(MyDnodes,self).__init__()
self.dnodes = dnodes_lists # dnode must be TDDnode instance
self.simDeployed = False
class TDTestCase:
noConn = True
def init(self,conn ,logSql):
@ -29,7 +29,7 @@ class TDTestCase:
self.host=self.master_dnode.cfgDict["fqdn"]
conn1 = taos.connect(self.master_dnode.cfgDict["fqdn"] , config=self.master_dnode.cfgDir)
tdSql.init(conn1.cursor())
def getBuildPath(self):
selfPath = os.path.dirname(os.path.realpath(__file__))
@ -46,12 +46,12 @@ class TDTestCase:
buildPath = root[:len(root) - len("/build/bin")]
break
return buildPath
def depoly_cluster(self ,dnodes_nums):
def depoly_cluster(self ,dnodes_nums):
testCluster = False
valgrind = 0
valgrind = 0
hostname = socket.gethostname()
dnodes = []
start_port = 6030
@ -63,7 +63,7 @@ class TDTestCase:
dnode.addExtraCfg("monitorFqdn", hostname)
dnode.addExtraCfg("monitorPort", 7043)
dnodes.append(dnode)
self.TDDnodes = MyDnodes(dnodes)
self.TDDnodes.init("")
self.TDDnodes.setTestCluster(testCluster)
@ -71,11 +71,11 @@ class TDTestCase:
self.TDDnodes.stopAll()
for dnode in self.TDDnodes.dnodes:
self.TDDnodes.deploy(dnode.index,{})
for dnode in self.TDDnodes.dnodes:
self.TDDnodes.starttaosd(dnode.index)
# create cluster
# create cluster
for dnode in self.TDDnodes.dnodes[1:]:
# print(dnode.cfgDict)
dnode_id = dnode.cfgDict["fqdn"] + ":" +dnode.cfgDict["serverPort"]
@ -84,7 +84,7 @@ class TDTestCase:
cmd = f"{self.getBuildPath()}/build/bin/taos -h {dnode_first_host} -P {dnode_first_port} -s \"create dnode \\\"{dnode_id}\\\"\""
print(cmd)
os.system(cmd)
time.sleep(2)
tdLog.info(" create cluster done! ")
@ -94,7 +94,7 @@ class TDTestCase:
tdSql.checkData(4,1,'%s:6430'%self.host)
tdSql.checkData(0,4,'ready')
tdSql.checkData(4,4,'ready')
tdSql.query("show mnodes;")
tdSql.query("show mnodes;")
tdSql.checkData(0,1,'%s:6030'%self.host)
tdSql.checkData(0,2,'leader')
tdSql.checkData(0,3,'ready')
@ -120,7 +120,7 @@ class TDTestCase:
)
for i in range(4):
tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
tdSql.query('show databases;')
tdSql.checkData(2,5,'off')
tdSql.error("alter database db strict 'off'")
@ -135,7 +135,7 @@ class TDTestCase:
return taos.connect(host=host, port=int(port), config=config_dir)
def run(self):
def run(self):
# print(self.master_dnode.cfgDict)
self.five_dnode_one_mnode()
@ -145,4 +145,4 @@ class TDTestCase:
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())


@ -2,7 +2,7 @@ from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
import taos
import sys
import time
import os
import os
from util.log import *
from util.sql import *
@ -17,8 +17,8 @@ import subprocess
sys.path.append("./6-cluster")
from clusterCommonCreate import *
from clusterCommonCheck import *
from clusterCommonCheck import *
class TDTestCase:
def init(self,conn ,logSql):
tdLog.debug(f"start to excute {__file__}")
@ -48,7 +48,7 @@ class TDTestCase:
tdSql.checkData(4,1,'%s:6430'%self.host)
tdSql.checkData(0,4,'ready')
tdSql.checkData(4,4,'ready')
tdSql.query("show mnodes;")
tdSql.query("show mnodes;")
tdSql.checkData(0,1,'%s:6030'%self.host)
tdSql.checkData(0,2,'leader')
tdSql.checkData(0,3,'ready')
@ -63,7 +63,7 @@ class TDTestCase:
while count < 10:
time.sleep(1)
tdSql.query("show mnodes;")
tdSql.checkRows(2)
tdSql.checkRows(2)
if tdSql.queryResult[0][2]=='leader' :
if tdSql.queryResult[1][2]=='follower':
print("two mnodes is ready")
@ -73,7 +73,7 @@ class TDTestCase:
print("two mnodes is not ready in 10s ")
# first check status ready
tdSql.checkData(0,1,'%s:6030'%self.host)
tdSql.checkData(0,2,'leader')
tdSql.checkData(0,3,'ready')
@ -106,7 +106,7 @@ class TDTestCase:
clusterComCheck.checkDnodes(5)
# restart all taosd
tdDnodes=cluster.dnodes
# stop follower
tdLog.info("stop follower")
tdDnodes[1].stoptaosd()
@ -118,7 +118,7 @@ class TDTestCase:
tdDnodes[1].starttaosd()
if clusterComCheck.checkMnodeStatus(2) :
print("both mnodes are ready")
# stop leader
tdLog.info("stop leader")
tdDnodes[0].stoptaosd()
@ -133,7 +133,7 @@ class TDTestCase:
if clusterComCheck.checkMnodeStatus(2) :
print("both mnodes are ready")
def run(self):
def run(self):
self.five_dnode_two_mnode()
@ -142,4 +142,4 @@ class TDTestCase:
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())


@ -3,7 +3,7 @@ from numpy import row_stack
import taos
import sys
import time
import os
import os
from util.log import *
from util.sql import *
@ -13,13 +13,13 @@ from util.dnodes import TDDnode
from util.cluster import *
sys.path.append("./6-cluster")
from clusterCommonCreate import *
from clusterCommonCheck import clusterComCheck
from clusterCommonCheck import clusterComCheck
import time
import socket
import subprocess
from multiprocessing import Process
import threading
import threading
import time
import inspect
import ctypes
@ -57,7 +57,7 @@ class TDTestCase:
if res == 0:
raise ValueError("invalid thread id")
elif res != 1:
# """if it returns a number greater than one, you're in trouble,
# """if it returns a number greater than one, you're in trouble,
# and you should call it again with exc=NULL to revert the effect"""
ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)
raise SystemError("PyThreadState_SetAsyncExc failed")
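# background: these cases stop worker threads by injecting an exception with
# ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, exc); a return value of 0 means the thread id
# was invalid, and a value greater than 1 means more than one thread state was touched, which is
# why the call is repeated with exc=None above to undo it before raising SystemError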
@ -68,7 +68,7 @@ class TDTestCase:
def insertData(self,countstart,countstop):
# first add data : db\stable\childtable\general table
for couti in range(countstart,countstop):
tdLog.debug("drop database if exists db%d" %couti)
tdSql.execute("drop database if exists db%d" %couti)
@ -118,7 +118,7 @@ class TDTestCase:
rowsPerStb=paraDict["ctbNum"]*paraDict["rowsPerTbl"]
rowsall=rowsPerStb*paraDict['stbNumbers']
dbNumbers = 1
tdLog.info("first check dnode and mnode")
tdSql.query("show dnodes;")
tdSql.checkData(0,1,'%s:6030'%self.host)
@ -133,7 +133,7 @@ class TDTestCase:
tdSql.execute("create mnode on dnode 3")
clusterComCheck.checkMnodeStatus(3)
# add some error operations and
# add some error operations and
tdLog.info("Confirm the status of the dnode again")
tdSql.error("create mnode on dnode 2")
tdSql.query("show dnodes;")
@ -142,7 +142,7 @@ class TDTestCase:
# create database and stable
clusterComCreate.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], paraDict["vgroups"],paraDict['replica'])
tdLog.info("Take turns stopping Mnodes ")
tdLog.info("Take turns stopping Mnodes ")
tdDnodes=cluster.dnodes
# dnode6=cluster.addDnode(6)
@ -166,7 +166,7 @@ class TDTestCase:
newTdSql=tdCom.newTdSql()
threads.append(threading.Thread(target=clusterComCreate.insert_data, args=(newTdSql, paraDict["dbName"],stableName,paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"],paraDict["startTs"])))
for tr in threads:
tr.start()
tr.start()
dnode6Port=int(6030+5*100)
tdSql.execute("create dnode '%s:%d'"%(hostname,dnode6Port))
clusterComCheck.checkDnodes(dnodeNumbers)
@ -179,7 +179,7 @@ class TDTestCase:
tdDnodes[i].stoptaosd()
# sleep(10)
tdDnodes[i].starttaosd()
# sleep(10)
# sleep(10)
elif stopRole == "vnode":
for i in range(vnodeNumbers):
tdDnodes[i+mnodeNums].stoptaosd()
@ -191,20 +191,20 @@ class TDTestCase:
tdDnodes[i].stoptaosd()
# sleep(10)
tdDnodes[i].starttaosd()
# sleep(10)
# sleep(10)
# dnodeNumbers don't include database of schema
if clusterComCheck.checkDnodes(dnodeNumbers):
tdLog.info("123")
else:
print("456")
self.stopThread(threads)
tdLog.exit("one or more of dnodes failed to start ")
# self.check3mnode()
stopcount+=1
clusterComCheck.checkDnodes(dnodeNumbers)
clusterComCheck.checkDbRows(dbNumbers)
@ -217,7 +217,7 @@ class TDTestCase:
stableName= '%s_%d'%(paraDict['stbName'],i)
tdSql.query("select * from %s"%stableName)
tdSql.checkRows(rowsPerStb)
def run(self):
def run(self):
# print(self.master_dnode.cfgDict)
self.fiveDnodeThreeMnode(dnodeNumbers=5,mnodeNums=3,restartNumbers=2,stopRole='dnode')
@ -226,4 +226,4 @@ class TDTestCase:
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())


@ -3,7 +3,7 @@ from paramiko import HostKeys
import taos
import sys
import time
import os
import os
from util.log import *
from util.sql import *
@ -19,7 +19,7 @@ class MyDnodes(TDDnodes):
super(MyDnodes,self).__init__()
self.dnodes = dnodes_lists # dnode must be TDDnode instance
self.simDeployed = False
class TDTestCase:
def init(self,conn ,logSql):
@ -48,7 +48,7 @@ class TDTestCase:
buildPath = root[:len(root) - len("/build/bin")]
break
return buildPath
def insert_data(self,count):
# first add data : db\stable\childtable\general table
for couti in count:
@ -70,10 +70,10 @@ class TDTestCase:
for i in range(4):
tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
def depoly_cluster(self ,dnodes_nums):
def depoly_cluster(self ,dnodes_nums):
testCluster = False
valgrind = 0
valgrind = 0
hostname = socket.gethostname()
tdLog.debug(hostname)
dnodes = []
@ -88,7 +88,7 @@ class TDTestCase:
dnode.addExtraCfg("monitorPort", 7043)
dnode.addExtraCfg("secondEp", f"{hostname}:{start_port_sec}")
dnodes.append(dnode)
self.TDDnodes = MyDnodes(dnodes)
self.TDDnodes.init("")
self.TDDnodes.setTestCluster(testCluster)
@ -96,11 +96,11 @@ class TDTestCase:
self.TDDnodes.stopAll()
for dnode in self.TDDnodes.dnodes:
self.TDDnodes.deploy(dnode.index,{})
for dnode in self.TDDnodes.dnodes:
self.TDDnodes.starttaosd(dnode.index)
# create cluster
# create cluster
for dnode in self.TDDnodes.dnodes[1:]:
# tdLog.debug(dnode.cfgDict)
dnode_id = dnode.cfgDict["fqdn"] + ":" +dnode.cfgDict["serverPort"]
@ -109,7 +109,7 @@ class TDTestCase:
cmd = f" taos -h {dnode_first_host} -P {dnode_first_port} -s ' create dnode \"{dnode_id} \" ' ;"
tdLog.debug(cmd)
os.system(cmd)
time.sleep(2)
tdLog.info(" create cluster with %d dnode done! " %dnodes_nums)
@ -118,8 +118,8 @@ class TDTestCase:
while count < 10:
time.sleep(1)
tdSql.query("show mnodes;")
if tdSql.checkRows(3) :
tdLog.debug("mnode is three nodes")
if tdSql.checkRows(3) :
tdLog.debug("mnode is three nodes")
if tdSql.queryResult[0][2]=='leader' :
if tdSql.queryResult[1][2]=='follower':
if tdSql.queryResult[2][2]=='follower':
@ -129,20 +129,20 @@ class TDTestCase:
if tdSql.queryResult[1][2]=='leader':
if tdSql.queryResult[2][2]=='follower':
tdLog.debug("three mnodes is ready in 10s")
break
break
elif tdSql.queryResult[0][2]=='follower' :
if tdSql.queryResult[1][2]=='follower':
if tdSql.queryResult[2][2]=='leader':
tdLog.debug("three mnodes is ready in 10s")
break
break
count+=1
else:
tdLog.debug(tdSql.queryResult)
tdLog.debug("three mnodes is not ready in 10s ")
return -1
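# the polling loop above queries "show mnodes;" once per second for up to 10 s and only proceeds
# to the row/field checks below once one leader and two followers have been observed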
tdSql.query("show mnodes;")
tdSql.checkRows(3)
tdSql.query("show mnodes;")
tdSql.checkRows(3)
tdSql.checkData(0,1,'%s:6030'%self.host)
tdSql.checkData(0,3,'ready')
tdSql.checkData(1,1,'%s:6130'%self.host)
@ -169,11 +169,11 @@ class TDTestCase:
count+=1
else:
tdLog.debug("stop mnodes on dnode 2 failed in 10s ")
return -1
return -1
tdSql.error("drop mnode on dnode 1;")
tdSql.query("show mnodes;")
tdSql.checkRows(3)
tdSql.query("show mnodes;")
tdSql.checkRows(3)
tdSql.checkData(0,1,'%s:6030'%self.host)
tdSql.checkData(0,2,'offline')
tdSql.checkData(0,3,'ready')
@ -200,8 +200,8 @@ class TDTestCase:
return -1
tdSql.error("drop mnode on dnode 2;")
tdSql.query("show mnodes;")
tdSql.checkRows(3)
tdSql.query("show mnodes;")
tdSql.checkRows(3)
tdSql.checkData(0,1,'%s:6030'%self.host)
tdSql.checkData(0,2,'leader')
tdSql.checkData(0,3,'ready')
@ -229,8 +229,8 @@ class TDTestCase:
tdLog.debug("stop mnodes on dnode 3 failed in 10s")
return -1
tdSql.error("drop mnode on dnode 3;")
tdSql.query("show mnodes;")
tdSql.checkRows(3)
tdSql.query("show mnodes;")
tdSql.checkRows(3)
tdSql.checkData(0,1,'%s:6030'%self.host)
tdSql.checkData(0,2,'leader')
tdSql.checkData(0,3,'ready')
@ -249,8 +249,8 @@ class TDTestCase:
tdSql.checkData(4,1,'%s:6430'%self.host)
tdSql.checkData(0,4,'ready')
tdSql.checkData(4,4,'ready')
tdSql.query("show mnodes;")
tdSql.checkRows(1)
tdSql.query("show mnodes;")
tdSql.checkRows(1)
tdSql.checkData(0,1,'%s:6030'%self.host)
tdSql.checkData(0,2,'leader')
tdSql.checkData(0,3,'ready')
@ -270,8 +270,8 @@ class TDTestCase:
tdSql.query("show dnodes;")
tdLog.debug(tdSql.queryResult)
# drop follower of mnode
dropcount =0
# drop follower of mnode
dropcount =0
while dropcount <= 10:
for i in range(1,3):
tdLog.debug("drop mnode on dnode %d"%(i+1))
@ -306,7 +306,7 @@ class TDTestCase:
return taos.connect(host=host, port=int(port), config=config_dir)
def run(self):
def run(self):
# tdLog.debug(self.master_dnode.cfgDict)
self.buildcluster(5)
self.five_dnode_three_mnode()
@ -316,4 +316,4 @@ class TDTestCase:
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())


@ -2,7 +2,7 @@ from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
import taos
import sys
import time
import os
import os
from util.log import *
from util.sql import *
@ -13,7 +13,7 @@ import time
import socket
import subprocess
from multiprocessing import Process
import threading
import threading
import time
import inspect
import ctypes
@ -37,7 +37,7 @@ class TDTestCase:
self.host=self.master_dnode.cfgDict["fqdn"]
conn1 = taos.connect(self.master_dnode.cfgDict["fqdn"] , config=self.master_dnode.cfgDir)
tdSql.init(conn1.cursor())
def getBuildPath(self):
selfPath = os.path.dirname(os.path.realpath(__file__))
@ -63,7 +63,7 @@ class TDTestCase:
if res == 0:
raise ValueError("invalid thread id")
elif res != 1:
# """if it returns a number greater than one, you're in trouble,
# """if it returns a number greater than one, you're in trouble,
# and you should call it again with exc=NULL to revert the effect"""
ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)
raise SystemError("PyThreadState_SetAsyncExc failed")
@ -74,7 +74,7 @@ class TDTestCase:
def createDbTbale(self,dbcountStart,dbcountStop,stbname,chilCount):
# first add data : db\stable\childtable\general table
for couti in range(dbcountStart,dbcountStop):
tdLog.debug("drop database if exists db%d" %couti)
tdSql.execute("drop database if exists db%d" %couti)
@ -98,7 +98,7 @@ class TDTestCase:
def insertTabaleData(self,dbcountStart,dbcountStop,stbname,chilCount,ts_start,rowCount):
# insert data : create childtable and data
for couti in range(dbcountStart,dbcountStop):
tdSql.execute("use db%d" %couti)
pre_insert = "insert into "
@ -115,7 +115,7 @@ class TDTestCase:
# print(sql)
tdSql.execute(sql)
sql = "insert into %s_%d values " %(stbname,i)
# end sql
# end sql
if sql != pre_insert:
# print(sql)
print(len(sql))
@ -134,13 +134,13 @@ class TDTestCase:
for i in range(stableCount):
tdSql.query("select count(*) from %s%d"%(stbname,i))
tdSql.checkData(0,0,rowsPerSTable)
return
return
def depoly_cluster(self ,dnodes_nums):
def depoly_cluster(self ,dnodes_nums):
testCluster = False
valgrind = 0
valgrind = 0
hostname = socket.gethostname()
dnodes = []
start_port = 6030
@ -154,7 +154,7 @@ class TDTestCase:
dnode.addExtraCfg("monitorPort", 7043)
dnode.addExtraCfg("secondEp", f"{hostname}:{start_port_sec}")
dnodes.append(dnode)
self.TDDnodes = MyDnodes(dnodes)
self.TDDnodes.init("")
self.TDDnodes.setTestCluster(testCluster)
@ -162,11 +162,11 @@ class TDTestCase:
self.TDDnodes.stopAll()
for dnode in self.TDDnodes.dnodes:
self.TDDnodes.deploy(dnode.index,{})
for dnode in self.TDDnodes.dnodes:
self.TDDnodes.starttaosd(dnode.index)
# create cluster
# create cluster
for dnode in self.TDDnodes.dnodes[1:]:
# print(dnode.cfgDict)
dnode_id = dnode.cfgDict["fqdn"] + ":" +dnode.cfgDict["serverPort"]
@ -175,7 +175,7 @@ class TDTestCase:
cmd = f" taos -h {dnode_first_host} -P {dnode_first_port} -s ' create dnode \"{dnode_id} \" ' ;"
print(cmd)
os.system(cmd)
time.sleep(2)
tdLog.info(" create cluster with %d dnode done! " %dnodes_nums)
@ -185,8 +185,8 @@ class TDTestCase:
time.sleep(1)
statusReadyBumber=0
tdSql.query("show dnodes;")
if tdSql.checkRows(dnodenumber) :
print("dnode is %d nodes"%dnodenumber)
if tdSql.checkRows(dnodenumber) :
print("dnode is %d nodes"%dnodenumber)
for i in range(dnodenumber):
if tdSql.queryResult[i][4] !='ready' :
status=tdSql.queryResult[i][4]
@ -203,15 +203,15 @@ class TDTestCase:
else:
print("%d mnodes is not ready in 10s "%dnodenumber)
return False
def check3mnode(self):
count=0
while count < 10:
time.sleep(1)
tdSql.query("show mnodes;")
if tdSql.checkRows(3) :
print("mnode is three nodes")
if tdSql.checkRows(3) :
print("mnode is three nodes")
if tdSql.queryResult[0][2]=='leader' :
if tdSql.queryResult[1][2]=='follower':
if tdSql.queryResult[2][2]=='follower':
@ -221,19 +221,19 @@ class TDTestCase:
if tdSql.queryResult[1][2]=='leader':
if tdSql.queryResult[2][2]=='follower':
print("three mnodes is ready in 10s")
break
break
elif tdSql.queryResult[0][2]=='follower' :
if tdSql.queryResult[1][2]=='follower':
if tdSql.queryResult[2][2]=='leader':
print("three mnodes is ready in 10s")
break
break
count+=1
else:
print("three mnodes is not ready in 10s ")
return -1
tdSql.query("show mnodes;")
tdSql.checkRows(3)
tdSql.query("show mnodes;")
tdSql.checkRows(3)
tdSql.checkData(0,1,'%s:6030'%self.host)
tdSql.checkData(0,3,'ready')
tdSql.checkData(1,1,'%s:6130'%self.host)
@ -263,8 +263,8 @@ class TDTestCase:
return -1
tdSql.error("drop mnode on dnode 1;")
tdSql.query("show mnodes;")
tdSql.checkRows(3)
tdSql.query("show mnodes;")
tdSql.checkRows(3)
tdSql.checkData(0,1,'%s:6030'%self.host)
tdSql.checkData(0,2,'offline')
tdSql.checkData(0,3,'ready')
@ -291,8 +291,8 @@ class TDTestCase:
return -1
tdSql.error("drop mnode on dnode 2;")
tdSql.query("show mnodes;")
tdSql.checkRows(3)
tdSql.query("show mnodes;")
tdSql.checkRows(3)
tdSql.checkData(0,1,'%s:6030'%self.host)
tdSql.checkData(0,2,'leader')
tdSql.checkData(0,3,'ready')
@ -320,8 +320,8 @@ class TDTestCase:
print("stop mnodes on dnode 3 failed in 10s")
return -1
tdSql.error("drop mnode on dnode 3;")
tdSql.query("show mnodes;")
tdSql.checkRows(3)
tdSql.query("show mnodes;")
tdSql.checkRows(3)
tdSql.checkData(0,1,'%s:6030'%self.host)
tdSql.checkData(0,2,'leader')
tdSql.checkData(0,3,'ready')
@ -348,8 +348,8 @@ class TDTestCase:
tdSql.checkData(4,1,'%s:6430'%self.host)
tdSql.checkData(0,4,'ready')
tdSql.checkData(4,4,'ready')
tdSql.query("show mnodes;")
tdSql.checkRows(1)
tdSql.query("show mnodes;")
tdSql.checkRows(1)
tdSql.checkData(0,1,'%s:6030'%self.host)
tdSql.checkData(0,2,'leader')
tdSql.checkData(0,3,'ready')
@ -364,7 +364,7 @@ class TDTestCase:
tdSql.error("create mnode on dnode 2")
tdSql.query("show dnodes;")
print(tdSql.queryResult)
tdLog.debug("stop all of mnode ")
tdLog.debug("stop all of mnode ")
# drop follower of mnode and insert data
self.createDbTbale(dbcountStart, dbcountStop,stbname,tablesPerStb)
@ -378,7 +378,7 @@ class TDTestCase:
rowsPerTable))
threads.start()
dropcount =0
dropcount =0
while dropcount <= 10:
for i in range(1,3):
tdLog.debug("drop mnode on dnode %d"%(i+1))
@ -415,7 +415,7 @@ class TDTestCase:
return taos.connect(host=host, port=int(port), config=config_dir)
def run(self):
def run(self):
# print(self.master_dnode.cfgDict)
self.buildcluster(5)
self.five_dnode_three_mnode(5)
@ -425,4 +425,4 @@ class TDTestCase:
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())


@ -3,7 +3,7 @@ from numpy import row_stack
import taos
import sys
import time
import os
import os
from util.log import *
from util.sql import *
@ -13,13 +13,13 @@ from util.dnodes import TDDnode
from util.cluster import *
sys.path.append("./6-cluster")
from clusterCommonCreate import *
from clusterCommonCheck import clusterComCheck
from clusterCommonCheck import clusterComCheck
import time
import socket
import subprocess
from multiprocessing import Process
import threading
import threading
import time
import inspect
import ctypes
@ -57,7 +57,7 @@ class TDTestCase:
if res == 0:
raise ValueError("invalid thread id")
elif res != 1:
# """if it returns a number greater than one, you're in trouble,
# """if it returns a number greater than one, you're in trouble,
# and you should call it again with exc=NULL to revert the effect"""
ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)
raise SystemError("PyThreadState_SetAsyncExc failed")
@ -68,7 +68,7 @@ class TDTestCase:
def insertData(self,countstart,countstop):
# first add data : db\stable\childtable\general table
for couti in range(countstart,countstop):
tdLog.debug("drop database if exists db%d" %couti)
tdSql.execute("drop database if exists db%d" %couti)
@ -112,7 +112,7 @@ class TDTestCase:
}
username="user1"
passwd="123"
dnodeNumbers=int(dnodeNumbers)
mnodeNums=int(mnodeNums)
vnodeNumbers = int(dnodeNumbers-mnodeNums)
@ -120,7 +120,7 @@ class TDTestCase:
rowsPerStb=paraDict["ctbNum"]*paraDict["rowsPerTbl"]
rowsall=rowsPerStb*paraDict['stbNumbers']
dbNumbers = 1
tdLog.info("first check dnode and mnode")
tdSql.query("show dnodes;")
tdSql.checkData(0,1,'%s:6030'%self.host)
@ -135,7 +135,7 @@ class TDTestCase:
tdSql.execute("create mnode on dnode 3")
clusterComCheck.checkMnodeStatus(3)
# add some error operations and
# add some error operations and
tdLog.info("Confirm the status of the dnode again")
tdSql.error("create mnode on dnode 2")
tdSql.query("show dnodes;")
@ -162,10 +162,10 @@ class TDTestCase:
for i in range(tdSql.queryRows):
if tdSql.queryResult[i][0] == "%s"%username :
tdLog.info("create user:%s successfully"%username)
# # create database and stable
# clusterComCreate.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], paraDict["vgroups"],paraDict['replica'])
# tdLog.info("Take turns stopping Mnodes ")
# tdLog.info("Take turns stopping Mnodes ")
# tdDnodes=cluster.dnodes
# stopcount =0
@ -197,7 +197,7 @@ class TDTestCase:
# tdDnodes[i].stoptaosd()
# # sleep(10)
# tdDnodes[i].starttaosd()
# # sleep(10)
# # sleep(10)
# elif stopRole == "vnode":
# for i in range(vnodeNumbers):
# tdDnodes[i+mnodeNums].stoptaosd()
@ -209,7 +209,7 @@ class TDTestCase:
# tdDnodes[i].stoptaosd()
# # sleep(10)
# tdDnodes[i].starttaosd()
# # sleep(10)
# # sleep(10)
# # dnodeNumbers don't include database of schema
# if clusterComCheck.checkDnodes(dnodeNumbers):
@ -220,7 +220,7 @@ class TDTestCase:
# tdLog.exit("one or more of dnodes failed to start ")
# # self.check3mnode()
# stopcount+=1
# clusterComCheck.checkDnodes(dnodeNumbers)
# clusterComCheck.checkDbRows(dbNumbers)
@ -234,7 +234,7 @@ class TDTestCase:
# # tdSql.query("select * from %s"%stableName)
# # tdSql.checkRows(rowsPerStb)
def run(self):
def run(self):
# print(self.master_dnode.cfgDict)
self.fiveDnodeThreeMnode(dnodeNumbers=5,mnodeNums=3,restartNumbers=1,stopRole='dnode')
@ -243,4 +243,4 @@ class TDTestCase:
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())


@ -3,7 +3,7 @@ from numpy import row_stack
import taos
import sys
import time
import os
import os
from util.log import *
from util.sql import *
@ -13,13 +13,13 @@ from util.dnodes import TDDnode
from util.cluster import *
sys.path.append("./6-cluster")
from clusterCommonCreate import *
from clusterCommonCheck import clusterComCheck
from clusterCommonCheck import clusterComCheck
import time
import socket
import subprocess
from multiprocessing import Process
import threading
import threading
import time
import inspect
import ctypes
@ -57,7 +57,7 @@ class TDTestCase:
if res == 0:
raise ValueError("invalid thread id")
elif res != 1:
# """if it returns a number greater than one, you're in trouble,
# """if it returns a number greater than one, you're in trouble,
# and you should call it again with exc=NULL to revert the effect"""
ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)
raise SystemError("PyThreadState_SetAsyncExc failed")
@ -68,7 +68,7 @@ class TDTestCase:
def insertData(self,countstart,countstop):
# first add data : db\stable\childtable\general table
for couti in range(countstart,countstop):
tdLog.debug("drop database if exists db%d" %couti)
tdSql.execute("drop database if exists db%d" %couti)
@ -110,7 +110,7 @@ class TDTestCase:
"rowsPerTbl": 100,
"batchNum": 5000
}
dnodeNumbers=int(dnodeNumbers)
mnodeNums=int(mnodeNums)
vnodeNumbers = int(dnodeNumbers-mnodeNums)
@ -118,7 +118,7 @@ class TDTestCase:
rowsPerStb=paraDict["ctbNum"]*paraDict["rowsPerTbl"]
rowsall=rowsPerStb*paraDict['stbNumbers']
dbNumbers = 1
tdLog.info("first check dnode and mnode")
tdSql.query("show dnodes;")
tdSql.checkData(0,1,'%s:6030'%self.host)
@ -133,7 +133,7 @@ class TDTestCase:
tdSql.execute("create mnode on dnode 3")
clusterComCheck.checkMnodeStatus(3)
# add some error operations and
# add some error operations and
tdLog.info("Confirm the status of the dnode again")
tdSql.error("create mnode on dnode 2")
tdSql.query("show dnodes;")
@ -142,7 +142,7 @@ class TDTestCase:
# create database and stable
clusterComCreate.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], paraDict["vgroups"],paraDict['replica'])
tdLog.info("Take turns stopping Mnodes ")
tdLog.info("Take turns stopping Mnodes ")
tdDnodes=cluster.dnodes
stopcount =0
@ -174,7 +174,7 @@ class TDTestCase:
tdDnodes[i].stoptaosd()
# sleep(10)
tdDnodes[i].starttaosd()
# sleep(10)
# sleep(10)
elif stopRole == "vnode":
for i in range(vnodeNumbers):
tdDnodes[i+mnodeNums].stoptaosd()
@ -186,7 +186,7 @@ class TDTestCase:
tdDnodes[i].stoptaosd()
# sleep(10)
tdDnodes[i].starttaosd()
# sleep(10)
# sleep(10)
# dnodeNumbers don't include database of schema
if clusterComCheck.checkDnodes(dnodeNumbers):
@ -197,7 +197,7 @@ class TDTestCase:
tdLog.exit("one or more of dnodes failed to start ")
# self.check3mnode()
stopcount+=1
clusterComCheck.checkDnodes(dnodeNumbers)
clusterComCheck.checkDbRows(dbNumbers)
@ -211,7 +211,7 @@ class TDTestCase:
# tdSql.query("select * from %s"%stableName)
# tdSql.checkRows(rowsPerStb)
def run(self):
def run(self):
# print(self.master_dnode.cfgDict)
self.fiveDnodeThreeMnode(dnodeNumbers=5,mnodeNums=3,restartNumbers=1,stopRole='dnode')
@ -220,4 +220,4 @@ class TDTestCase:
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())


@ -3,7 +3,7 @@ from numpy import row_stack
import taos
import sys
import time
import os
import os
from util.log import *
from util.sql import *
@ -13,13 +13,13 @@ from util.dnodes import TDDnode
from util.cluster import *
sys.path.append("./6-cluster")
from clusterCommonCreate import *
from clusterCommonCheck import clusterComCheck
from clusterCommonCheck import clusterComCheck
import time
import socket
import subprocess
from multiprocessing import Process
import threading
import threading
import time
import inspect
import ctypes
@ -57,7 +57,7 @@ class TDTestCase:
if res == 0:
raise ValueError("invalid thread id")
elif res != 1:
# """if it returns a number greater than one, you're in trouble,
# """if it returns a number greater than one, you're in trouble,
# and you should call it again with exc=NULL to revert the effect"""
ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)
raise SystemError("PyThreadState_SetAsyncExc failed")
@ -68,7 +68,7 @@ class TDTestCase:
def insertData(self,countstart,countstop):
# first add data : db\stable\childtable\general table
for couti in range(countstart,countstop):
tdLog.debug("drop database if exists db%d" %couti)
tdSql.execute("drop database if exists db%d" %couti)
@ -110,7 +110,7 @@ class TDTestCase:
"rowsPerTbl": 100,
"batchNum": 5000
}
dnodeNumbers=int(dnodeNumbers)
mnodeNums=int(mnodeNums)
vnodeNumbers = int(dnodeNumbers-mnodeNums)
@ -118,7 +118,7 @@ class TDTestCase:
rowsPerStb=paraDict["ctbNum"]*paraDict["rowsPerTbl"]
rowsall=rowsPerStb*paraDict['stbNumbers']
dbNumbers = 1
tdLog.info("first check dnode and mnode")
tdSql.query("show dnodes;")
tdSql.checkData(0,1,'%s:6030'%self.host)
@ -133,7 +133,7 @@ class TDTestCase:
tdSql.execute("create mnode on dnode 3")
clusterComCheck.checkMnodeStatus(3)
# add some error operations and
# add some error operations and
tdLog.info("Confirm the status of the dnode again")
tdSql.error("create mnode on dnode 2")
tdSql.query("show dnodes;")
@ -142,7 +142,7 @@ class TDTestCase:
# create database and stable
clusterComCreate.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], paraDict["vgroups"],paraDict['replica'])
tdLog.info("Take turns stopping Mnodes ")
tdLog.info("Take turns stopping Mnodes ")
tdDnodes=cluster.dnodes
stopcount =0
@ -173,7 +173,7 @@ class TDTestCase:
tdDnodes[i].stoptaosd()
# sleep(10)
tdDnodes[i].starttaosd()
# sleep(10)
# sleep(10)
elif stopRole == "vnode":
for i in range(vnodeNumbers):
tdDnodes[i+mnodeNums].stoptaosd()
@ -185,7 +185,7 @@ class TDTestCase:
tdDnodes[i].stoptaosd()
# sleep(10)
tdDnodes[i].starttaosd()
# sleep(10)
# sleep(10)
# dnodeNumbers don't include database of schema
if clusterComCheck.checkDnodes(dnodeNumbers):
@ -196,10 +196,10 @@ class TDTestCase:
tdLog.exit("one or more of dnodes failed to start ")
# self.check3mnode()
stopcount+=1
for tr in threads:
tr.join()
clusterComCheck.checkDnodes(dnodeNumbers)
clusterComCheck.checkDbRows(dbNumbers)
# clusterComCheck.checkDb(dbNumbers,1,paraDict["dbName"])
@ -212,7 +212,7 @@ class TDTestCase:
# tdSql.query("select * from %s"%stableName)
# tdSql.checkRows(rowsPerStb)
def run(self):
def run(self):
# print(self.master_dnode.cfgDict)
self.fiveDnodeThreeMnode(dnodeNumbers=5,mnodeNums=3,restartNumbers=1,stopRole='dnode')
@ -221,4 +221,4 @@ class TDTestCase:
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())


@ -2,7 +2,7 @@ from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
import taos
import sys
import time
import os
import os
from util.log import *
from util.sql import *
@ -12,13 +12,13 @@ from util.dnodes import TDDnode
from util.cluster import *
sys.path.append("./6-cluster")
from clusterCommonCreate import *
from clusterCommonCheck import clusterComCheck
from clusterCommonCheck import clusterComCheck
import time
import socket
import subprocess
from multiprocessing import Process
import threading
import threading
import time
import inspect
import ctypes
@ -56,7 +56,7 @@ class TDTestCase:
if res == 0:
raise ValueError("invalid thread id")
elif res != 1:
# """if it returns a number greater than one, you're in trouble,
# """if it returns a number greater than one, you're in trouble,
# and you should call it again with exc=NULL to revert the effect"""
ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)
raise SystemError("PyThreadState_SetAsyncExc failed")
@ -67,7 +67,7 @@ class TDTestCase:
def insertData(self,countstart,countstop):
# first add data : db\stable\childtable\general table
for couti in range(countstart,countstop):
tdLog.debug("drop database if exists db%d" %couti)
tdSql.execute("drop database if exists db%d" %couti)
@ -107,13 +107,13 @@ class TDTestCase:
'ctbPrefix': 'ctb',
'ctbNum': 1,
}
dnodeNumbers=int(dnodeNumbers)
mnodeNums=int(mnodeNums)
vnodeNumbers = int(dnodeNumbers-mnodeNums)
allDbNumbers=(paraDict['dbNumbers']*restartNumbers)
allStbNumbers=(paraDict['stbNumbers']*restartNumbers)
tdLog.info("first check dnode and mnode")
tdSql.query("show dnodes;")
tdSql.checkData(0,1,'%s:6030'%self.host)
@ -128,7 +128,7 @@ class TDTestCase:
tdSql.execute("create mnode on dnode 3")
clusterComCheck.checkMnodeStatus(3)
# add some error operations and
# add some error operations and
tdLog.info("Confirm the status of the dnode again")
tdSql.error("create mnode on dnode 2")
tdSql.query("show dnodes;")
@ -149,7 +149,7 @@ class TDTestCase:
for tr in threads:
tr.start()
tdLog.info("Take turns stopping Mnodes ")
tdLog.info("Take turns stopping Mnodes ")
while stopcount < restartNumbers:
tdLog.info(" restart loop: %d"%stopcount )
if stopRole == "mnode":
@ -157,7 +157,7 @@ class TDTestCase:
tdDnodes[i].stoptaosd()
# sleep(10)
tdDnodes[i].starttaosd()
# sleep(10)
# sleep(10)
elif stopRole == "vnode":
for i in range(vnodeNumbers):
tdDnodes[i+mnodeNums].stoptaosd()
@ -169,7 +169,7 @@ class TDTestCase:
tdDnodes[i].stoptaosd()
# sleep(10)
tdDnodes[i].starttaosd()
# sleep(10)
# sleep(10)
# dnodeNumbers don't include database of schema
if clusterComCheck.checkDnodes(dnodeNumbers):
@ -180,7 +180,7 @@ class TDTestCase:
tdLog.exit("one or more of dnodes failed to start ")
# self.check3mnode()
stopcount+=1
for tr in threads:
tr.join()
tdLog.info("check dnode number:")
@ -196,7 +196,7 @@ class TDTestCase:
def run(self):
def run(self):
# print(self.master_dnode.cfgDict)
self.fiveDnodeThreeMnode(dnodeNumbers=5,mnodeNums=3,restartNumbers=10,stopRole='dnode')
@ -205,4 +205,4 @@ class TDTestCase:
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
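The restart loop exercised in the hunks above has the same shape in each of these cases; a condensed sketch, assuming tdDnodes, clusterComCheck and tdLog are the cluster test framework objects:

def rolling_restart(tdDnodes, clusterComCheck, tdLog,
                    dnodeNumbers, mnodeNums, vnodeNumbers,
                    restartNumbers, stopRole):
    stopcount = 0
    while stopcount < restartNumbers:
        tdLog.info(" restart loop: %d" % stopcount)
        if stopRole == "mnode":
            targets = range(mnodeNums)
        elif stopRole == "vnode":
            targets = range(mnodeNums, mnodeNums + vnodeNumbers)
        else:  # "dnode": bounce every dnode in the cluster
            targets = range(dnodeNumbers)
        for i in targets:
            tdDnodes[i].stoptaosd()
            tdDnodes[i].starttaosd()
        # dnodeNumbers does not include the schema database
        if not clusterComCheck.checkDnodes(dnodeNumbers):
            tdLog.exit("one or more of dnodes failed to start ")
        stopcount += 1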


@ -2,7 +2,7 @@ from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
import taos
import sys
import time
import os
import os
from util.log import *
from util.sql import *
@ -12,13 +12,13 @@ from util.dnodes import TDDnode
from util.cluster import *
sys.path.append("./6-cluster")
from clusterCommonCreate import *
from clusterCommonCheck import clusterComCheck
from clusterCommonCheck import clusterComCheck
import time
import socket
import subprocess
from multiprocessing import Process
import threading
import threading
import time
import inspect
import ctypes
@ -56,7 +56,7 @@ class TDTestCase:
if res == 0:
raise ValueError("invalid thread id")
elif res != 1:
# """if it returns a number greater than one, you're in trouble,
# """if it returns a number greater than one, you're in trouble,
# and you should call it again with exc=NULL to revert the effect"""
ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)
raise SystemError("PyThreadState_SetAsyncExc failed")
@ -81,13 +81,13 @@ class TDTestCase:
'ctbPrefix': 'ctb',
'ctbNum': 1,
}
dnodeNumbers=int(dnodeNumbers)
mnodeNums=int(mnodeNums)
vnodeNumbers = int(dnodeNumbers-mnodeNums)
allStbNumbers=(paraDict['stbNumbers']*restartNumbers)
dbNumbers = 1
tdLog.info("first check dnode and mnode")
tdSql.query("show dnodes;")
tdSql.checkData(0,1,'%s:6030'%self.host)
@ -102,7 +102,7 @@ class TDTestCase:
tdSql.execute("create mnode on dnode 3")
clusterComCheck.checkMnodeStatus(3)
# add some error operations and
# add some error operations and
tdLog.info("Confirm the status of the dnode again")
tdSql.error("create mnode on dnode 2")
tdSql.query("show dnodes;")
@ -111,7 +111,7 @@ class TDTestCase:
# create database and stable
clusterComCreate.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], paraDict["vgroups"],paraDict['replica'])
tdLog.info("Take turns stopping Mnodes ")
tdLog.info("Take turns stopping Mnodes ")
tdDnodes=cluster.dnodes
stopcount =0
@ -130,7 +130,7 @@ class TDTestCase:
tdDnodes[i].stoptaosd()
# sleep(10)
tdDnodes[i].starttaosd()
# sleep(10)
# sleep(10)
elif stopRole == "vnode":
for i in range(vnodeNumbers):
tdDnodes[i+mnodeNums].stoptaosd()
@ -142,19 +142,19 @@ class TDTestCase:
tdDnodes[i].stoptaosd()
# sleep(10)
tdDnodes[i].starttaosd()
# sleep(10)
# sleep(10)
# dnodeNumbers don't include database of schema
if clusterComCheck.checkDnodes(dnodeNumbers):
tdLog.info("123")
else:
print("456")
self.stopThread(threads)
tdLog.exit("one or more of dnodes failed to start ")
# self.check3mnode()
stopcount+=1
for tr in threads:
tr.join()
clusterComCheck.checkDnodes(dnodeNumbers)
@ -169,7 +169,7 @@ class TDTestCase:
def run(self):
def run(self):
# print(self.master_dnode.cfgDict)
self.fiveDnodeThreeMnode(dnodeNumbers=5,mnodeNums=3,restartNumbers=2,stopRole='dnode')
@ -178,4 +178,4 @@ class TDTestCase:
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())


@ -3,7 +3,7 @@ from numpy import row_stack
import taos
import sys
import time
import os
import os
from util.log import *
from util.sql import *
@ -13,13 +13,13 @@ from util.dnodes import TDDnode
from util.cluster import *
sys.path.append("./6-cluster")
from clusterCommonCreate import *
from clusterCommonCheck import clusterComCheck
from clusterCommonCheck import clusterComCheck
import time
import socket
import subprocess
from multiprocessing import Process
import threading
import threading
import time
import inspect
import ctypes
@ -57,7 +57,7 @@ class TDTestCase:
if res == 0:
raise ValueError("invalid thread id")
elif res != 1:
# """if it returns a number greater than one, you're in trouble,
# """if it returns a number greater than one, you're in trouble,
# and you should call it again with exc=NULL to revert the effect"""
ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)
raise SystemError("PyThreadState_SetAsyncExc failed")
@ -68,7 +68,7 @@ class TDTestCase:
def insertData(self,countstart,countstop):
# first add data : db\stable\childtable\general table
for couti in range(countstart,countstop):
tdLog.debug("drop database if exists db%d" %couti)
tdSql.execute("drop database if exists db%d" %couti)
@ -110,7 +110,7 @@ class TDTestCase:
"rowsPerTbl": 10000,
"batchNum": 5000
}
dnodeNumbers=int(dnodeNumbers)
mnodeNums=int(mnodeNums)
vnodeNumbers = int(dnodeNumbers-mnodeNums)
@ -118,7 +118,7 @@ class TDTestCase:
rowsPerStb=paraDict["ctbNum"]*paraDict["rowsPerTbl"]
rowsall=rowsPerStb*paraDict['stbNumbers']
dbNumbers = 1
tdLog.info("first check dnode and mnode")
tdSql.query("show dnodes;")
tdSql.checkData(0,1,'%s:6030'%self.host)
@ -133,7 +133,7 @@ class TDTestCase:
tdSql.execute("create mnode on dnode 3")
clusterComCheck.checkMnodeStatus(3)
# add some error operations and
# add some error operations and
tdLog.info("Confirm the status of the dnode again")
tdSql.error("create mnode on dnode 2")
tdSql.query("show dnodes;")
@ -142,7 +142,7 @@ class TDTestCase:
# create database and stable
clusterComCreate.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], paraDict["vgroups"],paraDict['replica'])
tdLog.info("Take turns stopping Mnodes ")
tdLog.info("Take turns stopping Mnodes ")
tdDnodes=cluster.dnodes
stopcount =0
@ -171,7 +171,7 @@ class TDTestCase:
tdDnodes[i].stoptaosd()
# sleep(10)
tdDnodes[i].starttaosd()
# sleep(10)
# sleep(10)
elif stopRole == "vnode":
for i in range(vnodeNumbers):
tdDnodes[i+mnodeNums].stoptaosd()
@ -183,19 +183,19 @@ class TDTestCase:
tdDnodes[i].stoptaosd()
# sleep(10)
tdDnodes[i].starttaosd()
# sleep(10)
# sleep(10)
# dnodeNumbers don't include database of schema
if clusterComCheck.checkDnodes(dnodeNumbers):
tdLog.info("123")
else:
print("456")
self.stopThread(threads)
tdLog.exit("one or more of dnodes failed to start ")
# self.check3mnode()
stopcount+=1
for tr in threads:
tr.join()
clusterComCheck.checkDnodes(dnodeNumbers)
@ -209,7 +209,7 @@ class TDTestCase:
stableName= '%s_%d'%(paraDict['stbName'],i)
tdSql.query("select * from %s"%stableName)
tdSql.checkRows(rowsPerStb)
def run(self):
def run(self):
# print(self.master_dnode.cfgDict)
self.fiveDnodeThreeMnode(dnodeNumbers=5,mnodeNums=3,restartNumbers=2,stopRole='dnode')
@ -218,4 +218,4 @@ class TDTestCase:
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
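After the restarts, the case above verifies that every super table still holds ctbNum * rowsPerTbl rows; a minimal sketch of that check, assuming tdSql comes from the test framework:

def check_rows_per_stable(tdSql, dbname, stb_prefix, stb_count, rows_per_stb):
    tdSql.execute("use %s" % dbname)
    for i in range(stb_count):
        stable_name = "%s_%d" % (stb_prefix, i)
        tdSql.query("select * from %s" % stable_name)  # full scan of the super table
        tdSql.checkRows(rows_per_stb)                  # every inserted row must survive the restarts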


@ -2,7 +2,7 @@ from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
import taos
import sys
import time
import os
import os
from util.log import *
from util.sql import *
@ -12,13 +12,13 @@ from util.dnodes import TDDnode
from util.cluster import *
sys.path.append("./6-cluster")
from clusterCommonCreate import *
from clusterCommonCheck import clusterComCheck
from clusterCommonCheck import clusterComCheck
import time
import socket
import subprocess
from multiprocessing import Process
import threading
import threading
import time
import inspect
import ctypes
@ -56,7 +56,7 @@ class TDTestCase:
if res == 0:
raise ValueError("invalid thread id")
elif res != 1:
# """if it returns a number greater than one, you're in trouble,
# """if it returns a number greater than one, you're in trouble,
# and you should call it again with exc=NULL to revert the effect"""
ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)
raise SystemError("PyThreadState_SetAsyncExc failed")
@ -82,13 +82,13 @@ class TDTestCase:
'ctbPrefix': 'ctb',
'ctbNum': 1,
}
dnodeNumbers=int(dnodeNumbers)
mnodeNums=int(mnodeNums)
vnodeNumbers = int(dnodeNumbers-mnodeNums)
allDbNumbers=(paraDict['dbNumbers']*restartNumbers)
allStbNumbers=(paraDict['stbNumbers']*restartNumbers)
tdLog.info("first check dnode and mnode")
tdSql.query("show dnodes;")
tdSql.checkData(0,1,'%s:6030'%self.host)
@ -105,14 +105,14 @@ class TDTestCase:
tdSql.execute("create mnode on dnode 3")
clusterComCheck.checkMnodeStatus(3)
# add some error operations and
# add some error operations and
tdLog.info("Confirm the status of the dnode again")
tdSql.error("create mnode on dnode 2")
tdSql.query("show dnodes;")
print(tdSql.queryResult)
clusterComCheck.checkDnodes(dnodeNumbers)
tdLog.info("create database and stable")
tdLog.info("create database and stable")
tdDnodes=cluster.dnodes
stopcount =0
threads=[]
@ -124,7 +124,7 @@ class TDTestCase:
for tr in threads:
tr.start()
tdLog.info("Take turns stopping Mnodes ")
tdLog.info("Take turns stopping Mnodes ")
while stopcount < restartNumbers:
tdLog.info(" restart loop: %d"%stopcount )
if stopRole == "mnode":
@ -132,7 +132,7 @@ class TDTestCase:
tdDnodes[i].stoptaosd()
# sleep(10)
tdDnodes[i].starttaosd()
# sleep(10)
# sleep(10)
elif stopRole == "vnode":
for i in range(vnodeNumbers):
tdDnodes[i+mnodeNums].stoptaosd()
@ -144,7 +144,7 @@ class TDTestCase:
tdDnodes[i].stoptaosd()
# sleep(10)
tdDnodes[i].starttaosd()
# sleep(10)
# sleep(10)
# dnodeNumbers don't include database of schema
if clusterComCheck.checkDnodes(dnodeNumbers):
@ -155,7 +155,7 @@ class TDTestCase:
tdLog.exit("one or more of dnodes failed to start ")
# self.check3mnode()
stopcount+=1
for tr in threads:
tr.join()
tdLog.info("check dnode number:")
@ -170,7 +170,7 @@ class TDTestCase:
# clusterComCheck.checkDb(paraDict['dbNumbers'],restartNumbers,dbNameIndex = '%s%d'%(paraDict["dbName"],i))
def run(self):
def run(self):
# print(self.master_dnode.cfgDict)
self.fiveDnodeThreeMnode(dnodeNumbers=5,mnodeNums=3,restartNumbers=10,stopRole='mnode')
@ -179,4 +179,4 @@ class TDTestCase:
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())


@ -2,7 +2,7 @@ from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
import taos
import sys
import time
import os
import os
from util.log import *
from util.sql import *
@ -12,13 +12,13 @@ from util.dnodes import TDDnode
from util.cluster import *
sys.path.append("./6-cluster")
from clusterCommonCreate import *
from clusterCommonCheck import clusterComCheck
from clusterCommonCheck import clusterComCheck
import time
import socket
import subprocess
from multiprocessing import Process
import threading
import threading
import time
import inspect
import ctypes
@ -56,7 +56,7 @@ class TDTestCase:
if res == 0:
raise ValueError("invalid thread id")
elif res != 1:
# """if it returns a number greater than one, you're in trouble,
# """if it returns a number greater than one, you're in trouble,
# and you should call it again with exc=NULL to revert the effect"""
ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)
raise SystemError("PyThreadState_SetAsyncExc failed")
@ -82,13 +82,13 @@ class TDTestCase:
'ctbPrefix': 'ctb',
'ctbNum': 1,
}
dnodeNumbers=int(dnodeNumbers)
mnodeNums=int(mnodeNums)
vnodeNumbers = int(dnodeNumbers-mnodeNums)
allDbNumbers=(paraDict['dbNumbers']*restartNumbers)
allStbNumbers=(paraDict['stbNumbers']*restartNumbers)
tdLog.info("first check dnode and mnode")
tdSql.query("show dnodes;")
tdSql.checkData(0,1,'%s:6030'%self.host)
@ -103,14 +103,14 @@ class TDTestCase:
tdSql.execute("create mnode on dnode 3")
clusterComCheck.checkMnodeStatus(3)
# add some error operations and
# add some error operations and
tdLog.info("Confirm the status of the dnode again")
tdSql.error("create mnode on dnode 2")
tdSql.query("show dnodes;")
print(tdSql.queryResult)
clusterComCheck.checkDnodes(dnodeNumbers)
tdLog.info("create database and stable")
tdLog.info("create database and stable")
tdDnodes=cluster.dnodes
stopcount =0
threads=[]
@ -122,7 +122,7 @@ class TDTestCase:
for tr in threads:
tr.start()
tdLog.info("Take turns stopping Mnodes ")
tdLog.info("Take turns stopping Mnodes ")
while stopcount < restartNumbers:
tdLog.info(" restart loop: %d"%stopcount )
if stopRole == "mnode":
@ -130,7 +130,7 @@ class TDTestCase:
tdDnodes[i].stoptaosd()
# sleep(10)
tdDnodes[i].starttaosd()
# sleep(10)
# sleep(10)
elif stopRole == "vnode":
for i in range(vnodeNumbers):
tdDnodes[i+mnodeNums].stoptaosd()
@ -142,7 +142,7 @@ class TDTestCase:
tdDnodes[i].stoptaosd()
# sleep(10)
tdDnodes[i].starttaosd()
# sleep(10)
# sleep(10)
# dnodeNumbers don't include database of schema
if clusterComCheck.checkDnodes(dnodeNumbers):
@ -153,14 +153,14 @@ class TDTestCase:
tdLog.exit("one or more of dnodes failed to start ")
# self.check3mnode()
stopcount+=1
for tr in threads:
tr.join()
tdLog.info("check dnode number:")
clusterComCheck.checkDnodes(dnodeNumbers)
tdSql.query("show databases")
tdLog.debug("we find %d databases but exepect to create %d databases "%(tdSql.queryRows-2,allDbNumbers-2))
# tdLog.info("check DB Rows:")
# clusterComCheck.checkDbRows(allDbNumbers)
# tdLog.info("check DB Status on by on")
@ -168,7 +168,7 @@ class TDTestCase:
# clusterComCheck.checkDb(paraDict['dbNumbers'],restartNumbers,dbNameIndex = '%s%d'%(paraDict["dbName"],i))
def run(self):
def run(self):
# print(self.master_dnode.cfgDict)
self.fiveDnodeThreeMnode(dnodeNumbers=5,mnodeNums=3,restartNumbers=2,stopRole='mnode')
@ -177,4 +177,4 @@ class TDTestCase:
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())


@ -2,7 +2,7 @@ from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
import taos
import sys
import time
import os
import os
from util.log import *
from util.sql import *
@ -12,13 +12,13 @@ from util.dnodes import TDDnode
from util.cluster import *
sys.path.append("./6-cluster")
from clusterCommonCreate import *
from clusterCommonCheck import clusterComCheck
from clusterCommonCheck import clusterComCheck
import time
import socket
import subprocess
from multiprocessing import Process
import threading
import threading
import time
import inspect
import ctypes
@ -56,7 +56,7 @@ class TDTestCase:
if res == 0:
raise ValueError("invalid thread id")
elif res != 1:
# """if it returns a number greater than one, you're in trouble,
# """if it returns a number greater than one, you're in trouble,
# and you should call it again with exc=NULL to revert the effect"""
ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)
raise SystemError("PyThreadState_SetAsyncExc failed")
@ -67,7 +67,7 @@ class TDTestCase:
def insertData(self,countstart,countstop):
# first add data : db\stable\childtable\general table
for couti in range(countstart,countstop):
tdLog.debug("drop database if exists db%d" %couti)
tdSql.execute("drop database if exists db%d" %couti)
@ -106,13 +106,13 @@ class TDTestCase:
'ctbPrefix': 'ctb',
'ctbNum': 1,
}
dnodeNumbers=int(dnodeNumbers)
mnodeNums=int(mnodeNums)
vnodeNumbers = int(dnodeNumbers-mnodeNums)
allStbNumbers=(paraDict['stbNumbers']*restartNumbers)
dbNumbers = 1
tdLog.info("first check dnode and mnode")
tdSql.query("show dnodes;")
tdSql.checkData(0,1,'%s:6030'%self.host)
@ -127,7 +127,7 @@ class TDTestCase:
tdSql.execute("create mnode on dnode 3")
clusterComCheck.checkMnodeStatus(3)
# add some error operations and
# add some error operations and
tdLog.info("Confirm the status of the dnode again")
tdSql.error("create mnode on dnode 2")
tdSql.query("show dnodes;")
@ -148,7 +148,7 @@ class TDTestCase:
for tr in threads:
tr.start()
tdLog.info("Take turns stopping Mnodes ")
tdLog.info("Take turns stopping Mnodes ")
while stopcount < restartNumbers:
tdLog.info(" restart loop: %d"%stopcount )
@ -157,7 +157,7 @@ class TDTestCase:
tdDnodes[i].stoptaosd()
# sleep(10)
tdDnodes[i].starttaosd()
# sleep(10)
# sleep(10)
elif stopRole == "vnode":
for i in range(vnodeNumbers):
tdDnodes[i+mnodeNums].stoptaosd()
@ -169,19 +169,19 @@ class TDTestCase:
tdDnodes[i].stoptaosd()
# sleep(10)
tdDnodes[i].starttaosd()
# sleep(10)
# sleep(10)
# dnodeNumbers don't include database of schema
if clusterComCheck.checkDnodes(dnodeNumbers):
tdLog.info("123")
else:
print("456")
self.stopThread(threads)
tdLog.exit("one or more of dnodes failed to start ")
# self.check3mnode()
stopcount+=1
for tr in threads:
tr.join()
clusterComCheck.checkDnodes(dnodeNumbers)
@ -195,7 +195,7 @@ class TDTestCase:
# tdSql.checkRows(allStbNumbers)
def run(self):
def run(self):
# print(self.master_dnode.cfgDict)
self.fiveDnodeThreeMnode(dnodeNumbers=5,mnodeNums=3,restartNumbers=2,stopRole='mnode')
@ -204,4 +204,4 @@ class TDTestCase:
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())


@ -2,7 +2,7 @@ from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
import taos
import sys
import time
import os
import os
from util.log import *
from util.sql import *
@ -12,13 +12,13 @@ from util.dnodes import TDDnode
from util.cluster import *
sys.path.append("./6-cluster")
from clusterCommonCreate import *
from clusterCommonCheck import clusterComCheck
from clusterCommonCheck import clusterComCheck
import time
import socket
import subprocess
from multiprocessing import Process
import threading
import threading
import time
import inspect
import ctypes
@ -56,7 +56,7 @@ class TDTestCase:
if res == 0:
raise ValueError("invalid thread id")
elif res != 1:
# """if it returns a number greater than one, you're in trouble,
# """if it returns a number greater than one, you're in trouble,
# and you should call it again with exc=NULL to revert the effect"""
ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)
raise SystemError("PyThreadState_SetAsyncExc failed")
@ -82,13 +82,13 @@ class TDTestCase:
'ctbPrefix': 'ctb',
'ctbNum': 1,
}
dnodeNumbers=int(dnodeNumbers)
mnodeNums=int(mnodeNums)
vnodeNumbers = int(dnodeNumbers-mnodeNums)
allDbNumbers=(paraDict['dbNumbers']*restartNumbers)
allStbNumbers=(paraDict['stbNumbers']*restartNumbers)
tdLog.info("first check dnode and mnode")
tdSql.query("show dnodes;")
tdSql.checkData(0,1,'%s:6030'%self.host)
@ -103,7 +103,7 @@ class TDTestCase:
tdSql.execute("create mnode on dnode 3")
clusterComCheck.checkMnodeStatus(3)
# add some error operations and
# add some error operations and
tdLog.info("Confirm the status of the dnode again")
tdSql.error("create mnode on dnode 2")
tdSql.query("show dnodes;")
@ -124,7 +124,7 @@ class TDTestCase:
for tr in threads:
tr.start()
tdLog.info("Take turns stopping Mnodes ")
tdLog.info("Take turns stopping Mnodes ")
while stopcount < restartNumbers:
tdLog.info(" restart loop: %d"%stopcount )
if stopRole == "mnode":
@ -132,7 +132,7 @@ class TDTestCase:
tdDnodes[i].stoptaosd()
# sleep(10)
tdDnodes[i].starttaosd()
# sleep(10)
# sleep(10)
elif stopRole == "vnode":
for i in range(vnodeNumbers):
tdDnodes[i+mnodeNums].stoptaosd()
@ -144,7 +144,7 @@ class TDTestCase:
tdDnodes[i].stoptaosd()
# sleep(10)
tdDnodes[i].starttaosd()
# sleep(10)
# sleep(10)
# dnodeNumbers don't include database of schema
if clusterComCheck.checkDnodes(dnodeNumbers):
@ -155,7 +155,7 @@ class TDTestCase:
tdLog.exit("one or more of dnodes failed to start ")
# self.check3mnode()
stopcount+=1
for tr in threads:
tr.join()
clusterComCheck.checkDnodes(dnodeNumbers)
@ -169,7 +169,7 @@ class TDTestCase:
# clusterComCheck.checkDb(paraDict['dbNumbers'],restartNumbers,dbNameIndex = '%s%d'%(paraDict["dbName"],i))
def run(self):
def run(self):
# print(self.master_dnode.cfgDict)
self.fiveDnodeThreeMnode(dnodeNumbers=5,mnodeNums=3,restartNumbers=10,stopRole='vnode')
@ -178,4 +178,4 @@ class TDTestCase:
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())


@ -2,7 +2,7 @@ from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
import taos
import sys
import time
import os
import os
from util.log import *
from util.sql import *
@ -12,13 +12,13 @@ from util.dnodes import TDDnode
from util.cluster import *
sys.path.append("./6-cluster")
from clusterCommonCreate import *
from clusterCommonCheck import clusterComCheck
from clusterCommonCheck import clusterComCheck
import time
import socket
import subprocess
from multiprocessing import Process
import threading
import threading
import time
import inspect
import ctypes
@ -31,7 +31,7 @@ class TDTestCase:
tdSql.init(conn.cursor())
self.host = socket.gethostname()
print(tdSql)
def getBuildPath(self):
selfPath = os.path.dirname(os.path.realpath(__file__))
@ -56,7 +56,7 @@ class TDTestCase:
if res == 0:
raise ValueError("invalid thread id")
elif res != 1:
# """if it returns a number greater than one, you're in trouble,
# """if it returns a number greater than one, you're in trouble,
# and you should call it again with exc=NULL to revert the effect"""
ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)
raise SystemError("PyThreadState_SetAsyncExc failed")
@ -67,7 +67,7 @@ class TDTestCase:
def insertData(self,countstart,countstop):
# first add data : db\stable\childtable\general table
for couti in range(countstart,countstop):
tdLog.debug("drop database if exists db%d" %couti)
tdSql.execute("drop database if exists db%d" %couti)
@ -106,13 +106,13 @@ class TDTestCase:
'ctbPrefix': 'ctb',
'ctbNum': 1,
}
dnodeNumbers=int(dnodeNumbers)
mnodeNums=int(mnodeNums)
vnodeNumbers = int(dnodeNumbers-mnodeNums)
allStbNumbers=(paraDict['stbNumbers']*restartNumbers)
dbNumbers = 1
print(tdSql)
tdLog.info("first check dnode and mnode")
tdSql.query("show dnodes;")
@ -128,7 +128,7 @@ class TDTestCase:
tdSql.execute("create mnode on dnode 3")
clusterComCheck.checkMnodeStatus(3)
# add some error operations and
# add some error operations and
tdLog.info("Confirm the status of the dnode again")
tdSql.error("create mnode on dnode 2")
tdSql.query("show dnodes;")
@ -137,12 +137,12 @@ class TDTestCase:
# create database and stable
clusterComCreate.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], paraDict["vgroups"],paraDict['replica'])
tdLog.info("Take turns stopping Mnodes ")
tdLog.info("Take turns stopping Mnodes ")
tdDnodes=cluster.dnodes
stopcount =0
threads=[]
for i in range(restartNumbers):
stableName= '%s%d'%(paraDict['stbName'],i)
newTdSql=tdCom.newTdSql()
@ -151,7 +151,7 @@ class TDTestCase:
for tr in threads:
tr.start()
tdLog.info("Take turns stopping Mnodes ")
tdLog.info("Take turns stopping Mnodes ")
while stopcount < restartNumbers:
tdLog.info(" restart loop: %d"%stopcount )
if stopRole == "mnode":
@ -159,7 +159,7 @@ class TDTestCase:
tdDnodes[i].stoptaosd()
# sleep(10)
tdDnodes[i].starttaosd()
# sleep(10)
# sleep(10)
elif stopRole == "vnode":
for i in range(vnodeNumbers):
tdDnodes[i+mnodeNums].stoptaosd()
@ -171,19 +171,19 @@ class TDTestCase:
tdDnodes[i].stoptaosd()
# sleep(10)
tdDnodes[i].starttaosd()
# sleep(10)
# sleep(10)
# dnodeNumbers don't include database of schema
if clusterComCheck.checkDnodes(dnodeNumbers):
tdLog.info("123")
else:
print("456")
self.stopThread(threads)
tdLog.exit("one or more of dnodes failed to start ")
# self.check3mnode()
stopcount+=1
for tr in threads:
tr.join()
clusterComCheck.checkDnodes(dnodeNumbers)
@ -197,7 +197,7 @@ class TDTestCase:
tdSql.checkRows(allStbNumbers)
def run(self):
def run(self):
# print(self.master_dnode.cfgDict)
self.fiveDnodeThreeMnode(dnodeNumbers=5,mnodeNums=3,restartNumbers=2,stopRole='vnode')
@ -206,4 +206,4 @@ class TDTestCase:
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())


@ -2,7 +2,7 @@ from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
import taos
import sys
import time
import os
import os
from util.log import *
from util.sql import *
@ -18,7 +18,7 @@ import time
import socket
import subprocess
from multiprocessing import Process
import threading
import threading
import time
import inspect
import ctypes
@ -54,7 +54,7 @@ class TDTestCase:
if res == 0:
raise ValueError("invalid thread id")
elif res != 1:
# """if it returns a number greater than one, you're in trouble,
# """if it returns a number greater than one, you're in trouble,
# and you should call it again with exc=NULL to revert the effect"""
ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)
raise SystemError("PyThreadState_SetAsyncExc failed")
@ -65,7 +65,7 @@ class TDTestCase:
def insert_data(self,countstart,countstop):
# first add data : db\stable\childtable\general table
for couti in range(countstart,countstop):
tdLog.debug("drop database if exists db%d" %couti)
tdSql.execute("drop database if exists db%d" %couti)
@ -96,7 +96,7 @@ class TDTestCase:
for i in range(stableCount):
tdSql.query("select count(*) from %s%d"%(stbname,i))
tdSql.checkData(0,0,rowsPerSTable)
return
return
def checkdnodes(self,dnodenumber):
count=0
@ -104,8 +104,8 @@ class TDTestCase:
time.sleep(1)
statusReadyBumber=0
tdSql.query("show dnodes;")
if tdSql.checkRows(dnodenumber) :
print("dnode is %d nodes"%dnodenumber)
if tdSql.checkRows(dnodenumber) :
print("dnode is %d nodes"%dnodenumber)
for i in range(dnodenumber):
if tdSql.queryResult[i][4] !='ready' :
status=tdSql.queryResult[i][4]
@ -122,15 +122,15 @@ class TDTestCase:
else:
print("%d mnodes is not ready in 10s "%dnodenumber)
return False
def check3mnode(self):
count=0
while count < 10:
time.sleep(1)
tdSql.query("show mnodes;")
if tdSql.checkRows(3) :
print("mnode is three nodes")
if tdSql.checkRows(3) :
print("mnode is three nodes")
if tdSql.queryResult[0][2]=='leader' :
if tdSql.queryResult[1][2]=='follower':
if tdSql.queryResult[2][2]=='follower':
@ -140,19 +140,19 @@ class TDTestCase:
if tdSql.queryResult[1][2]=='leader':
if tdSql.queryResult[2][2]=='follower':
print("three mnodes is ready in 10s")
break
break
elif tdSql.queryResult[0][2]=='follower' :
if tdSql.queryResult[1][2]=='follower':
if tdSql.queryResult[2][2]=='leader':
print("three mnodes is ready in 10s")
break
break
count+=1
else:
print("three mnodes is not ready in 10s ")
return -1
tdSql.query("show mnodes;")
tdSql.checkRows(3)
tdSql.query("show mnodes;")
tdSql.checkRows(3)
tdSql.checkData(0,1,'%s:6030'%self.host)
tdSql.checkData(0,3,'ready')
tdSql.checkData(1,1,'%s:6130'%self.host)
@ -182,8 +182,8 @@ class TDTestCase:
return -1
tdSql.error("drop mnode on dnode 1;")
tdSql.query("show mnodes;")
tdSql.checkRows(3)
tdSql.query("show mnodes;")
tdSql.checkRows(3)
tdSql.checkData(0,1,'%s:6030'%self.host)
tdSql.checkData(0,2,'offline')
tdSql.checkData(0,3,'ready')
@ -210,8 +210,8 @@ class TDTestCase:
return -1
tdSql.error("drop mnode on dnode 2;")
tdSql.query("show mnodes;")
tdSql.checkRows(3)
tdSql.query("show mnodes;")
tdSql.checkRows(3)
tdSql.checkData(0,1,'%s:6030'%self.host)
tdSql.checkData(0,2,'leader')
tdSql.checkData(0,3,'ready')
@ -239,8 +239,8 @@ class TDTestCase:
print("stop mnodes on dnode 3 failed in 10s")
return -1
tdSql.error("drop mnode on dnode 3;")
tdSql.query("show mnodes;")
tdSql.checkRows(3)
tdSql.query("show mnodes;")
tdSql.checkRows(3)
tdSql.checkData(0,1,'%s:6030'%self.host)
tdSql.checkData(0,2,'leader')
tdSql.checkData(0,3,'ready')
@ -258,15 +258,15 @@ class TDTestCase:
tdSql.checkData(4,1,'%s:6430'%self.host)
tdSql.checkData(0,4,'ready')
tdSql.checkData(4,4,'ready')
def five_dnode_three_mnode(self,dnodenumber):
tdSql.query("show dnodes;")
tdSql.checkData(0,1,'%s:6030'%self.host)
tdSql.checkData(4,1,'%s:6430'%self.host)
tdSql.checkData(0,4,'ready')
tdSql.checkData(4,4,'ready')
tdSql.query("show mnodes;")
tdSql.checkRows(1)
tdSql.query("show mnodes;")
tdSql.checkRows(1)
tdSql.checkData(0,1,'%s:6030'%self.host)
tdSql.checkData(0,2,'leader')
tdSql.checkData(0,3,'ready')
@ -281,15 +281,15 @@ class TDTestCase:
tdSql.error("create mnode on dnode 2")
tdSql.query("show dnodes;")
print(tdSql.queryResult)
tdLog.debug("stop all of mnode ")
tdLog.debug("stop all of mnode ")
# separate vnode and mnode in different dnodes.
# create database and stable
stopcount =0
stopcount =0
while stopcount < 2:
for i in range(dnodenumber):
# threads=[]
# threads = MyThreadFunc(self.insert_data(i*2,i*2+2))
# threads = MyThreadFunc(self.insert_data(i*2,i*2+2))
threads=threading.Thread(target=self.insert_data, args=(i,i+1))
threads.start()
self.TDDnodes.stoptaosd(i+1)
@ -306,13 +306,13 @@ class TDTestCase:
return False
# self.check3mnode()
self.check3mnode()
stopcount+=1
self.check3mnode()
def run(self):
def run(self):
# print(self.master_dnode.cfgDict)
self.five_dnode_three_mnode(5)
@ -321,4 +321,4 @@ class TDTestCase:
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
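The case above keeps a writer thread inserting while each dnode is bounced, so recovery is exercised under load; a condensed sketch, assuming the test class provides insert_data, TDDnodes and check3mnode:

import threading

def bounce_dnodes_under_load(test, dnodenumber, rounds=2):
    for _ in range(rounds):
        for i in range(dnodenumber):
            writer = threading.Thread(target=test.insert_data, args=(i, i + 1))
            writer.start()
            test.TDDnodes.stoptaosd(i + 1)   # dnode indexes are 1-based
            test.TDDnodes.starttaosd(i + 1)
            writer.join()
        test.check3mnode()                   # mnode roles must settle again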


@ -2,7 +2,7 @@ from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
import taos
import sys
import time
import os
import os
from util.log import *
from util.sql import *
@ -15,13 +15,13 @@ from test import tdDnodes
sys.path.append("./6-cluster")
from clusterCommonCreate import *
from clusterCommonCheck import *
from clusterCommonCheck import *
import time
import socket
import subprocess
from multiprocessing import Process
class TDTestCase:
def init(self,conn ,logSql):
@ -69,7 +69,7 @@ class TDTestCase:
dnodenumbers=int(dnodenumbers)
mnodeNums=int(mnodeNums)
dbNumbers = int(dnodenumbers * restartNumber)
tdLog.info("first check dnode and mnode")
tdSql.query("show dnodes;")
tdSql.checkData(0,1,'%s:6030'%self.host)
@ -84,7 +84,7 @@ class TDTestCase:
tdSql.execute("create mnode on dnode 3")
clusterComCheck.checkMnodeStatus(3)
# add some error operations and
# add some error operations and
tdLog.info("Confirm the status of the dnode again")
tdSql.error("create mnode on dnode 2")
tdSql.query("show dnodes;")
@ -109,10 +109,10 @@ class TDTestCase:
clusterComCheck.checkMnodeStatus(3)
def run(self):
def run(self):
# print(self.master_dnode.cfgDict)
self.fiveDnodeThreeMnode(dnodenumbers=5,mnodeNums=3,restartNumber=1)
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")


@ -2,7 +2,7 @@ from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
import taos
import sys
import time
import os
import os
from util.log import *
from util.sql import *
@ -15,13 +15,13 @@ from test import tdDnodes
sys.path.append("./6-cluster")
from clusterCommonCreate import *
from clusterCommonCheck import *
from clusterCommonCheck import *
import time
import socket
import subprocess
from multiprocessing import Process
class TDTestCase:
def init(self,conn ,logSql):
@ -69,7 +69,7 @@ class TDTestCase:
dnodenumbers=int(dnodenumbers)
mnodeNums=int(mnodeNums)
dbNumbers = 1
tdLog.info("first check dnode and mnode")
tdSql.query("show dnodes;")
tdSql.checkData(0,1,'%s:6030'%self.host)
@ -84,7 +84,7 @@ class TDTestCase:
tdSql.execute("create mnode on dnode 3")
clusterComCheck.checkMnodeStatus(3)
# add some error operations and
# add some error operations and
tdLog.info("Confirm the status of the dnode again")
tdSql.error("create mnode on dnode 2")
tdSql.query("show dnodes;")
@ -99,7 +99,7 @@ class TDTestCase:
tdLog.info("check whether 2 mnode status is offline")
clusterComCheck.check3mnode2off()
# tdSql.error("create user user1 pass '123';")
tdLog.info("start two follower")
tdDnodes[1].starttaosd()
tdDnodes[2].starttaosd()
@ -107,10 +107,10 @@ class TDTestCase:
clusterComCheck.checkMnodeStatus(mnodeNums)
def run(self):
def run(self):
# print(self.master_dnode.cfgDict)
self.fiveDnodeThreeMnode(dnodenumbers=5,mnodeNums=3,restartNumber=1)
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")


@ -2,7 +2,7 @@ from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
import taos
import sys
import time
import os
import os
from util.log import *
from util.sql import *
@ -16,13 +16,13 @@ from test import tdDnodes
sys.path.append("./6-cluster")
from clusterCommonCreate import *
from clusterCommonCheck import *
from clusterCommonCheck import *
import time
import socket
import subprocess
from multiprocessing import Process
class TDTestCase:
def init(self,conn ,logSql):
@ -69,7 +69,7 @@ class TDTestCase:
dnodenumbers=int(dnodenumbers)
mnodeNums=int(mnodeNums)
dbNumbers = int(dnodenumbers * restartNumber)
tdLog.info("first check dnode and mnode")
tdSql.query("show dnodes;")
tdSql.checkData(0,1,'%s:6030'%self.host)
@ -84,7 +84,7 @@ class TDTestCase:
tdSql.execute("create mnode on dnode 3")
clusterComCheck.checkMnodeStatus(3)
# add some error operations and
# add some error operations and
tdLog.info("Confirm the status of the dnode again")
tdSql.error("create mnode on dnode 2")
tdSql.query("show dnodes;")
@ -96,21 +96,21 @@ class TDTestCase:
# restart all taosd
tdDnodes=cluster.dnodes
tdDnodes=cluster.dnodes
for i in range(mnodeNums):
tdDnodes[i].stoptaosd()
for j in range(dnodenumbers):
if j != i:
cluster.checkConnectStatus(j)
clusterComCheck.check3mnodeoff(i+1,3)
clusterComCheck.init(cluster.checkConnectStatus(i+1))
clusterComCheck.init(cluster.checkConnectStatus(i+1))
tdDnodes[i].starttaosd()
clusterComCheck.checkMnodeStatus(mnodeNums)
tdLog.info("Take turns stopping all dnodes ")
tdLog.info("Take turns stopping all dnodes ")
# separate vnode and mnode in different dnodes.
# create database and stable
stopcount =0
stopcount =0
while stopcount < restartNumber:
tdLog.info("first restart loop")
for i in range(dnodenumbers):
@ -120,13 +120,13 @@ class TDTestCase:
clusterComCheck.checkDnodes(dnodenumbers)
clusterComCheck.checkMnodeStatus(mnodeNums)
def run(self):
def run(self):
# print(self.master_dnode.cfgDict)
self.fiveDnodeThreeMnode(5,3,1)
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
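The loop above stops the dnode hosting each mnode in turn, confirms the remaining dnodes stay reachable and that the stopped mnode is reported offline, then brings it back; a minimal sketch with the framework objects passed in:

def take_turns_stopping_mnodes(tdDnodes, cluster, clusterComCheck,
                               mnodeNums, dnodenumbers):
    for i in range(mnodeNums):
        tdDnodes[i].stoptaosd()
        for j in range(dnodenumbers):
            if j != i:
                cluster.checkConnectStatus(j)        # the other dnodes must stay reachable
        clusterComCheck.check3mnodeoff(i + 1, 3)     # the stopped mnode shows as offline
        tdDnodes[i].starttaosd()
        clusterComCheck.checkMnodeStatus(mnodeNums)  # roles settle before the next round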


@ -2,7 +2,7 @@ from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
import taos
import sys
import time
import os
import os
from util.log import *
from util.sql import *
@ -15,13 +15,13 @@ from test import tdDnodes
sys.path.append("./6-cluster")
from clusterCommonCreate import *
from clusterCommonCheck import *
from clusterCommonCheck import *
import time
import socket
import subprocess
from multiprocessing import Process
class TDTestCase:
def init(self,conn ,logSql):
@ -69,7 +69,7 @@ class TDTestCase:
dnodenumbers=int(dnodenumbers)
mnodeNums=int(mnodeNums)
dbNumbers = 1
tdLog.info("first check dnode and mnode")
tdSql.query("show dnodes;")
tdSql.checkData(0,1,'%s:6030'%self.host)
@ -84,7 +84,7 @@ class TDTestCase:
tdSql.execute("create mnode on dnode 3")
clusterComCheck.checkMnodeStatus(3)
# add some error operations and
# add some error operations and
tdLog.info("Confirm the status of the dnode again")
tdSql.error("create mnode on dnode 2")
tdSql.query("show dnodes;")
@ -100,20 +100,20 @@ class TDTestCase:
# tdLog.info("check whether 2 mnode status is offline")
# clusterComCheck.check3mnode2off()
# tdSql.error("create user user1 pass '123';")
tdLog.info("start one mnode" )
tdDnodes[0].starttaosd()
clusterComCheck.check3mnodeoff(2)
clusterComCreate.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], paraDict["vgroups"],paraDict['replica'])
clusterComCheck.checkDb(dbNumbers,1,'db0')
def run(self):
def run(self):
# print(self.master_dnode.cfgDict)
self.fiveDnodeThreeMnode(dnodenumbers=5,mnodeNums=3,restartNumber=1)
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")


@ -2,7 +2,7 @@ from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
import taos
import sys
import time
import os
import os
from util.log import *
from util.sql import *
@ -13,7 +13,7 @@ import time
import socket
import subprocess
from multiprocessing import Process
import threading
import threading
import time
import inspect
import ctypes
@ -36,7 +36,7 @@ class TDTestCase:
self.host=self.master_dnode.cfgDict["fqdn"]
conn1 = taos.connect(self.master_dnode.cfgDict["fqdn"] , config=self.master_dnode.cfgDir)
tdSql.init(conn1.cursor())
def getBuildPath(self):
selfPath = os.path.dirname(os.path.realpath(__file__))
@ -62,7 +62,7 @@ class TDTestCase:
if res == 0:
raise ValueError("invalid thread id")
elif res != 1:
# """if it returns a number greater than one, you're in trouble,
# """if it returns a number greater than one, you're in trouble,
# and you should call it again with exc=NULL to revert the effect"""
ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)
raise SystemError("PyThreadState_SetAsyncExc failed")
@ -73,7 +73,7 @@ class TDTestCase:
def insert_data(self,countstart,countstop):
# first add data : db\stable\childtable\general table
for couti in range(countstart,countstop):
tdLog.debug("drop database if exists db%d" %couti)
tdSql.execute("drop database if exists db%d" %couti)
@ -95,10 +95,10 @@ class TDTestCase:
for i in range(4):
tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
def depoly_cluster(self ,dnodes_nums):
def depoly_cluster(self ,dnodes_nums):
testCluster = False
valgrind = 0
valgrind = 0
hostname = socket.gethostname()
dnodes = []
start_port = 6030
@ -112,7 +112,7 @@ class TDTestCase:
dnode.addExtraCfg("monitorPort", 7043)
dnode.addExtraCfg("secondEp", f"{hostname}:{start_port_sec}")
dnodes.append(dnode)
self.TDDnodes = MyDnodes(dnodes)
self.TDDnodes.init("")
self.TDDnodes.setTestCluster(testCluster)
@ -120,11 +120,11 @@ class TDTestCase:
self.TDDnodes.stopAll()
for dnode in self.TDDnodes.dnodes:
self.TDDnodes.deploy(dnode.index,{})
for dnode in self.TDDnodes.dnodes:
self.TDDnodes.starttaosd(dnode.index)
# create cluster
# create cluster
for dnode in self.TDDnodes.dnodes[1:]:
# tdLog.debug(dnode.cfgDict)
dnode_id = dnode.cfgDict["fqdn"] + ":" +dnode.cfgDict["serverPort"]
@ -133,7 +133,7 @@ class TDTestCase:
cmd = f" taos -h {dnode_first_host} -P {dnode_first_port} -s ' create dnode \"{dnode_id} \" ' ;"
tdLog.debug(cmd)
os.system(cmd)
time.sleep(2)
tdLog.info(" create cluster with %d dnode done! " %dnodes_nums)
@ -143,8 +143,8 @@ class TDTestCase:
time.sleep(1)
statusReadyBumber=0
tdSql.query("show dnodes;")
if tdSql.checkRows(dnodenumber) :
tdLog.debug("dnode is %d nodes"%dnodenumber)
if tdSql.checkRows(dnodenumber) :
tdLog.debug("dnode is %d nodes"%dnodenumber)
for i in range(dnodenumber):
if tdSql.queryResult[i][4] !='ready' :
status=tdSql.queryResult[i][4]
@ -161,15 +161,15 @@ class TDTestCase:
else:
tdLog.debug("%d mnodes is not ready in 10s "%dnodenumber)
return False
def check3mnode(self):
count=0
while count < 10:
time.sleep(1)
tdSql.query("show mnodes;")
if tdSql.checkRows(3) :
tdLog.debug("mnode is three nodes")
if tdSql.checkRows(3) :
tdLog.debug("mnode is three nodes")
if tdSql.queryResult[0][2]=='leader' :
if tdSql.queryResult[1][2]=='follower':
if tdSql.queryResult[2][2]=='follower':
@ -179,19 +179,19 @@ class TDTestCase:
if tdSql.queryResult[1][2]=='leader':
if tdSql.queryResult[2][2]=='follower':
tdLog.debug("three mnodes is ready in 10s")
break
break
elif tdSql.queryResult[0][2]=='follower' :
if tdSql.queryResult[1][2]=='follower':
if tdSql.queryResult[2][2]=='leader':
tdLog.debug("three mnodes is ready in 10s")
break
break
count+=1
else:
tdLog.debug("three mnodes is not ready in 10s ")
return -1
tdSql.query("show mnodes;")
tdSql.checkRows(3)
tdSql.query("show mnodes;")
tdSql.checkRows(3)
tdSql.checkData(0,1,'%s:6030'%self.host)
tdSql.checkData(0,3,'ready')
tdSql.checkData(1,1,'%s:6130'%self.host)
@ -221,8 +221,8 @@ class TDTestCase:
return -1
tdSql.error("drop mnode on dnode 1;")
tdSql.query("show mnodes;")
tdSql.checkRows(3)
tdSql.query("show mnodes;")
tdSql.checkRows(3)
tdSql.checkData(0,1,'%s:6030'%self.host)
tdSql.checkData(0,2,'offline')
tdSql.checkData(0,3,'ready')
@ -249,8 +249,8 @@ class TDTestCase:
return -1
tdSql.error("drop mnode on dnode 2;")
tdSql.query("show mnodes;")
tdSql.checkRows(3)
tdSql.query("show mnodes;")
tdSql.checkRows(3)
tdSql.checkData(0,1,'%s:6030'%self.host)
tdSql.checkData(0,2,'leader')
tdSql.checkData(0,3,'ready')
@ -278,8 +278,8 @@ class TDTestCase:
tdLog.debug("stop mnodes on dnode 3 failed in 10s")
return -1
tdSql.error("drop mnode on dnode 3;")
tdSql.query("show mnodes;")
tdSql.checkRows(3)
tdSql.query("show mnodes;")
tdSql.checkRows(3)
tdSql.checkData(0,1,'%s:6030'%self.host)
tdSql.checkData(0,2,'leader')
tdSql.checkData(0,3,'ready')
@ -296,8 +296,8 @@ class TDTestCase:
tdSql.checkData(4,1,'%s:6430'%self.host)
tdSql.checkData(0,4,'ready')
tdSql.checkData(4,4,'ready')
tdSql.query("show mnodes;")
tdSql.checkRows(1)
tdSql.query("show mnodes;")
tdSql.checkRows(1)
tdSql.checkData(0,1,'%s:6030'%self.host)
tdSql.checkData(0,2,'leader')
tdSql.checkData(0,3,'ready')
@ -312,13 +312,13 @@ class TDTestCase:
tdSql.error("create mnode on dnode 2")
tdSql.query("show dnodes;")
tdLog.debug(tdSql.queryResult)
tdLog.debug("stop all of mnode ")
tdLog.debug("stop all of mnode ")
stopcount =0
stopcount =0
while stopcount <= 2:
for i in range(dnodenumber):
# threads=[]
# threads = MyThreadFunc(self.insert_data(i*2,i*2+2))
# threads = MyThreadFunc(self.insert_data(i*2,i*2+2))
threads=threading.Thread(target=self.insert_data, args=((stopcount+i)*2,(i+stopcount)*2+2))
threads.start()
self.TDDnodes.stoptaosd(i+1)
@ -344,7 +344,7 @@ class TDTestCase:
return taos.connect(host=host, port=int(port), config=config_dir)
def run(self):
def run(self):
# tdLog.debug(self.master_dnode.cfgDict)
self.buildcluster(5)
self.five_dnode_three_mnode(5)
@ -354,4 +354,4 @@ class TDTestCase:
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())


@ -2,7 +2,7 @@ from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
import taos
import sys
import time
import os
import os
from util.log import *
from util.sql import *
@ -15,13 +15,13 @@ from test import tdDnodes
sys.path.append("./6-cluster")
from clusterCommonCreate import *
from clusterCommonCheck import *
from clusterCommonCheck import *
import time
import socket
import subprocess
from multiprocessing import Process
class TDTestCase:
def init(self,conn ,logSql):
@ -69,7 +69,7 @@ class TDTestCase:
dnodenumbers=int(dnodenumbers)
mnodeNums=int(mnodeNums)
dbNumbers = int(dnodenumbers * restartNumber)
tdLog.info("first check dnode and mnode")
tdSql.query("show dnodes;")
tdSql.checkData(0,1,'%s:6030'%self.host)
@ -84,7 +84,7 @@ class TDTestCase:
tdSql.execute("create mnode on dnode 3")
clusterComCheck.checkMnodeStatus(3)
# add some error operations and
# add some error operations and
tdLog.info("Confirm the status of the dnode again")
tdSql.error("create mnode on dnode 2")
tdSql.query("show dnodes;")
@ -93,10 +93,10 @@ class TDTestCase:
# restart all taosd
tdDnodes=cluster.dnodes
tdLog.info("Take turns stopping all dnodes ")
tdLog.info("Take turns stopping all dnodes ")
# separate vnode and mnode in different dnodes.
# create database and stable
stopcount =0
stopcount =0
while stopcount <= 2:
tdLog.info(" restart loop: %d"%stopcount )
for i in range(dnodenumbers):
@ -106,10 +106,10 @@ class TDTestCase:
clusterComCheck.checkDnodes(dnodenumbers)
clusterComCheck.checkMnodeStatus(3)
def run(self):
def run(self):
# print(self.master_dnode.cfgDict)
self.fiveDnodeThreeMnode(5,3,1)
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")


@ -48,10 +48,10 @@ class ClusterComCheck:
if tdSql.queryResult[i][4] == "ready":
status+=1
tdLog.info(status)
if status == dnodeNumbers:
tdLog.success("it find cluster with %d dnodes and check that all cluster dnodes are ready within 30s! " %dnodeNumbers)
return True
return True
count+=1
time.sleep(1)
else:
@ -77,15 +77,15 @@ class ClusterComCheck:
def checkDb(self,dbNumbers,restartNumber,dbNameIndex):
count=0
alldbNumbers=(dbNumbers*restartNumber)+2
while count < 5:
while count < 5:
query_status=0
for j in range(dbNumbers):
for i in range(alldbNumbers):
tdSql.query("show databases;")
if "%s_%d"%(dbNameIndex,j) == tdSql.queryResult[i][0] :
if "%s_%d"%(dbNameIndex,j) == tdSql.queryResult[i][0] :
if tdSql.queryResult[i][15] == "ready":
query_status+=1
tdLog.debug("check %s_%d that status is ready "%(dbNameIndex,j))
tdLog.debug("check %s_%d that status is ready "%(dbNameIndex,j))
else:
continue
# print(query_status)
@ -107,7 +107,7 @@ class ClusterComCheck:
for i in range(stableCount):
tdSql.query("select count(*) from %s%d"%(stbname,i))
tdSql.checkData(0,0,rowsPerSTable)
return
return
def checkMnodeStatus(self,mnodeNums):
self.mnodeNums=int(mnodeNums)
@ -118,15 +118,15 @@ class ClusterComCheck:
while count < 10:
time.sleep(1)
tdSql.query("show mnodes;")
if tdSql.checkRows(self.mnodeNums) :
if tdSql.checkRows(self.mnodeNums) :
tdLog.success("cluster has %d mnodes" %self.mnodeNums )
if self.mnodeNums == 1:
if tdSql.queryResult[0][2]== 'leader' and tdSql.queryResult[0][3]== 'ready' :
tdLog.success("%d mnodes is ready in 10s"%self.mnodeNums)
return True
count+=1
elif self.mnodeNums == 3 :
return True
count+=1
elif self.mnodeNums == 3 :
if tdSql.queryResult[0][2]=='leader' and tdSql.queryResult[0][3]== 'ready' :
if tdSql.queryResult[1][2]=='follower' and tdSql.queryResult[1][3]== 'ready' :
if tdSql.queryResult[2][2]=='follower' and tdSql.queryResult[2][3]== 'ready' :
@ -141,9 +141,9 @@ class ClusterComCheck:
if tdSql.queryResult[0][2]=='follower' and tdSql.queryResult[0][3]== 'ready' :
if tdSql.queryResult[1][2]=='follower' and tdSql.queryResult[1][3]== 'ready' :
tdLog.success("%d mnodes is ready in 10s"%self.mnodeNums)
return True
return True
count+=1
elif self.mnodeNums == 2 :
elif self.mnodeNums == 2 :
if tdSql.queryResult[0][2]=='leader' and tdSql.queryResult[0][3]== 'ready' :
if tdSql.queryResult[1][2]=='follower' and tdSql.queryResult[1][3]== 'ready' :
tdLog.success("%d mnodes is ready in 10s"%self.mnodeNums)
@ -157,7 +157,7 @@ class ClusterComCheck:
tdLog.debug(tdSql.queryResult)
tdLog.exit("cluster of %d mnodes is not ready in 10s " %self.mnodeNums)
def check3mnodeoff(self,offlineDnodeNo,mnodeNums=3):
@ -224,7 +224,7 @@ class ClusterComCheck:
else:
tdLog.debug(tdSql.queryResult)
tdLog.exit("stop mnodes on dnode %d failed in 10s ")


@ -37,23 +37,23 @@ class ClusterComCreate:
tdSql.init(conn.cursor())
# tdSql.init(conn.cursor(), logSql) # output sql.txt file
def initConsumerTable(self,cdbName='cdb'):
def initConsumerTable(self,cdbName='cdb'):
tdLog.info("create consume database, and consume info table, and consume result table")
tdSql.query("create database if not exists %s vgroups 1"%(cdbName))
tdSql.query("drop table if exists %s.consumeinfo "%(cdbName))
tdSql.query("drop table if exists %s.consumeresult "%(cdbName))
tdSql.query("drop table if exists %s.notifyinfo "%(cdbName))
tdSql.query("drop table if exists %s.notifyinfo "%(cdbName))
tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName)
tdSql.query("create table %s.consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)"%cdbName)
tdSql.query("create table %s.notifyinfo (ts timestamp, cmdid int, consumerid int)"%cdbName)
def initConsumerInfoTable(self,cdbName='cdb'):
def initConsumerInfoTable(self,cdbName='cdb'):
tdLog.info("drop consumeinfo table")
tdSql.query("drop table if exists %s.consumeinfo "%(cdbName))
tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName)
def insertConsumerInfo(self,consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifmanualcommit,cdbName='cdb'):
def insertConsumerInfo(self,consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifmanualcommit,cdbName='cdb'):
sql = "insert into %s.consumeinfo values "%cdbName
sql += "(now, %d, '%s', '%s', %d, %d, %d)"%(consumerId, topicList, keyList, expectrowcnt, ifcheckdata, ifmanualcommit)
tdLog.info("consume info sql: %s"%sql)
@ -68,11 +68,11 @@ class ClusterComCreate:
break
else:
time.sleep(5)
for i in range(expectRows):
tdLog.info ("consume id: %d, consume msgs: %d, consume rows: %d"%(tdSql.getData(i , 1), tdSql.getData(i , 2), tdSql.getData(i , 3)))
resultList.append(tdSql.getData(i , 3))
return resultList
def startTmqSimProcess(self,pollDelay,dbName,showMsg=1,showRow=1,cdbName='cdb',valgrind=0):
@ -82,14 +82,14 @@ class ClusterComCreate:
logFile = cfgPath + '/../log/valgrind-tmq.log'
shellCmd = 'nohup valgrind --log-file=' + logFile
shellCmd += '--tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all --num-callers=20 -v --workaround-gcc296-bugs=yes '
if (platform.system().lower() == 'windows'):
shellCmd = 'mintty -h never -w hide ' + buildPath + '\\build\\bin\\tmq_sim.exe -c ' + cfgPath
shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName)
shellCmd += "> nul 2>&1 &"
shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName)
shellCmd += "> nul 2>&1 &"
else:
shellCmd = 'nohup ' + buildPath + '/build/bin/tmq_sim -c ' + cfgPath
shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName)
shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName)
shellCmd += "> /dev/null 2>&1 &"
tdLog.info(shellCmd)
os.system(shellCmd)
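startTmqSimProcess above builds the same tmq_sim invocation for both platforms and launches it detached; a condensed sketch with the build and config paths treated as assumptions:

import os
import platform

def start_tmq_sim(build_path, cfg_path, poll_delay, db_name,
                  show_msg=1, show_row=1, cdb_name='cdb'):
    args = " -y %d -d %s -g %d -r %d -w %s " % (poll_delay, db_name, show_msg, show_row, cdb_name)
    if platform.system().lower() == 'windows':
        # hidden mintty session, output discarded
        shell_cmd = 'mintty -h never -w hide ' + build_path + '\\build\\bin\\tmq_sim.exe -c ' + cfg_path
        shell_cmd += args + "> nul 2>&1 &"
    else:
        # detach with nohup, output discarded
        shell_cmd = 'nohup ' + build_path + '/build/bin/tmq_sim -c ' + cfg_path
        shell_cmd += args + "> /dev/null 2>&1 &"
    os.system(shell_cmd)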
@ -142,7 +142,7 @@ class ClusterComCreate:
tdLog.debug("create table if not exists %s.%s_%d (ts timestamp, c1 int, c2 int, c3 binary(16)) tags(t1 int, t2 binary(32))"%(dbNameIndex, stbNameIndex,i))
tsql.execute("create table if not exists %s.%s_%d (ts timestamp, c1 int, c2 int, c3 binary(16)) tags(t1 int, t2 binary(32))"%(dbNameIndex, stbNameIndex,i))
tdLog.debug("complete to create %s.%s_%d" %(dbNameIndex, stbNameIndex,i))
return
return
def create_ctable(self,tsql=None, dbName='dbx',stbName='stb',ctbPrefix='ctb',ctbNum=1):
tsql.execute("use %s" %dbName)
@ -153,14 +153,14 @@ class ClusterComCreate:
tagValue = 'beijing'
if (i % 2 == 0):
tagValue = 'shanghai'
sql += " %s_%d using %s tags(%d, '%s')"%(ctbPrefix,i,stbName,i+1, tagValue)
if (i > 0) and (i%100 == 0):
tsql.execute(sql)
sql = pre_create
if sql != pre_create:
tsql.execute(sql)
tdLog.debug("complete to create %d child tables in %s.%s" %(ctbNum, dbName, stbName))
return
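create_ctable above batches the "using ... tags(...)" clauses and flushes every 100 tables so each SQL statement stays short; a minimal sketch:

def create_child_tables(tsql, dbname, stbname, ctb_prefix, ctb_num):
    tsql.execute("use %s" % dbname)
    pre_create = "create table"
    sql = pre_create
    for i in range(ctb_num):
        tag_value = 'shanghai' if i % 2 == 0 else 'beijing'
        sql += " %s_%d using %s tags(%d, '%s')" % (ctb_prefix, i, stbname, i + 1, tag_value)
        if i > 0 and i % 100 == 0:
            tsql.execute(sql)   # flush a batch of 100 child tables
            sql = pre_create
    if sql != pre_create:
        tsql.execute(sql)       # flush the remainder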
@ -189,7 +189,7 @@ class ClusterComCreate:
#print("insert sql:%s"%sql)
tsql.execute(sql)
tdLog.debug("insert data ............ [OK]")
return
return
def insert_data_1(self,tsql,dbName,ctbPrefix,ctbNum,rowsPerTbl,batchNum,startTs):
tdLog.debug("start to insert data ............")
@ -235,7 +235,7 @@ class ClusterComCreate:
ctbDict[i] = 0
#tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows))
rowsOfCtb = 0
rowsOfCtb = 0
while rowsOfCtb < rowsPerTbl:
for i in range(ctbNum):
sql += " %s.%s_%d values "%(dbName,ctbPrefix,i)
@ -262,7 +262,7 @@ class ClusterComCreate:
startTs = int(round(t * 1000))
#tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows))
rowsOfSql = 0
rowsOfSql = 0
for i in range(ctbNum):
sql += " %s.%s_%d using %s.%s tags (%d) values "%(dbName,ctbPrefix,i,dbName,stbName,i)
for j in range(rowsPerTbl):
@ -294,7 +294,7 @@ class ClusterComCreate:
for i in range(ctbNum):
tbName = '%s%s'%(ctbPrefix,i)
tdCom.insert_rows(tsql,dbname=paraDict["dbName"],tbname=tbName,start_ts_value=paraDict['startTs'],count=paraDict['rowsPerTbl'])
return
return
def threadFunction(self, **paraDict):
# create new connector for new tdSql instance in my thread


@ -3,7 +3,7 @@ from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
import taos
import sys
import time
import os
import os
from util.log import *
from util.sql import *
@ -95,7 +95,7 @@ class TDTestCase:
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
'''
)
for i in range(5):
tdSql.execute("create table sub_tb_{} using stb1 tags({})".format(i,i))
tdSql.query("show stables")
@ -126,7 +126,7 @@ class TDTestCase:
return taos.connect(host=host, port=int(port), config=config_dir)
def run(self):
def run(self):
self.check_setup_cluster_status()
self.create_db_check_vgroups()
@ -135,4 +135,4 @@ class TDTestCase:
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())


@ -3,7 +3,7 @@ from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
import taos
import sys
import time
import os
import os
from util.log import *
from util.sql import *
@ -26,9 +26,9 @@ class TDTestCase:
self.dnode_list = {}
self.ts = 1483200000000
self.db_name ='testdb'
self.replica = 1
self.replica = 1
self.vgroups = 2
self.tb_nums = 10
self.tb_nums = 10
self.row_nums = 100
def getBuildPath(self):
@ -101,7 +101,7 @@ class TDTestCase:
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
'''
)
for i in range(5):
tdSql.execute("create table sub_tb_{} using stb1 tags({})".format(i,i))
tdSql.query("show stables")
@ -145,7 +145,7 @@ class TDTestCase:
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp)
'''
)
for i in range(tb_nums):
sub_tbname = "sub_tb_{}".format(i)
tdSql.execute("create table {} using stb1 tags({})".format(sub_tbname,i))
@ -164,7 +164,7 @@ class TDTestCase:
tdSql.query("select distinct tbname from {}.{}".format(dbname,'stb1'))
tdSql.checkRows(tb_nums)
def run(self):
def run(self):
self.check_setup_cluster_status()
self.create_db_check_vgroups()
self.create_db_replica_1_insertdatas(self.db_name , self.replica , self.vgroups , self.tb_nums , self.row_nums)
@ -176,4 +176,4 @@ class TDTestCase:
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())


@ -3,7 +3,7 @@ from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
import taos
import sys
import time
import os
import os
from util.log import *
from util.sql import *
@ -26,9 +26,9 @@ class TDTestCase:
self.dnode_list = {}
self.ts = 1483200000000
self.db_name ='testdb'
self.replica = 3
self.replica = 3
self.vgroups = 2
self.tb_nums = 10
self.tb_nums = 10
self.row_nums = 100
def getBuildPath(self):
@ -101,7 +101,7 @@ class TDTestCase:
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
'''
)
for i in range(5):
tdSql.execute("create table sub_tb_{} using stb1 tags({})".format(i,i))
tdSql.query("show stables")
@ -145,7 +145,7 @@ class TDTestCase:
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp)
'''
)
for i in range(tb_nums):
sub_tbname = "sub_tb_{}".format(i)
tdSql.execute("create table {} using stb1 tags({})".format(sub_tbname,i))
@ -164,7 +164,7 @@ class TDTestCase:
tdSql.query("select distinct tbname from {}.{}".format(dbname,'stb1'))
tdSql.checkRows(tb_nums)
def run(self):
def run(self):
self.check_setup_cluster_status()
self.create_db_check_vgroups()
self.create_db_replica_3_insertdatas(self.db_name , self.replica , self.vgroups , self.tb_nums , self.row_nums)
@ -176,4 +176,4 @@ class TDTestCase:
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())

View File

@ -3,7 +3,7 @@ from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
import taos
import sys
import time
import os
import os
from util.log import *
from util.sql import *
@ -30,9 +30,9 @@ class TDTestCase:
self.ts = 1483200000000
self.ts_step =1000
self.db_name ='testdb'
self.replica = 3
self.replica = 3
self.vgroups = 1
self.tb_nums = 10
self.tb_nums = 10
self.row_nums = 100
self.stop_dnode_id = None
self.loop_restart_times = 5
@ -110,7 +110,7 @@ class TDTestCase:
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
'''
)
for i in range(5):
tdSql.execute("create table sub_tb_{} using stb1 tags({})".format(i,i))
tdSql.query("show stables")
@ -142,7 +142,7 @@ class TDTestCase:
tdSql.execute(drop_db_sql)
tdSql.execute(create_db_sql)
tdSql.execute("use {}".format(dbname))
def create_stable_insert_datas(self,dbname ,stablename , tb_nums , row_nums):
tdSql.execute("use {}".format(dbname))
tdSql.execute(
@ -151,7 +151,7 @@ class TDTestCase:
tags (t1 int)
'''.format(stablename)
)
for i in range(tb_nums):
sub_tbname = "sub_{}_{}".format(stablename,i)
tdSql.execute("create table {} using {} tags({})".format(sub_tbname, stablename ,i))
@ -162,11 +162,11 @@ class TDTestCase:
tdSql.execute(f"insert into {sub_tbname} values ({ts}, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
tdLog.notice(" ==== stable {} insert rows execute end =====".format(stablename))
def append_rows_of_exists_tables(self,dbname ,stablename , tbname , append_nums ):
tdSql.execute("use {}".format(dbname))
for row_num in range(append_nums):
tdSql.execute(f"insert into {tbname} values (now, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
# print(f"insert into {tbname} values (now, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
@ -174,9 +174,9 @@ class TDTestCase:
os.system("taos -s 'select count(*) from {}.{}';".format(dbname,stablename))
def check_insert_rows(self, dbname, stablename , tb_nums , row_nums, append_rows):
tdSql.execute("use {}".format(dbname))
tdSql.query("select count(*) from {}.{}".format(dbname,stablename))
while not tdSql.queryResult:
@ -184,8 +184,8 @@ class TDTestCase:
tdSql.query("select count(*) from {}.{}".format(dbname,stablename))
status_OK = self.mycheckData("select count(*) from {}.{}".format(dbname,stablename) ,0 , 0 , tb_nums*row_nums+append_rows)
count = 0
count = 0
while not status_OK :
if count > self.try_check_times:
os.system("taos -s ' show {}.vgroups; '".format(dbname))
@ -199,14 +199,14 @@ class TDTestCase:
status_OK = self.mycheckData("select count(*) from {}.{}".format(dbname,stablename) ,0 , 0 , tb_nums*row_nums+append_rows)
tdLog.debug(" ==== check insert rows first failed , this is {}_th retry check rows of database {}".format(count , dbname))
count += 1
tdSql.query("select distinct tbname from {}.{}".format(dbname,stablename))
while not tdSql.queryResult:
time.sleep(0.1)
tdSql.query("select distinct tbname from {}.{}".format(dbname,stablename))
status_OK = self.mycheckRows("select distinct tbname from {}.{}".format(dbname,stablename) ,tb_nums)
count = 0
count = 0
while not status_OK :
if count > self.try_check_times:
os.system("taos -s ' show {}.vgroups;'".format(dbname))
@ -224,7 +224,7 @@ class TDTestCase:
tdSql.query("show {}.vgroups".format(dbname))
vgroup_infos = tdSql.queryResult
for vgroup_info in vgroup_infos:
leader_infos = vgroup_info[3:-4]
leader_infos = vgroup_info[3:-4]
# print(vgroup_info)
for ind ,role in enumerate(leader_infos):
if role =='follower':
@ -236,7 +236,7 @@ class TDTestCase:
return self.stop_dnode_id
def wait_stop_dnode_OK(self):
def _get_status():
newTdSql=tdCom.newTdSql()
@ -249,7 +249,7 @@ class TDTestCase:
if id == self.stop_dnode_id:
status = dnode_status
break
return status
return status
status = _get_status()
while status !="offline":
@ -259,7 +259,7 @@ class TDTestCase:
tdLog.notice("==== stop_dnode has stopped , id is {} ====".format(self.stop_dnode_id))
def wait_start_dnode_OK(self):
def _get_status():
newTdSql=tdCom.newTdSql()
status = ""
@ -271,7 +271,7 @@ class TDTestCase:
if id == self.stop_dnode_id:
status = dnode_status
break
return status
return status
status = _get_status()
while status !="ready":
@ -365,7 +365,7 @@ class TDTestCase:
tdLog.info("%s(%d) failed: sql:%s, queryRows:%d != expect:%d" % args)
check_status = False
return check_status
def sync_run_case(self):
# stop follower and insert datas , update tables and create new stables
tdDnodes=cluster.dnodes
@ -375,19 +375,19 @@ class TDTestCase:
self.create_database(dbname = db_name ,replica_num= self.replica , vgroup_nums= 1)
self.create_stable_insert_datas(dbname = db_name , stablename = stablename , tb_nums= 10 ,row_nums= 10 )
self.stop_dnode_id = self._get_stop_dnode_id(db_name)
# check rows of datas
self.check_insert_rows(db_name ,stablename ,tb_nums=10 , row_nums= 10 ,append_rows=0)
# begin stop dnode
# begin stop dnode
start = time.time()
tdDnodes[self.stop_dnode_id-1].stoptaosd()
self.wait_stop_dnode_OK()
# append rows of stablename when dnode stop
# append rows of stablename when dnode stop
tbname = "sub_{}_{}".format(stablename , 0)
tdLog.notice(" ==== begin append rows of exists table {} when dnode {} offline ====".format(tbname , self.stop_dnode_id))
self.append_rows_of_exists_tables(db_name ,stablename , tbname , 100 )
@ -400,20 +400,20 @@ class TDTestCase:
tdLog.notice(" ==== check new stable {} when dnode {} offline ====".format('new_stb1' , self.stop_dnode_id))
self.check_insert_rows(db_name ,'new_stb1' ,tb_nums=10 , row_nums= 10 ,append_rows=0)
# begin start dnode
# begin start dnode
tdDnodes[self.stop_dnode_id-1].starttaosd()
self.wait_start_dnode_OK()
end = time.time()
time_cost = int(end -start)
if time_cost > self.max_restart_time:
tdLog.exit(" ==== restart dnode {} cost too much time , please check ====".format(self.stop_dnode_id))
# create new stables again
# create new stables again
tdLog.notice(" ==== create new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id))
self.create_stable_insert_datas(dbname = db_name , stablename = 'new_stb2' , tb_nums= 10 ,row_nums= 10 )
tdLog.notice(" ==== check new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id))
self.check_insert_rows(db_name ,'new_stb2' ,tb_nums=10 , row_nums= 10 ,append_rows=0)
def unsync_run_case(self):
def _restart_dnode_of_db_unsync(dbname):
@ -427,18 +427,18 @@ class TDTestCase:
self.wait_start_dnode_OK()
end = time.time()
time_cost = int(end-start)
if time_cost > self.max_restart_time:
tdLog.exit(" ==== restart dnode {} cost too much time , please check ====".format(self.stop_dnode_id))
def _create_threading(dbname):
self.current_thread = threading.Thread(target=_restart_dnode_of_db_unsync, args=(dbname,))
return self.current_thread
'''
in this mode , it will be extra threading control start or stop dnode , insert will always going with not care follower online or alive
in this mode , it will be extra threading control start or stop dnode , insert will always going with not care follower online or alive
'''
tdDnodes=cluster.dnodes
for loop in range(self.loop_restart_times):
@ -449,7 +449,7 @@ class TDTestCase:
tdLog.notice(" ===== restart dnode of database {} in an unsync threading ===== ".format(db_name))
# create sync threading and start it
# create sync threading and start it
self.current_thread = _create_threading(db_name)
self.current_thread.start()
@ -468,7 +468,7 @@ class TDTestCase:
tdLog.notice(" ==== check new stable {} when dnode {} offline ====".format('new_stb1' , self.stop_dnode_id))
self.check_insert_rows(db_name ,'new_stb1' ,tb_nums=10 , row_nums= 10 ,append_rows=0)
# create new stables again
# create new stables again
tdLog.notice(" ==== create new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id))
self.create_stable_insert_datas(dbname = db_name , stablename = 'new_stb2' , tb_nums= 10 ,row_nums= 10 )
tdLog.notice(" ==== check new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id))
@ -477,7 +477,7 @@ class TDTestCase:
self.current_thread.join()
def run(self):
def run(self):
# basic insert and check of cluster
self.check_setup_cluster_status()
@ -485,7 +485,7 @@ class TDTestCase:
self.sync_run_case()
# self.unsync_run_case()
def stop(self):
@ -493,4 +493,4 @@ class TDTestCase:
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
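The wait_stop_dnode_OK / wait_start_dnode_OK hunks above re-run "show dnodes" in a loop until the stopped dnode reports "offline" (or "ready" after restart). A generic sketch of that polling pattern, with a timeout added so a hung dnode fails fast; `get_status` is an assumed callback standing in for the show-dnodes query:

import time

def wait_for_dnode_status(get_status, expected: str, interval: float = 0.1, timeout: float = 30.0) -> None:
    # Poll until the callback reports the expected status or the timeout expires.
    deadline = time.time() + timeout
    status = get_status()
    while status != expected:
        if time.time() > deadline:
            raise TimeoutError(f"dnode status is still {status!r}, expected {expected!r}")
        time.sleep(interval)
        status = get_status()

# usage (hypothetical): wait_for_dnode_status(lambda: query_dnode_status(2), "offline")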

View File

@ -3,7 +3,7 @@ from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
import taos
import sys
import time
import os
import os
from util.log import *
from util.sql import *
@ -30,9 +30,9 @@ class TDTestCase:
self.ts = 1483200000000
self.ts_step =1000
self.db_name ='testdb'
self.replica = 3
self.replica = 3
self.vgroups = 1
self.tb_nums = 10
self.tb_nums = 10
self.row_nums = 100
self.stop_dnode_id = None
self.loop_restart_times = 5
@ -110,7 +110,7 @@ class TDTestCase:
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
'''
)
for i in range(5):
tdSql.execute("create table sub_tb_{} using stb1 tags({})".format(i,i))
tdSql.query("show stables")
@ -142,7 +142,7 @@ class TDTestCase:
tdSql.execute(drop_db_sql)
tdSql.execute(create_db_sql)
tdSql.execute("use {}".format(dbname))
def create_stable_insert_datas(self,dbname ,stablename , tb_nums , row_nums):
tdSql.execute("use {}".format(dbname))
tdSql.execute(
@ -151,7 +151,7 @@ class TDTestCase:
tags (t1 int)
'''.format(stablename)
)
for i in range(tb_nums):
sub_tbname = "sub_{}_{}".format(stablename,i)
tdSql.execute("create table {} using {} tags({})".format(sub_tbname, stablename ,i))
@ -162,11 +162,11 @@ class TDTestCase:
tdSql.execute(f"insert into {sub_tbname} values ({ts}, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
tdLog.notice(" ==== stable {} insert rows execute end =====".format(stablename))
def append_rows_of_exists_tables(self,dbname ,stablename , tbname , append_nums ):
tdSql.execute("use {}".format(dbname))
for row_num in range(append_nums):
tdSql.execute(f"insert into {tbname} values (now, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
# print(f"insert into {tbname} values (now, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
@ -174,9 +174,9 @@ class TDTestCase:
os.system("taos -s 'select count(*) from {}.{}';".format(dbname,stablename))
def check_insert_rows(self, dbname, stablename , tb_nums , row_nums, append_rows):
tdSql.execute("use {}".format(dbname))
tdSql.query("select count(*) from {}.{}".format(dbname,stablename))
while not tdSql.queryResult:
@ -184,8 +184,8 @@ class TDTestCase:
tdSql.query("select count(*) from {}.{}".format(dbname,stablename))
status_OK = self.mycheckData("select count(*) from {}.{}".format(dbname,stablename) ,0 , 0 , tb_nums*row_nums+append_rows)
count = 0
count = 0
while not status_OK :
if count > self.try_check_times:
os.system("taos -s ' show {}.vgroups; '".format(dbname))
@ -199,14 +199,14 @@ class TDTestCase:
status_OK = self.mycheckData("select count(*) from {}.{}".format(dbname,stablename) ,0 , 0 , tb_nums*row_nums+append_rows)
tdLog.notice(" ==== check insert rows first failed , this is {}_th retry check rows of database {}".format(count , dbname))
count += 1
tdSql.query("select distinct tbname from {}.{}".format(dbname,stablename))
while not tdSql.queryResult:
time.sleep(0.1)
tdSql.query("select distinct tbname from {}.{}".format(dbname,stablename))
status_OK = self.mycheckRows("select distinct tbname from {}.{}".format(dbname,stablename) ,tb_nums)
count = 0
count = 0
while not status_OK :
if count > self.try_check_times:
os.system("taos -s ' show {}.vgroups;'".format(dbname))
@ -220,12 +220,12 @@ class TDTestCase:
status_OK = self.mycheckRows("select distinct tbname from {}.{}".format(dbname,stablename) ,tb_nums)
tdLog.notice(" ==== check insert tbnames first failed , this is {}_th retry check tbnames of database {}".format(count , dbname))
count += 1
def _get_stop_dnode_id(self,dbname):
tdSql.query("show {}.vgroups".format(dbname))
vgroup_infos = tdSql.queryResult
for vgroup_info in vgroup_infos:
leader_infos = vgroup_info[3:-4]
leader_infos = vgroup_info[3:-4]
# print(vgroup_info)
for ind ,role in enumerate(leader_infos):
if role =='follower':
@ -237,7 +237,7 @@ class TDTestCase:
return self.stop_dnode_id
def wait_stop_dnode_OK(self):
def _get_status():
newTdSql=tdCom.newTdSql()
@ -250,7 +250,7 @@ class TDTestCase:
if id == self.stop_dnode_id:
status = dnode_status
break
return status
return status
status = _get_status()
while status !="offline":
@ -260,7 +260,7 @@ class TDTestCase:
tdLog.notice("==== stop_dnode has stopped , id is {}".format(self.stop_dnode_id))
def wait_start_dnode_OK(self):
def _get_status():
newTdSql=tdCom.newTdSql()
status = ""
@ -272,7 +272,7 @@ class TDTestCase:
if id == self.stop_dnode_id:
status = dnode_status
break
return status
return status
status = _get_status()
while status !="ready":
@ -366,7 +366,7 @@ class TDTestCase:
tdLog.info("%s(%d) failed: sql:%s, queryRows:%d != expect:%d" % args)
check_status = False
return check_status
def sync_run_case(self):
# stop follower and insert datas , update tables and create new stables
tdDnodes=cluster.dnodes
@ -376,19 +376,19 @@ class TDTestCase:
self.create_database(dbname = db_name ,replica_num= self.replica , vgroup_nums= 1)
self.create_stable_insert_datas(dbname = db_name , stablename = stablename , tb_nums= 10 ,row_nums= 10 )
self.stop_dnode_id = self._get_stop_dnode_id(db_name)
# check rows of datas
self.check_insert_rows(db_name ,stablename ,tb_nums=10 , row_nums= 10 ,append_rows=0)
# begin stop dnode
# begin stop dnode
start = time.time()
tdDnodes[self.stop_dnode_id-1].stoptaosd()
self.wait_stop_dnode_OK()
# append rows of stablename when dnode stop
# append rows of stablename when dnode stop
tbname = "sub_{}_{}".format(stablename , 0)
tdLog.notice(" ==== begin append rows of exists table {} when dnode {} offline ====".format(tbname , self.stop_dnode_id))
self.append_rows_of_exists_tables(db_name ,stablename , tbname , 100 )
@ -401,20 +401,20 @@ class TDTestCase:
tdLog.notice(" ==== check new stable {} when dnode {} offline ====".format('new_stb1' , self.stop_dnode_id))
self.check_insert_rows(db_name ,'new_stb1' ,tb_nums=10 , row_nums= 10 ,append_rows=0)
# begin start dnode
# begin start dnode
tdDnodes[self.stop_dnode_id-1].starttaosd()
self.wait_start_dnode_OK()
end = time.time()
time_cost = int(end -start)
if time_cost > self.max_restart_time:
tdLog.exit(" ==== restart dnode {} cost too much time , please check ====".format(self.stop_dnode_id))
# create new stables again
# create new stables again
tdLog.notice(" ==== create new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id))
self.create_stable_insert_datas(dbname = db_name , stablename = 'new_stb2' , tb_nums= 10 ,row_nums= 10 )
tdLog.notice(" ==== check new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id))
self.check_insert_rows(db_name ,'new_stb2' ,tb_nums=10 , row_nums= 10 ,append_rows=0)
def unsync_run_case(self):
def _restart_dnode_of_db_unsync(dbname):
@ -428,18 +428,18 @@ class TDTestCase:
self.wait_start_dnode_OK()
end = time.time()
time_cost = int(end-start)
if time_cost > self.max_restart_time:
tdLog.exit(" ==== restart dnode {} cost too much time , please check ====".format(self.stop_dnode_id))
def _create_threading(dbname):
self.current_thread = threading.Thread(target=_restart_dnode_of_db_unsync, args=(dbname,))
return self.current_thread
'''
in this mode , it will be extra threading control start or stop dnode , insert will always going with not care follower online or alive
in this mode , it will be extra threading control start or stop dnode , insert will always going with not care follower online or alive
'''
tdDnodes=cluster.dnodes
for loop in range(self.loop_restart_times):
@ -450,7 +450,7 @@ class TDTestCase:
tdLog.notice(" ===== restart dnode of database {} in an unsync threading ===== ".format(db_name))
# create sync threading and start it
# create sync threading and start it
self.current_thread = _create_threading(db_name)
self.current_thread.start()
@ -469,7 +469,7 @@ class TDTestCase:
tdLog.notice(" ==== check new stable {} when dnode {} offline ====".format('new_stb1' , self.stop_dnode_id))
self.check_insert_rows(db_name ,'new_stb1' ,tb_nums=10 , row_nums= 10 ,append_rows=0)
# create new stables again
# create new stables again
tdLog.notice(" ==== create new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id))
self.create_stable_insert_datas(dbname = db_name , stablename = 'new_stb2' , tb_nums= 10 ,row_nums= 10 )
tdLog.notice(" ==== check new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id))
@ -478,7 +478,7 @@ class TDTestCase:
self.current_thread.join()
def run(self):
def run(self):
# basic insert and check of cluster
self.check_setup_cluster_status()
@ -486,7 +486,7 @@ class TDTestCase:
# self.sync_run_case()
self.unsync_run_case()
def stop(self):
@ -494,4 +494,4 @@ class TDTestCase:
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
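The unsync_run_case hunks above put the dnode restart in a separate thread so inserts keep running regardless of whether the follower is online. A small sketch of that control flow; restart_dnode and insert_rows are placeholders for the test's restart thread body and insert/check steps:

import threading

def run_unsync_case(restart_dnode, insert_rows, dbname: str) -> None:
    t = threading.Thread(target=restart_dnode, args=(dbname,))
    t.start()                 # the dnode is stopped and restarted concurrently
    insert_rows(dbname)       # writes continue without waiting for the follower
    t.join()                  # join before verifying row counts, as the case does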

View File

@ -3,7 +3,7 @@ from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
import taos
import sys
import time
import os
import os
from util.log import *
from util.sql import *
@ -30,9 +30,9 @@ class TDTestCase:
self.ts = 1483200000000
self.ts_step =1000
self.db_name ='testdb'
self.replica = 3
self.replica = 3
self.vgroups = 1
self.tb_nums = 10
self.tb_nums = 10
self.row_nums = 100
self.stop_dnode_id = None
self.loop_restart_times = 5
@ -110,7 +110,7 @@ class TDTestCase:
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
'''
)
for i in range(5):
tdSql.execute("create table sub_tb_{} using stb1 tags({})".format(i,i))
tdSql.query("show stables")
@ -142,7 +142,7 @@ class TDTestCase:
tdSql.execute(drop_db_sql)
tdSql.execute(create_db_sql)
tdSql.execute("use {}".format(dbname))
def create_stable_insert_datas(self,dbname ,stablename , tb_nums , row_nums):
tdSql.execute("use {}".format(dbname))
tdSql.execute(
@ -151,7 +151,7 @@ class TDTestCase:
tags (t1 int)
'''.format(stablename)
)
for i in range(tb_nums):
sub_tbname = "sub_{}_{}".format(stablename,i)
tdSql.execute("create table {} using {} tags({})".format(sub_tbname, stablename ,i))
@ -162,11 +162,11 @@ class TDTestCase:
tdSql.execute(f"insert into {sub_tbname} values ({ts}, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
tdLog.notice(" ==== stable {} insert rows execute end =====".format(stablename))
def append_rows_of_exists_tables(self,dbname ,stablename , tbname , append_nums ):
tdSql.execute("use {}".format(dbname))
for row_num in range(append_nums):
tdSql.execute(f"insert into {tbname} values (now, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
# print(f"insert into {tbname} values (now, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
@ -176,7 +176,7 @@ class TDTestCase:
def check_insert_rows(self, dbname, stablename , tb_nums , row_nums, append_rows):
tdSql.execute("use {}".format(dbname))
tdSql.query("select count(*) from {}.{}".format(dbname,stablename))
while not tdSql.queryResult:
@ -184,8 +184,8 @@ class TDTestCase:
tdSql.query("select count(*) from {}.{}".format(dbname,stablename))
status_OK = self.mycheckData("select count(*) from {}.{}".format(dbname,stablename) ,0 , 0 , tb_nums*row_nums+append_rows)
count = 0
count = 0
while not status_OK :
if count > self.try_check_times:
os.system("taos -s ' show {}.vgroups; '".format(dbname))
@ -199,14 +199,14 @@ class TDTestCase:
status_OK = self.mycheckData("select count(*) from {}.{}".format(dbname,stablename) ,0 , 0 , tb_nums*row_nums+append_rows)
tdLog.notice(" ==== check insert rows first failed , this is {}_th retry check rows of database {}".format(count , dbname))
count += 1
tdSql.query("select distinct tbname from {}.{}".format(dbname,stablename))
while not tdSql.queryResult:
time.sleep(0.1)
tdSql.query("select distinct tbname from {}.{}".format(dbname,stablename))
status_OK = self.mycheckRows("select distinct tbname from {}.{}".format(dbname,stablename) ,tb_nums)
count = 0
count = 0
while not status_OK :
if count > self.try_check_times:
os.system("taos -s ' show {}.vgroups;'".format(dbname))
@ -225,7 +225,7 @@ class TDTestCase:
tdSql.query("show {}.vgroups".format(dbname))
vgroup_infos = tdSql.queryResult
for vgroup_info in vgroup_infos:
leader_infos = vgroup_info[3:-4]
leader_infos = vgroup_info[3:-4]
# print(vgroup_info)
for ind ,role in enumerate(leader_infos):
if role =='follower':
@ -237,7 +237,7 @@ class TDTestCase:
return self.stop_dnode_id
def wait_stop_dnode_OK(self):
def _get_status():
newTdSql=tdCom.newTdSql()
@ -250,7 +250,7 @@ class TDTestCase:
if id == self.stop_dnode_id:
status = dnode_status
break
return status
return status
status = _get_status()
while status !="offline":
@ -260,7 +260,7 @@ class TDTestCase:
tdLog.notice("==== stop_dnode has stopped , id is {}".format(self.stop_dnode_id))
def wait_start_dnode_OK(self):
def _get_status():
newTdSql=tdCom.newTdSql()
status = ""
@ -272,7 +272,7 @@ class TDTestCase:
if id == self.stop_dnode_id:
status = dnode_status
break
return status
return status
status = _get_status()
while status !="ready":
@ -366,7 +366,7 @@ class TDTestCase:
tdLog.info("%s(%d) failed: sql:%s, queryRows:%d != expect:%d" % args)
check_status = False
return check_status
def sync_run_case(self):
# stop follower and insert datas , update tables and create new stables
tdDnodes=cluster.dnodes
@ -376,19 +376,19 @@ class TDTestCase:
self.create_database(dbname = db_name ,replica_num= self.replica , vgroup_nums= 1)
self.create_stable_insert_datas(dbname = db_name , stablename = stablename , tb_nums= 10 ,row_nums= 10 )
self.stop_dnode_id = self._get_stop_dnode_id(db_name)
# check rows of datas
self.check_insert_rows(db_name ,stablename ,tb_nums=10 , row_nums= 10 ,append_rows=0)
# begin stop dnode
# begin stop dnode
start = time.time()
tdDnodes[self.stop_dnode_id-1].forcestop()
self.wait_stop_dnode_OK()
# append rows of stablename when dnode stop
# append rows of stablename when dnode stop
tbname = "sub_{}_{}".format(stablename , 0)
tdLog.notice(" ==== begin append rows of exists table {} when dnode {} offline ====".format(tbname , self.stop_dnode_id))
self.append_rows_of_exists_tables(db_name ,stablename , tbname , 100 )
@ -401,20 +401,20 @@ class TDTestCase:
tdLog.notice(" ==== check new stable {} when dnode {} offline ====".format('new_stb1' , self.stop_dnode_id))
self.check_insert_rows(db_name ,'new_stb1' ,tb_nums=10 , row_nums= 10 ,append_rows=0)
# begin start dnode
# begin start dnode
tdDnodes[self.stop_dnode_id-1].starttaosd()
self.wait_start_dnode_OK()
end = time.time()
time_cost = int(end -start)
if time_cost > self.max_restart_time:
tdLog.exit(" ==== restart dnode {} cost too much time , please check ====".format(self.stop_dnode_id))
# create new stables again
# create new stables again
tdLog.notice(" ==== create new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id))
self.create_stable_insert_datas(dbname = db_name , stablename = 'new_stb2' , tb_nums= 10 ,row_nums= 10 )
tdLog.notice(" ==== check new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id))
self.check_insert_rows(db_name ,'new_stb2' ,tb_nums=10 , row_nums= 10 ,append_rows=0)
def unsync_run_case(self):
def _restart_dnode_of_db_unsync(dbname):
@ -425,26 +425,26 @@ class TDTestCase:
time.sleep(0.5)
self.stop_dnode_id = self._get_stop_dnode_id(dbname)
# begin restart dnode
# force stop taosd by kill -9
# force stop taosd by kill -9
self.force_stop_dnode(self.stop_dnode_id)
self.wait_stop_dnode_OK()
tdDnodes[self.stop_dnode_id-1].starttaosd()
self.wait_start_dnode_OK()
end = time.time()
time_cost = int(end-start)
if time_cost > self.max_restart_time:
tdLog.exit(" ==== restart dnode {} cost too much time , please check ====".format(self.stop_dnode_id))
def _create_threading(dbname):
self.current_thread = threading.Thread(target=_restart_dnode_of_db_unsync, args=(dbname,))
return self.current_thread
'''
in this mode , it will be extra threading control start or stop dnode , insert will always going with not care follower online or alive
in this mode , it will be extra threading control start or stop dnode , insert will always going with not care follower online or alive
'''
tdDnodes=cluster.dnodes
for loop in range(self.loop_restart_times):
@ -455,7 +455,7 @@ class TDTestCase:
tdLog.notice(" ===== restart dnode of database {} in an unsync threading ===== ".format(db_name))
# create sync threading and start it
# create sync threading and start it
self.current_thread = _create_threading(db_name)
self.current_thread.start()
@ -474,7 +474,7 @@ class TDTestCase:
tdLog.notice(" ==== check new stable {} when dnode {} offline ====".format('new_stb1' , self.stop_dnode_id))
self.check_insert_rows(db_name ,'new_stb1' ,tb_nums=10 , row_nums= 10 ,append_rows=0)
# create new stables again
# create new stables again
tdLog.notice(" ==== create new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id))
self.create_stable_insert_datas(dbname = db_name , stablename = 'new_stb2' , tb_nums= 10 ,row_nums= 10 )
tdLog.notice(" ==== check new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id))
@ -488,7 +488,7 @@ class TDTestCase:
port = None
for dnode_info in tdSql.queryResult:
if dnode_id == dnode_info[0]:
port = dnode_info[1].split(":")[-1]
port = dnode_info[1].split(":")[-1]
break
else:
continue
@ -502,7 +502,7 @@ class TDTestCase:
os.system(ps_kill_taosd)
def run(self):
def run(self):
# basic insert and check of cluster
self.check_setup_cluster_status()
@ -510,7 +510,7 @@ class TDTestCase:
# self.sync_run_case()
self.unsync_run_case()
def stop(self):
@ -518,4 +518,4 @@ class TDTestCase:
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
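This variant force-stops the dnode instead of calling stoptaosd: it reads the dnode's endpoint from "show dnodes", keeps only the port, and kills the matching taosd with SIGKILL. A sketch of that step; the ps/grep pipeline mirrors the test's shell command but is an assumption, not a verbatim copy of the commit:

import os

def force_stop_taosd(endpoint: str) -> None:
    # endpoint comes from `show dnodes`, e.g. "node1:6030"; keep only the port
    port = endpoint.split(":")[-1]
    # assumption: one taosd per port on the host, as in this test environment
    cmd = (
        "ps -ef | grep -wi taosd | grep -v grep "
        "| grep '" + port + "' | awk '{print $2}' | xargs kill -9"
    )
    os.system(cmd)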

View File

@ -3,7 +3,7 @@ from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
import taos
import sys
import time
import os
import os
from util.log import *
from util.sql import *
@ -28,9 +28,9 @@ class TDTestCase:
self.ts = 1483200000000
self.ts_step =1000
self.db_name ='testdb'
self.replica = 3
self.replica = 3
self.vgroups = 1
self.tb_nums = 10
self.tb_nums = 10
self.row_nums = 100
self.stop_dnode_id = None
self.loop_restart_times = 10
@ -182,7 +182,7 @@ class TDTestCase:
newTdSql.query("show {}.vgroups".format(dbname))
vgroup_infos = newTdSql.queryResult
for vgroup_info in vgroup_infos:
leader_infos = vgroup_info[3:-4]
leader_infos = vgroup_info[3:-4]
# print(vgroup_info)
for ind ,role in enumerate(leader_infos):
if role =='leader':
@ -194,7 +194,7 @@ class TDTestCase:
return self.stop_dnode_id
def wait_stop_dnode_OK(self):
def _get_status():
newTdSql=tdCom.newTdSql()
@ -207,7 +207,7 @@ class TDTestCase:
if id == self.stop_dnode_id:
status = dnode_status
break
return status
return status
status = _get_status()
while status !="offline":
@ -217,7 +217,7 @@ class TDTestCase:
tdLog.notice("==== stop_dnode has stopped , id is {}".format(self.stop_dnode_id))
def wait_start_dnode_OK(self):
def _get_status():
newTdSql=tdCom.newTdSql()
status = ""
@ -229,7 +229,7 @@ class TDTestCase:
if id == self.stop_dnode_id:
status = dnode_status
break
return status
return status
status = _get_status()
while status !="ready":
@ -239,7 +239,7 @@ class TDTestCase:
tdLog.notice("==== stop_dnode has restart , id is {}".format(self.stop_dnode_id))
def get_leader_infos(self ,dbname):
newTdSql=tdCom.newTdSql()
newTdSql.query("show {}.vgroups".format(dbname))
vgroup_infos = newTdSql.queryResult
@ -259,7 +259,7 @@ class TDTestCase:
tdLog.exit(" ===maybe revote not occured , there is no dnode offline ====")
else:
for vgroup_info in vote_act:
for ind , role in enumerate(vgroup_info):
for ind , role in enumerate(vgroup_info):
if role==self.stop_dnode_id:
if vgroup_info[ind+1] =="offline" and "leader" in vgroup_info:
@ -276,25 +276,25 @@ class TDTestCase:
benchmark_build_path = self.getBuildPath() + '/build/bin/taosBenchmark'
tdLog.notice("==== start taosBenchmark insert datas of database {} ==== ".format(dbname))
os.system(" {} -f {} >>/dev/null 2>&1 ".format(benchmark_build_path , json_file))
def stop_leader_when_Benchmark_inserts(self,dbname , total_rows , json_file ):
# stop follower and insert datas , update tables and create new stables
tdDnodes=cluster.dnodes
tdSql.execute(" drop database if exists {} ".format(dbname))
tdSql.execute(" create database {} replica {} vgroups {}".format(dbname , self.replica , self.vgroups))
# start insert datas using taosBenchmark ,expect insert 10000 rows
# start insert datas using taosBenchmark ,expect insert 10000 rows
self.current_thread = threading.Thread(target=self.start_benchmark_inserts, args=(dbname,json_file))
self.current_thread.start()
tdSql.query(" show databases ")
# make sure create database ok
# make sure create database ok
while (tdSql.queryRows!=3):
time.sleep(0.5)
tdSql.query(" show databases ")
# # make sure create stable ok
# # make sure create stable ok
tdSql.query(" show {}.stables ".format(dbname))
while (tdSql.queryRows!=1):
time.sleep(0.5)
@ -313,14 +313,14 @@ class TDTestCase:
tdLog.debug(" === current insert {} rows in database {} === ".format(tdSql.queryResult[0][0] , dbname))
time.sleep(0.01)
tdSql.query(" select count(*) from {}.{} ".format(dbname,"stb1"))
tdLog.debug(" === database {} has write {} rows at least ====".format(dbname,total_rows/10))
self.stop_dnode_id = self._get_stop_dnode_id(dbname)
# prepare stop leader of database
# prepare stop leader of database
before_leader_infos = self.get_leader_infos(dbname)
tdDnodes[self.stop_dnode_id-1].stoptaosd()
# self.current_thread.join()
after_leader_infos = self.get_leader_infos(dbname)
@ -331,7 +331,7 @@ class TDTestCase:
after_leader_infos = self.get_leader_infos(dbname)
revote_status = self.check_revote_leader_success(dbname ,before_leader_infos , after_leader_infos)
end = time.time()
time_cost = end - start
time_cost = end - start
tdLog.debug(" ==== revote leader of database {} cost time {} ====".format(dbname , time_cost))
self.current_thread.join()
@ -344,7 +344,7 @@ class TDTestCase:
def run(self):
def run(self):
# basic insert and check of cluster
# self.check_setup_cluster_status()
@ -359,4 +359,4 @@ class TDTestCase:
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
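The case above stops the leader while taosBenchmark is inserting and times how long the vgroup needs to elect a new leader. A sketch of that measurement; get_leader_infos, stop_dnode and leaders_changed are assumed callbacks standing in for the show-vgroups queries and the stop call in the hunks:

import time

def measure_revote_seconds(dbname, get_leader_infos, stop_dnode, leaders_changed) -> float:
    before = get_leader_infos(dbname)
    stop_dnode()
    start = time.time()
    after = get_leader_infos(dbname)
    while not leaders_changed(before, after):   # keep polling until a new leader shows up
        time.sleep(0.1)
        after = get_leader_infos(dbname)
    return time.time() - start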

View File

@ -3,7 +3,7 @@ from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
import taos
import sys
import time
import os
import os
from util.log import *
from util.sql import *
@ -28,9 +28,9 @@ class TDTestCase:
self.ts = 1483200000000
self.ts_step =1000
self.db_name ='testdb'
self.replica = 3
self.replica = 3
self.vgroups = 1
self.tb_nums = 10
self.tb_nums = 10
self.row_nums = 100
self.stop_dnode_id = None
self.loop_restart_times = 10
@ -193,7 +193,7 @@ class TDTestCase:
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
'''
)
for i in range(5):
tdSql.execute("create table sub_tb_{} using stb1 tags({})".format(i,i))
tdSql.query("show stables")
@ -225,7 +225,7 @@ class TDTestCase:
tdSql.execute(drop_db_sql)
tdSql.execute(create_db_sql)
tdSql.execute("use {}".format(dbname))
def create_stable_insert_datas(self,dbname ,stablename , tb_nums , row_nums):
tdSql.execute("use {}".format(dbname))
tdSql.execute(
@ -234,8 +234,8 @@ class TDTestCase:
tags (t1 int)
'''.format(stablename)
)
for i in range(tb_nums):
for i in range(tb_nums):
sub_tbname = "sub_{}_{}".format(stablename,i)
tdSql.execute("create table {} using {} tags({})".format(sub_tbname, stablename ,i))
# insert datas about new database
@ -245,11 +245,11 @@ class TDTestCase:
tdSql.execute(f"insert into {sub_tbname} values ({ts}, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
tdLog.notice(" ==== stable {} insert rows execute end =====".format(stablename))
def append_rows_of_exists_tables(self,dbname ,stablename , tbname , append_nums ):
tdSql.execute("use {}".format(dbname))
for row_num in range(append_nums):
tdSql.execute(f"insert into {tbname} values (now, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
# print(f"insert into {tbname} values (now, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
@ -257,9 +257,9 @@ class TDTestCase:
os.system("taos -s 'select count(*) from {}.{}';".format(dbname,stablename))
def check_insert_rows(self, dbname, stablename , tb_nums , row_nums, append_rows):
tdSql.execute("use {}".format(dbname))
tdSql.query("select count(*) from {}.{}".format(dbname,stablename))
while not tdSql.queryResult:
@ -267,8 +267,8 @@ class TDTestCase:
tdSql.query("select count(*) from {}.{}".format(dbname,stablename))
status_OK = self.mycheckData("select count(*) from {}.{}".format(dbname,stablename) ,0 , 0 , tb_nums*row_nums+append_rows)
count = 0
count = 0
while not status_OK :
if count > self.try_check_times:
os.system("taos -s ' show {}.vgroups; '".format(dbname))
@ -282,14 +282,14 @@ class TDTestCase:
status_OK = self.mycheckData("select count(*) from {}.{}".format(dbname,stablename) ,0 , 0 , tb_nums*row_nums+append_rows)
tdLog.notice(" ==== check insert rows first failed , this is {}_th retry check rows of database {} ====".format(count , dbname))
count += 1
tdSql.query("select distinct tbname from {}.{}".format(dbname,stablename))
while not tdSql.queryResult:
time.sleep(0.1)
tdSql.query("select distinct tbname from {}.{}".format(dbname,stablename))
status_OK = self.mycheckRows("select distinct tbname from {}.{}".format(dbname,stablename) ,tb_nums)
count = 0
count = 0
while not status_OK :
if count > self.try_check_times:
os.system("taos -s ' show {}.vgroups;'".format(dbname))
@ -309,7 +309,7 @@ class TDTestCase:
newTdSql.query("show {}.vgroups".format(dbname))
vgroup_infos = newTdSql.queryResult
for vgroup_info in vgroup_infos:
leader_infos = vgroup_info[3:-4]
leader_infos = vgroup_info[3:-4]
# print(vgroup_info)
for ind ,role in enumerate(leader_infos):
if role =='leader':
@ -321,7 +321,7 @@ class TDTestCase:
return self.stop_dnode_id
def wait_stop_dnode_OK(self):
def _get_status():
newTdSql=tdCom.newTdSql()
@ -334,7 +334,7 @@ class TDTestCase:
if id == self.stop_dnode_id:
status = dnode_status
break
return status
return status
status = _get_status()
while status !="offline":
@ -344,7 +344,7 @@ class TDTestCase:
tdLog.notice("==== stop_dnode has stopped , id is {}".format(self.stop_dnode_id))
def wait_start_dnode_OK(self):
def _get_status():
newTdSql=tdCom.newTdSql()
status = ""
@ -356,7 +356,7 @@ class TDTestCase:
if id == self.stop_dnode_id:
status = dnode_status
break
return status
return status
status = _get_status()
while status !="ready":
@ -366,7 +366,7 @@ class TDTestCase:
tdLog.notice("==== stop_dnode has restart , id is {}".format(self.stop_dnode_id))
def get_leader_infos(self ,dbname):
newTdSql=tdCom.newTdSql()
newTdSql.query("show {}.vgroups".format(dbname))
vgroup_infos = newTdSql.queryResult
@ -386,7 +386,7 @@ class TDTestCase:
tdLog.exit(" ===maybe revote not occured , there is no dnode offline ====")
else:
for vgroup_info in vote_act:
for ind , role in enumerate(vgroup_info):
for ind , role in enumerate(vgroup_info):
if role==self.stop_dnode_id:
if vgroup_info[ind+1] =="offline" and "leader" in vgroup_info:
@ -400,12 +400,12 @@ class TDTestCase:
return check_status
def force_stop_dnode(self, dnode_id ):
tdSql.query("show dnodes")
port = None
for dnode_info in tdSql.queryResult:
if dnode_id == dnode_info[0]:
port = dnode_info[1].split(":")[-1]
port = dnode_info[1].split(":")[-1]
break
else:
continue
@ -427,25 +427,25 @@ class TDTestCase:
self.create_database(dbname = db_name ,replica_num= self.replica , vgroup_nums= 1)
self.create_stable_insert_datas(dbname = db_name , stablename = stablename , tb_nums= 10 ,row_nums= 10 )
self.stop_dnode_id = self._get_stop_dnode_id(db_name)
# check rows of datas
self.check_insert_rows(db_name ,stablename ,tb_nums=10 , row_nums= 10 ,append_rows=0)
# get leader info before stop
# get leader info before stop
before_leader_infos = self.get_leader_infos(db_name)
# begin stop dnode
# force stop taosd by kill -9
# begin stop dnode
# force stop taosd by kill -9
self.force_stop_dnode(self.stop_dnode_id)
self.wait_stop_dnode_OK()
# vote leaders check
# get leader info after stop
# get leader info after stop
after_leader_infos = self.get_leader_infos(db_name)
revote_status = self.check_revote_leader_success(db_name ,before_leader_infos , after_leader_infos)
# append rows of stablename when dnode stop make sure revote leaders
@ -470,7 +470,7 @@ class TDTestCase:
else:
tdLog.notice("===== leader of database {} is not ok , append rows fail =====".format(db_name))
# begin start dnode
# begin start dnode
start = time.time()
tdDnodes[self.stop_dnode_id-1].starttaosd()
self.wait_start_dnode_OK()
@ -478,29 +478,29 @@ class TDTestCase:
time_cost = int(end -start)
if time_cost > self.max_restart_time:
tdLog.exit(" ==== restart dnode {} cost too much time , please check ====".format(self.stop_dnode_id))
# create new stables again
# create new stables again
tdLog.notice(" ==== create new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id))
self.create_stable_insert_datas(dbname = db_name , stablename = 'new_stb2' , tb_nums= 10 ,row_nums= 10 )
tdLog.notice(" ==== check new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id))
self.check_insert_rows(db_name ,'new_stb2' ,tb_nums=10 , row_nums= 10 ,append_rows=0)
def unsync_run_case(self):
def _restart_dnode_of_db_unsync(dbname):
tdDnodes=cluster.dnodes
self.stop_dnode_id = self._get_stop_dnode_id(dbname)
# begin restart dnode
# force stop taosd by kill -9
# get leader info before stop
# force stop taosd by kill -9
# get leader info before stop
before_leader_infos = self.get_leader_infos(db_name)
self.force_stop_dnode(self.stop_dnode_id)
self.wait_stop_dnode_OK()
# check revote leader when restart servers
# get leader info after stop
# get leader info after stop
after_leader_infos = self.get_leader_infos(db_name)
revote_status = self.check_revote_leader_success(db_name ,before_leader_infos , after_leader_infos)
# append rows of stablename when dnode stop make sure revote leaders
@ -520,30 +520,30 @@ class TDTestCase:
tdLog.notice(" ==== check new stable {} when dnode {} offline ====".format('new_stb1' , self.stop_dnode_id))
self.check_insert_rows(db_name ,'new_stb1' ,tb_nums=10 , row_nums= 10 ,append_rows=0)
# create new stables again
# create new stables again
tdLog.notice(" ==== create new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id))
self.create_stable_insert_datas(dbname = db_name , stablename = 'new_stb2' , tb_nums= 10 ,row_nums= 10 )
tdLog.notice(" ==== check new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id))
self.check_insert_rows(db_name ,'new_stb2' ,tb_nums=10 , row_nums= 10 ,append_rows=0)
tdDnodes[self.stop_dnode_id-1].starttaosd()
start = time.time()
self.wait_start_dnode_OK()
end = time.time()
time_cost = int(end-start)
if time_cost > self.max_restart_time:
tdLog.exit(" ==== restart dnode {} cost too much time , please check ====".format(self.stop_dnode_id))
def _create_threading(dbname):
self.current_thread = threading.Thread(target=_restart_dnode_of_db_unsync, args=(dbname,))
return self.current_thread
'''
in this mode , it will be extra threading control start or stop dnode , insert will always going with not care follower online or alive
in this mode , it will be extra threading control start or stop dnode , insert will always going with not care follower online or alive
'''
for loop in range(self.loop_restart_times):
db_name = "unsync_db_{}".format(loop)
@ -553,7 +553,7 @@ class TDTestCase:
tdLog.notice(" ===== restart dnode of database {} in an unsync threading ===== ".format(db_name))
# create sync threading and start it
# create sync threading and start it
self.current_thread = _create_threading(db_name)
self.current_thread.start()
@ -564,7 +564,7 @@ class TDTestCase:
self.current_thread.join()
def run(self):
def run(self):
# basic insert and check of cluster
self.check_setup_cluster_status()
@ -577,4 +577,4 @@ class TDTestCase:
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
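check_revote_leader_success in the hunks above scans the vgroup rows after the stop: the stopped dnode id must appear with status "offline" while the same row still contains a "leader" entry. A sketch of that predicate; the interleaved (dnode_id, status) layout of a vgroup row is an assumption inferred from the vgroup_info[3:-4] slicing in the hunks:

def revote_succeeded(vgroup_rows, stopped_dnode_id: int) -> bool:
    for row in vgroup_rows:
        for ind, value in enumerate(row):
            if value == stopped_dnode_id and ind + 1 < len(row):
                # stopped member is offline, but the vgroup still has an elected leader
                if row[ind + 1] == "offline" and "leader" in row:
                    return True
    return False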

View File

@ -3,7 +3,7 @@ from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
import taos
import sys
import time
import os
import os
from util.log import *
from util.sql import *
@ -30,9 +30,9 @@ class TDTestCase:
self.ts = 1483200000000
self.ts_step =1000
self.db_name ='testdb'
self.replica = 3
self.replica = 3
self.vgroups = 1
self.tb_nums = 10
self.tb_nums = 10
self.row_nums = 100
self.stop_dnode_id = None
self.loop_restart_times = 5
@ -40,7 +40,7 @@ class TDTestCase:
self.max_restart_time = 10
self.try_check_times = 10
self.query_times = 100
def getBuildPath(self):
selfPath = os.path.dirname(os.path.realpath(__file__))
@ -103,7 +103,7 @@ class TDTestCase:
tdSql.execute(drop_db_sql)
tdSql.execute(create_db_sql)
tdSql.execute("use {}".format(dbname))
def create_stable_insert_datas(self,dbname ,stablename , tb_nums , row_nums):
tdSql.execute("use {}".format(dbname))
tdSql.execute(
@ -112,7 +112,7 @@ class TDTestCase:
tags (t1 int)
'''.format(stablename)
)
for i in range(tb_nums):
sub_tbname = "sub_{}_{}".format(stablename,i)
tdSql.execute("create table {} using {} tags({})".format(sub_tbname, stablename ,i))
@ -123,11 +123,11 @@ class TDTestCase:
tdSql.execute(f"insert into {sub_tbname} values ({ts}, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
tdLog.notice(" ==== stable {} insert rows execute end =====".format(stablename))
def append_rows_of_exists_tables(self,dbname ,stablename , tbname , append_nums ):
tdSql.execute("use {}".format(dbname))
for row_num in range(append_nums):
tdSql.execute(f"insert into {tbname} values (now, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
# print(f"insert into {tbname} values (now, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
@ -137,7 +137,7 @@ class TDTestCase:
def check_insert_rows(self, dbname, stablename , tb_nums , row_nums, append_rows):
tdSql.execute("use {}".format(dbname))
tdSql.query("select count(*) from {}.{}".format(dbname,stablename))
while not tdSql.queryResult:
@ -145,8 +145,8 @@ class TDTestCase:
tdSql.query("select count(*) from {}.{}".format(dbname,stablename))
status_OK = self.mycheckData("select count(*) from {}.{}".format(dbname,stablename) ,0 , 0 , tb_nums*row_nums+append_rows)
count = 0
count = 0
while not status_OK :
if count > self.try_check_times:
os.system("taos -s ' show {}.vgroups; '".format(dbname))
@ -160,14 +160,14 @@ class TDTestCase:
status_OK = self.mycheckData("select count(*) from {}.{}".format(dbname,stablename) ,0 , 0 , tb_nums*row_nums+append_rows)
tdLog.notice(" ==== check insert rows first failed , this is {}_th retry check rows of database {}".format(count , dbname))
count += 1
tdSql.query("select distinct tbname from {}.{}".format(dbname,stablename))
while not tdSql.queryResult:
time.sleep(0.1)
tdSql.query("select distinct tbname from {}.{}".format(dbname,stablename))
status_OK = self.mycheckRows("select distinct tbname from {}.{}".format(dbname,stablename) ,tb_nums)
count = 0
count = 0
while not status_OK :
if count > self.try_check_times:
os.system("taos -s ' show {}.vgroups;'".format(dbname))
@ -186,7 +186,7 @@ class TDTestCase:
tdSql.query("show {}.vgroups".format(dbname))
vgroup_infos = tdSql.queryResult
for vgroup_info in vgroup_infos:
leader_infos = vgroup_info[3:-4]
leader_infos = vgroup_info[3:-4]
# print(vgroup_info)
for ind ,role in enumerate(leader_infos):
if role =='follower':
@ -198,7 +198,7 @@ class TDTestCase:
return self.stop_dnode_id
def wait_stop_dnode_OK(self):
def _get_status():
newTdSql=tdCom.newTdSql()
@ -211,7 +211,7 @@ class TDTestCase:
if id == self.stop_dnode_id:
status = dnode_status
break
return status
return status
status = _get_status()
while status !="offline":
@ -221,7 +221,7 @@ class TDTestCase:
tdLog.notice("==== stop_dnode has stopped , id is {}".format(self.stop_dnode_id))
def wait_start_dnode_OK(self):
def _get_status():
newTdSql=tdCom.newTdSql()
status = ""
@ -233,7 +233,7 @@ class TDTestCase:
if id == self.stop_dnode_id:
status = dnode_status
break
return status
return status
status = _get_status()
while status !="ready":
@ -327,7 +327,7 @@ class TDTestCase:
tdLog.info("%s(%d) failed: sql:%s, queryRows:%d != expect:%d" % args)
check_status = False
return check_status
def force_stop_dnode(self, dnode_id ):
@ -335,7 +335,7 @@ class TDTestCase:
port = None
for dnode_info in tdSql.queryResult:
if dnode_id == dnode_info[0]:
port = dnode_info[1].split(":")[-1]
port = dnode_info[1].split(":")[-1]
break
else:
continue
@ -349,9 +349,9 @@ class TDTestCase:
os.system(ps_kill_taosd)
def basic_query_task(self,dbname ,stablename):
sql = "select * from {}.{} ;".format(dbname , stablename)
count = 0
while count < self.query_times:
os.system(''' taos -s '{}' >>/dev/null '''.format(sql))
@ -364,18 +364,18 @@ class TDTestCase:
self.thread_list.append(task)
for thread in self.thread_list:
thread.start()
return self.thread_list
def stop_follower_when_query_going(self):
tdDnodes = cluster.dnodes
self.create_database(dbname = self.db_name ,replica_num= self.replica , vgroup_nums= 1)
self.create_stable_insert_datas(dbname = self.db_name , stablename = "stb1" , tb_nums= self.tb_nums ,row_nums= self.row_nums)
# let query task start
# let query task start
self.thread_list = self.multi_thread_query_task(10 ,self.db_name ,'stb1' )
# force stop follower
@ -390,22 +390,22 @@ class TDTestCase:
self.wait_start_dnode_OK()
end = time.time()
time_cost = int(end-start)
if time_cost > self.max_restart_time:
tdLog.exit(" ==== restart dnode {} cost too much time , please check ====".format(self.stop_dnode_id))
for thread in self.thread_list:
thread.join()
def run(self):
def run(self):
# basic check of cluster
self.check_setup_cluster_status()
self.stop_follower_when_query_going()
def stop(self):
@ -413,4 +413,4 @@ class TDTestCase:
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
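The query-load hunks above (basic_query_task / multi_thread_query_task) run the taos CLI from several threads while a follower is stopped and restarted. A sketch of that load generator; worker count and query_times follow the values in the hunks, the helper itself is illustrative:

import os
import threading

def start_query_threads(dbname: str, stable: str = "stb1", workers: int = 10, query_times: int = 100):
    def worker():
        sql = f"select * from {dbname}.{stable} ;"
        for _ in range(query_times):
            os.system(f""" taos -s '{sql}' >>/dev/null """)
    threads = [threading.Thread(target=worker) for _ in range(workers)]
    for t in threads:
        t.start()
    return threads   # the caller joins them after the dnode restart finishes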

View File

@ -3,7 +3,7 @@ from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
import taos
import sys
import time
import os
import os
from util.log import *
from util.sql import *
@ -30,9 +30,9 @@ class TDTestCase:
self.ts = 1483200000000
self.ts_step =1000
self.db_name ='testdb'
self.replica = 3
self.replica = 3
self.vgroups = 1
self.tb_nums = 10
self.tb_nums = 10
self.row_nums = 100
self.stop_dnode_id = None
self.loop_restart_times = 5
@ -40,7 +40,7 @@ class TDTestCase:
self.max_restart_time = 10
self.try_check_times = 10
self.query_times = 100
def getBuildPath(self):
selfPath = os.path.dirname(os.path.realpath(__file__))
@ -103,7 +103,7 @@ class TDTestCase:
tdSql.execute(drop_db_sql)
tdSql.execute(create_db_sql)
tdSql.execute("use {}".format(dbname))
def create_stable_insert_datas(self,dbname ,stablename , tb_nums , row_nums):
tdSql.execute("use {}".format(dbname))
tdSql.execute(
@ -112,7 +112,7 @@ class TDTestCase:
tags (t1 int)
'''.format(stablename)
)
for i in range(tb_nums):
sub_tbname = "sub_{}_{}".format(stablename,i)
tdSql.execute("create table {} using {} tags({})".format(sub_tbname, stablename ,i))
@ -123,11 +123,11 @@ class TDTestCase:
tdSql.execute(f"insert into {sub_tbname} values ({ts}, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
tdLog.notice(" ==== stable {} insert rows execute end =====".format(stablename))
def append_rows_of_exists_tables(self,dbname ,stablename , tbname , append_nums ):
tdSql.execute("use {}".format(dbname))
for row_num in range(append_nums):
tdSql.execute(f"insert into {tbname} values (now, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
# print(f"insert into {tbname} values (now, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
@ -137,7 +137,7 @@ class TDTestCase:
def check_insert_rows(self, dbname, stablename , tb_nums , row_nums, append_rows):
tdSql.execute("use {}".format(dbname))
tdSql.query("select count(*) from {}.{}".format(dbname,stablename))
while not tdSql.queryResult:
@ -145,8 +145,8 @@ class TDTestCase:
tdSql.query("select count(*) from {}.{}".format(dbname,stablename))
status_OK = self.mycheckData("select count(*) from {}.{}".format(dbname,stablename) ,0 , 0 , tb_nums*row_nums+append_rows)
count = 0
count = 0
while not status_OK :
if count > self.try_check_times:
os.system("taos -s ' show {}.vgroups; '".format(dbname))
@ -160,14 +160,14 @@ class TDTestCase:
status_OK = self.mycheckData("select count(*) from {}.{}".format(dbname,stablename) ,0 , 0 , tb_nums*row_nums+append_rows)
tdLog.notice(" ==== check insert rows first failed , this is {}_th retry check rows of database {}".format(count , dbname))
count += 1
tdSql.query("select distinct tbname from {}.{}".format(dbname,stablename))
while not tdSql.queryResult:
time.sleep(0.1)
tdSql.query("select distinct tbname from {}.{}".format(dbname,stablename))
status_OK = self.mycheckRows("select distinct tbname from {}.{}".format(dbname,stablename) ,tb_nums)
count = 0
count = 0
while not status_OK :
if count > self.try_check_times:
os.system("taos -s ' show {}.vgroups;'".format(dbname))
@ -186,7 +186,7 @@ class TDTestCase:
tdSql.query("show {}.vgroups".format(dbname))
vgroup_infos = tdSql.queryResult
for vgroup_info in vgroup_infos:
leader_infos = vgroup_info[3:-4]
leader_infos = vgroup_info[3:-4]
# print(vgroup_info)
for ind ,role in enumerate(leader_infos):
if role =='follower':
@ -198,7 +198,7 @@ class TDTestCase:
return self.stop_dnode_id
def wait_stop_dnode_OK(self):
def _get_status():
newTdSql=tdCom.newTdSql()
@ -211,7 +211,7 @@ class TDTestCase:
if id == self.stop_dnode_id:
status = dnode_status
break
return status
return status
status = _get_status()
while status !="offline":
@ -221,7 +221,7 @@ class TDTestCase:
tdLog.notice("==== stop_dnode has stopped , id is {}".format(self.stop_dnode_id))
def wait_start_dnode_OK(self):
def _get_status():
newTdSql=tdCom.newTdSql()
status = ""
@ -233,7 +233,7 @@ class TDTestCase:
if id == self.stop_dnode_id:
status = dnode_status
break
return status
return status
status = _get_status()
while status !="ready":
@ -327,7 +327,7 @@ class TDTestCase:
tdLog.info("%s(%d) failed: sql:%s, queryRows:%d != expect:%d" % args)
check_status = False
return check_status
def force_stop_dnode(self, dnode_id ):
@ -335,7 +335,7 @@ class TDTestCase:
port = None
for dnode_info in tdSql.queryResult:
if dnode_id == dnode_info[0]:
port = dnode_info[1].split(":")[-1]
port = dnode_info[1].split(":")[-1]
break
else:
continue
@ -349,9 +349,9 @@ class TDTestCase:
os.system(ps_kill_taosd)
def basic_query_task(self,dbname ,stablename):
sql = "select * from {}.{} ;".format(dbname , stablename)
count = 0
while count < self.query_times:
os.system(''' taos -s '{}' >>/dev/null '''.format(sql))
@ -364,18 +364,18 @@ class TDTestCase:
self.thread_list.append(task)
for thread in self.thread_list:
thread.start()
return self.thread_list
def stop_follower_when_query_going(self):
tdDnodes = cluster.dnodes
self.create_database(dbname = self.db_name ,replica_num= self.replica , vgroup_nums= 1)
self.create_stable_insert_datas(dbname = self.db_name , stablename = "stb1" , tb_nums= self.tb_nums ,row_nums= self.row_nums)
# let query task start
# let query task start
self.thread_list = self.multi_thread_query_task(10 ,self.db_name ,'stb1' )
# force stop follower
@ -390,22 +390,22 @@ class TDTestCase:
self.wait_start_dnode_OK()
end = time.time()
time_cost = int(end-start)
if time_cost > self.max_restart_time:
tdLog.exit(" ==== restart dnode {} cost too much time , please check ====".format(self.stop_dnode_id))
for thread in self.thread_list:
thread.join()
def run(self):
def run(self):
# basic check of cluster
self.check_setup_cluster_status()
self.stop_follower_when_query_going()
def stop(self):
@ -413,4 +413,4 @@ class TDTestCase:
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
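Each restart in these cases is held to a time budget: the stop/start cycle is timed with time.time() and the case exits with an error when it exceeds max_restart_time (10 seconds here). A compact sketch of that check; stop and start are placeholders for stoptaosd()/starttaosd() plus the status waits:

import time

def restart_within_budget(stop, start, max_restart_time: int = 10) -> bool:
    t0 = time.time()
    stop()
    start()
    cost = int(time.time() - t0)
    return cost <= max_restart_time   # False means the restart took too long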

View File

@ -3,7 +3,7 @@ from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
import taos
import sys
import time
import os
import os
from util.log import *
from util.sql import *
@ -30,9 +30,9 @@ class TDTestCase:
self.ts = 1483200000000
self.ts_step =1000
self.db_name ='testdb'
self.replica = 3
self.replica = 3
self.vgroups = 1
self.tb_nums = 10
self.tb_nums = 10
self.row_nums = 100
self.stop_dnode_id = None
self.loop_restart_times = 5
@ -40,7 +40,7 @@ class TDTestCase:
self.max_restart_time = 10
self.try_check_times = 10
self.query_times = 100
def getBuildPath(self):
selfPath = os.path.dirname(os.path.realpath(__file__))
@ -103,7 +103,7 @@ class TDTestCase:
tdSql.execute(drop_db_sql)
tdSql.execute(create_db_sql)
tdSql.execute("use {}".format(dbname))
def create_stable_insert_datas(self,dbname ,stablename , tb_nums , row_nums):
tdSql.execute("use {}".format(dbname))
tdSql.execute(
@ -112,7 +112,7 @@ class TDTestCase:
tags (t1 int)
'''.format(stablename)
)
for i in range(tb_nums):
sub_tbname = "sub_{}_{}".format(stablename,i)
tdSql.execute("create table {} using {} tags({})".format(sub_tbname, stablename ,i))
@ -123,11 +123,11 @@ class TDTestCase:
tdSql.execute(f"insert into {sub_tbname} values ({ts}, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
tdLog.notice(" ==== stable {} insert rows execute end =====".format(stablename))
def append_rows_of_exists_tables(self,dbname ,stablename , tbname , append_nums ):
tdSql.execute("use {}".format(dbname))
for row_num in range(append_nums):
tdSql.execute(f"insert into {tbname} values (now, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
# print(f"insert into {tbname} values (now, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
@ -137,7 +137,7 @@ class TDTestCase:
def check_insert_rows(self, dbname, stablename , tb_nums , row_nums, append_rows):
tdSql.execute("use {}".format(dbname))
tdSql.query("select count(*) from {}.{}".format(dbname,stablename))
while not tdSql.queryResult:
@ -145,8 +145,8 @@ class TDTestCase:
tdSql.query("select count(*) from {}.{}".format(dbname,stablename))
status_OK = self.mycheckData("select count(*) from {}.{}".format(dbname,stablename) ,0 , 0 , tb_nums*row_nums+append_rows)
count = 0
count = 0
while not status_OK :
if count > self.try_check_times:
os.system("taos -s ' show {}.vgroups; '".format(dbname))
@ -160,14 +160,14 @@ class TDTestCase:
status_OK = self.mycheckData("select count(*) from {}.{}".format(dbname,stablename) ,0 , 0 , tb_nums*row_nums+append_rows)
tdLog.notice(" ==== check insert rows first failed , this is {}_th retry check rows of database {}".format(count , dbname))
count += 1
tdSql.query("select distinct tbname from {}.{}".format(dbname,stablename))
while not tdSql.queryResult:
time.sleep(0.1)
tdSql.query("select distinct tbname from {}.{}".format(dbname,stablename))
status_OK = self.mycheckRows("select distinct tbname from {}.{}".format(dbname,stablename) ,tb_nums)
count = 0
count = 0
while not status_OK :
if count > self.try_check_times:
os.system("taos -s ' show {}.vgroups;'".format(dbname))
@ -186,7 +186,7 @@ class TDTestCase:
tdSql.query("show {}.vgroups".format(dbname))
vgroup_infos = tdSql.queryResult
for vgroup_info in vgroup_infos:
leader_infos = vgroup_info[3:-4]
leader_infos = vgroup_info[3:-4]
# print(vgroup_info)
for ind ,role in enumerate(leader_infos):
if role =='leader':
@ -198,7 +198,7 @@ class TDTestCase:
return self.stop_dnode_id
def wait_stop_dnode_OK(self):
def _get_status():
newTdSql=tdCom.newTdSql()
@ -211,7 +211,7 @@ class TDTestCase:
if id == self.stop_dnode_id:
status = dnode_status
break
return status
return status
status = _get_status()
while status !="offline":
@ -229,7 +229,7 @@ class TDTestCase:
tdLog.info(" ===maybe revote not occured , there is no dnode offline ====")
else:
for vgroup_info in vote_act:
for ind , role in enumerate(vgroup_info):
for ind , role in enumerate(vgroup_info):
if role==self.stop_dnode_id:
if vgroup_info[ind+1] =="offline" and "leader" in vgroup_info:
@ -243,7 +243,7 @@ class TDTestCase:
return check_status
def wait_start_dnode_OK(self):
def _get_status():
newTdSql=tdCom.newTdSql()
status = ""
@ -255,7 +255,7 @@ class TDTestCase:
if id == self.stop_dnode_id:
status = dnode_status
break
return status
return status
status = _get_status()
while status !="ready":
@ -349,10 +349,10 @@ class TDTestCase:
tdLog.info("%s(%d) failed: sql:%s, queryRows:%d != expect:%d" % args)
check_status = False
return check_status
def get_leader_infos(self ,dbname):
newTdSql=tdCom.newTdSql()
newTdSql.query("show {}.vgroups".format(dbname))
vgroup_infos = newTdSql.queryResult
@ -369,7 +369,7 @@ class TDTestCase:
port = None
for dnode_info in tdSql.queryResult:
if dnode_id == dnode_info[0]:
port = dnode_info[1].split(":")[-1]
break
else:
continue
@ -383,9 +383,9 @@ class TDTestCase:
os.system(ps_kill_taosd)
def basic_query_task(self,dbname ,stablename):
sql = "select * from {}.{} ;".format(dbname , stablename)
count = 0
while count < self.query_times:
os.system(''' taos -s '{}' >>/dev/null '''.format(sql))
@ -398,35 +398,35 @@ class TDTestCase:
self.thread_list.append(task)
for thread in self.thread_list:
thread.start()
return self.thread_list
def stop_follower_when_query_going(self):
tdDnodes = cluster.dnodes
self.create_database(dbname = self.db_name ,replica_num= self.replica , vgroup_nums= 1)
self.create_stable_insert_datas(dbname = self.db_name , stablename = "stb1" , tb_nums= self.tb_nums ,row_nums= self.row_nums)
# let query task start
self.thread_list = self.multi_thread_query_task(10 ,self.db_name ,'stb1' )
# force stop follower
for loop in range(self.loop_restart_times):
tdLog.debug(" ==== this is {}_th restart follower of database {} ==== ".format(loop ,self.db_name))
# get leader info before stop
before_leader_infos = self.get_leader_infos(self.db_name)
self.stop_dnode_id = self._get_stop_dnode_id(self.db_name)
tdDnodes[self.stop_dnode_id-1].stoptaosd()
start = time.time()
# get leader info after stop
after_leader_infos = self.get_leader_infos(self.db_name)
revote_status = self.check_revote_leader_success(self.db_name ,before_leader_infos , after_leader_infos)
while not revote_status:
@ -434,7 +434,7 @@ class TDTestCase:
revote_status = self.check_revote_leader_success(self.db_name ,before_leader_infos , after_leader_infos)
end = time.time()
time_cost = end - start
tdLog.debug(" ==== revote leader of database {} cost time {} ====".format(self.db_name , time_cost))
self.wait_stop_dnode_OK()
@ -444,22 +444,22 @@ class TDTestCase:
self.wait_start_dnode_OK()
end = time.time()
time_cost = int(end-start)
if time_cost > self.max_restart_time:
tdLog.exit(" ==== restart dnode {} cost too much time , please check ====".format(self.stop_dnode_id))
for thread in self.thread_list:
thread.join()
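A rough standalone equivalent of the query workload the test keeps running while the follower restarts, assuming the taos CLI is on PATH; the database and table names are just the ones this test happens to use.

import subprocess
import threading

def run_query_loop(sql, times=100):
    # mirror basic_query_task: push the same query through the CLI over and over
    for _ in range(times):
        subprocess.run(["taos", "-s", sql], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)

threads = [threading.Thread(target=run_query_loop, args=("select * from testdb.stb1 ;",))
           for _ in range(10)]
for t in threads:
    t.start()
# ...stop and restart a dnode here while the background queries keep running...
for t in threads:
    t.join()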
def run(self):
# basic check of cluster
self.check_setup_cluster_status()
self.stop_follower_when_query_going()
def stop(self):
@ -467,4 +467,4 @@ class TDTestCase:
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())

View File

@ -3,7 +3,7 @@ from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
import taos
import sys
import time
import os
from util.log import *
from util.sql import *
@ -30,9 +30,9 @@ class TDTestCase:
self.ts = 1483200000000
self.ts_step =1000
self.db_name ='testdb'
self.replica = 3
self.vgroups = 1
self.tb_nums = 10
self.row_nums = 100
self.stop_dnode_id = None
self.loop_restart_times = 5
@ -40,7 +40,7 @@ class TDTestCase:
self.max_restart_time = 10
self.try_check_times = 10
self.query_times = 100
def getBuildPath(self):
selfPath = os.path.dirname(os.path.realpath(__file__))
@ -103,7 +103,7 @@ class TDTestCase:
tdSql.execute(drop_db_sql)
tdSql.execute(create_db_sql)
tdSql.execute("use {}".format(dbname))
def create_stable_insert_datas(self,dbname ,stablename , tb_nums , row_nums):
tdSql.execute("use {}".format(dbname))
tdSql.execute(
@ -112,7 +112,7 @@ class TDTestCase:
tags (t1 int)
'''.format(stablename)
)
for i in range(tb_nums):
sub_tbname = "sub_{}_{}".format(stablename,i)
tdSql.execute("create table {} using {} tags({})".format(sub_tbname, stablename ,i))
@ -123,11 +123,11 @@ class TDTestCase:
tdSql.execute(f"insert into {sub_tbname} values ({ts}, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
tdLog.notice(" ==== stable {} insert rows execute end =====".format(stablename))
def append_rows_of_exists_tables(self,dbname ,stablename , tbname , append_nums ):
tdSql.execute("use {}".format(dbname))
for row_num in range(append_nums):
tdSql.execute(f"insert into {tbname} values (now, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
# print(f"insert into {tbname} values (now, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
@ -137,7 +137,7 @@ class TDTestCase:
def check_insert_rows(self, dbname, stablename , tb_nums , row_nums, append_rows):
tdSql.execute("use {}".format(dbname))
tdSql.query("select count(*) from {}.{}".format(dbname,stablename))
while not tdSql.queryResult:
@ -145,8 +145,8 @@ class TDTestCase:
tdSql.query("select count(*) from {}.{}".format(dbname,stablename))
status_OK = self.mycheckData("select count(*) from {}.{}".format(dbname,stablename) ,0 , 0 , tb_nums*row_nums+append_rows)
count = 0
while not status_OK :
if count > self.try_check_times:
os.system("taos -s ' show {}.vgroups; '".format(dbname))
@ -160,14 +160,14 @@ class TDTestCase:
status_OK = self.mycheckData("select count(*) from {}.{}".format(dbname,stablename) ,0 , 0 , tb_nums*row_nums+append_rows)
tdLog.notice(" ==== check insert rows first failed , this is {}_th retry check rows of database {}".format(count , dbname))
count += 1
tdSql.query("select distinct tbname from {}.{}".format(dbname,stablename))
while not tdSql.queryResult:
time.sleep(0.1)
tdSql.query("select distinct tbname from {}.{}".format(dbname,stablename))
status_OK = self.mycheckRows("select distinct tbname from {}.{}".format(dbname,stablename) ,tb_nums)
count = 0
while not status_OK :
if count > self.try_check_times:
os.system("taos -s ' show {}.vgroups;'".format(dbname))
@ -186,7 +186,7 @@ class TDTestCase:
tdSql.query("show {}.vgroups".format(dbname))
vgroup_infos = tdSql.queryResult
for vgroup_info in vgroup_infos:
leader_infos = vgroup_info[3:-4]
# print(vgroup_info)
for ind ,role in enumerate(leader_infos):
if role =='leader':
@ -198,7 +198,7 @@ class TDTestCase:
return self.stop_dnode_id
def wait_stop_dnode_OK(self):
def _get_status():
newTdSql=tdCom.newTdSql()
@ -211,7 +211,7 @@ class TDTestCase:
if id == self.stop_dnode_id:
status = dnode_status
break
return status
status = _get_status()
while status !="offline":
@ -229,7 +229,7 @@ class TDTestCase:
tdLog.info(" ===maybe revote not occured , there is no dnode offline ====")
else:
for vgroup_info in vote_act:
for ind , role in enumerate(vgroup_info):
if role==self.stop_dnode_id:
if vgroup_info[ind+1] =="offline" and "leader" in vgroup_info:
@ -243,7 +243,7 @@ class TDTestCase:
return check_status
def wait_start_dnode_OK(self):
def _get_status():
newTdSql=tdCom.newTdSql()
status = ""
@ -255,7 +255,7 @@ class TDTestCase:
if id == self.stop_dnode_id:
status = dnode_status
break
return status
status = _get_status()
while status !="ready":
@ -349,10 +349,10 @@ class TDTestCase:
tdLog.info("%s(%d) failed: sql:%s, queryRows:%d != expect:%d" % args)
check_status = False
return check_status
def get_leader_infos(self ,dbname):
newTdSql=tdCom.newTdSql()
newTdSql.query("show {}.vgroups".format(dbname))
vgroup_infos = newTdSql.queryResult
@ -369,7 +369,7 @@ class TDTestCase:
port = None
for dnode_info in tdSql.queryResult:
if dnode_id == dnode_info[0]:
port = dnode_info[1].split(":")[-1]
break
else:
continue
@ -383,9 +383,9 @@ class TDTestCase:
os.system(ps_kill_taosd)
def basic_query_task(self,dbname ,stablename):
sql = "select * from {}.{} ;".format(dbname , stablename)
count = 0
while count < self.query_times:
os.system(''' taos -s '{}' >>/dev/null '''.format(sql))
@ -398,35 +398,35 @@ class TDTestCase:
self.thread_list.append(task)
for thread in self.thread_list:
thread.start()
return self.thread_list
def stop_follower_when_query_going(self):
tdDnodes = cluster.dnodes
self.create_database(dbname = self.db_name ,replica_num= self.replica , vgroup_nums= 1)
self.create_stable_insert_datas(dbname = self.db_name , stablename = "stb1" , tb_nums= self.tb_nums ,row_nums= self.row_nums)
# let query task start
self.thread_list = self.multi_thread_query_task(10 ,self.db_name ,'stb1' )
# force stop follower
for loop in range(self.loop_restart_times):
tdLog.debug(" ==== this is {}_th restart follower of database {} ==== ".format(loop ,self.db_name))
# get leader info before stop
before_leader_infos = self.get_leader_infos(self.db_name)
self.stop_dnode_id = self._get_stop_dnode_id(self.db_name)
self.force_stop_dnode(self.stop_dnode_id)
start = time.time()
# get leader info after stop
after_leader_infos = self.get_leader_infos(self.db_name)
revote_status = self.check_revote_leader_success(self.db_name ,before_leader_infos , after_leader_infos)
while not revote_status:
@ -434,7 +434,7 @@ class TDTestCase:
revote_status = self.check_revote_leader_success(self.db_name ,before_leader_infos , after_leader_infos)
end = time.time()
time_cost = end - start
tdLog.debug(" ==== revote leader of database {} cost time {} ====".format(self.db_name , time_cost))
self.wait_stop_dnode_OK()
@ -444,22 +444,22 @@ class TDTestCase:
self.wait_start_dnode_OK()
end = time.time()
time_cost = int(end-start)
if time_cost > self.max_restart_time:
tdLog.exit(" ==== restart dnode {} cost too much time , please check ====".format(self.stop_dnode_id))
for thread in self.thread_list:
thread.join()
def run(self):
# basic check of cluster
self.check_setup_cluster_status()
self.stop_follower_when_query_going()
def stop(self):
@ -467,4 +467,4 @@ class TDTestCase:
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())

View File

@ -3,7 +3,7 @@ from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
import taos
import sys
import time
import os
from util.log import *
from util.sql import *
@ -25,9 +25,9 @@ class TDTestCase:
self.dnode_list = {}
self.ts = 1483200000000
self.db_name ='testdb'
self.replica = 1
self.vgroups = 2
self.tb_nums = 10
self.row_nums = 100
self.max_vote_time_cost = 10 # seconds
@ -101,7 +101,7 @@ class TDTestCase:
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
'''
)
for i in range(5):
tdSql.execute("create table sub_tb_{} using stb1 tags({})".format(i,i))
tdSql.query("show stables")
@ -134,7 +134,7 @@ class TDTestCase:
vgroup_id = vgroup_info[0]
vgroup_status = []
for ind , role in enumerate(vgroup_info[3:-4]):
if ind%2==0:
continue
else:
@ -151,7 +151,7 @@ class TDTestCase:
while not status:
time.sleep(0.1)
status = self.check_vgroups_init_done(dbname)
# tdLog.notice("=== database {} show vgroups vote the leader is in progress ===".format(dbname))
end = time.time()
cost_time = end - start
@ -159,16 +159,16 @@ class TDTestCase:
# os.system("taos -s 'show {}.vgroups;'".format(dbname))
if cost_time >= self.max_vote_time_cost:
tdLog.exit(" ==== database %s vote the leaders cost too large time , cost time is %.3f second ===="%(dbname,cost_time) )
return cost_time
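A hedged sketch of the timing logic this method applies: poll a readiness predicate, measure how long the election takes, and fail once it exceeds the budget (10 seconds here, matching max_vote_time_cost). The helper name is illustrative.

import time

def time_until_ready(is_ready, max_seconds=10.0, interval=0.1):
    # return the elapsed seconds once is_ready() turns True, or raise past the budget
    start = time.time()
    while not is_ready():
        if time.time() - start >= max_seconds:
            raise TimeoutError("leader election took longer than %.1f seconds" % max_seconds)
        time.sleep(interval)
    return time.time() - start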
def test_init_vgroups_time_costs(self):
tdLog.notice(" ====start check time cost about vgroups vote leaders ==== ")
tdLog.notice(" ==== current max time cost is set value : {} =======".format(self.max_vote_time_cost))
# create database replica 3 vgroups 1
db1 = 'db_1'
create_db_replica_3_vgroups_1 = "create database {} replica 3 vgroups 1".format(db1)
@ -189,10 +189,10 @@ class TDTestCase:
tdLog.notice('=======database {} replica 3 vgroups 100 ======'.format(db3))
tdSql.execute(create_db_replica_3_vgroups_100)
self.vote_leader_time_costs(db3)
def run(self):
self.check_setup_cluster_status()
self.test_init_vgroups_time_costs()
@ -203,4 +203,4 @@ class TDTestCase:
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())

View File

@ -4,7 +4,7 @@ from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
import taos
import sys
import time
import os
from util.log import *
from util.sql import *
@ -13,7 +13,7 @@ from util.dnodes import TDDnodes
from util.dnodes import TDDnode
from util.cluster import *
import time
import random
import socket
import subprocess
@ -27,9 +27,9 @@ class TDTestCase:
self.dnode_list = {}
self.ts = 1483200000000
self.db_name ='testdb'
self.replica = 1
self.vgroups = 2
self.tb_nums = 10
self.row_nums = 100
self.max_vote_time_cost = 10 # seconds
self.stop_dnode = None
@ -104,7 +104,7 @@ class TDTestCase:
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
'''
)
for i in range(5):
tdSql.execute("create table sub_tb_{} using stb1 tags({})".format(i,i))
tdSql.query("show stables")
@ -133,7 +133,7 @@ class TDTestCase:
self.stop_dnode = random.sample(only_dnode_list , 1 )[0]
return self.stop_dnode
def check_vgroups_revote_leader(self,dbname):
status = True
@ -145,7 +145,7 @@ class TDTestCase:
vgroup_status = []
vgroups_leader_follower = vgroup_info[3:-4]
for ind , role in enumerate(vgroups_leader_follower):
if ind%2==0:
if role == stop_dnode_id and vgroups_leader_follower[ind+1]=="offline":
tdLog.notice("====== dnode {} has offline , endpoint is {}".format(stop_dnode_id , self.stop_dnode))
@ -174,7 +174,7 @@ class TDTestCase:
if endpoint == self.stop_dnode:
status = dnode_status
break
return status
status = _get_status()
while status !="offline":
@ -184,7 +184,7 @@ class TDTestCase:
tdLog.notice("==== stop_dnode has stopped , endpoint is {}".format(self.stop_dnode))
def wait_start_dnode_OK(self):
def _get_status():
status = ""
@ -196,7 +196,7 @@ class TDTestCase:
if endpoint == self.stop_dnode:
status = dnode_status
break
return status
status = _get_status()
while status !="ready":
@ -205,8 +205,8 @@ class TDTestCase:
# tdLog.notice("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode))
tdLog.notice("==== stop_dnode has restart , endpoint is {}".format(self.stop_dnode))
def random_stop_One_dnode(self):
self.stop_dnode = self._get_stop_dnode()
stop_dnode_id = self.dnode_list[self.stop_dnode][0]
@ -217,7 +217,7 @@ class TDTestCase:
# os.system("taos -s 'show dnodes;'")
def Restart_stop_dnode(self):
tdDnodes=cluster.dnodes
stop_dnode_id = self.dnode_list[self.stop_dnode][0]
tdDnodes[stop_dnode_id-1].starttaosd()
@ -225,7 +225,7 @@ class TDTestCase:
# os.system("taos -s 'show dnodes;'")
def check_vgroups_init_done(self,dbname):
status = True
tdSql.query("show {}.vgroups".format(dbname))
@ -233,7 +233,7 @@ class TDTestCase:
vgroup_id = vgroup_info[0]
vgroup_status = []
for ind , role in enumerate(vgroup_info[3:-4]):
if ind%2==0:
continue
else:
@ -249,7 +249,7 @@ class TDTestCase:
while not status:
time.sleep(0.1)
status = self.check_vgroups_init_done(dbname)
# tdLog.notice("=== database {} show vgroups vote the leader is in progress ===".format(dbname))
end = time.time()
cost_time = end - start
@ -257,10 +257,10 @@ class TDTestCase:
# os.system("taos -s 'show {}.vgroups;'".format(dbname))
if cost_time >= self.max_vote_time_cost:
tdLog.exit(" ==== database %s vote the leaders cost too large time , cost time is %.3f second ===="%(dbname,cost_time) )
return cost_time
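check_vgroups_init_done above walks the [3:-4] slice of each vgroup row and only inspects the odd offsets, which hold the role strings. A standalone sketch of that readiness test, with an illustrative name and a faked row:

def all_vgroups_have_leader(vgroup_rows):
    # a vgroup is initialised once exactly one of its role columns reads 'leader'
    for row in vgroup_rows:
        roles = [val for ind, val in enumerate(row[3:-4]) if ind % 2 == 1]
        if roles.count('leader') != 1:
            return False
    return True

print(all_vgroups_have_leader([(2, 'db', 3, 4, 'leader', 5, 'follower', 6, 'follower', 0, 0, 0, 0)]))  # True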
def revote_leader_time_costs(self,dbname):
start = time.time()
@ -268,7 +268,7 @@ class TDTestCase:
while not status:
time.sleep(0.1)
status = self.check_vgroups_revote_leader(dbname)
# tdLog.notice("=== database {} show vgroups vote the leader is in progress ===".format(dbname))
end = time.time()
cost_time = end - start
@ -276,10 +276,10 @@ class TDTestCase:
# os.system("taos -s 'show {}.vgroups;'".format(dbname))
if cost_time >= self.max_vote_time_cost:
tdLog.exit(" ==== database %s revote the leaders cost too large time , cost time is %.3f second ===="%(dbname,cost_time) )
return cost_time
def exec_revote_action(self,dbname):
tdSql.query("show {}.vgroups".format(dbname))
@ -296,13 +296,13 @@ class TDTestCase:
after_vgroups = set()
for vgroup_info in after_revote:
after_vgroups.add(vgroup_info[3:-4])
vote_act = set(set(after_vgroups)-set(before_vgroups))
if not vote_act:
tdLog.exit(" ===maybe revote not occured , there is no dnode offline ====")
else:
for vgroup_info in vote_act:
for ind , role in enumerate(vgroup_info):
if role==self.dnode_list[self.stop_dnode][0]:
if vgroup_info[ind+1] =="offline" and "leader" in vgroup_info:
@ -322,7 +322,7 @@ class TDTestCase:
tdLog.notice(" ====start check time cost about vgroups vote leaders ==== ")
tdLog.notice(" ==== current max time cost is set value : {} =======".format(self.max_vote_time_cost))
# create database replica 3 vgroups 1
db1 = 'db_1'
create_db_replica_3_vgroups_1 = "create database {} replica 3 vgroups 1".format(db1)
@ -346,13 +346,13 @@ class TDTestCase:
tdSql.execute(create_db_replica_3_vgroups_100)
self.vote_leader_time_costs(db3)
self.exec_revote_action(db3)
def run(self):
self.check_setup_cluster_status()
self.test_init_vgroups_time_costs()
@ -361,4 +361,4 @@ class TDTestCase:
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())

View File

@ -56,7 +56,7 @@ class TDTestCase:
return cur
def create_tables(self,tsql, dbName,vgroups,stbName,ctbNum,rowsPerTbl):
tsql.execute("create database if not exists %s vgroups %d"%(dbName, vgroups))
tsql.execute("create database if not exists %s vgroups %d"%(dbName, vgroups))
tsql.execute("use %s" %dbName)
tsql.execute("create table if not exists %s (ts timestamp, c1 bigint, c2 binary(16)) tags(t1 int)"%stbName)
pre_create = "create table"
@ -69,7 +69,7 @@ class TDTestCase:
sql = pre_create
if sql != pre_create:
tsql.execute(sql)
tdLog.debug("complete to create database[%s], stable[%s] and %d child tables" %(dbName, stbName, ctbNum))
return
@ -96,7 +96,7 @@ class TDTestCase:
tsql.execute(sql)
tdLog.debug("insert data ............ [OK]")
return
def prepareEnv(self, **parameterDict):
print ("input parameters:")
print (parameterDict)
@ -115,7 +115,7 @@ class TDTestCase:
parameterDict["ctbNum"],\
parameterDict["rowsPerTbl"],\
parameterDict["batchNum"],\
parameterDict["startTs"])
parameterDict["startTs"])
return
@ -135,34 +135,34 @@ class TDTestCase:
prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict)
prepareEnvThread.start()
prepareEnvThread.join()
# wait stb ready
while 1:
tdSql.query("show %s.stables"%parameterDict['dbName'])
if tdSql.getRows() == 1:
tdSql.query("show %s.stables"%parameterDict['dbName'])
if tdSql.getRows() == 1:
break
else:
time.sleep(1)
tdLog.info("create topics from super table")
topicFromStb = 'topic_stb_column'
topicFromCtb = 'topic_ctb_column'
tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb, parameterDict['dbName'], parameterDict['stbName']))
tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s_0" %(topicFromCtb, parameterDict['dbName'], parameterDict['stbName']))
time.sleep(1)
tdSql.query("show topics")
#tdSql.checkRows(2)
topic1 = tdSql.getData(0 , 0)
topic2 = tdSql.getData(1 , 0)
tdLog.info("show topics: %s, %s"%(topic1, topic2))
if topic1 != topicFromStb and topic1 != topicFromCtb:
tdLog.exit("topic error1")
tdLog.exit("topic error1")
if topic2 != topicFromStb and topic2 != topicFromCtb:
tdLog.exit("topic error2")
tdLog.exit("topic error2")
tdLog.info("create consume info table and consume result table")
cdbName = parameterDict["dbName"]
tdSql.query("create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int)")
@ -179,7 +179,7 @@ class TDTestCase:
sql = "insert into consumeinfo values "
sql += "(now, %d, '%s', '%s', %d, %d)"%(consumerId, topicList, keyList, expectrowcnt, ifcheckdata)
tdSql.query(sql)
tdLog.info("check stb if there are data")
while 1:
tdSql.query("select count(*) from %s"%parameterDict["stbName"])
@ -190,26 +190,26 @@ class TDTestCase:
break
else:
time.sleep(1)
tdLog.info("start consume processor")
pollDelay = 20
showMsg = 1
showRow = 1
if (platform.system().lower() == 'windows'):
shellCmd = 'mintty -h never -w hide ' + buildPath + '\\build\\bin\\tmq_sim.exe -c ' + cfgPath
shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, parameterDict["dbName"], showMsg, showRow, cdbName)
shellCmd += "> nul 2>&1 &"
shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, parameterDict["dbName"], showMsg, showRow, cdbName)
shellCmd += "> nul 2>&1 &"
else:
shellCmd = 'nohup ' + buildPath + '/build/bin/tmq_sim -c ' + cfgPath
shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, parameterDict["dbName"], showMsg, showRow, cdbName)
shellCmd += "> /dev/null 2>&1 &"
shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, parameterDict["dbName"], showMsg, showRow, cdbName)
shellCmd += "> /dev/null 2>&1 &"
tdLog.info(shellCmd)
os.system(shellCmd)
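For reference, the consumer launch above boils down to one shell string that differs only in launcher and redirection between Windows and Linux; a hedged helper assembled from the same pieces (buildPath and cfgPath are whatever the test resolved earlier):

import os
import platform

def start_consumer(build_path, cfg_path, poll_delay, db_name, show_msg=1, show_row=1, cdb_name='cdb'):
    # same flags the test concatenates: poll delay, database, show switches, consume db
    args = " -y %d -d %s -g %d -r %d -w %s " % (poll_delay, db_name, show_msg, show_row, cdb_name)
    if platform.system().lower() == 'windows':
        cmd = 'mintty -h never -w hide ' + build_path + '\\build\\bin\\tmq_sim.exe -c ' + cfg_path + args + '> nul 2>&1 &'
    else:
        cmd = 'nohup ' + build_path + '/build/bin/tmq_sim -c ' + cfg_path + args + '> /dev/null 2>&1 &'
    os.system(cmd)  # fire and forget; results are read back from the consumeresult table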
# wait for data ready
# prepareEnvThread.join()
tdLog.info("insert process end, and start to check consume result")
while 1:
tdSql.query("select * from consumeresult")
@ -229,7 +229,7 @@ class TDTestCase:
tdSql.query("drop topic %s"%topicFromCtb)
tdLog.printNoPrefix("======== test case 1 end ...... ")
def tmqCase2(self, cfgPath, buildPath):
tdLog.printNoPrefix("======== test case 2: add child table with consuming ")
# create and start thread
@ -246,13 +246,13 @@ class TDTestCase:
prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict)
prepareEnvThread.start()
prepareEnvThread.join()
# wait db ready
while 1:
tdSql.query("show databases")
if tdSql.getRows() == 4:
print ('==================================================')
print (tdSql.getData(0,0), tdSql.getData(1,0),tdSql.getData(2,0))
index = 0
if tdSql.getData(0,0) == parameterDict['dbName']:
index = 0
@ -264,7 +264,7 @@ class TDTestCase:
index = 3
else:
continue
if tdSql.getData(index,15) == 'ready':
print("******************** index: %d"%index)
break
@ -272,12 +272,12 @@ class TDTestCase:
continue
else:
time.sleep(1)
tdSql.query("use %s"%parameterDict['dbName'])
# wait stb ready
while 1:
tdSql.query("show %s.stables"%parameterDict['dbName'])
if tdSql.getRows() == 1:
break
else:
time.sleep(1)
@ -285,20 +285,20 @@ class TDTestCase:
tdLog.info("create topics from super table")
topicFromStb = 'topic_stb_column2'
topicFromCtb = 'topic_ctb_column2'
tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb, parameterDict['dbName'], parameterDict['stbName']))
tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s_0" %(topicFromCtb, parameterDict['dbName'], parameterDict['stbName']))
time.sleep(1)
tdSql.query("show topics")
topic1 = tdSql.getData(0 , 0)
topic2 = tdSql.getData(1 , 0)
tdLog.info("show topics: %s, %s"%(topic1, topic2))
if topic1 != topicFromStb and topic1 != topicFromCtb:
tdLog.exit("topic error1")
tdLog.exit("topic error1")
if topic2 != topicFromStb and topic2 != topicFromCtb:
tdLog.exit("topic error2")
tdLog.exit("topic error2")
tdLog.info("create consume info table and consume result table")
cdbName = parameterDict["dbName"]
tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int)"%cdbName)
@ -316,7 +316,7 @@ class TDTestCase:
sql = "insert into consumeinfo values "
sql += "(now, %d, '%s', '%s', %d, %d)"%(consumerId, topicList, keyList, expectrowcnt, ifcheckdata)
tdSql.query(sql)
tdLog.info("check stb if there are data")
while 1:
tdSql.query("select count(*) from %s"%parameterDict["stbName"])
@ -327,21 +327,21 @@ class TDTestCase:
break
else:
time.sleep(1)
tdLog.info("start consume processor")
pollDelay = 100
showMsg = 1
showRow = 1
if (platform.system().lower() == 'windows'):
shellCmd = 'mintty -h never -w hide ' + buildPath + '\\build\\bin\\tmq_sim.exe -c ' + cfgPath
shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, parameterDict["dbName"], showMsg, showRow, cdbName)
shellCmd += "> nul 2>&1 &"
shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, parameterDict["dbName"], showMsg, showRow, cdbName)
shellCmd += "> nul 2>&1 &"
else:
shellCmd = 'nohup ' + buildPath + '/build/bin/tmq_sim -c ' + cfgPath
shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, parameterDict["dbName"], showMsg, showRow, cdbName)
shellCmd += "> /dev/null 2>&1 &"
shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, parameterDict["dbName"], showMsg, showRow, cdbName)
shellCmd += "> /dev/null 2>&1 &"
tdLog.info(shellCmd)
os.system(shellCmd)
# create new child table and insert data
newCtbName = 'newctb'
@ -354,7 +354,7 @@ class TDTestCase:
# wait for data ready
prepareEnvThread.join()
tdLog.info("insert process end, and start to check consume result")
while 1:
tdSql.query("select * from consumeresult")
@ -366,7 +366,7 @@ class TDTestCase:
tdSql.checkData(0 , 1, consumerId)
tdSql.checkData(0 , 3, expectrowcnt)
tdSql.query("drop topic %s"%topicFromStb)
tdSql.query("drop topic %s"%topicFromCtb)
@ -390,13 +390,13 @@ class TDTestCase:
prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict)
prepareEnvThread.start()
prepareEnvThread.join()
# wait db ready
while 1:
tdSql.query("show databases")
if tdSql.getRows() == 5:
print ('==================================================dbname: %s'%parameterDict['dbName'])
print (tdSql.getData(0,0), tdSql.getData(1,0),tdSql.getData(2,0),tdSql.getData(3,0),tdSql.getData(4,0))
index = 0
if tdSql.getData(0,0) == parameterDict['dbName']:
index = 0
@ -409,8 +409,8 @@ class TDTestCase:
elif tdSql.getData(4,0) == parameterDict['dbName']:
index = 4
else:
continue
if tdSql.getData(index,15) == 'ready':
print("******************** index: %d"%index)
break
@ -418,16 +418,16 @@ class TDTestCase:
continue
else:
time.sleep(1)
tdSql.query("use %s"%parameterDict['dbName'])
# wait stb ready
while 1:
tdSql.query("show %s.stables"%parameterDict['dbName'])
if tdSql.getRows() == 1:
break
else:
time.sleep(1)
tdLog.info("create stable2 for the seconde topic")
parameterDict2 = {'cfg': '', \
'dbName': 'db3', \
@ -439,23 +439,23 @@ class TDTestCase:
'startTs': 1640966400000} # 2022-01-01 00:00:00.000
parameterDict2['cfg'] = cfgPath
tdSql.execute("create stable if not exists %s.%s (ts timestamp, c1 bigint, c2 binary(16)) tags(t1 int)"%(parameterDict2['dbName'], parameterDict2['stbName']))
tdLog.info("create topics from super table")
topicFromStb = 'topic_stb_column3'
topicFromStb2 = 'topic_stb_column32'
tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb, parameterDict['dbName'], parameterDict['stbName']))
tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb2, parameterDict2['dbName'], parameterDict2['stbName']))
tdSql.query("show topics")
topic1 = tdSql.getData(0 , 0)
topic2 = tdSql.getData(1 , 0)
tdLog.info("show topics: %s, %s"%(topic1, topic2))
if topic1 != topicFromStb and topic1 != topicFromStb2:
tdLog.exit("topic error1")
tdLog.exit("topic error1")
if topic2 != topicFromStb and topic2 != topicFromStb2:
tdLog.exit("topic error2")
tdLog.exit("topic error2")
tdLog.info("create consume info table and consume result table")
cdbName = parameterDict["dbName"]
tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int)"%cdbName)
@ -472,7 +472,7 @@ class TDTestCase:
sql = "insert into consumeinfo values "
sql += "(now, %d, '%s', '%s', %d, %d)"%(consumerId, topicList, keyList, expectrowcnt, ifcheckdata)
tdSql.query(sql)
tdLog.info("check stb if there are data")
while 1:
tdSql.query("select count(*) from %s"%parameterDict["stbName"])
@ -483,22 +483,22 @@ class TDTestCase:
break
else:
time.sleep(1)
tdLog.info("start consume processor")
pollDelay = 100
showMsg = 1
showRow = 1
if (platform.system().lower() == 'windows'):
shellCmd = 'mintty -h never -w hide ' + buildPath + '\\build\\bin\\tmq_sim.exe -c ' + cfgPath
shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, parameterDict["dbName"], showMsg, showRow, cdbName)
shellCmd += "> nul 2>&1 &"
shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, parameterDict["dbName"], showMsg, showRow, cdbName)
shellCmd += "> nul 2>&1 &"
else:
shellCmd = 'nohup ' + buildPath + '/build/bin/tmq_sim -c ' + cfgPath
shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, parameterDict["dbName"], showMsg, showRow, cdbName)
shellCmd += "> /dev/null 2>&1 &"
shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, parameterDict["dbName"], showMsg, showRow, cdbName)
shellCmd += "> /dev/null 2>&1 &"
tdLog.info(shellCmd)
os.system(shellCmd)
# start the second thread to create new child table and insert data
prepareEnvThread2 = threading.Thread(target=self.prepareEnv, kwargs=parameterDict2)
@ -507,7 +507,7 @@ class TDTestCase:
# wait for data ready
prepareEnvThread.join()
prepareEnvThread2.join()
tdLog.info("insert process end, and start to check consume result")
while 1:
tdSql.query("select * from consumeresult")
@ -519,7 +519,7 @@ class TDTestCase:
tdSql.checkData(0 , 1, consumerId)
tdSql.checkData(0 , 3, expectrowcnt)
tdSql.query("drop topic %s"%topicFromStb)
tdSql.query("drop topic %s"%topicFromStb2)
@ -537,7 +537,7 @@ class TDTestCase:
tdLog.info("cfgPath: %s" % cfgPath)
self.tmqCase1(cfgPath, buildPath)
self.tmqCase2(cfgPath, buildPath)
self.tmqCase3(cfgPath, buildPath)
def stop(self):

View File

@ -20,7 +20,7 @@ class TDTestCase:
self.vgroups = 4
self.ctbNum = 100
self.rowsPerTbl = 1000
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), False)
@ -50,7 +50,7 @@ class TDTestCase:
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
tmqCom.initConsumerTable()
tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1)
tdLog.info("create stb")
@ -58,12 +58,12 @@ class TDTestCase:
tdLog.info("create ctb")
tmqCom.create_ctable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict['ctbPrefix'],
ctbNum=paraDict["ctbNum"],ctbStartIdx=paraDict['ctbStartIdx'])
tdLog.info("insert data")
tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
tdLog.info("flush db to let data falls into the disk")
tdSql.query("flush database %s"%(paraDict['dbName']))
return
@ -93,18 +93,18 @@ class TDTestCase:
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
topicNameList = ['topic1']
expectRowsList = []
tmqCom.initConsumerTable()
tdLog.info("create topics from stb with filter")
queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
# sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
# tdSql.query(queryString)
# expectRowsList.append(tdSql.getRows())
# init consume info, and start tmq_sim, then check consume result
@ -121,29 +121,29 @@ class TDTestCase:
paraDict['batchNum'] = 100
paraDict['startTs'] = paraDict['startTs'] + self.rowsPerTbl
pInsertThread = tmqCom.asyncInsertDataByInterlace(paraDict)
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
pInsertThread.join()
tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
tdLog.info("wait the consume result")
tdLog.info("wait the consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[0], resultList[0]))
if expectRowsList[0] != resultList[0]:
tdLog.exit("%d tmq consume rows error!"%consumerId)
# tmqCom.checkFileContent(consumerId, queryString)
tdSql.query("flush database %s"%(paraDict['dbName']))
for i in range(len(topicNameList)):
tmqCom.waitSubscriptionExit(tdSql,topicNameList[i])
tdSql.query("drop topic %s"%topicNameList[i])
tdLog.printNoPrefix("======== test case 1 end ...... ")
@ -173,18 +173,18 @@ class TDTestCase:
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
topicNameList = ['topic1']
expectRowsList = []
tmqCom.initConsumerTable()
tdLog.info("create topics from stb with filter")
queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
# sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
totalRowsInserted = expectRowsList[0]
@ -200,36 +200,36 @@ class TDTestCase:
tdLog.info("start consume processor 0")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
tdLog.info("wait the consume result")
tdLog.info("wait the consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
actConsumeRows = resultList[0]
tdLog.info("act consume rows: %d, expect consume rows between %d and %d"%(actConsumeRows, expectrowcnt, totalRowsInserted))
if not (expectrowcnt <= actConsumeRows and totalRowsInserted >= actConsumeRows):
tdLog.exit("%d tmq consume rows error!"%consumerId)
# reinit consume info, and start tmq_sim, then check consume result
tmqCom.initConsumerTable()
consumerId = 2
expectrowcnt = math.ceil(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * 2/3)
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor 1")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
tdLog.info("wait the consume result")
tdLog.info("wait the consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
actConsumeRows = resultList[0]
tdLog.info("act consume rows: %d, expect rows: %d, act insert rows: %d"%(actConsumeRows, expectrowcnt, totalRowsInserted))
tdLog.info("act consume rows: %d, expect rows: %d, act insert rows: %d"%(actConsumeRows, expectrowcnt, totalRowsInserted))
if not ((actConsumeRows >= expectrowcnt) and (totalRowsInserted > actConsumeRows)):
tdLog.exit("%d tmq consume rows error!"%consumerId)
for i in range(len(topicNameList)):
tmqCom.waitSubscriptionExit(tdSql,topicNameList[i])
tdSql.query("drop topic %s"%topicNameList[i])
tdLog.printNoPrefix("======== test case 2 end ...... ")

View File

@ -20,7 +20,7 @@ class TDTestCase:
self.vgroups = 4
self.ctbNum = 1
self.rowsPerTbl = 10000
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), False)
@ -50,7 +50,7 @@ class TDTestCase:
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
tmqCom.initConsumerTable()
tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1)
tdLog.info("create stb")
@ -58,12 +58,12 @@ class TDTestCase:
tdLog.info("create ctb")
tmqCom.create_ctable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict['ctbPrefix'],
ctbNum=paraDict["ctbNum"],ctbStartIdx=paraDict['ctbStartIdx'])
tdLog.info("insert data")
tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
tdLog.info("flush db to let data falls into the disk")
tdSql.query("flush database %s"%(paraDict['dbName']))
return
@ -93,18 +93,18 @@ class TDTestCase:
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
topicNameList = ['topic1']
expectRowsList = []
tmqCom.initConsumerTable()
tdLog.info("create topics from stb with filter")
queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
# sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
# tdSql.query(queryString)
# expectRowsList.append(tdSql.getRows())
# init consume info, and start tmq_sim, then check consume result
@ -121,29 +121,29 @@ class TDTestCase:
paraDict['batchNum'] = 100
paraDict['startTs'] = paraDict['startTs'] + self.rowsPerTbl
pInsertThread = tmqCom.asyncInsertDataByInterlace(paraDict)
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
pInsertThread.join()
tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
tdLog.info("wait the consume result")
tdLog.info("wait the consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[0], resultList[0]))
if expectRowsList[0] != resultList[0]:
tdLog.exit("%d tmq consume rows error!"%consumerId)
tmqCom.checkFileContent(consumerId, queryString)
tdSql.query("flush database %s"%(paraDict['dbName']))
for i in range(len(topicNameList)):
tmqCom.waitSubscriptionExit(tdSql,topicNameList[i])
tdSql.query("drop topic %s"%topicNameList[i])
tdLog.printNoPrefix("======== test case 1 end ...... ")
@ -173,18 +173,18 @@ class TDTestCase:
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
topicNameList = ['topic1']
expectRowsList = []
tmqCom.initConsumerTable()
tdLog.info("create topics from stb with filter")
queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
# sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
totalRowsInserted = expectRowsList[0]
@ -200,36 +200,36 @@ class TDTestCase:
tdLog.info("start consume processor 0")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
tdLog.info("wait the consume result")
tdLog.info("wait the consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
actConsumeRows = resultList[0]
tdLog.info("act consume rows: %d, expect consume rows between %d and %d"%(actConsumeRows, expectrowcnt, totalRowsInserted))
if not (expectrowcnt <= actConsumeRows and totalRowsInserted >= actConsumeRows):
tdLog.exit("%d tmq consume rows error!"%consumerId)
# reinit consume info, and start tmq_sim, then check consume result
tmqCom.initConsumerTable()
consumerId = 2
expectrowcnt = math.ceil(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * 2/3)
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor 1")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
tdLog.info("wait the consume result")
tdLog.info("wait the consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
actConsumeRows = resultList[0]
tdLog.info("act consume rows: %d, expect rows: %d, act insert rows: %d"%(actConsumeRows, expectrowcnt, totalRowsInserted))
tdLog.info("act consume rows: %d, expect rows: %d, act insert rows: %d"%(actConsumeRows, expectrowcnt, totalRowsInserted))
if not ((actConsumeRows >= expectrowcnt) and (totalRowsInserted > actConsumeRows)):
tdLog.exit("%d tmq consume rows error!"%consumerId)
for i in range(len(topicNameList)):
tmqCom.waitSubscriptionExit(tdSql,topicNameList[i])
tdSql.query("drop topic %s"%topicNameList[i])
tdLog.printNoPrefix("======== test case 2 end ...... ")

View File

@ -56,12 +56,12 @@ class TDTestCase:
print(cur)
return cur
def initConsumerTable(self,cdbName='cdb'):
tdLog.info("create consume database, and consume info table, and consume result table")
tdSql.query("drop database if exists %s "%(cdbName))
tdSql.query("create database %s vgroups 1"%(cdbName))
tdSql.query("drop table if exists %s.consumeinfo "%(cdbName))
tdSql.query("drop table if exists %s.consumeresult "%(cdbName))
tdSql.query("drop table if exists %s.consumeresult "%(cdbName))
tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName)
tdSql.query("create table %s.consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)"%cdbName)
@ -75,7 +75,7 @@ class TDTestCase:
tdSql.query("drop table if exists %s.consumeinfo "%(cdbName))
tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName)
def insertConsumerInfo(self,consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifmanualcommit,cdbName='cdb'):
sql = "insert into %s.consumeinfo values "%cdbName
sql += "(now, %d, '%s', '%s', %d, %d, %d)"%(consumerId, topicList, keyList, expectrowcnt, ifcheckdata, ifmanualcommit)
tdLog.info("consume info sql: %s"%sql)
@ -90,11 +90,11 @@ class TDTestCase:
break
else:
time.sleep(5)
for i in range(expectRows):
tdLog.info ("consume id: %d, consume msgs: %d, consume rows: %d"%(tdSql.getData(i , 1), tdSql.getData(i , 2), tdSql.getData(i , 3)))
resultList.append(tdSql.getData(i , 3))
return resultList
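selectConsumeResult above is essentially a poll over the cdb.consumeresult table; a standalone sketch of the same idea, assuming a plain taos cursor instead of the tdSql wrapper (column index 3 holds the consumed row count):

import time

def select_consume_result(cursor, expect_rows, cdb='cdb', interval=5):
    # wait until the consumer has written expect_rows result rows, then return
    # the per-consumer consumed row counts
    while True:
        cursor.execute("select * from %s.consumeresult" % cdb)
        rows = cursor.fetchall()
        if len(rows) == expect_rows:
            return [row[3] for row in rows]
        time.sleep(interval)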
def startTmqSimProcess(self,buildPath,cfgPath,pollDelay,dbName,showMsg=1,showRow=1,cdbName='cdb',valgrind=0):
@ -102,14 +102,14 @@ class TDTestCase:
logFile = cfgPath + '/../log/valgrind-tmq.log'
shellCmd = 'nohup valgrind --log-file=' + logFile
shellCmd += '--tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all --num-callers=20 -v --workaround-gcc296-bugs=yes '
if (platform.system().lower() == 'windows'):
shellCmd = 'mintty -h never -w hide ' + buildPath + '\\build\\bin\\tmq_sim.exe -c ' + cfgPath
shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName)
shellCmd += "> nul 2>&1 &"
shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName)
shellCmd += "> nul 2>&1 &"
else:
shellCmd = 'nohup ' + buildPath + '/build/bin/tmq_sim -c ' + cfgPath
shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName)
shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName)
shellCmd += "> /dev/null 2>&1 &"
tdLog.info(shellCmd)
os.system(shellCmd)
@ -139,7 +139,7 @@ class TDTestCase:
sql = pre_create
if sql != pre_create:
tsql.execute(sql)
tdLog.debug("complete to create %d child tables in %s.%s" %(ctbNum, dbName, stbName))
return
@ -158,7 +158,7 @@ class TDTestCase:
ctbDict[i] = 0
#tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows))
rowsOfCtb = 0
while rowsOfCtb < rowsPerTbl:
for i in range(ctbNum):
sql += " %s.%s_%d values "%(dbName,ctbPrefix,i)
@ -185,7 +185,7 @@ class TDTestCase:
startTs = int(round(t * 1000))
#tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows))
rowsOfSql = 0
for i in range(ctbNum):
sql += " %s_%d values "%(ctbPrefix,i)
for j in range(rowsPerTbl):
@ -216,7 +216,7 @@ class TDTestCase:
startTs = int(round(t * 1000))
#tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows))
rowsOfSql = 0
for i in range(ctbNum):
sql += " %s.%s_%d using %s.%s tags (%d) values "%(dbName,ctbPrefix,i,dbName,stbName,i)
for j in range(rowsPerTbl):
@ -235,8 +235,8 @@ class TDTestCase:
tsql.execute(sql)
tdLog.debug("insert data ............ [OK]")
return
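The interlaced insert helpers above build one multi-table insert statement per batch; a simplified, self-contained sketch of that batching, using the same (ts, bigint, binary) column shape the stable declares. Names and batch handling are illustrative, not the framework's exact helper.

def build_interlaced_inserts(db, ctb_prefix, ctb_num, rows_per_tbl, batch, start_ts):
    # interleave rows across all child tables, at most `batch` rows per table per statement
    stmts, written = [], 0
    while written < rows_per_tbl:
        n = min(batch, rows_per_tbl - written)
        sql = "insert into"
        for i in range(ctb_num):
            sql += " %s.%s_%d values" % (db, ctb_prefix, i)
            for j in range(n):
                sql += " (%d, %d, 'binary_%d')" % (start_ts + written + j, written + j, written + j)
        stmts.append(sql)
        written += n
    return stmts

# e.g. build_interlaced_inserts('db', 'stb_', 3, 10, 4, 1640966400000)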
def prepareEnv(self, **parameterDict):
# create new connector for my thread
tsql=self.newcur(parameterDict['cfg'], 'localhost', 6030)
@ -255,7 +255,7 @@ class TDTestCase:
return
def tmqCase1(self, cfgPath, buildPath):
tdLog.printNoPrefix("======== test case 1: ")
tdLog.printNoPrefix("======== test case 1: ")
'''
subscribe one db, with multiple normal tables that do not share the same schema, and include rows of all tables in one insert sql
'''
@ -274,11 +274,11 @@ class TDTestCase:
'batchNum': 100, \
'startTs': 1640966400000} # 2022-01-01 00:00:00.000
parameterDict['cfg'] = cfgPath
self.create_database(tdSql, parameterDict["dbName"])
tdSql.execute("create table %s.ntb0 (ts timestamp, c1 int)"%(parameterDict["dbName"]))
tdSql.execute("create table %s.ntb1 (ts timestamp, c1 int, c2 float)"%(parameterDict["dbName"]))
tdSql.execute("create table %s.ntb2 (ts timestamp, c1 int, c2 float, c3 binary(32))"%(parameterDict["dbName"]))
tdSql.execute("create table %s.ntb2 (ts timestamp, c1 int, c2 float, c3 binary(32))"%(parameterDict["dbName"]))
tdSql.execute("create table %s.ntb3 (ts timestamp, c1 int, c2 float, c3 binary(32), c4 timestamp)"%(parameterDict["dbName"]))
tdSql.execute("insert into %s.ntb0 values(now, 1) %s.ntb1 values(now, 1, 1) %s.ntb2 values(now, 1, 1, '1') %s.ntb3 values(now, 1, 1, '1', now)"%(parameterDict["dbName"],parameterDict["dbName"],parameterDict["dbName"],parameterDict["dbName"]))
@ -301,7 +301,7 @@ class TDTestCase:
tdLog.info("create topics from db")
topicFromDb = 'topic_db_mulit_tbl'
tdSql.execute("create topic %s as database %s" %(topicFromDb, parameterDict['dbName']))
consumerId = 0
expectrowcnt = numOfNtb * rowsOfPerNtb
@ -324,7 +324,7 @@ class TDTestCase:
totalConsumeRows = 0
for i in range(expectRows):
totalConsumeRows += resultList[i]
if totalConsumeRows != expectrowcnt:
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt))
tdLog.exit("tmq consume rows error!")
@ -334,7 +334,7 @@ class TDTestCase:
tdLog.printNoPrefix("======== test case 1 end ...... ")
def tmqCase2(self, cfgPath, buildPath):
tdLog.printNoPrefix("======== test case 2: ")
tdLog.printNoPrefix("======== test case 2: ")
'''
subscribe one stb, plus multiple child tables and normal tables that do not share the same schema, and include rows of all tables in one insert sql
'''
@ -355,7 +355,7 @@ class TDTestCase:
parameterDict['cfg'] = cfgPath
dbName = parameterDict["dbName"]
self.create_database(tdSql, dbName)
tdSql.execute("create stable %s.stb (ts timestamp, s1 bigint, s2 binary(32), s3 double) tags (t1 int, t2 binary(32))"%(dbName))
@ -364,7 +364,7 @@ class TDTestCase:
tdSql.execute("create table %s.ntb0 (ts timestamp, c1 binary(32))"%(dbName))
tdSql.execute("create table %s.ntb1 (ts timestamp, c1 binary(32), c2 float)"%(dbName))
tdSql.execute("create table %s.ntb2 (ts timestamp, c1 int, c2 float, c3 binary(32))"%(dbName))
tdSql.execute("create table %s.ntb2 (ts timestamp, c1 int, c2 float, c3 binary(32))"%(dbName))
tdSql.execute("create table %s.ntb3 (ts timestamp, c1 int, c2 float, c3 binary(32), c4 timestamp)"%(dbName))
tdSql.execute("insert into %s.ntb0 values(now, 'ntb0-11') \
@ -401,7 +401,7 @@ class TDTestCase:
tdLog.info("create topics from db")
topicFromStb = 'topic_stb_mulit_tbl'
tdSql.execute("create topic %s as stable %s.stb" %(topicFromStb, dbName))
consumerId = 0
expectrowcnt = numOfCtb * rowsOfPerNtb
@ -424,7 +424,7 @@ class TDTestCase:
totalConsumeRows = 0
for i in range(expectRows):
totalConsumeRows += resultList[i]
if totalConsumeRows != expectrowcnt:
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt))
tdLog.exit("tmq consume rows error!")
@ -445,7 +445,7 @@ class TDTestCase:
tdLog.info("cfgPath: %s" % cfgPath)
self.tmqCase1(cfgPath, buildPath)
self.tmqCase2(cfgPath, buildPath)
def stop(self):
tdSql.close()

View File

@ -38,20 +38,20 @@ class TDTestCase:
cmdStr = '%s/build/bin/taos -c %s -s "%s >> %s"'%(buildPath, cfgPath, queryString, dstFile)
tdLog.info(cmdStr)
os.system(cmdStr)
consumeRowsFile = '%s/../log/consumerid_%d.txt'%(cfgPath, consumerId)
tdLog.info("rows file: %s, %s"%(consumeRowsFile, dstFile))
consumeFile = open(consumeRowsFile, mode='r')
queryFile = open(dstFile, mode='r')
# skip first line for it is schema
queryFile.readline()
while True:
dst = queryFile.readline()
src = consumeFile.readline()
if dst:
if dst != src:
tdLog.exit("consumerId %d consume rows is not match the rows by direct query"%consumerId)
@ -84,7 +84,7 @@ class TDTestCase:
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
tmqCom.initConsumerTable()
tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=self.replica)
tdLog.info("create stb")
@ -101,13 +101,13 @@ class TDTestCase:
# startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
# tmqCom.asyncInsertDataByInterlace(paraDict)
tmqCom.create_ntable(tdSql, dbname=paraDict["dbName"], tbname_prefix="ntb", tbname_index_start_num = 1, column_elm_list=paraDict["colSchema"], colPrefix='c', tblNum=1)
tmqCom.insert_rows_into_ntbl(tdSql, dbname=paraDict["dbName"], tbname_prefix="ntb", tbname_index_start_num = 1, column_ele_list=paraDict["colSchema"], startTs=paraDict["startTs"], tblNum=1, rows=2) # tdLog.info("restart taosd to ensure that the data falls into the disk")
tdSql.query("drop database %s"%paraDict["dbName"])
return
def tmqCase1(self):
tdLog.printNoPrefix("======== test case 1: ")
# create and start thread
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
@ -132,14 +132,14 @@ class TDTestCase:
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
tdLog.info("create topics from stb1")
topicFromStb1 = 'topic_stb1'
queryString = "select ts, c1, c2 from %s.%s where t4 == 'beijing' or t4 == 'changsha' "%(paraDict['dbName'], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicFromStb1, queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
consumerId = 0
expectrowcnt = paraDict["rowsPerTbl"] * paraDict["ctbNum"]
topicList = topicFromStb1
@ -166,13 +166,13 @@ class TDTestCase:
tdSql.query(queryString)
totalRowsInserted = tdSql.getRows()
tdLog.info("act consume rows: %d, act insert rows: %d, expect consume rows: %d, "%(totalConsumeRows, totalRowsInserted, expectrowcnt))
if totalConsumeRows != expectrowcnt:
tdLog.exit("tmq consume rows error!")
# tmqCom.checkFileContent(consumerId, queryString)
tmqCom.waitSubscriptionExit(tdSql, topicFromStb1)
tdSql.query("drop topic %s"%topicFromStb1)

View File

@ -56,12 +56,12 @@ class TDTestCase:
print(cur)
return cur
def initConsumerTable(self,cdbName='cdb'):
tdLog.info("create consume database, and consume info table, and consume result table")
tdSql.query("drop database if exists %s "%(cdbName))
tdSql.query("create database %s vgroups 1"%(cdbName))
tdSql.query("drop table if exists %s.consumeinfo "%(cdbName))
tdSql.query("drop table if exists %s.consumeresult "%(cdbName))
tdSql.query("drop table if exists %s.consumeresult "%(cdbName))
tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName)
tdSql.query("create table %s.consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)"%cdbName)
@ -75,7 +75,7 @@ class TDTestCase:
tdSql.query("drop table if exists %s.consumeinfo "%(cdbName))
tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName)
def insertConsumerInfo(self,consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifmanualcommit,cdbName='cdb'):
sql = "insert into %s.consumeinfo values "%cdbName
sql += "(now, %d, '%s', '%s', %d, %d, %d)"%(consumerId, topicList, keyList, expectrowcnt, ifcheckdata, ifmanualcommit)
tdLog.info("consume info sql: %s"%sql)
@ -90,11 +90,11 @@ class TDTestCase:
break
else:
time.sleep(5)
for i in range(expectRows):
tdLog.info ("consume id: %d, consume msgs: %d, consume rows: %d"%(tdSql.getData(i , 1), tdSql.getData(i , 2), tdSql.getData(i , 3)))
resultList.append(tdSql.getData(i , 3))
return resultList
def startTmqSimProcess(self,buildPath,cfgPath,pollDelay,dbName,showMsg=1,showRow=1,cdbName='cdb',valgrind=0):
@ -103,9 +103,9 @@ class TDTestCase:
logFile = cfgPath + '/../log/valgrind-tmq.log'
shellCmd = 'nohup valgrind --log-file=' + logFile
shellCmd += '--tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all --num-callers=20 -v --workaround-gcc296-bugs=yes '
shellCmd += buildPath + '/build/bin/tmq_sim -c ' + cfgPath
shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName)
shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName)
shellCmd += "> /dev/null 2>&1 &"
tdLog.info(shellCmd)
os.system(shellCmd)
@ -135,7 +135,7 @@ class TDTestCase:
sql = pre_create
if sql != pre_create:
tsql.execute(sql)
tdLog.debug("complete to create %d child tables in %s.%s" %(ctbNum, dbName, stbName))
return
@ -154,7 +154,7 @@ class TDTestCase:
ctbDict[i] = 0
#tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows))
rowsOfCtb = 0
while rowsOfCtb < rowsPerTbl:
for i in range(ctbNum):
sql += " %s.%s_%d values "%(dbName,ctbPrefix,i)
@ -181,7 +181,7 @@ class TDTestCase:
startTs = int(round(t * 1000))
#tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows))
rowsOfSql = 0
for i in range(ctbNum):
sql += " %s_%d values "%(ctbPrefix,i)
for j in range(rowsPerTbl):
@ -212,7 +212,7 @@ class TDTestCase:
startTs = int(round(t * 1000))
#tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows))
rowsOfSql = 0
for i in range(ctbNum):
sql += " %s.%s_%d using %s.%s tags (%d) values "%(dbName,ctbPrefix,i,dbName,stbName,i)
for j in range(rowsPerTbl):
@ -231,8 +231,8 @@ class TDTestCase:
tsql.execute(sql)
tdLog.debug("insert data ............ [OK]")
return
def prepareEnv(self, **parameterDict):
# create new connector for my thread
tsql=self.newcur(parameterDict['cfg'], 'localhost', 6030)
@ -265,7 +265,7 @@ class TDTestCase:
'batchNum': 23, \
'startTs': 1640966400000} # 2022-01-01 00:00:00.000
parameterDict['cfg'] = cfgPath
tdLog.info("create database, super table, child table, normal table")
ntbName = 'ntb1'
self.create_database(tdSql, parameterDict["dbName"])
@ -278,10 +278,10 @@ class TDTestCase:
tdLog.info("create topics from super table and normal table")
columnTopicFromStb = 'column_topic_from_stb1'
columnTopicFromNtb = 'column_topic_from_ntb1'
tdSql.execute("create topic %s as select ts, c1, c2, t1, t2 from %s.%s" %(columnTopicFromStb, parameterDict['dbName'], parameterDict['stbName']))
tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(columnTopicFromNtb, parameterDict['dbName'], ntbName))
tdLog.info("======== super table test:")
# alter actions prohibited: drop column/tag, modify column/tag type, rename column/tag included in topic
tdSql.error("alter table %s.%s drop column c1"%(parameterDict['dbName'], parameterDict['stbName']))
@ -341,12 +341,12 @@ class TDTestCase:
tdLog.info("======== child table test:")
parameterDict['stbName'] = 'stb12'
ctbName = 'stb12_0'
tdSql.query("create table %s.%s (ts timestamp, c1 int, c2 binary(32), c3 double, c4 binary(32), c5 nchar(10)) tags (t1 int, t2 binary(32), t3 double, t4 binary(32), t5 nchar(10))"%(parameterDict["dbName"],parameterDict['stbName']))
tdSql.query("create table %s.%s (ts timestamp, c1 int, c2 binary(32), c3 double, c4 binary(32), c5 nchar(10)) tags (t1 int, t2 binary(32), t3 double, t4 binary(32), t5 nchar(10))"%(parameterDict["dbName"],parameterDict['stbName']))
tdSql.query("create table %s.%s using %s.%s tags (1, '2', 3, '4', '5')"%(parameterDict["dbName"],ctbName,parameterDict["dbName"],parameterDict['stbName']))
tdLog.info("create topics from child table")
columnTopicFromCtb = 'column_topic_from_ctb1'
tdSql.execute("create topic %s as select ts, c1, c2, t1, t2 from %s.%s" %(columnTopicFromCtb,parameterDict['dbName'],ctbName))
# alter actions prohibited: drop column/tag, modify column/tag type, rename column/tag included in topic
@ -388,7 +388,7 @@ class TDTestCase:
tdSql.query("alter table %s.%s add column c4 float"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.query("alter table %s.%s add tag t3 int"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.query("alter table %s.%s add tag t4 float"%(parameterDict['dbName'], parameterDict['stbName']))
tdLog.printNoPrefix("======== test case 1 end ...... ")
def tmqCase2(self, cfgPath, buildPath):
@ -406,7 +406,7 @@ class TDTestCase:
'batchNum': 23, \
'startTs': 1640966400000} # 2022-01-01 00:00:00.000
parameterDict['cfg'] = cfgPath
tdLog.info("create database, super table, child table, normal table")
self.create_database(tdSql, parameterDict["dbName"])
ntbName = 'ntb2'
@ -416,18 +416,18 @@ class TDTestCase:
tdLog.info("create topics from super table and normal table")
columnTopicFromStb = 'column_topic_from_stb2'
columnTopicFromNtb = 'column_topic_from_ntb2'
tdSql.execute("create topic %s as select ts, c1, c2, t1, t2 from %s.%s where c3 > 3 and c4 like 'abc' and t3 = 5 and t4 = 'beijing'" %(columnTopicFromStb, parameterDict['dbName'], parameterDict['stbName']))
tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s where c3 > 3 and c4 like 'abc'" %(columnTopicFromNtb, parameterDict['dbName'], ntbName))
tdLog.info("======== super table test:")
# alter actions prohibited: drop column/tag, modify column/tag type, rename column/tag included in topic
tdSql.error("alter table %s.%s drop column c1"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.error("alter table %s.%s drop column c2"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.error("alter table %s.%s drop column c3"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.error("alter table %s.%s drop column c4"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.error("alter table %s.%s drop column c4"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.error("alter table %s.%s drop tag t1"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.error("alter table %s.%s drop tag t2"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.error("alter table %s.%s drop tag t2"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.error("alter table %s.%s drop tag t3"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.error("alter table %s.%s drop tag t4"%(parameterDict['dbName'], parameterDict['stbName']))
@ -485,12 +485,12 @@ class TDTestCase:
tdLog.info("======== child table test:")
parameterDict['stbName'] = 'stb21'
ctbName = 'stb21_0'
tdSql.query("create table %s.%s (ts timestamp, c1 int, c2 binary(32), c3 double, c4 binary(32), c5 nchar(10)) tags (t1 int, t2 binary(32), t3 double, t4 binary(32), t5 nchar(10))"%(parameterDict["dbName"],parameterDict['stbName']))
tdSql.query("create table %s.%s (ts timestamp, c1 int, c2 binary(32), c3 double, c4 binary(32), c5 nchar(10)) tags (t1 int, t2 binary(32), t3 double, t4 binary(32), t5 nchar(10))"%(parameterDict["dbName"],parameterDict['stbName']))
tdSql.query("create table %s.%s using %s.%s tags (1, '2', 3, '4', '5')"%(parameterDict["dbName"],ctbName,parameterDict["dbName"],parameterDict['stbName']))
tdLog.info("create topics from child table")
columnTopicFromCtb = 'column_topic_from_ctb2'
tdSql.execute("create topic %s as select ts, c1, c2, t1, t2 from %s.%s where c3 > 3 and c4 like 'abc' and t3 = 5 and t4 = 'beijing'" %(columnTopicFromCtb,parameterDict['dbName'],ctbName))
# alter actions prohibited: drop column/tag, modify column/tag type, rename column/tag included in topic
@ -536,11 +536,11 @@ class TDTestCase:
tdSql.query("alter table %s.%s add column c5 float"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.query("alter table %s.%s add tag t5 float"%(parameterDict['dbName'], parameterDict['stbName']))
tdLog.printNoPrefix("======== test case 2 end ...... ")
def tmqCase3(self, cfgPath, buildPath):
tdLog.printNoPrefix("======== test case 3: ")
tdLog.printNoPrefix("======== test case 3: ")
parameterDict = {'cfg': '', \
'actionType': 0, \
'dbName': 'db3', \
@ -554,7 +554,7 @@ class TDTestCase:
'batchNum': 23, \
'startTs': 1640966400000} # 2022-01-01 00:00:00.000
parameterDict['cfg'] = cfgPath
tdLog.info("create database, super table, child table, normal table")
self.create_database(tdSql, parameterDict["dbName"])
ntbName = 'ntb3'
@ -564,19 +564,19 @@ class TDTestCase:
tdLog.info("create topics from super table and normal table")
columnTopicFromStb = 'star_topic_from_stb3'
columnTopicFromNtb = 'star_topic_from_ntb3'
tdSql.execute("create topic %s as select * from %s.%s" %(columnTopicFromStb, parameterDict['dbName'], parameterDict['stbName']))
tdSql.execute("create topic %s as select * from %s.%s " %(columnTopicFromNtb, parameterDict['dbName'], ntbName))
tdLog.info("======== super table test:")
# alter actions prohibited: drop column/tag, modify column/tag type, rename column/tag included in topic
tdSql.error("alter table %s.%s drop column c1"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.error("alter table %s.%s drop column c2"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.error("alter table %s.%s drop column c3"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.error("alter table %s.%s drop column c4"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.error("alter table %s.%s drop column c5"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.error("alter table %s.%s drop column c4"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.error("alter table %s.%s drop column c5"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.error("alter table %s.%s drop tag t1"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.error("alter table %s.%s drop tag t2"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.error("alter table %s.%s drop tag t2"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.error("alter table %s.%s drop tag t3"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.error("alter table %s.%s drop tag t4"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.error("alter table %s.%s drop tag t5"%(parameterDict['dbName'], parameterDict['stbName']))
@ -627,12 +627,12 @@ class TDTestCase:
tdLog.info("======== child table test:")
parameterDict['stbName'] = 'stb31'
ctbName = 'stb31_0'
tdSql.query("create table %s.%s (ts timestamp, c1 int, c2 binary(32), c3 double, c4 binary(32), c5 nchar(10)) tags (t1 int, t2 binary(32), t3 double, t4 binary(32), t5 nchar(10))"%(parameterDict["dbName"],parameterDict['stbName']))
tdSql.query("create table %s.%s (ts timestamp, c1 int, c2 binary(32), c3 double, c4 binary(32), c5 nchar(10)) tags (t1 int, t2 binary(32), t3 double, t4 binary(32), t5 nchar(10))"%(parameterDict["dbName"],parameterDict['stbName']))
tdSql.query("create table %s.%s using %s.%s tags (10, '10', 10, '10', '10')"%(parameterDict["dbName"],ctbName,parameterDict["dbName"],parameterDict['stbName']))
tdLog.info("create topics from child table")
columnTopicFromCtb = 'column_topic_from_ctb3'
tdSql.execute("create topic %s as select * from %s.%s " %(columnTopicFromCtb,parameterDict['dbName'],ctbName))
tdSql.error("alter table %s.%s modify column c2 binary(40)"%(parameterDict['dbName'], parameterDict['stbName']))
@ -647,7 +647,7 @@ class TDTestCase:
tdSql.query("alter table %s.%s set tag t3=20"%(parameterDict['dbName'], ctbName))
tdSql.query("alter table %s.%s set tag t4='20'"%(parameterDict['dbName'], ctbName))
tdSql.query("alter table %s.%s set tag t5='20'"%(parameterDict['dbName'], ctbName))
tdSql.error("alter table %s.%s rename column c1 c1new"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.error("alter table %s.%s rename column c2 c2new"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.error("alter table %s.%s rename column c3 c3new"%(parameterDict['dbName'], parameterDict['stbName']))
@ -662,7 +662,7 @@ class TDTestCase:
# alter actions allowed: drop column/tag, modify column/tag type, rename column/tag not included in topic
tdSql.query("alter table %s.%s add column c6 float"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.query("alter table %s.%s add tag t6 float"%(parameterDict['dbName'], parameterDict['stbName']))
# alter actions prohibited: drop column/tag, modify column/tag type, rename column/tag included in topic
tdSql.error("alter table %s.%s drop column c1"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.error("alter table %s.%s drop column c2"%(parameterDict['dbName'], parameterDict['stbName']))
@ -679,7 +679,7 @@ class TDTestCase:
tdLog.printNoPrefix("======== test case 3 end ...... ")
def tmqCase4(self, cfgPath, buildPath):
tdLog.printNoPrefix("======== test case 4: ")
tdLog.printNoPrefix("======== test case 4: ")
parameterDict = {'cfg': '', \
'actionType': 0, \
'dbName': 'db4', \
@ -695,7 +695,7 @@ class TDTestCase:
parameterDict['cfg'] = cfgPath
ctbName = 'stb4_0'
tdLog.info("create database, super table, child table, normal table")
self.create_database(tdSql, parameterDict["dbName"])
tdSql.query("create table %s.%s (ts timestamp, c1 int, c2 binary(32), c3 double, c4 binary(32), c5 nchar(10)) tags (t1 int, t2 binary(32), t3 double, t4 binary(32), t5 nchar(10))"%(parameterDict["dbName"],parameterDict["stbName"]))
@ -703,7 +703,7 @@ class TDTestCase:
tdLog.info("create topics from super table")
columnTopicFromStb = 'star_topic_from_stb4'
tdSql.execute("create topic %s as stable %s.%s" %(columnTopicFromStb, parameterDict['dbName'], parameterDict['stbName']))
tdLog.info("======== child table test:")
@ -739,10 +739,10 @@ class TDTestCase:
tdSql.query("alter table %s.%s drop column c1"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.query("alter table %s.%s drop column c2"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.query("alter table %s.%s drop column c3"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.query("alter table %s.%s drop column c4"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.query("alter table %s.%s drop column c5"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.query("alter table %s.%s drop column c4"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.query("alter table %s.%s drop column c5"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.query("alter table %s.%s drop tag t1new"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.query("alter table %s.%s drop tag t2new"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.query("alter table %s.%s drop tag t2new"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.query("alter table %s.%s drop tag t3new"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.query("alter table %s.%s drop tag t4new"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.query("alter table %s.%s drop tag t5new"%(parameterDict['dbName'], parameterDict['stbName']))
@ -750,7 +750,7 @@ class TDTestCase:
tdLog.printNoPrefix("======== test case 4 end ...... ")
def tmqCase5(self, cfgPath, buildPath):
tdLog.printNoPrefix("======== test case 5: ")
tdLog.printNoPrefix("======== test case 5: ")
parameterDict = {'cfg': '', \
'actionType': 0, \
'dbName': 'db5', \
@ -766,7 +766,7 @@ class TDTestCase:
parameterDict['cfg'] = cfgPath
ctbName = 'stb5_0'
tdLog.info("create database, super table, child table, normal table")
self.create_database(tdSql, parameterDict["dbName"])
tdSql.query("create table %s.%s (ts timestamp, c1 int, c2 binary(32), c3 double, c4 binary(32), c5 nchar(10)) tags (t1 int, t2 binary(32), t3 double, t4 binary(32), t5 nchar(10))"%(parameterDict["dbName"],parameterDict["stbName"]))
@ -774,7 +774,7 @@ class TDTestCase:
tdLog.info("create topics from super table")
columnTopicFromStb = 'star_topic_from_db5'
tdSql.execute("create topic %s as database %s" %(columnTopicFromStb, parameterDict['dbName']))
tdLog.info("======== child table test:")
@ -810,10 +810,10 @@ class TDTestCase:
tdSql.query("alter table %s.%s drop column c1"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.query("alter table %s.%s drop column c2"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.query("alter table %s.%s drop column c3"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.query("alter table %s.%s drop column c4"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.query("alter table %s.%s drop column c5"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.query("alter table %s.%s drop column c4"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.query("alter table %s.%s drop column c5"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.query("alter table %s.%s drop tag t1new"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.query("alter table %s.%s drop tag t2new"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.query("alter table %s.%s drop tag t2new"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.query("alter table %s.%s drop tag t3new"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.query("alter table %s.%s drop tag t4new"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.query("alter table %s.%s drop tag t5new"%(parameterDict['dbName'], parameterDict['stbName']))
@ -821,7 +821,7 @@ class TDTestCase:
tdLog.printNoPrefix("======== test case 5 end ...... ")
def tmqCase6(self, cfgPath, buildPath):
tdLog.printNoPrefix("======== test case 6: ")
tdLog.printNoPrefix("======== test case 6: ")
parameterDict = {'cfg': '', \
'actionType': 0, \
'dbName': 'db6', \
@ -835,18 +835,18 @@ class TDTestCase:
'batchNum': 23, \
'startTs': 1640966400000} # 2022-01-01 00:00:00.000
parameterDict['cfg'] = cfgPath
tdLog.info("create database, super table, child table, normal table")
self.create_database(tdSql, parameterDict["dbName"])
tdLog.info("======== child table test:")
parameterDict['stbName'] = 'stb6'
ctbName = 'stb6_0'
tdSql.query("create table %s.%s (ts timestamp, c1 int, c2 binary(32), c3 double, c4 binary(32), c5 nchar(10)) tags (t1 int, t2 binary(32), t3 double, t4 binary(32), t5 nchar(10))"%(parameterDict["dbName"],parameterDict['stbName']))
tdSql.query("create table %s.%s (ts timestamp, c1 int, c2 binary(32), c3 double, c4 binary(32), c5 nchar(10)) tags (t1 int, t2 binary(32), t3 double, t4 binary(32), t5 nchar(10))"%(parameterDict["dbName"],parameterDict['stbName']))
tdSql.query("create table %s.%s using %s.%s tags (10, '10', 10, '10', '10')"%(parameterDict["dbName"],ctbName,parameterDict["dbName"],parameterDict['stbName']))
tdLog.info("create topics from child table")
columnTopicFromCtb = 'column_topic_from_ctb6'
tdSql.execute("create topic %s as select c1, c2, c3 from %s.%s where t1 > 10 and t2 = 'beijign' and sin(t3) < 0" %(columnTopicFromCtb,parameterDict['dbName'],ctbName))
tdSql.error("alter table %s.%s modify column c1 binary(40)"%(parameterDict['dbName'], parameterDict['stbName']))
@ -861,7 +861,7 @@ class TDTestCase:
tdSql.error("alter table %s.%s set tag t3=20"%(parameterDict['dbName'], ctbName))
tdSql.query("alter table %s.%s set tag t4='20'"%(parameterDict['dbName'], ctbName))
tdSql.query("alter table %s.%s set tag t5='20'"%(parameterDict['dbName'], ctbName))
tdSql.error("alter table %s.%s rename column c1 c1new"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.error("alter table %s.%s rename column c2 c2new"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.error("alter table %s.%s rename column c3 c3new"%(parameterDict['dbName'], parameterDict['stbName']))
@ -876,7 +876,7 @@ class TDTestCase:
# alter actions allowed: drop column/tag, modify column/tag type, rename column/tag not included in topic
tdSql.query("alter table %s.%s add column c6 float"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.query("alter table %s.%s add tag t6 float"%(parameterDict['dbName'], parameterDict['stbName']))
# alter actions prohibited: drop column/tag, modify column/tag type, rename column/tag included in topic
tdSql.error("alter table %s.%s drop column c1"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.error("alter table %s.%s drop column c2"%(parameterDict['dbName'], parameterDict['stbName']))
@ -903,7 +903,7 @@ class TDTestCase:
tdLog.info("cfgPath: %s" % cfgPath)
self.tmqCase1(cfgPath, buildPath)
self.tmqCase2(cfgPath, buildPath)
self.tmqCase3(cfgPath, buildPath)
self.tmqCase4(cfgPath, buildPath)
self.tmqCase5(cfgPath, buildPath)

View File

@ -79,27 +79,27 @@ class TDTestCase:
topicNameList = ['topic1', 'topic2', 'topic3']
expectRowsList = []
tmqCom.initConsumerTable()
tdLog.info("create topics from stb with filter")
queryString = "select ts, log(c1), ceil(pow(c1,3)) from %s.%s where c1 %% 4 == 0" %(paraDict['dbName'], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
queryString = "select ts, log(c1), cos(c1) from %s.%s where c1 > 5000" %(paraDict['dbName'], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicNameList[1], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
queryString = "select ts, log(c1), atan(c1) from %s.%s where ts >= %d" %(paraDict['dbName'], paraDict['stbName'], paraDict["startTs"]+9000)
sqlString = "create topic %s as %s" %(topicNameList[2], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
# init consume info, and start tmq_sim, then check consume result
@ -115,10 +115,10 @@ class TDTestCase:
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(paraDict['pollDelay'],paraDict["dbName"],paraDict['showMsg'], paraDict['showRow'])
tdLog.info("wait the consume result")
tdLog.info("wait the consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
if expectRowsList[0] != resultList[0]:
tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[0], resultList[0]))
tdLog.exit("0 tmq consume rows error!")
@ -132,7 +132,7 @@ class TDTestCase:
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(paraDict['pollDelay'],paraDict["dbName"],paraDict['showMsg'], paraDict['showRow'])
tdLog.info("wait the consume result")
tdLog.info("wait the consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
if expectRowsList[1] != resultList[0]:
@ -148,14 +148,14 @@ class TDTestCase:
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(paraDict['pollDelay'],paraDict["dbName"],paraDict['showMsg'], paraDict['showRow'])
tdLog.info("wait the consume result")
tdLog.info("wait the consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
if expectRowsList[2] != resultList[0]:
tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[2], resultList[0]))
tdLog.exit("2 tmq consume rows error!")
time.sleep(10)
for i in range(len(topicNameList)):
tdSql.query("drop topic %s"%topicNameList[i])
@ -193,7 +193,7 @@ class TDTestCase:
sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
queryString = "select ts, sin(c1), pow(c2,3) from %s.%s where sin(c2) >= 0" %(paraDict['dbName'], paraDict['stbName'])
@ -209,7 +209,7 @@ class TDTestCase:
tdSql.execute(sqlString)
tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
# start tmq consume processor
tdLog.info("insert consume info to consume processor")
consumerId = 0
@ -223,10 +223,10 @@ class TDTestCase:
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(paraDict['pollDelay'],paraDict["dbName"],paraDict['showMsg'], paraDict['showRow'])
tdLog.info("wait the consume result")
tdLog.info("wait the consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
if expectRowsList[0] != resultList[0]:
tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[0], resultList[0]))
tdLog.exit("0 tmq consume rows error!")
@ -240,7 +240,7 @@ class TDTestCase:
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(paraDict['pollDelay'],paraDict["dbName"],paraDict['showMsg'], paraDict['showRow'])
tdLog.info("wait the consume result")
tdLog.info("wait the consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
if expectRowsList[1] != resultList[0]:
@ -256,14 +256,14 @@ class TDTestCase:
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(paraDict['pollDelay'],paraDict["dbName"],paraDict['showMsg'], paraDict['showRow'])
tdLog.info("wait the consume result")
tdLog.info("wait the consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
if expectRowsList[2] != resultList[0]:
tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[2], resultList[0]))
tdLog.exit("2 tmq consume rows error!")
# time.sleep(10)
# for i in range(len(topicNameList)):
# tdSql.query("drop topic %s"%topicNameList[i])

View File

@ -20,7 +20,7 @@ class TDTestCase:
self.vgroups = 4
self.ctbNum = 1
self.rowsPerTbl = 10000
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), False)
@ -50,7 +50,7 @@ class TDTestCase:
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
tmqCom.initConsumerTable()
tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1)
tdLog.info("create stb")
@ -65,11 +65,11 @@ class TDTestCase:
# tmqCom.insert_data_with_autoCreateTbl(tsql=tdSql,dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix="ctbx",
# ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
# startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
# tdLog.info("restart taosd to ensure that the data falls into the disk")
# tdLog.info("restart taosd to ensure that the data falls into the disk")
# tdSql.query("flush database %s"%(paraDict['dbName']))
return
def tmqCase1(self):
tdLog.printNoPrefix("======== test case 1: ")
paraDict = {'dbName': 'dbt',
@ -95,7 +95,7 @@ class TDTestCase:
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
# update to half tables
# paraDict['rowsPerTbl'] = int(self.rowsPerTbl / 2)
# tmqCom.insert_data_with_autoCreateTbl(tsql=tdSql,dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix="ctbx",
@ -103,16 +103,16 @@ class TDTestCase:
# startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
# tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
# ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
# startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
tdLog.info("create topics from stb1")
topicFromStb1 = 'topic_UpperCase_stb1'
# queryString = "select ts, c1, c2 from %s.%s where t4 == 'shanghai' or t4 == 'changsha'"%(paraDict['dbName'], paraDict['stbName'])
queryString = "select ts, c1, c2, t4 from %s.%s where t4 == 'shanghai' or t4 == 'changsha'"%(paraDict['dbName'], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicFromStb1, queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
# paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
consumerId = 0
@ -139,18 +139,18 @@ class TDTestCase:
tdLog.info("run select sql from db")
tdSql.query(queryString)
totalRowsFromQuery = tdSql.getRows()
tdLog.info("act consume rows: %d, act query rows: %d"%(totalConsumeRows, totalRowsFromQuery))
if totalConsumeRows != totalRowsFromQuery:
tdLog.exit("tmq consume rows error!")
tmqCom.checkFileContent(consumerId, queryString)
tdSql.query("drop topic %s"%topicFromStb1)
tdLog.printNoPrefix("======== test case 1 end ...... ")
def tmqCase2(self):
tdLog.printNoPrefix("======== test case 2: ")
tdLog.printNoPrefix("======== test case 2: ")
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
'event': '',
@ -170,15 +170,15 @@ class TDTestCase:
'showMsg': 1,
'showRow': 1,
'snapshot': 0}
paraDict['snapshot'] = self.snapshot
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
# tdLog.info("restart taosd to ensure that the data falls into the disk")
# tdLog.info("restart taosd to ensure that the data falls into the disk")
# tdSql.query("flush database %s"%(paraDict['dbName']))
# update to half tables
# paraDict['startTs'] = paraDict['startTs'] + int(self.rowsPerTbl / 2)
# paraDict['rowsPerTbl'] = int(self.rowsPerTbl / 2)
@ -187,17 +187,17 @@ class TDTestCase:
# startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
# tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
# ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
# startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
tmqCom.initConsumerTable()
tdLog.info("create topics from stb1")
topicFromStb1 = 'topic_UpperCase_stb1'
queryString = "select ts, c1, c2 from %s.%s where t4 == 'shanghai' or t4 == 'changsha'"%(paraDict['dbName'], paraDict['stbName'])
# queryString = "select ts, c1, c2, t4 from %s.%s where t4 == 'shanghai' or t4 == 'changsha'"%(paraDict['dbName'], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicFromStb1, queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
# paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
consumerId = 1
@ -213,13 +213,13 @@ class TDTestCase:
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
paraDict['startTs'] = paraDict['startTs'] + int(self.rowsPerTbl / 2)
paraDict['rowsPerTbl'] = int(self.rowsPerTbl / 2)
tmqCom.insert_data_with_autoCreateTbl(tsql=tdSql,dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict["ctbPrefix"],
ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
tdLog.info("insert process end, and start to check consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
@ -229,7 +229,7 @@ class TDTestCase:
tdSql.query(queryString)
totalRowsFromQuery = tdSql.getRows()
tdLog.info("act consume rows: %d, act query rows: %d, expect consume rows: %d, "%(totalConsumeRows, totalRowsFromQuery, expectrowcnt))
if self.snapshot == 0:
if totalConsumeRows != expectrowcnt:
@ -237,8 +237,8 @@ class TDTestCase:
elif self.snapshot == 1:
if totalConsumeRows != totalRowsFromQuery:
tdLog.exit("tmq consume rows error!")
# tmqCom.checkFileContent(consumerId, queryString)
tdSql.query("drop topic %s"%topicFromStb1)
@ -251,14 +251,14 @@ class TDTestCase:
tdLog.printNoPrefix("======== snapshot is 0: only consume from wal")
self.tmqCase1()
self.tmqCase2()
self.prepareTestEnv()
tdLog.printNoPrefix("====================================================================")
tdLog.printNoPrefix("======== snapshot is 1: firstly consume from tsbs, and then from wal")
self.snapshot = 1
self.tmqCase1()
self.tmqCase2()
def stop(self):
tdSql.close()

View File

@ -20,7 +20,7 @@ class TDTestCase:
self.vgroups = 4
self.ctbNum = 100
self.rowsPerTbl = 1000
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), False)
@ -50,7 +50,7 @@ class TDTestCase:
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
tmqCom.initConsumerTable()
tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1)
tdLog.info("create stb")
@ -65,11 +65,11 @@ class TDTestCase:
# tmqCom.insert_data_with_autoCreateTbl(tsql=tdSql,dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix="ctbx",
# ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
# startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
# tdLog.info("restart taosd to ensure that the data falls into the disk")
# tdLog.info("restart taosd to ensure that the data falls into the disk")
# tdSql.query("flush database %s"%(paraDict['dbName']))
return
def tmqCase1(self):
tdLog.printNoPrefix("======== test case 1: ")
paraDict = {'dbName': 'dbt',
@ -95,7 +95,7 @@ class TDTestCase:
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
# update to half tables
# paraDict['rowsPerTbl'] = int(self.rowsPerTbl / 2)
# tmqCom.insert_data_with_autoCreateTbl(tsql=tdSql,dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix="ctbx",
@ -103,16 +103,16 @@ class TDTestCase:
# startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
# tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
# ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
# startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
tdLog.info("create topics from stb1")
topicFromStb1 = 'topic_UpperCase_stb1'
queryString = "select ts, c1, c2 from %s.%s where t4 == 'beijing' or t4 == 'changsha'"%(paraDict['dbName'], paraDict['stbName'])
# queryString = "select ts, c1, c2, t4 from %s.%s where t4 == 'beijing' or t4 == 'changsha'"%(paraDict['dbName'], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicFromStb1, queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
# paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
consumerId = 0
@ -139,18 +139,18 @@ class TDTestCase:
tdLog.info("run select sql from db")
tdSql.query(queryString)
totalRowsFromQuery = tdSql.getRows()
tdLog.info("act consume rows: %d, act query rows: %d"%(totalConsumeRows, totalRowsFromQuery))
if totalConsumeRows != totalRowsFromQuery:
tdLog.exit("tmq consume rows error!")
# tmqCom.checkFileContent(consumerId, queryString)
tdSql.query("drop topic %s"%topicFromStb1)
tdLog.printNoPrefix("======== test case 1 end ...... ")
def tmqCase2(self):
tdLog.printNoPrefix("======== test case 2: ")
tdLog.printNoPrefix("======== test case 2: ")
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
'event': '',
@ -170,15 +170,15 @@ class TDTestCase:
'showMsg': 1,
'showRow': 1,
'snapshot': 0}
paraDict['snapshot'] = self.snapshot
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
# tdLog.info("restart taosd to ensure that the data falls into the disk")
# tdLog.info("restart taosd to ensure that the data falls into the disk")
# tdSql.query("flush database %s"%(paraDict['dbName']))
# update to half tables
# paraDict['startTs'] = paraDict['startTs'] + int(self.rowsPerTbl / 2)
# paraDict['rowsPerTbl'] = int(self.rowsPerTbl / 2)
@ -187,17 +187,17 @@ class TDTestCase:
# startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
# tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
# ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
# startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
tmqCom.initConsumerTable()
tdLog.info("create topics from stb1")
topicFromStb1 = 'topic_UpperCase_stb1'
# queryString = "select ts, c1, c2 from %s.%s where t4 == 'beijing' or t4 == 'changsha'"%(paraDict['dbName'], paraDict['stbName'])
queryString = "select ts, c1, c2, t4 from %s.%s where t4 == 'beijing' or t4 == 'changsha'"%(paraDict['dbName'], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicFromStb1, queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
# paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
consumerId = 1
@ -213,7 +213,7 @@ class TDTestCase:
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
paraDict['startTs'] = paraDict['startTs'] + int(self.rowsPerTbl * 7/10)
paraDict['ctbStartIdx'] = int(paraDict['ctbNum'] * 7/10)
# paraDict["rowsPerTbl"] = 100
@ -221,7 +221,7 @@ class TDTestCase:
tmqCom.insert_data_with_autoCreateTbl(tsql=tdSql,dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict["ctbPrefix"],
ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
tdLog.info("insert process end, and start to check consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
@ -231,7 +231,7 @@ class TDTestCase:
tdSql.query(queryString)
totalRowsFromQuery = tdSql.getRows()
tdLog.info("act consume rows: %d, act query rows: %d, expect consume rows: %d, "%(totalConsumeRows, totalRowsFromQuery, expectrowcnt))
if self.snapshot == 0:
if totalConsumeRows != expectrowcnt / 2:
@ -239,8 +239,8 @@ class TDTestCase:
elif self.snapshot == 1:
if totalConsumeRows != totalRowsFromQuery:
tdLog.exit("tmq consume rows error when snapshot is 1!")
# tmqCom.checkFileContent(consumerId, queryString)
tdSql.query("drop topic %s"%topicFromStb1)
@ -253,13 +253,13 @@ class TDTestCase:
tdLog.printNoPrefix("======== snapshot is 0: only consume from wal")
self.tmqCase1()
self.tmqCase2()
self.prepareTestEnv()
tdLog.printNoPrefix("====================================================================")
tdLog.printNoPrefix("======== snapshot is 1: firstly consume from tsbs, and then from wal")
self.snapshot = 1
self.tmqCase1()
self.tmqCase2()
def stop(self):
tdSql.close()

Some files were not shown because too many files have changed in this diff.