Merge branch '3.0' into feat/idxFix
commit a081c7c154
@@ -79,6 +79,7 @@ def pre_test(){
git pull >/dev/null
git log -5
echo "`date "+%Y%m%d-%H%M%S"` ${JOB_NAME}:${BRANCH_NAME}:${BUILD_ID}:${CHANGE_TARGET}" >>${WKDIR}/jenkins.log
echo "CHANGE_BRANCH:${CHANGE_BRANCH}" >>${WKDIR}/jenkins.log
echo "community log: `git log -5`" >>${WKDIR}/jenkins.log
git fetch origin +refs/pull/${CHANGE_ID}/merge
git checkout -qf FETCH_HEAD
@@ -95,6 +96,7 @@ def pre_test(){
git pull >/dev/null
git log -5
echo "`date "+%Y%m%d-%H%M%S"` ${JOB_NAME}:${BRANCH_NAME}:${BUILD_ID}:${CHANGE_TARGET}" >>${WKDIR}/jenkins.log
echo "CHANGE_BRANCH:${CHANGE_BRANCH}" >>${WKDIR}/jenkins.log
echo "tdinternal log: `git log -5`" >>${WKDIR}/jenkins.log
git fetch origin +refs/pull/${CHANGE_ID}/merge
git checkout -qf FETCH_HEAD
@@ -42,12 +42,13 @@ enum {
typedef enum EStreamType {
STREAM_NORMAL = 1,
STREAM_INVERT,
STREAM_REPROCESS,
STREAM_CLEAR,
STREAM_INVALID,
STREAM_GET_ALL,
STREAM_DELETE,
STREAM_RETRIEVE,
STREAM_PUSH_DATA,
STREAM_PUSH_EMPTY,
} EStreamType;

typedef struct {
@@ -224,6 +224,7 @@ size_t blockDataGetCapacityInRow(const SSDataBlock* pBlock, size_t pageSize);
int32_t blockDataTrimFirstNRows(SSDataBlock* pBlock, size_t n);

int32_t assignOneDataBlock(SSDataBlock* dst, const SSDataBlock* src);
int32_t copyDataBlock(SSDataBlock* dst, const SSDataBlock* src);
SSDataBlock* createOneDataBlock(const SSDataBlock* pDataBlock, bool copyData);
SSDataBlock* createDataBlock();
int32_t blockDataAppendColInfo(SSDataBlock* pBlock, SColumnInfoData* pColInfoData);
@@ -236,6 +237,8 @@ void blockCompressEncode(const SSDataBlock* pBlock, char* data, int32_t*
const char* blockCompressDecode(SSDataBlock* pBlock, int32_t numOfCols, int32_t numOfRows, const char* pData);

void blockDebugShowData(const SArray* dataBlocks, const char* flag);
// for debug
char* dumpBlockData(SSDataBlock* pDataBlock, const char* flag, char** dumpBuf);

int32_t buildSubmitReqFromDataBlock(SSubmitReq** pReq, const SArray* pDataBlocks, STSchema* pTSchema, int32_t vgId,
tb_uid_t suid);
@@ -319,7 +319,7 @@ static FORCE_INLINE int32_t streamTaskInput(SStreamTask* pTask, SStreamQueueItem
return -1;
}
taosWriteQitem(pTask->inputQueue->queue, pSubmitClone);
} else if (pItem->type == STREAM_INPUT__DATA_BLOCK) {
} else if (pItem->type == STREAM_INPUT__DATA_BLOCK || pItem->type == STREAM_INPUT__DATA_RETRIEVE) {
taosWriteQitem(pTask->inputQueue->queue, pItem);
} else if (pItem->type == STREAM_INPUT__CHECKPOINT) {
taosWriteQitem(pTask->inputQueue->queue, pItem);
@@ -32,12 +32,15 @@ typedef struct SUpdateInfo {
int64_t interval;
int64_t watermark;
TSKEY minTS;
SScalableBf* pCloseWinSBF;
} SUpdateInfo;

SUpdateInfo *updateInfoInitP(SInterval* pInterval, int64_t watermark);
SUpdateInfo *updateInfoInit(int64_t interval, int32_t precision, int64_t watermark);
bool updateInfoIsUpdated(SUpdateInfo *pInfo, tb_uid_t tableId, TSKEY ts);
void updateInfoDestroy(SUpdateInfo *pInfo);
void updateInfoAddCloseWindowSBF(SUpdateInfo *pInfo);
void updateInfoDestoryColseWinSBF(SUpdateInfo *pInfo);

#ifdef __cplusplus
}
@@ -617,12 +617,12 @@ int32_t scheduleAsyncQuery(SRequestObj* pRequest, SQueryPlan* pDag, SArray* pNod
.requestId = pRequest->requestId,
.requestObjRefId = pRequest->self};
SSchedulerReq req = {.pConn = &conn,
.pNodeList = pNodeList,
.pDag = pDag,
.sql = pRequest->sqlstr,
.startTs = pRequest->metric.start,
.fp = schdExecCallback,
.cbParam = &res};
.pNodeList = pNodeList,
.pDag = pDag,
.sql = pRequest->sqlstr,
.startTs = pRequest->metric.start,
.fp = schdExecCallback,
.cbParam = &res};

int32_t code = schedulerAsyncExecJob(&req, &pRequest->body.queryJob);
@@ -669,13 +669,13 @@ int32_t scheduleQuery(SRequestObj* pRequest, SQueryPlan* pDag, SArray* pNodeList
.requestId = pRequest->requestId,
.requestObjRefId = pRequest->self};
SSchedulerReq req = {.pConn = &conn,
.pNodeList = pNodeList,
.pDag = pDag,
.sql = pRequest->sqlstr,
.startTs = pRequest->metric.start,
.fp = NULL,
.cbParam = NULL,
.reqKilled = &pRequest->killed};
.pNodeList = pNodeList,
.pDag = pDag,
.sql = pRequest->sqlstr,
.startTs = pRequest->metric.start,
.fp = NULL,
.cbParam = NULL,
.reqKilled = &pRequest->killed};

int32_t code = schedulerExecJob(&req, &pRequest->body.queryJob, &res);
pRequest->body.resInfo.execRes = res.res;
@@ -199,10 +199,7 @@ TAOS_FIELD *taos_fetch_fields(TAOS_RES *res) {
return pResInfo->userFields;
}


TAOS_RES *taos_query(TAOS *taos, const char *sql) {
return taosQueryImpl(taos, sql, false);
}
TAOS_RES *taos_query(TAOS *taos, const char *sql) { return taosQueryImpl(taos, sql, false); }

TAOS_ROW taos_fetch_row(TAOS_RES *res) {
if (res == NULL) {
@@ -593,11 +590,11 @@ int *taos_get_column_data_offset(TAOS_RES *res, int columnIndex) {
return pResInfo->pCol[columnIndex].offset;
}

int taos_validate_sql(TAOS *taos, const char *sql) {
TAOS_RES* pObj = taosQueryImpl(taos, sql, true);
int taos_validate_sql(TAOS *taos, const char *sql) {
TAOS_RES *pObj = taosQueryImpl(taos, sql, true);

int code = taos_errno(pObj);


taos_free_result(pObj);
return code;
}
@@ -884,10 +881,10 @@ void taos_unsubscribe(TAOS_SUB *tsub, int keepProgress) {
|
|||
|
||||
int taos_load_table_info(TAOS *taos, const char *tableNameList) {
|
||||
const int32_t MAX_TABLE_NAME_LENGTH = 12 * 1024 * 1024; // 12MB list
|
||||
int32_t code = 0;
|
||||
SRequestObj *pRequest = NULL;
|
||||
SCatalogReq catalogReq = {0};
|
||||
|
||||
int32_t code = 0;
|
||||
SRequestObj *pRequest = NULL;
|
||||
SCatalogReq catalogReq = {0};
|
||||
|
||||
if (NULL == tableNameList) {
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
@@ -911,26 +908,25 @@ int taos_load_table_info(TAOS *taos, const char *tableNameList) {
|
|||
goto _return;
|
||||
}
|
||||
|
||||
SCatalog* pCtg = NULL;
|
||||
SCatalog *pCtg = NULL;
|
||||
code = catalogGetHandle(pTscObj->pAppInfo->clusterId, &pCtg);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
goto _return;
|
||||
}
|
||||
|
||||
char* sql = "taos_load_table_info";
|
||||
char *sql = "taos_load_table_info";
|
||||
code = buildRequest(pTscObj, sql, strlen(sql), &pRequest);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
terrno = code;
|
||||
goto _return;
|
||||
}
|
||||
|
||||
|
||||
SSyncQueryParam param = {0};
|
||||
tsem_init(¶m.sem, 0, 0);
|
||||
param.pRequest = pRequest;
|
||||
|
||||
SRequestConnInfo conn = {.pTrans = pTscObj->pAppInfo->pTransporter,
|
||||
.requestId = pRequest->requestId,
|
||||
.requestObjRefId = pRequest->self};
|
||||
SRequestConnInfo conn = {
|
||||
.pTrans = pTscObj->pAppInfo->pTransporter, .requestId = pRequest->requestId, .requestObjRefId = pRequest->self};
|
||||
|
||||
conn.mgmtEps = getEpSet_s(&pTscObj->pAppInfo->mgmtEp);
|
||||
|
||||
|
@@ -951,7 +947,6 @@ _return:
|
|||
return code;
|
||||
}
|
||||
|
||||
|
||||
TAOS_STMT *taos_stmt_init(TAOS *taos) {
|
||||
STscObj *pObj = acquireTscObj(*(int64_t *)taos);
|
||||
if (NULL == pObj) {
|
||||
|
|
|
@@ -1164,7 +1164,7 @@ int32_t colInfoDataEnsureCapacity(SColumnInfoData* pColumn, uint32_t numOfRows)
|
|||
|
||||
int32_t blockDataEnsureCapacity(SSDataBlock* pDataBlock, uint32_t numOfRows) {
|
||||
int32_t code = 0;
|
||||
ASSERT(numOfRows > 0);
|
||||
// ASSERT(numOfRows > 0);
|
||||
|
||||
if (numOfRows == 0) {
|
||||
return TSDB_CODE_SUCCESS;
|
||||
|
@@ -1230,6 +1230,32 @@ int32_t assignOneDataBlock(SSDataBlock* dst, const SSDataBlock* src) {
|
|||
return 0;
|
||||
}
|
||||
|
||||
int32_t copyDataBlock(SSDataBlock* dst, const SSDataBlock* src) {
|
||||
ASSERT(src != NULL && dst != NULL);
|
||||
|
||||
blockDataCleanup(dst);
|
||||
int32_t code = blockDataEnsureCapacity(dst, src->info.rows);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
terrno = code;
|
||||
return code;
|
||||
}
|
||||
|
||||
size_t numOfCols = taosArrayGetSize(src->pDataBlock);
|
||||
for (int32_t i = 0; i < numOfCols; ++i) {
|
||||
SColumnInfoData* pDst = taosArrayGet(dst->pDataBlock, i);
|
||||
SColumnInfoData* pSrc = taosArrayGet(src->pDataBlock, i);
|
||||
if (pSrc->pData == NULL) {
|
||||
continue;
|
||||
}
|
||||
|
||||
colDataAssign(pDst, pSrc, src->info.rows, &src->info);
|
||||
}
|
||||
|
||||
dst->info.rows = src->info.rows;
|
||||
dst->info.window = src->info.window;
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
SSDataBlock* createOneDataBlock(const SSDataBlock* pDataBlock, bool copyData) {
|
||||
if (pDataBlock == NULL) {
|
||||
return NULL;
|
||||
|
@@ -1627,6 +1653,57 @@ void blockDebugShowData(const SArray* dataBlocks, const char* flag) {
|
|||
}
|
||||
}
|
||||
|
||||
// for debug
|
||||
char* dumpBlockData(SSDataBlock* pDataBlock, const char* flag, char** pDataBuf) {
|
||||
int32_t size = 2048;
|
||||
*pDataBuf = taosMemoryCalloc(size, 1);
|
||||
char* dumpBuf = *pDataBuf;
|
||||
char pBuf[128] = {0};
|
||||
int32_t colNum = taosArrayGetSize(pDataBlock->pDataBlock);
|
||||
int32_t rows = pDataBlock->info.rows;
|
||||
int32_t len = 0;
|
||||
len += snprintf(dumpBuf + len, size - len, "\n%s |block type %d |child id %d|\n", flag,
|
||||
(int32_t)pDataBlock->info.type, pDataBlock->info.childId);
|
||||
for (int32_t j = 0; j < rows; j++) {
|
||||
len += snprintf(dumpBuf + len, size - len, "%s |", flag);
|
||||
for (int32_t k = 0; k < colNum; k++) {
|
||||
SColumnInfoData* pColInfoData = taosArrayGet(pDataBlock->pDataBlock, k);
|
||||
void* var = POINTER_SHIFT(pColInfoData->pData, j * pColInfoData->info.bytes);
|
||||
if (colDataIsNull(pColInfoData, rows, j, NULL)) {
|
||||
len += snprintf(dumpBuf + len, size - len, " %15s |", "NULL");
|
||||
continue;
|
||||
}
|
||||
switch (pColInfoData->info.type) {
|
||||
case TSDB_DATA_TYPE_TIMESTAMP:
|
||||
formatTimestamp(pBuf, *(uint64_t*)var, TSDB_TIME_PRECISION_MILLI);
|
||||
len += snprintf(dumpBuf + len, size - len, " %25s |", pBuf);
|
||||
break;
|
||||
case TSDB_DATA_TYPE_INT:
|
||||
len += snprintf(dumpBuf + len, size - len, " %15d |", *(int32_t*)var);
|
||||
break;
|
||||
case TSDB_DATA_TYPE_UINT:
|
||||
len += snprintf(dumpBuf + len, size - len, " %15u |", *(uint32_t*)var);
|
||||
break;
|
||||
case TSDB_DATA_TYPE_BIGINT:
|
||||
len += snprintf(dumpBuf + len, size - len, " %15ld |", *(int64_t*)var);
|
||||
break;
|
||||
case TSDB_DATA_TYPE_UBIGINT:
|
||||
len += snprintf(dumpBuf + len, size - len, " %15lu |", *(uint64_t*)var);
|
||||
break;
|
||||
case TSDB_DATA_TYPE_FLOAT:
|
||||
len += snprintf(dumpBuf + len, size - len, " %15f |", *(float*)var);
|
||||
break;
|
||||
case TSDB_DATA_TYPE_DOUBLE:
|
||||
len += snprintf(dumpBuf + len, size - len, " %15lf |", *(double*)var);
|
||||
break;
|
||||
}
|
||||
}
|
||||
len += snprintf(dumpBuf + len, size - len, "\n");
|
||||
}
|
||||
len += snprintf(dumpBuf + len, size - len, "%s |end\n", flag);
|
||||
return dumpBuf;
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief TODO: Assume that the final generated result it less than 3M
|
||||
*
|
||||
|
|
|
@@ -64,6 +64,7 @@ void mndCleanupPrivilege(SMnode *pMnode);

int32_t mndCheckOperPrivilege(SMnode *pMnode, const char *user, EOperType operType);
int32_t mndCheckDbPrivilege(SMnode *pMnode, const char *user, EOperType operType, SDbObj *pDb);
int32_t mndCheckDbPrivilegeByName(SMnode *pMnode, const char *user, EOperType operType, const char *name);
int32_t mndCheckShowPrivilege(SMnode *pMnode, const char *user, int32_t showType);
int32_t mndCheckAlterUserPrivilege(SUserObj *pOperUser, SUserObj *pUser, SAlterUserReq *pAlter);
@@ -33,6 +33,10 @@ SSdbRow *mndStreamActionDecode(SSdbRaw *pRaw);
|
|||
|
||||
int32_t mndDropStreamByDb(SMnode *pMnode, STrans *pTrans, SDbObj *pDb);
|
||||
int32_t mndPersistStream(SMnode *pMnode, STrans *pTrans, SStreamObj *pStream);
|
||||
// for sma
|
||||
// TODO refactor
|
||||
int32_t mndDropStreamTasks(SMnode *pMnode, STrans *pTrans, SStreamObj *pStream);
|
||||
int32_t mndPersistDropStreamLog(SMnode *pMnode, STrans *pTrans, SStreamObj *pStream);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
|
|
|
@@ -431,6 +431,10 @@ static int32_t mndProcessSubscribeReq(SRpcMsg *pMsg) {
|
|||
goto SUBSCRIBE_OVER;
|
||||
}
|
||||
|
||||
if (mndCheckDbPrivilegeByName(pMnode, pMsg->info.conn.user, MND_OPER_READ_DB, pTopic->db) != 0) {
|
||||
goto SUBSCRIBE_OVER;
|
||||
}
|
||||
|
||||
#if 0
|
||||
// ref topic to prevent drop
|
||||
// TODO make topic complete
|
||||
|
|
|
@@ -16,6 +16,7 @@
|
|||
#define _DEFAULT_SOURCE
|
||||
#include "mndPrivilege.h"
|
||||
#include "mndUser.h"
|
||||
#include "mndDb.h"
|
||||
|
||||
int32_t mndInitPrivilege(SMnode *pMnode) { return 0; }
|
||||
|
||||
|
@@ -133,15 +134,7 @@ int32_t mndCheckDbPrivilege(SMnode *pMnode, const char *user, EOperType operType
|
|||
if (pUser->sysInfo) goto _OVER;
|
||||
}
|
||||
|
||||
if (operType == MND_OPER_ALTER_DB) {
|
||||
if (strcmp(pUser->user, pDb->createUser) == 0 && pUser->sysInfo) goto _OVER;
|
||||
}
|
||||
|
||||
if (operType == MND_OPER_DROP_DB) {
|
||||
if (strcmp(pUser->user, pDb->createUser) == 0 && pUser->sysInfo) goto _OVER;
|
||||
}
|
||||
|
||||
if (operType == MND_OPER_COMPACT_DB) {
|
||||
if (operType == MND_OPER_ALTER_DB || operType == MND_OPER_DROP_DB || operType == MND_OPER_COMPACT_DB) {
|
||||
if (strcmp(pUser->user, pDb->createUser) == 0 && pUser->sysInfo) goto _OVER;
|
||||
}
|
||||
|
||||
|
@@ -168,3 +161,12 @@ _OVER:
|
|||
mndReleaseUser(pMnode, pUser);
|
||||
return code;
|
||||
}
|
||||
|
||||
int32_t mndCheckDbPrivilegeByName(SMnode *pMnode, const char *user, EOperType operType, const char *name) {
|
||||
SDbObj *pDb = mndAcquireDb(pMnode, name);
|
||||
if (pDb == NULL) return -1;
|
||||
|
||||
int32_t code = mndCheckDbPrivilege(pMnode, user, operType, pDb);
|
||||
mndReleaseDb(pMnode, pDb);
|
||||
return code;
|
||||
}
|
|
@@ -15,11 +15,11 @@
|
|||
|
||||
#define _DEFAULT_SOURCE
|
||||
#include "mndSma.h"
|
||||
#include "mndPrivilege.h"
|
||||
#include "mndDb.h"
|
||||
#include "mndDnode.h"
|
||||
#include "mndInfoSchema.h"
|
||||
#include "mndMnode.h"
|
||||
#include "mndPrivilege.h"
|
||||
#include "mndScheduler.h"
|
||||
#include "mndShow.h"
|
||||
#include "mndStb.h"
|
||||
|
@@ -857,6 +857,23 @@ static int32_t mndDropSma(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb, SSmaObj *p
|
|||
mDebug("trans:%d, used to drop sma:%s", pTrans->id, pSma->name);
|
||||
mndTransSetDbName(pTrans, pDb->name, NULL);
|
||||
|
||||
SStreamObj *pStream = mndAcquireStream(pMnode, pSma->name);
|
||||
if (pStream == NULL || pStream->smaId != pSma->uid) {
|
||||
sdbRelease(pMnode->pSdb, pStream);
|
||||
goto _OVER;
|
||||
} else {
|
||||
if (mndDropStreamTasks(pMnode, pTrans, pStream) < 0) {
|
||||
mError("stream:%s, failed to drop task since %s", pStream->name, terrstr());
|
||||
sdbRelease(pMnode->pSdb, pStream);
|
||||
goto _OVER;
|
||||
}
|
||||
|
||||
// drop stream
|
||||
if (mndPersistDropStreamLog(pMnode, pTrans, pStream) < 0) {
|
||||
sdbRelease(pMnode->pSdb, pStream);
|
||||
goto _OVER;
|
||||
}
|
||||
}
|
||||
if (mndSetDropSmaRedoLogs(pMnode, pTrans, pSma) != 0) goto _OVER;
|
||||
if (mndSetDropSmaVgroupRedoLogs(pMnode, pTrans, pVgroup) != 0) goto _OVER;
|
||||
if (mndSetDropSmaCommitLogs(pMnode, pTrans, pSma) != 0) goto _OVER;
|
||||
|
|
|
@@ -14,10 +14,10 @@
|
|||
*/
|
||||
|
||||
#include "mndStream.h"
|
||||
#include "mndPrivilege.h"
|
||||
#include "mndDb.h"
|
||||
#include "mndDnode.h"
|
||||
#include "mndMnode.h"
|
||||
#include "mndPrivilege.h"
|
||||
#include "mndScheduler.h"
|
||||
#include "mndShow.h"
|
||||
#include "mndStb.h"
|
||||
|
@@ -437,10 +437,6 @@ static int32_t mndCreateStbForStream(SMnode *pMnode, STrans *pTrans, const SStre
|
|||
goto _OVER;
|
||||
}
|
||||
|
||||
if (mndCheckDbPrivilege(pMnode, user, MND_OPER_WRITE_DB, pDb) != 0) {
|
||||
goto _OVER;
|
||||
}
|
||||
|
||||
int32_t numOfStbs = -1;
|
||||
if (mndGetNumOfStbs(pMnode, pDb->name, &numOfStbs) != 0) {
|
||||
goto _OVER;
|
||||
|
@@ -494,7 +490,7 @@ static int32_t mndPersistTaskDropReq(STrans *pTrans, SStreamTask *pTask) {
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int32_t mndDropStreamTasks(SMnode *pMnode, STrans *pTrans, SStreamObj *pStream) {
|
||||
int32_t mndDropStreamTasks(SMnode *pMnode, STrans *pTrans, SStreamObj *pStream) {
|
||||
int32_t lv = taosArrayGetSize(pStream->tasks);
|
||||
for (int32_t i = 0; i < lv; i++) {
|
||||
SArray *pTasks = taosArrayGetP(pStream->tasks, i);
|
||||
|
@@ -542,19 +538,6 @@ static int32_t mndProcessCreateStreamReq(SRpcMsg *pReq) {
|
|||
goto _OVER;
|
||||
}
|
||||
|
||||
// TODO check read auth for source and write auth for target
|
||||
#if 0
|
||||
pDb = mndAcquireDb(pMnode, createStreamReq.sourceDB);
|
||||
if (pDb == NULL) {
|
||||
terrno = TSDB_CODE_MND_DB_NOT_SELECTED;
|
||||
goto _OVER;
|
||||
}
|
||||
|
||||
if (mndCheckDbPrivilege(pMnode, pReq->info.conn.user, MND_OPER_WRITE_DB, pDb) != 0) {
|
||||
goto _OVER;
|
||||
}
|
||||
#endif
|
||||
|
||||
// build stream obj from request
|
||||
SStreamObj streamObj = {0};
|
||||
if (mndBuildStreamObjFromCreateReq(pMnode, &streamObj, &createStreamReq) < 0) {
|
||||
|
@@ -592,6 +575,16 @@ static int32_t mndProcessCreateStreamReq(SRpcMsg *pReq) {
|
|||
goto _OVER;
|
||||
}
|
||||
|
||||
if (mndCheckDbPrivilegeByName(pMnode, pReq->info.conn.user, MND_OPER_READ_DB, streamObj.sourceDb) != 0) {
|
||||
mndTransDrop(pTrans);
|
||||
goto _OVER;
|
||||
}
|
||||
|
||||
if (mndCheckDbPrivilegeByName(pMnode, pReq->info.conn.user, MND_OPER_WRITE_DB, streamObj.targetDb) != 0) {
|
||||
mndTransDrop(pTrans);
|
||||
goto _OVER;
|
||||
}
|
||||
|
||||
// execute creation
|
||||
if (mndTransPrepare(pMnode, pTrans) != 0) {
|
||||
mError("trans:%d, failed to prepare since %s", pTrans->id, terrstr());
|
||||
|
@@ -641,13 +634,9 @@ static int32_t mndProcessDropStreamReq(SRpcMsg *pReq) {
|
|||
}
|
||||
}
|
||||
|
||||
#if 0
|
||||
// todo check auth
|
||||
pUser = mndAcquireUser(pMnode, pReq->info.conn.user);
|
||||
if (pUser == NULL) {
|
||||
goto DROP_STREAM_OVER;
|
||||
if (mndCheckDbPrivilegeByName(pMnode, pReq->info.conn.user, MND_OPER_WRITE_DB, pStream->targetDb) != 0) {
|
||||
return -1;
|
||||
}
|
||||
#endif
|
||||
|
||||
STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_NOTHING, pReq);
|
||||
if (pTrans == NULL) {
|
||||
|
|
|
@@ -14,12 +14,12 @@
|
|||
*/
|
||||
|
||||
#include "mndTopic.h"
|
||||
#include "mndPrivilege.h"
|
||||
#include "mndConsumer.h"
|
||||
#include "mndDb.h"
|
||||
#include "mndDnode.h"
|
||||
#include "mndMnode.h"
|
||||
#include "mndOffset.h"
|
||||
#include "mndPrivilege.h"
|
||||
#include "mndShow.h"
|
||||
#include "mndStb.h"
|
||||
#include "mndSubscribe.h"
|
||||
|
@@ -401,6 +401,10 @@ static int32_t mndCreateTopic(SMnode *pMnode, SRpcMsg *pReq, SCMCreateTopicReq *
|
|||
}
|
||||
} else if (pCreate->subType == TOPIC_SUB_TYPE__TABLE) {
|
||||
SStbObj *pStb = mndAcquireStb(pMnode, pCreate->subStbName);
|
||||
if (pStb == NULL) {
|
||||
terrno = TSDB_CODE_MND_STB_NOT_EXIST;
|
||||
return -1;
|
||||
}
|
||||
topicObj.stbUid = pStb->uid;
|
||||
}
|
||||
/*} else if (pCreate->subType == TOPIC_SUB_TYPE__DB) {*/
|
||||
|
@@ -480,7 +484,7 @@ static int32_t mndProcessCreateTopicReq(SRpcMsg *pReq) {
|
|||
goto _OVER;
|
||||
}
|
||||
|
||||
if (mndCheckDbPrivilege(pMnode, pReq->info.conn.user, MND_OPER_WRITE_DB, pDb) != 0) {
|
||||
if (mndCheckDbPrivilege(pMnode, pReq->info.conn.user, MND_OPER_READ_DB, pDb) != 0) {
|
||||
goto _OVER;
|
||||
}
|
||||
|
||||
|
@@ -571,6 +575,10 @@ static int32_t mndProcessDropTopicReq(SRpcMsg *pReq) {
|
|||
}
|
||||
#endif
|
||||
|
||||
if (mndCheckDbPrivilegeByName(pMnode, pReq->info.conn.user, MND_OPER_READ_DB, pTopic->db) != 0) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_DB_INSIDE, pReq);
|
||||
mndTransSetDbName(pTrans, pTopic->db, NULL);
|
||||
if (pTrans == NULL) {
|
||||
|
|
|
@@ -257,7 +257,7 @@ int32_t sndProcessSMsg(SSnode *pSnode, SRpcMsg *pMsg) {
case TDMT_STREAM_TASK_RECOVER_RSP:
return sndProcessTaskRecoverRsp(pSnode, pMsg);
case TDMT_STREAM_RETRIEVE_RSP:
return sndProcessTaskRecoverRsp(pSnode, pMsg);
return sndProcessTaskRetrieveRsp(pSnode, pMsg);
default:
ASSERT(0);
}
@@ -116,18 +116,18 @@ typedef void *tsdbReaderT;
|
|||
#define BLOCK_LOAD_TABLE_SEQ_ORDER 2
|
||||
#define BLOCK_LOAD_TABLE_RR_ORDER 3
|
||||
|
||||
int32_t tsdbSetTableList(tsdbReaderT reader, SArray* tableList);
|
||||
tsdbReaderT tsdbReaderOpen(SVnode *pVnode, SQueryTableDataCond *pCond, SArray *tableList, uint64_t qId,
|
||||
uint64_t taskId);
|
||||
tsdbReaderT tsdbQueryCacheLast(SVnode *pVnode, SQueryTableDataCond *pCond, STableListInfo *groupList, uint64_t qId,
|
||||
void *pMemRef);
|
||||
int32_t tsdbGetFileBlocksDistInfo(tsdbReaderT *pReader, STableBlockDistInfo *pTableBlockInfo);
|
||||
bool isTsdbCacheLastRow(tsdbReaderT *pReader);
|
||||
int32_t tsdbGetAllTableList(SMeta *pMeta, uint64_t uid, SArray *list);
|
||||
int32_t tsdbGetCtbIdList(SMeta *pMeta, int64_t suid, SArray *list);
|
||||
void *tsdbGetIdx(SMeta *pMeta);
|
||||
void *tsdbGetIvtIdx(SMeta *pMeta);
|
||||
int64_t tsdbGetNumOfRowsInMemTable(tsdbReaderT *pHandle);
|
||||
int32_t tsdbSetTableList(tsdbReaderT reader, SArray *tableList);
|
||||
tsdbReaderT tsdbReaderOpen(SVnode *pVnode, SQueryTableDataCond *pCond, SArray *tableList, uint64_t qId,
|
||||
uint64_t taskId);
|
||||
tsdbReaderT tsdbQueryCacheLast(SVnode *pVnode, SQueryTableDataCond *pCond, STableListInfo *groupList, uint64_t qId,
|
||||
void *pMemRef);
|
||||
int32_t tsdbGetFileBlocksDistInfo(tsdbReaderT *pReader, STableBlockDistInfo *pTableBlockInfo);
|
||||
bool isTsdbCacheLastRow(tsdbReaderT *pReader);
|
||||
int32_t tsdbGetAllTableList(SMeta *pMeta, uint64_t uid, SArray *list);
|
||||
int32_t tsdbGetCtbIdList(SMeta *pMeta, int64_t suid, SArray *list);
|
||||
void *tsdbGetIdx(SMeta *pMeta);
|
||||
void *tsdbGetIvtIdx(SMeta *pMeta);
|
||||
int64_t tsdbGetNumOfRowsInMemTable(tsdbReaderT *pHandle);
|
||||
|
||||
bool tsdbNextDataBlock(tsdbReaderT pTsdbReadHandle);
|
||||
void tsdbRetrieveDataBlockInfo(tsdbReaderT *pTsdbReadHandle, SDataBlockInfo *pBlockInfo);
|
||||
|
@@ -150,8 +150,7 @@ int32_t tqReadHandleRemoveTbUidList(STqReadHandle *pHandle, const SArray *tbUidL
|
|||
int32_t tqReadHandleSetMsg(STqReadHandle *pHandle, SSubmitReq *pMsg, int64_t ver);
|
||||
bool tqNextDataBlock(STqReadHandle *pHandle);
|
||||
bool tqNextDataBlockFilterOut(STqReadHandle *pHandle, SHashObj *filterOutUids);
|
||||
int32_t tqRetrieveDataBlock(SSDataBlock* pBlock, STqReadHandle *pHandle, uint64_t *pGroupId, uint64_t *pUid,
|
||||
int32_t *pNumOfRows);
|
||||
int32_t tqRetrieveDataBlock(SSDataBlock *pBlock, STqReadHandle *pHandle);
|
||||
|
||||
// sma
|
||||
int32_t smaGetTSmaDays(SVnodeCfg *pCfg, void *pCont, uint32_t contLen, int32_t *days);
|
||||
|
@@ -196,6 +195,7 @@ struct SVnodeCfg {
|
|||
typedef struct {
|
||||
TSKEY lastKey;
|
||||
uint64_t uid;
|
||||
uint64_t groupId;
|
||||
} STableKeyInfo;
|
||||
|
||||
struct SMetaEntry {
|
||||
|
|
|
@@ -112,7 +112,7 @@ int32_t tqDataExec(STQ* pTq, STqExecHandle* pExec, SSubmitReq* pReq, SMqDataBlkR
|
|||
tqReadHandleSetMsg(pReader, pReq, 0);
|
||||
while (tqNextDataBlock(pReader)) {
|
||||
SSDataBlock block = {0};
|
||||
if (tqRetrieveDataBlock(&block, pReader, &block.info.groupId, &block.info.uid, &block.info.rows) < 0) {
|
||||
if (tqRetrieveDataBlock(&block, pReader) < 0) {
|
||||
if (terrno == TSDB_CODE_TQ_TABLE_SCHEMA_NOT_FOUND) continue;
|
||||
ASSERT(0);
|
||||
}
|
||||
|
@@ -129,7 +129,7 @@ int32_t tqDataExec(STQ* pTq, STqExecHandle* pExec, SSubmitReq* pReq, SMqDataBlkR
|
|||
tqReadHandleSetMsg(pReader, pReq, 0);
|
||||
while (tqNextDataBlockFilterOut(pReader, pExec->execDb.pFilterOutTbUid)) {
|
||||
SSDataBlock block = {0};
|
||||
if (tqRetrieveDataBlock(&block, pReader, &block.info.groupId, &block.info.uid, &block.info.rows) < 0) {
|
||||
if (tqRetrieveDataBlock(&block, pReader) < 0) {
|
||||
if (terrno == TSDB_CODE_TQ_TABLE_SCHEMA_NOT_FOUND) continue;
|
||||
ASSERT(0);
|
||||
}
|
||||
|
|
|
@@ -109,11 +109,15 @@ int32_t tqReadHandleSetMsg(STqReadHandle* pReadHandle, SSubmitReq* pMsg, int64_t
|
|||
}
|
||||
|
||||
bool tqNextDataBlock(STqReadHandle* pHandle) {
|
||||
if (pHandle->pMsg == NULL) return false;
|
||||
while (1) {
|
||||
if (tGetSubmitMsgNext(&pHandle->msgIter, &pHandle->pBlock) < 0) {
|
||||
return false;
|
||||
}
|
||||
if (pHandle->pBlock == NULL) return false;
|
||||
if (pHandle->pBlock == NULL) {
|
||||
pHandle->pMsg = NULL;
|
||||
return false;
|
||||
}
|
||||
|
||||
if (pHandle->tbIdHash == NULL) {
|
||||
return true;
|
||||
|
@@ -142,10 +146,7 @@ bool tqNextDataBlockFilterOut(STqReadHandle* pHandle, SHashObj* filterOutUids) {
|
|||
return false;
|
||||
}
|
||||
|
||||
int32_t tqRetrieveDataBlock(SSDataBlock* pBlock, STqReadHandle* pHandle, uint64_t* pGroupId, uint64_t* pUid,
|
||||
int32_t* pNumOfRows) {
|
||||
*pUid = 0;
|
||||
|
||||
int32_t tqRetrieveDataBlock(SSDataBlock* pBlock, STqReadHandle* pHandle) {
|
||||
// TODO: cache multiple schema
|
||||
int32_t sversion = htonl(pHandle->pBlock->sversion);
|
||||
if (pHandle->cachedSchemaSuid == 0 || pHandle->cachedSchemaVer != sversion ||
|
||||
|
@@ -176,7 +177,6 @@ int32_t tqRetrieveDataBlock(SSDataBlock* pBlock, STqReadHandle* pHandle, uint64_
|
|||
STSchema* pTschema = pHandle->pSchema;
|
||||
SSchemaWrapper* pSchemaWrapper = pHandle->pSchemaWrapper;
|
||||
|
||||
*pNumOfRows = pHandle->msgIter.numOfRows;
|
||||
int32_t colNumNeed = taosArrayGetSize(pHandle->pColIdList);
|
||||
|
||||
if (colNumNeed == 0) {
|
||||
|
@@ -217,22 +217,22 @@ int32_t tqRetrieveDataBlock(SSDataBlock* pBlock, STqReadHandle* pHandle, uint64_
|
|||
}
|
||||
}
|
||||
|
||||
if (blockDataEnsureCapacity(pBlock, *pNumOfRows) < 0) {
|
||||
if (blockDataEnsureCapacity(pBlock, pHandle->msgIter.numOfRows) < 0) {
|
||||
goto FAIL;
|
||||
}
|
||||
|
||||
int32_t colActual = blockDataGetNumOfCols(pBlock);
|
||||
|
||||
// TODO in stream shuffle case, fetch groupId
|
||||
*pGroupId = 0;
|
||||
|
||||
STSRowIter iter = {0};
|
||||
tdSTSRowIterInit(&iter, pTschema);
|
||||
STSRow* row;
|
||||
int32_t curRow = 0;
|
||||
|
||||
tInitSubmitBlkIter(&pHandle->msgIter, pHandle->pBlock, &pHandle->blkIter);
|
||||
*pUid = pHandle->msgIter.uid; // set the uid of table for submit block
|
||||
|
||||
pBlock->info.groupId = 0;
|
||||
pBlock->info.uid = pHandle->msgIter.uid; // set the uid of table for submit block
|
||||
pBlock->info.rows = pHandle->msgIter.numOfRows;
|
||||
|
||||
while ((row = tGetSubmitBlkNext(&pHandle->blkIter)) != NULL) {
|
||||
tdSTSRowIterReset(&iter, row);
|
||||
|
|
|
@@ -18,57 +18,87 @@
|
|||
SSubmitReq* tdBlockToSubmit(const SArray* pBlocks, const STSchema* pTSchema, bool createTb, int64_t suid,
|
||||
const char* stbFullName, int32_t vgId) {
|
||||
SSubmitReq* ret = NULL;
|
||||
SArray* schemaReqs = NULL;
|
||||
SArray* schemaReqSz = NULL;
|
||||
SArray* tagArray = taosArrayInit(1, sizeof(STagVal));
|
||||
if (!tagArray) {
|
||||
terrno = TSDB_CODE_OUT_OF_MEMORY;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
// cal size
|
||||
int32_t cap = sizeof(SSubmitReq);
|
||||
int32_t sz = taosArrayGetSize(pBlocks);
|
||||
for (int32_t i = 0; i < sz; i++) {
|
||||
SSDataBlock* pDataBlock = taosArrayGet(pBlocks, i);
|
||||
int32_t rows = pDataBlock->info.rows;
|
||||
// TODO min
|
||||
int32_t rowSize = pDataBlock->info.rowSize;
|
||||
int32_t maxLen = TD_ROW_MAX_BYTES_FROM_SCHEMA(pTSchema);
|
||||
int32_t schemaLen = 0;
|
||||
|
||||
if (createTb) {
|
||||
SVCreateTbReq createTbReq = {0};
|
||||
char* cname = buildCtbNameByGroupId(stbFullName, pDataBlock->info.groupId);
|
||||
createTbReq.name = cname;
|
||||
createTbReq.flags = 0;
|
||||
createTbReq.type = TSDB_CHILD_TABLE;
|
||||
createTbReq.ctb.suid = suid;
|
||||
|
||||
STagVal tagVal = {
|
||||
.cid = taosArrayGetSize(pDataBlock->pDataBlock) + 1,
|
||||
.type = TSDB_DATA_TYPE_UBIGINT,
|
||||
.i64 = (int64_t)pDataBlock->info.groupId,
|
||||
if (createTb) {
|
||||
schemaReqs = taosArrayInit(sz, sizeof(void*));
|
||||
schemaReqSz = taosArrayInit(sz, sizeof(int32_t));
|
||||
for (int32_t i = 0; i < sz; i++) {
|
||||
SSDataBlock* pDataBlock = taosArrayGet(pBlocks, i);
|
||||
STagVal tagVal = {
|
||||
.cid = taosArrayGetSize(pDataBlock->pDataBlock) + 1,
|
||||
.type = TSDB_DATA_TYPE_UBIGINT,
|
||||
.i64 = (int64_t)pDataBlock->info.groupId,
|
||||
};
|
||||
STag* pTag = NULL;
|
||||
taosArrayClear(tagArray);
|
||||
taosArrayPush(tagArray, &tagVal);
|
||||
tTagNew(tagArray, 1, false, &pTag);
|
||||
if (pTag == NULL) {
|
||||
tdDestroySVCreateTbReq(&createTbReq);
|
||||
terrno = TSDB_CODE_OUT_OF_MEMORY;
|
||||
taosArrayDestroy(tagArray);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
SVCreateTbReq createTbReq = {0};
|
||||
createTbReq.name = buildCtbNameByGroupId(stbFullName, pDataBlock->info.groupId);
|
||||
createTbReq.flags = 0;
|
||||
createTbReq.type = TSDB_CHILD_TABLE;
|
||||
createTbReq.ctb.suid = suid;
|
||||
createTbReq.ctb.pTag = (uint8_t*)pTag;
|
||||
|
||||
int32_t code;
|
||||
int32_t schemaLen;
|
||||
tEncodeSize(tEncodeSVCreateTbReq, &createTbReq, schemaLen, code);
|
||||
|
||||
tdDestroySVCreateTbReq(&createTbReq);
|
||||
if (code < 0) {
|
||||
tdDestroySVCreateTbReq(&createTbReq);
|
||||
taosArrayDestroy(tagArray);
|
||||
taosMemoryFreeClear(ret);
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
|
||||
void* schemaStr = taosMemoryMalloc(schemaLen);
|
||||
if (schemaStr == NULL) {
|
||||
terrno = TSDB_CODE_OUT_OF_MEMORY;
|
||||
return NULL;
|
||||
}
|
||||
taosArrayPush(schemaReqs, &schemaStr);
|
||||
taosArrayPush(schemaReqSz, &schemaLen);
|
||||
|
||||
SEncoder encoder = {0};
|
||||
tEncoderInit(&encoder, schemaStr, schemaLen);
|
||||
code = tEncodeSVCreateTbReq(&encoder, &createTbReq);
|
||||
if (code < 0) {
|
||||
terrno = TSDB_CODE_OUT_OF_MEMORY;
|
||||
return NULL;
|
||||
}
|
||||
tEncoderClear(&encoder);
|
||||
tdDestroySVCreateTbReq(&createTbReq);
|
||||
}
|
||||
}
|
||||
taosArrayDestroy(tagArray);
|
||||
|
||||
// cal size
|
||||
int32_t cap = sizeof(SSubmitReq);
|
||||
for (int32_t i = 0; i < sz; i++) {
|
||||
SSDataBlock* pDataBlock = taosArrayGet(pBlocks, i);
|
||||
int32_t rows = pDataBlock->info.rows;
|
||||
// TODO min
|
||||
int32_t rowSize = pDataBlock->info.rowSize;
|
||||
int32_t maxLen = TD_ROW_MAX_BYTES_FROM_SCHEMA(pTSchema);
|
||||
|
||||
int32_t schemaLen = 0;
|
||||
if (createTb) {
|
||||
schemaLen = *(int32_t*)taosArrayGet(schemaReqSz, i);
|
||||
}
|
||||
cap += sizeof(SSubmitBlk) + schemaLen + rows * maxLen;
|
||||
}
|
||||
|
||||
|
@@ -99,55 +129,13 @@ SSubmitReq* tdBlockToSubmit(const SArray* pBlocks, const STSchema* pTSchema, boo
|
|||
|
||||
int32_t schemaLen = 0;
|
||||
if (createTb) {
|
||||
SVCreateTbReq createTbReq = {0};
|
||||
char* cname = buildCtbNameByGroupId(stbFullName, pDataBlock->info.groupId);
|
||||
createTbReq.name = cname;
|
||||
createTbReq.flags = 0;
|
||||
createTbReq.type = TSDB_CHILD_TABLE;
|
||||
createTbReq.ctb.suid = suid;
|
||||
|
||||
STagVal tagVal = {
|
||||
.cid = taosArrayGetSize(pDataBlock->pDataBlock) + 1,
|
||||
.type = TSDB_DATA_TYPE_UBIGINT,
|
||||
.i64 = (int64_t)pDataBlock->info.groupId,
|
||||
};
|
||||
taosArrayClear(tagArray);
|
||||
taosArrayPush(tagArray, &tagVal);
|
||||
STag* pTag = NULL;
|
||||
tTagNew(tagArray, 1, false, &pTag);
|
||||
if (pTag == NULL) {
|
||||
tdDestroySVCreateTbReq(&createTbReq);
|
||||
taosArrayDestroy(tagArray);
|
||||
taosMemoryFreeClear(ret);
|
||||
return NULL;
|
||||
}
|
||||
createTbReq.ctb.pTag = (uint8_t*)pTag;
|
||||
|
||||
int32_t code;
|
||||
tEncodeSize(tEncodeSVCreateTbReq, &createTbReq, schemaLen, code);
|
||||
if (code < 0) {
|
||||
tdDestroySVCreateTbReq(&createTbReq);
|
||||
taosArrayDestroy(tagArray);
|
||||
taosMemoryFreeClear(ret);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
SEncoder encoder = {0};
|
||||
tEncoderInit(&encoder, blkSchema, schemaLen);
|
||||
code = tEncodeSVCreateTbReq(&encoder, &createTbReq);
|
||||
tEncoderClear(&encoder);
|
||||
tdDestroySVCreateTbReq(&createTbReq);
|
||||
|
||||
if (code < 0) {
|
||||
taosArrayDestroy(tagArray);
|
||||
taosMemoryFreeClear(ret);
|
||||
return NULL;
|
||||
}
|
||||
schemaLen = *(int32_t*)taosArrayGet(schemaReqSz, i);
|
||||
void* schemaStr = taosArrayGetP(schemaReqs, i);
|
||||
memcpy(blkSchema, schemaStr, schemaLen);
|
||||
}
|
||||
blkHead->schemaLen = htonl(schemaLen);
|
||||
|
||||
STSRow* rowData = POINTER_SHIFT(blkSchema, schemaLen);
|
||||
|
||||
for (int32_t j = 0; j < rows; j++) {
|
||||
SRowBuilder rb = {0};
|
||||
tdSRowInit(&rb, pTSchema->version);
|
||||
|
@@ -175,7 +163,10 @@ SSubmitReq* tdBlockToSubmit(const SArray* pBlocks, const STSchema* pTSchema, boo
|
|||
}
|
||||
|
||||
ret->length = htonl(ret->length);
|
||||
taosArrayDestroy(tagArray);
|
||||
|
||||
if (schemaReqs) taosArrayDestroyP(schemaReqs, taosMemoryFree);
|
||||
taosArrayDestroy(schemaReqSz);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
|
|
@@ -2852,7 +2852,7 @@ int32_t tsdbGetAllTableList(SMeta* pMeta, uint64_t uid, SArray* list) {
break;
}

STableKeyInfo info = {.lastKey = TSKEY_INITIAL_VAL, uid = id};
STableKeyInfo info = {.lastKey = TSKEY_INITIAL_VAL, uid = id, .groupId = 0};
taosArrayPush(list, &info);
}
@@ -293,6 +293,7 @@ typedef enum EStreamScanMode {
STREAM_SCAN_FROM_RES,
STREAM_SCAN_FROM_UPDATERES,
STREAM_SCAN_FROM_DATAREADER,
STREAM_SCAN_FROM_DATAREADER_RETRIEVE,
} EStreamScanMode;

typedef struct SCatchSupporter {
@@ -348,7 +349,9 @@ typedef struct SStreamBlockScanInfo {
|
|||
SArray* childIds;
|
||||
SessionWindowSupporter sessionSup;
|
||||
bool assignBlockUid; // assign block uid to groupId, temporarily used for generating rollup SMA.
|
||||
int32_t scanWinIndex;
|
||||
int32_t scanWinIndex; // for state operator
|
||||
int32_t pullDataResIndex;
|
||||
SSDataBlock* pPullDataRes; // pull data SSDataBlock
|
||||
} SStreamBlockScanInfo;
|
||||
|
||||
typedef struct SSysTableScanInfo {
|
||||
|
@@ -427,8 +430,13 @@ typedef struct SStreamFinalIntervalOperatorInfo {
|
|||
STimeWindowAggSupp twAggSup;
|
||||
SArray* pChildren;
|
||||
SSDataBlock* pUpdateRes;
|
||||
bool returnUpdate;
|
||||
SPhysiNode* pPhyNode; // create new child
|
||||
bool isFinal;
|
||||
SHashObj* pPullDataMap;
|
||||
SArray* pPullWins; // SPullWindowInfo
|
||||
int32_t pullIndex;
|
||||
SSDataBlock* pPullDataRes;
|
||||
} SStreamFinalIntervalOperatorInfo;
|
||||
|
||||
typedef struct SAggOperatorInfo {
|
||||
|
@@ -851,6 +859,7 @@ SOperatorInfo* createTableMergeScanOperatorInfo(STableScanPhysiNode* pTableScanN
|
|||
void copyUpdateDataBlock(SSDataBlock* pDest, SSDataBlock* pSource, int32_t tsColIndex);
|
||||
|
||||
int32_t generateGroupIdMap(STableListInfo* pTableListInfo, SReadHandle* pHandle, SNodeList* groupKey);
|
||||
SSDataBlock* createPullDataBlock();
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
|
|
|
@@ -313,7 +313,7 @@ int32_t getTableList(void* metaHandle, SScanPhysiNode* pScanNode, STableListInfo
}

for (int i = 0; i < taosArrayGetSize(res); i++) {
STableKeyInfo info = {.lastKey = TSKEY_INITIAL_VAL, .uid = *(uint64_t*)taosArrayGet(res, i)};
STableKeyInfo info = {.lastKey = TSKEY_INITIAL_VAL, .uid = *(uint64_t*)taosArrayGet(res, i), .groupId = 0};
taosArrayPush(pListInfo->pTableList, &info);
}
taosArrayDestroy(res);
@@ -333,8 +333,8 @@ int32_t getTableList(void* metaHandle, SScanPhysiNode* pScanNode, STableListInfo
|
|||
i++;
|
||||
}
|
||||
}
|
||||
} else { // Create one table group.
|
||||
STableKeyInfo info = {.lastKey = 0, .uid = tableUid};
|
||||
}else { // Create one table group.
|
||||
STableKeyInfo info = {.lastKey = 0, .uid = tableUid, .groupId = 0};
|
||||
taosArrayPush(pListInfo->pTableList, &info);
|
||||
}
|
||||
pListInfo->pGroupList = taosArrayInit(4, POINTER_BYTES);
|
||||
|
|
|
@@ -4028,6 +4028,7 @@ int32_t generateGroupIdMap(STableListInfo* pTableListInfo, SReadHandle* pHandle,
|
|||
int32_t len = (int32_t)(pStart - (char*)keyBuf);
|
||||
uint64_t groupId = calcGroupId(keyBuf, len);
|
||||
taosHashPut(pTableListInfo->map, &(info->uid), sizeof(uint64_t), &groupId, sizeof(uint64_t));
|
||||
info->groupId = groupId;
|
||||
groupNum++;
|
||||
|
||||
nodesClearList(groupNew);
|
||||
|
@@ -4126,7 +4127,7 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
return NULL;
}
} else { // Create one table group.
STableKeyInfo info = {.lastKey = 0, .uid = pBlockNode->uid};
STableKeyInfo info = {.lastKey = 0, .uid = pBlockNode->uid, .groupId = 0};
taosArrayPush(pTableListInfo->pTableList, &info);
}
@@ -507,20 +507,21 @@ static SSDataBlock* doTableScan(SOperatorInfo* pOperator) {
|
|||
STableScanInfo* pInfo = pOperator->info;
|
||||
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
|
||||
|
||||
if(pInfo->currentGroupId == -1){
|
||||
if (pInfo->currentGroupId == -1) {
|
||||
pInfo->currentGroupId++;
|
||||
if (pInfo->currentGroupId >= taosArrayGetSize(pTaskInfo->tableqinfoList.pGroupList)) {
|
||||
setTaskStatus(pTaskInfo, TASK_COMPLETED);
|
||||
return NULL;
|
||||
}
|
||||
SArray *tableList = taosArrayGetP(pTaskInfo->tableqinfoList.pGroupList, pInfo->currentGroupId);
|
||||
SArray* tableList = taosArrayGetP(pTaskInfo->tableqinfoList.pGroupList, pInfo->currentGroupId);
|
||||
tsdbCleanupReadHandle(pInfo->dataReader);
|
||||
tsdbReaderT* pReader = tsdbReaderOpen(pInfo->readHandle.vnode, &pInfo->cond, tableList, pInfo->queryId, pInfo->taskId);
|
||||
tsdbReaderT* pReader =
|
||||
tsdbReaderOpen(pInfo->readHandle.vnode, &pInfo->cond, tableList, pInfo->queryId, pInfo->taskId);
|
||||
pInfo->dataReader = pReader;
|
||||
}
|
||||
|
||||
SSDataBlock* result = doTableScanGroup(pOperator);
|
||||
if(result){
|
||||
if (result) {
|
||||
return result;
|
||||
}
|
||||
|
||||
|
@@ -530,7 +531,7 @@ static SSDataBlock* doTableScan(SOperatorInfo* pOperator) {
|
|||
return NULL;
|
||||
}
|
||||
|
||||
SArray *tableList = taosArrayGetP(pTaskInfo->tableqinfoList.pGroupList, pInfo->currentGroupId);
|
||||
SArray* tableList = taosArrayGetP(pTaskInfo->tableqinfoList.pGroupList, pInfo->currentGroupId);
|
||||
tsdbSetTableList(pInfo->dataReader, tableList);
|
||||
|
||||
tsdbResetReadHandle(pInfo->dataReader, &pInfo->cond, 0);
|
||||
|
@@ -538,7 +539,7 @@ static SSDataBlock* doTableScan(SOperatorInfo* pOperator) {
|
|||
pInfo->scanTimes = 0;
|
||||
|
||||
result = doTableScanGroup(pOperator);
|
||||
if(result){
|
||||
if (result) {
|
||||
return result;
|
||||
}
|
||||
|
||||
|
@@ -776,15 +777,14 @@ static bool isStateWindow(SStreamBlockScanInfo* pInfo) {
|
|||
return pInfo->sessionSup.parentType == QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE;
|
||||
}
|
||||
|
||||
static bool prepareDataScan(SStreamBlockScanInfo* pInfo) {
|
||||
SSDataBlock* pSDB = pInfo->pUpdateRes;
|
||||
STimeWindow win = {
|
||||
.skey = INT64_MIN,
|
||||
.ekey = INT64_MAX,
|
||||
static bool prepareDataScan(SStreamBlockScanInfo* pInfo, SSDataBlock* pSDB, int32_t tsColIndex, int32_t* pRowIndex) {
|
||||
STimeWindow win = {
|
||||
.skey = INT64_MIN,
|
||||
.ekey = INT64_MAX,
|
||||
};
|
||||
bool needRead = false;
|
||||
if (!isStateWindow(pInfo) && pInfo->updateResIndex < pSDB->info.rows) {
|
||||
SColumnInfoData* pColDataInfo = taosArrayGet(pSDB->pDataBlock, pInfo->primaryTsIndex);
|
||||
if (!isStateWindow(pInfo) && (*pRowIndex) < pSDB->info.rows) {
|
||||
SColumnInfoData* pColDataInfo = taosArrayGet(pSDB->pDataBlock, tsColIndex);
|
||||
TSKEY* tsCols = (TSKEY*)pColDataInfo->pData;
|
||||
SResultRowInfo dumyInfo;
|
||||
dumyInfo.cur.pageId = -1;
|
||||
|
@@ -793,15 +793,14 @@ static bool prepareDataScan(SStreamBlockScanInfo* pInfo) {
|
|||
int64_t gap = pInfo->sessionSup.gap;
|
||||
int32_t winIndex = 0;
|
||||
SResultWindowInfo* pCurWin =
|
||||
getSessionTimeWindow(pAggSup, tsCols[pInfo->updateResIndex], INT64_MIN, pSDB->info.groupId, gap, &winIndex);
|
||||
getSessionTimeWindow(pAggSup, tsCols[(*pRowIndex)], INT64_MIN, pSDB->info.groupId, gap, &winIndex);
|
||||
win = pCurWin->win;
|
||||
pInfo->updateResIndex +=
|
||||
updateSessionWindowInfo(pCurWin, tsCols, NULL, pSDB->info.rows, pInfo->updateResIndex, gap, NULL);
|
||||
(*pRowIndex) += updateSessionWindowInfo(pCurWin, tsCols, NULL, pSDB->info.rows, (*pRowIndex), gap, NULL);
|
||||
} else {
|
||||
win = getActiveTimeWindow(NULL, &dumyInfo, tsCols[pInfo->updateResIndex], &pInfo->interval,
|
||||
pInfo->interval.precision, NULL);
|
||||
pInfo->updateResIndex += getNumOfRowsInTimeWindow(&pSDB->info, tsCols, pInfo->updateResIndex, win.ekey,
|
||||
binarySearchForKey, NULL, TSDB_ORDER_ASC);
|
||||
win =
|
||||
getActiveTimeWindow(NULL, &dumyInfo, tsCols[(*pRowIndex)], &pInfo->interval, pInfo->interval.precision, NULL);
|
||||
(*pRowIndex) += getNumOfRowsInTimeWindow(&pSDB->info, tsCols, (*pRowIndex), win.ekey, binarySearchForKey, NULL,
|
||||
TSDB_ORDER_ASC);
|
||||
}
|
||||
needRead = true;
|
||||
} else if (isStateWindow(pInfo)) {
|
||||
|
@@ -822,7 +821,10 @@ static bool prepareDataScan(SStreamBlockScanInfo* pInfo) {
|
|||
STableScanInfo* pTableScanInfo = pInfo->pSnapshotReadOp->info;
|
||||
pTableScanInfo->cond.twindows[0] = win;
|
||||
pTableScanInfo->curTWinIdx = 0;
|
||||
// tsdbResetReadHandle(pTableScanInfo->dataReader, &pTableScanInfo->cond, 0);
|
||||
// tsdbResetReadHandle(pTableScanInfo->dataReader, &pTableScanInfo->cond, 0);
|
||||
// if (!pTableScanInfo->dataReader) {
|
||||
// return false;
|
||||
// }
|
||||
pTableScanInfo->scanTimes = 0;
|
||||
pTableScanInfo->currentGroupId = -1;
|
||||
return true;
|
||||
|
@@ -862,12 +864,12 @@ static uint64_t getGroupId(SOperatorInfo* pOperator, SSDataBlock* pBlock, int32_
|
|||
*/
|
||||
}
|
||||
|
||||
static SSDataBlock* doDataScan(SStreamBlockScanInfo* pInfo) {
|
||||
static SSDataBlock* doDataScan(SStreamBlockScanInfo* pInfo, SSDataBlock* pSDB, int32_t tsColIndex, int32_t* pRowIndex) {
|
||||
while (1) {
|
||||
SSDataBlock* pResult = NULL;
|
||||
pResult = doTableScan(pInfo->pSnapshotReadOp);
|
||||
if (pResult == NULL) {
|
||||
if (prepareDataScan(pInfo)) {
|
||||
if (prepareDataScan(pInfo, pSDB, tsColIndex, pRowIndex)) {
|
||||
// scan next window data
|
||||
pResult = doTableScan(pInfo->pSnapshotReadOp);
|
||||
}
|
||||
|
@@ -916,7 +918,7 @@ static void setUpdateData(SStreamBlockScanInfo* pInfo, SSDataBlock* pBlock, SSDa
|
|||
pUpdateBlock->info.rows = i;
|
||||
pInfo->tsArrayIndex += i;
|
||||
pUpdateBlock->info.groupId = pInfo->groupId;
|
||||
pUpdateBlock->info.type = STREAM_REPROCESS;
|
||||
pUpdateBlock->info.type = STREAM_CLEAR;
|
||||
blockDataUpdateTsWindow(pUpdateBlock, 0);
|
||||
}
|
||||
// all rows have same group id
|
||||
|
@@ -970,6 +972,14 @@ static SSDataBlock* doStreamBlockScan(SOperatorInfo* pOperator) {
|
|||
int32_t current = pInfo->validBlockIndex++;
|
||||
SSDataBlock* pBlock = taosArrayGetP(pInfo->pBlockLists, current);
|
||||
blockDataUpdateTsWindow(pBlock, 0);
|
||||
if (pBlock->info.type == STREAM_RETRIEVE) {
|
||||
pInfo->blockType = STREAM_DATA_TYPE_SUBMIT_BLOCK;
|
||||
pInfo->scanMode = STREAM_SCAN_FROM_DATAREADER_RETRIEVE;
|
||||
copyDataBlock(pInfo->pPullDataRes, pBlock);
|
||||
pInfo->pullDataResIndex = 0;
|
||||
prepareDataScan(pInfo, pInfo->pPullDataRes, 0, &pInfo->pullDataResIndex);
|
||||
updateInfoAddCloseWindowSBF(pInfo->pUpdateInfo);
|
||||
}
|
||||
return pBlock;
|
||||
} else if (pInfo->blockType == STREAM_DATA_TYPE_SUBMIT_BLOCK) {
|
||||
if (pInfo->scanMode == STREAM_SCAN_FROM_RES) {
|
||||
|
@@ -979,28 +989,39 @@ static SSDataBlock* doStreamBlockScan(SOperatorInfo* pOperator) {
|
|||
} else if (pInfo->scanMode == STREAM_SCAN_FROM_UPDATERES) {
|
||||
pInfo->scanMode = STREAM_SCAN_FROM_DATAREADER;
|
||||
if (!isStateWindow(pInfo)) {
|
||||
prepareDataScan(pInfo);
|
||||
prepareDataScan(pInfo, pInfo->pUpdateRes, pInfo->primaryTsIndex, &pInfo->updateResIndex);
|
||||
}
|
||||
return pInfo->pUpdateRes;
|
||||
} else if (pInfo->scanMode == STREAM_SCAN_FROM_DATAREADER_RETRIEVE) {
|
||||
SSDataBlock* pSDB = doDataScan(pInfo, pInfo->pPullDataRes, 0, &pInfo->pullDataResIndex);
|
||||
if (pSDB != NULL) {
|
||||
getUpdateDataBlock(pInfo, true, pSDB, NULL);
|
||||
pSDB->info.type = STREAM_PUSH_DATA;
|
||||
return pSDB;
|
||||
}
|
||||
pInfo->scanMode = STREAM_SCAN_FROM_DATAREADER;
|
||||
} else {
|
||||
if (isStateWindow(pInfo) && taosArrayGetSize(pInfo->sessionSup.pStreamAggSup->pScanWindow) > 0) {
|
||||
pInfo->scanMode = STREAM_SCAN_FROM_DATAREADER;
|
||||
pInfo->updateResIndex = pInfo->pUpdateRes->info.rows;
|
||||
prepareDataScan(pInfo);
|
||||
prepareDataScan(pInfo, pInfo->pUpdateRes, pInfo->primaryTsIndex, &pInfo->updateResIndex);
|
||||
}
|
||||
if (pInfo->scanMode == STREAM_SCAN_FROM_DATAREADER) {
|
||||
SSDataBlock* pSDB = doDataScan(pInfo);
|
||||
SSDataBlock* pSDB = doDataScan(pInfo, pInfo->pUpdateRes, pInfo->primaryTsIndex, &pInfo->updateResIndex);
|
||||
if (pSDB == NULL) {
|
||||
setUpdateData(pInfo, pInfo->pRes, pInfo->pUpdateRes);
|
||||
if (pInfo->pUpdateRes->info.rows > 0) {
|
||||
if (!isStateWindow(pInfo)) {
|
||||
prepareDataScan(pInfo);
|
||||
// Todo(liuyao) mybe can delete this.
|
||||
bool test = prepareDataScan(pInfo, pInfo->pUpdateRes, pInfo->primaryTsIndex, &pInfo->updateResIndex);
|
||||
ASSERT(test == false);
|
||||
}
|
||||
return pInfo->pUpdateRes;
|
||||
} else {
|
||||
pInfo->scanMode = STREAM_SCAN_FROM_READERHANDLE;
|
||||
}
|
||||
} else {
|
||||
pSDB->info.type = STREAM_NORMAL;
|
||||
getUpdateDataBlock(pInfo, true, pSDB, NULL);
|
||||
return pSDB;
|
||||
}
|
||||
|
@@ -1012,12 +1033,13 @@ static SSDataBlock* doStreamBlockScan(SOperatorInfo* pOperator) {
|
|||
|
||||
while (tqNextDataBlock(pInfo->streamBlockReader)) {
|
||||
SSDataBlock block = {0};
|
||||
uint64_t groupId = 0;
|
||||
uint64_t uid = 0;
|
||||
int32_t numOfRows = 0;
|
||||
|
||||
// todo refactor
|
||||
int32_t code = tqRetrieveDataBlock(&block, pInfo->streamBlockReader, &groupId, &uid, &numOfRows);
|
||||
int32_t code = tqRetrieveDataBlock(&block, pInfo->streamBlockReader);
|
||||
|
||||
uint64_t groupId = block.info.groupId;
|
||||
uint64_t uid = block.info.uid;
|
||||
int32_t numOfRows = block.info.rows;
|
||||
|
||||
if (code != TSDB_CODE_SUCCESS || numOfRows == 0) {
|
||||
pTaskInfo->code = code;
|
||||
|
@@ -1070,10 +1092,12 @@ static SSDataBlock* doStreamBlockScan(SOperatorInfo* pOperator) {
|
|||
taosArrayDestroy(block.pDataBlock);
|
||||
if (pInfo->pRes->pDataBlock == NULL) {
|
||||
// TODO add log
|
||||
updateInfoDestoryColseWinSBF(pInfo->pUpdateInfo);
|
||||
pOperator->status = OP_EXEC_DONE;
|
||||
pTaskInfo->code = terrno;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
// currently only the tbname pseudo column
|
||||
if (pInfo->numOfPseudoExpr > 0) {
|
||||
addTagPseudoColumnData(&pInfo->readHandle, pInfo->pPseudoExpr, pInfo->numOfPseudoExpr, pInfo->pRes);
|
||||
|
@@ -1091,12 +1115,13 @@ static SSDataBlock* doStreamBlockScan(SOperatorInfo* pOperator) {
|
|||
pOperator->resultInfo.totalRows += pBlockInfo->rows;
|
||||
|
||||
if (pBlockInfo->rows == 0) {
|
||||
updateInfoDestoryColseWinSBF(pInfo->pUpdateInfo);
|
||||
pOperator->status = OP_EXEC_DONE;
|
||||
} else if (pInfo->pUpdateInfo) {
|
||||
pInfo->tsArrayIndex = 0;
|
||||
getUpdateDataBlock(pInfo, true, pInfo->pRes, pInfo->pUpdateRes);
|
||||
if (pInfo->pUpdateRes->info.rows > 0) {
|
||||
if (pInfo->pUpdateRes->info.type == STREAM_REPROCESS) {
|
||||
if (pInfo->pUpdateRes->info.type == STREAM_CLEAR) {
|
||||
pInfo->updateResIndex = 0;
|
||||
pInfo->scanMode = STREAM_SCAN_FROM_UPDATERES;
|
||||
} else if (pInfo->pUpdateRes->info.type == STREAM_INVERT) {
|
||||
|
@@ -1130,9 +1155,9 @@ static SArray* extractTableIdList(const STableListInfo* pTableGroupInfo) {
|
|||
return tableIdList;
|
||||
}
|
||||
|
||||
SOperatorInfo* createStreamScanOperatorInfo(SReadHandle* pHandle,
|
||||
STableScanPhysiNode* pTableScanNode, SExecTaskInfo* pTaskInfo,
|
||||
STimeWindowAggSupp* pTwSup, uint64_t queryId, uint64_t taskId) {
|
||||
SOperatorInfo* createStreamScanOperatorInfo(SReadHandle* pHandle, STableScanPhysiNode* pTableScanNode,
|
||||
SExecTaskInfo* pTaskInfo, STimeWindowAggSupp* pTwSup, uint64_t queryId,
|
||||
uint64_t taskId) {
|
||||
SStreamBlockScanInfo* pInfo = taosMemoryCalloc(1, sizeof(SStreamBlockScanInfo));
|
||||
SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo));
|
||||
|
||||
|
@@ -1209,6 +1234,7 @@ SOperatorInfo* createStreamScanOperatorInfo(SReadHandle* pHandle,
|
|||
pInfo->scanMode = STREAM_SCAN_FROM_READERHANDLE;
|
||||
pInfo->sessionSup = (SessionWindowSupporter){.pStreamAggSup = NULL, .gap = -1};
|
||||
pInfo->groupId = 0;
|
||||
pInfo->pPullDataRes = createPullDataBlock();
|
||||
|
||||
pOperator->name = "StreamBlockScanOperator";
|
||||
pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN;
|
||||
|
@@ -1718,14 +1744,14 @@ SOperatorInfo* createSysTableScanOperatorInfo(void* readHandle, SSystemTableScan
|
|||
SSDataBlock* pResBlock = createResDataBlock(pDescNode);
|
||||
|
||||
int32_t num = 0;
|
||||
SArray* colList = extractColMatchInfo(pScanNode->pScanCols, pDescNode, &num, COL_MATCH_FROM_COL_ID);
|
||||
SArray* colList = extractColMatchInfo(pScanNode->pScanCols, pDescNode, &num, COL_MATCH_FROM_COL_ID);
|
||||
|
||||
pInfo->accountId = pScanPhyNode->accountId;
|
||||
pInfo->pUser = taosMemoryStrDup((void*) pUser);
|
||||
pInfo->accountId = pScanPhyNode->accountId;
|
||||
pInfo->pUser = taosMemoryStrDup((void*)pUser);
|
||||
pInfo->showRewrite = pScanPhyNode->showRewrite;
|
||||
pInfo->pRes = pResBlock;
|
||||
pInfo->pCondition = pScanNode->node.pConditions;
|
||||
pInfo->scanCols = colList;
|
||||
pInfo->pRes = pResBlock;
|
||||
pInfo->pCondition = pScanNode->node.pConditions;
|
||||
pInfo->scanCols = colList;
|
||||
|
||||
initResultSizeInfo(pOperator, 4096);
|
||||
|
||||
|
@@ -1741,13 +1767,13 @@ SOperatorInfo* createSysTableScanOperatorInfo(void* readHandle, SSystemTableScan
|
|||
pInfo->readHandle = *(SReadHandle*)readHandle;
|
||||
}
|
||||
|
||||
pOperator->name = "SysTableScanOperator";
|
||||
pOperator->name = "SysTableScanOperator";
|
||||
pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_SYSTABLE_SCAN;
|
||||
pOperator->blocking = false;
|
||||
pOperator->status = OP_NOT_OPENED;
|
||||
pOperator->info = pInfo;
|
||||
pOperator->blocking = false;
|
||||
pOperator->status = OP_NOT_OPENED;
|
||||
pOperator->info = pInfo;
|
||||
pOperator->exprSupp.numOfExprs = taosArrayGetSize(pResBlock->pDataBlock);
|
||||
pOperator->pTaskInfo = pTaskInfo;
|
||||
pOperator->pTaskInfo = pTaskInfo;
|
||||
|
||||
pOperator->fpSet =
|
||||
createOperatorFpSet(operatorDummyOpenFn, doSysTableScan, NULL, NULL, destroySysScanOperator, NULL, NULL, NULL);
|
||||
|
@@ -1934,11 +1960,11 @@ SOperatorInfo* createTagScanOperatorInfo(SReadHandle* pReadHandle, STagScanPhysi
|
|||
goto _error;
|
||||
}
|
||||
|
||||
pInfo->pTableList = pTableListInfo;
|
||||
pInfo->pColMatchInfo = colList;
|
||||
pInfo->pRes = createResDataBlock(pDescNode);
|
||||
pInfo->readHandle = *pReadHandle;
|
||||
pInfo->curPos = 0;
|
||||
pInfo->pTableList = pTableListInfo;
|
||||
pInfo->pColMatchInfo = colList;
|
||||
pInfo->pRes = createResDataBlock(pDescNode);
|
||||
pInfo->readHandle = *pReadHandle;
|
||||
pInfo->curPos = 0;
|
||||
|
||||
pOperator->name = "TagScanOperator";
|
||||
pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN;
|
||||
|
@@ -1965,7 +1991,10 @@ _error:
|
|||
|
||||
typedef struct STableMergeScanInfo {
|
||||
STableListInfo* tableListInfo;
|
||||
int32_t currentGroupId;
|
||||
int32_t tableStartIndex;
|
||||
int32_t tableEndIndex;
|
||||
bool hasGroupId;
|
||||
uint64_t groupId;
|
||||
|
||||
SArray* dataReaders; // array of tsdbReaderT*
|
||||
SReadHandle readHandle;
|
||||
|
@@ -2030,6 +2059,22 @@ int32_t createScanTableListInfo(STableScanPhysiNode* pTableScanNode, SReadHandle
|
|||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
int32_t createMultipleDataReaders(SQueryTableDataCond* pQueryCond, SReadHandle* pHandle, STableListInfo* pTableListInfo,
|
||||
int32_t tableStartIdx, int32_t tableEndIdx, SArray* arrayReader, uint64_t queryId,
|
||||
uint64_t taskId) {
|
||||
for (int32_t i = tableStartIdx; i <= tableEndIdx; ++i) {
|
||||
SArray* subTableList = taosArrayInit(1, sizeof(STableKeyInfo));
|
||||
taosArrayPush(subTableList, taosArrayGet(pTableListInfo->pTableList, i));
|
||||
|
||||
tsdbReaderT* pReader = tsdbReaderOpen(pHandle->vnode, pQueryCond, subTableList, queryId, taskId);
|
||||
taosArrayPush(arrayReader, &pReader);
|
||||
|
||||
taosArrayDestroy(subTableList);
|
||||
}
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
// todo refactor
|
||||
static int32_t loadDataBlockFromOneTable(SOperatorInfo* pOperator, STableMergeScanInfo* pTableScanInfo,
|
||||
int32_t readerIdx, SSDataBlock* pBlock, uint32_t* status) {
|
||||
|
@@ -2216,34 +2261,32 @@ SArray* generateSortByTsInfo(int32_t order) {
|
|||
return pList;
|
||||
}
|
||||
|
||||
static int32_t createMultipleDataReaders(SQueryTableDataCond* pQueryCond, SReadHandle* pHandle, SArray* tableList, SArray* arrayReader, uint64_t queryId,
|
||||
uint64_t taskId) {
|
||||
for (int32_t i = 0; i < taosArrayGetSize(tableList); ++i) {
|
||||
SArray* tmp = taosArrayInit(1, sizeof(STableKeyInfo));
|
||||
taosArrayPush(tmp, taosArrayGet(tableList, i));
|
||||
|
||||
tsdbReaderT* pReader = tsdbReaderOpen(pHandle->vnode, pQueryCond, tmp, queryId, taskId);
|
||||
taosArrayPush(arrayReader, &pReader);
|
||||
|
||||
taosArrayDestroy(tmp);
|
||||
}
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
int32_t startGroupTableMergeScan(SOperatorInfo* pOperator) {
|
||||
STableMergeScanInfo* pInfo = pOperator->info;
|
||||
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
|
||||
|
||||
SArray* tableList = taosArrayGetP(pInfo->tableListInfo->pGroupList, pInfo->currentGroupId);
|
||||
{
|
||||
size_t tableListSize = taosArrayGetSize(pInfo->tableListInfo->pTableList);
|
||||
int32_t i = pInfo->tableStartIndex + 1;
|
||||
for (; i < tableListSize; ++i) {
|
||||
STableKeyInfo* tableKeyInfo = taosArrayGet(pInfo->tableListInfo->pTableList, i);
|
||||
if (tableKeyInfo->groupId != pInfo->groupId) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
pInfo->tableEndIndex = i - 1;
|
||||
}
|
||||
|
||||
createMultipleDataReaders(&pInfo->cond, &pInfo->readHandle, tableList,
|
||||
int32_t tableStartIdx = pInfo->tableStartIndex;
|
||||
int32_t tableEndIdx = pInfo->tableEndIndex;
|
||||
|
||||
STableListInfo* tableListInfo = pInfo->tableListInfo;
|
||||
createMultipleDataReaders(&pInfo->cond, &pInfo->readHandle, tableListInfo, tableStartIdx, tableEndIdx,
|
||||
pInfo->dataReaders, pInfo->queryId, pInfo->taskId);
|
||||
|
||||
// todo the total available buffer should be determined by total capacity of buffer of this task.
|
||||
// the additional one is reserved for merge result
|
||||
int32_t tableLen = taosArrayGetSize(tableList);
|
||||
pInfo->sortBufSize = pInfo->bufPageSize * ((tableLen==0?1:tableLen) + 1);
|
||||
pInfo->sortBufSize = pInfo->bufPageSize * (tableEndIdx - tableStartIdx + 1 + 1);
|
||||
int32_t numOfBufPage = pInfo->sortBufSize / pInfo->bufPageSize;
|
||||
pInfo->pSortHandle = tsortCreateSortHandle(pInfo->pSortInfo, SORT_MULTISOURCE_MERGE, pInfo->bufPageSize, numOfBufPage,
|
||||
pInfo->pSortInputBlock, pTaskInfo->id.str);
|
||||
|
@@ -2330,43 +2373,38 @@ SSDataBlock* doTableMergeScan(SOperatorInfo* pOperator) {
|
|||
if (code != TSDB_CODE_SUCCESS) {
|
||||
longjmp(pTaskInfo->env, code);
|
||||
}
|
||||
size_t tableListSize = taosArrayGetSize(pInfo->tableListInfo->pTableList);
|
||||
if (!pInfo->hasGroupId) {
|
||||
pInfo->hasGroupId = true;
|
||||
|
||||
if (pInfo->currentGroupId == -1) {
|
||||
pInfo->currentGroupId++;
|
||||
if (pInfo->currentGroupId >= taosArrayGetSize(pInfo->tableListInfo->pGroupList)) {
|
||||
if (tableListSize == 0) {
|
||||
doSetOperatorCompleted(pOperator);
|
||||
return NULL;
|
||||
}
|
||||
pInfo->tableStartIndex = 0;
|
||||
pInfo->groupId = ((STableKeyInfo*)taosArrayGet(pInfo->tableListInfo->pTableList, pInfo->tableStartIndex))->groupId;
|
||||
startGroupTableMergeScan(pOperator);
|
||||
}
|
||||
SSDataBlock* pBlock = getSortedTableMergeScanBlockData(pInfo->pSortHandle, pOperator->resultInfo.capacity, pOperator);
|
||||
if (pBlock != NULL) {
|
||||
uint64_t* groupId = taosHashGet(pInfo->tableListInfo->map, &(pBlock->info.uid), sizeof(uint64_t));
|
||||
if(groupId) pBlock->info.groupId = *groupId;
|
||||
|
||||
pOperator->resultInfo.totalRows += pBlock->info.rows;
|
||||
return pBlock;
|
||||
SSDataBlock* pBlock = NULL;
|
||||
while (pInfo->tableStartIndex < tableListSize) {
|
||||
pBlock = getSortedTableMergeScanBlockData(pInfo->pSortHandle, pOperator->resultInfo.capacity, pOperator);
|
||||
if (pBlock != NULL) {
|
||||
pBlock->info.groupId = pInfo->groupId;
|
||||
pOperator->resultInfo.totalRows += pBlock->info.rows;
|
||||
return pBlock;
|
||||
} else {
|
||||
stopGroupTableMergeScan(pOperator);
|
||||
if (pInfo->tableEndIndex >= tableListSize - 1) {
|
||||
doSetOperatorCompleted(pOperator);
|
||||
break;
|
||||
}
|
||||
pInfo->tableStartIndex = pInfo->tableEndIndex + 1;
|
||||
pInfo->groupId =
|
||||
((STableKeyInfo*)taosArrayGet(pInfo->tableListInfo->pTableList, pInfo->tableStartIndex))->groupId;
|
||||
startGroupTableMergeScan(pOperator);
|
||||
}
|
||||
}
|
||||
|
||||
stopGroupTableMergeScan(pOperator);
|
||||
pInfo->currentGroupId++;
|
||||
if (pInfo->currentGroupId >= taosArrayGetSize(pInfo->tableListInfo->pGroupList)) {
|
||||
doSetOperatorCompleted(pOperator);
|
||||
return NULL;
|
||||
}
|
||||
startGroupTableMergeScan(pOperator);
|
||||
|
||||
pBlock = getSortedTableMergeScanBlockData(pInfo->pSortHandle, pOperator->resultInfo.capacity, pOperator);
|
||||
if (pBlock != NULL) {
|
||||
uint64_t* groupId = taosHashGet(pInfo->tableListInfo->map, &(pBlock->info.uid), sizeof(uint64_t));
|
||||
if(groupId) pBlock->info.groupId = *groupId;
|
||||
|
||||
pOperator->resultInfo.totalRows += pBlock->info.rows;
|
||||
return pBlock;
|
||||
}
|
||||
|
||||
doSetOperatorCompleted(pOperator);
|
||||
|
||||
return pBlock;
|
||||
}
|
||||
|
||||
|
@ -2403,6 +2441,12 @@ int32_t getTableMergeScanExplainExecInfo(SOperatorInfo* pOptr, void** pOptrExpla
|
|||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
int32_t compareTableKeyInfoByGid(const void* p1, const void* p2) {
|
||||
const STableKeyInfo* info1 = p1;
|
||||
const STableKeyInfo* info2 = p2;
|
||||
return info1->groupId - info2->groupId;
|
||||
}
|
||||
|
||||
SOperatorInfo* createTableMergeScanOperatorInfo(STableScanPhysiNode* pTableScanNode, STableListInfo* pTableListInfo,
|
||||
SReadHandle* readHandle, SExecTaskInfo* pTaskInfo, uint64_t queryId,
|
||||
uint64_t taskId) {
|
||||
|
@ -2411,6 +2455,9 @@ SOperatorInfo* createTableMergeScanOperatorInfo(STableScanPhysiNode* pTableScanN
|
|||
if (pInfo == NULL || pOperator == NULL) {
|
||||
goto _error;
|
||||
}
|
||||
if (pTableScanNode->pPartitionTags) {
|
||||
taosArraySort(pTableListInfo->pTableList, compareTableKeyInfoByGid);
|
||||
}
|
||||
|
||||
SDataBlockDescNode* pDescNode = pTableScanNode->scan.node.pOutputDataBlockDesc;
|
||||
|
||||
|
@ -2443,7 +2490,6 @@ SOperatorInfo* createTableMergeScanOperatorInfo(STableScanPhysiNode* pTableScanN
|
|||
pInfo->dataReaders = taosArrayInit(64, POINTER_BYTES);
|
||||
pInfo->queryId = queryId;
|
||||
pInfo->taskId = taskId;
|
||||
pInfo->currentGroupId = -1;
|
||||
|
||||
pInfo->sortSourceParams = taosArrayInit(64, sizeof(STableMergeScanSortSourceParam));
|
||||
|
||||
|
|
|
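The merge-scan changes above split the table list (already sorted by groupId) into per-group ranges [tableStartIndex, tableEndIndex] and size the sort buffer from the number of tables in the current group plus one reserved merge page. The following is a minimal standalone sketch of that range computation; it uses a plain struct in place of STableKeyInfo and is not the TDengine implementation.

#include <stdio.h>
#include <stdint.h>

typedef struct { uint64_t uid; uint64_t groupId; } KeyInfo;

// Given a table list sorted by groupId, find the last index of the group that
// starts at `start`, mirroring the loop in startGroupTableMergeScan.
static int groupEndIndex(const KeyInfo *tables, int numTables, int start) {
  uint64_t gid = tables[start].groupId;
  int      i = start + 1;
  while (i < numTables && tables[i].groupId == gid) {
    ++i;
  }
  return i - 1;
}

int main(void) {
  KeyInfo tables[] = {{1, 10}, {2, 10}, {3, 10}, {4, 20}, {5, 20}};
  int     num = sizeof(tables) / sizeof(tables[0]);
  int     pageSize = 4096;
  int     start = 0;
  while (start < num) {
    int end = groupEndIndex(tables, num, start);
    // one buffer page per table in the group, plus one reserved for the merge result
    int sortBufSize = pageSize * ((end - start + 1) + 1);
    printf("group %llu: tables [%d, %d], sortBufSize %d\n",
           (unsigned long long)tables[start].groupId, start, end, sortBufSize);
    start = end + 1;
  }
  return 0;
}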
@ -15,6 +15,7 @@
|
|||
#include "executorimpl.h"
|
||||
#include "function.h"
|
||||
#include "functionMgt.h"
|
||||
#include "tcompare.h"
|
||||
#include "tdatablock.h"
|
||||
#include "tfill.h"
|
||||
#include "ttime.h"
|
||||
|
@ -26,6 +27,16 @@ typedef enum SResultTsInterpType {
|
|||
|
||||
#define IS_FINAL_OP(op) ((op)->isFinal)
|
||||
|
||||
typedef struct SWinRes {
|
||||
TSKEY ts;
|
||||
uint64_t groupId;
|
||||
} SWinRes;
|
||||
|
||||
typedef struct SPullWindowInfo {
|
||||
STimeWindow window;
|
||||
uint64_t groupId;
|
||||
} SPullWindowInfo;
|
||||
|
||||
static SSDataBlock* doStreamSessionAgg(SOperatorInfo* pOperator);
|
||||
|
||||
static int64_t* extractTsCol(SSDataBlock* pBlock, const SIntervalAggOperatorInfo* pInfo);
|
||||
|
@ -684,11 +695,13 @@ static void doInterpUnclosedTimeWindow(SOperatorInfo* pOperatorInfo, int32_t num
|
|||
}
|
||||
|
||||
void printDataBlock(SSDataBlock* pBlock, const char* flag) {
|
||||
if (pBlock == NULL) return;
|
||||
SArray* blocks = taosArrayInit(1, sizeof(SSDataBlock));
|
||||
taosArrayPush(blocks, pBlock);
|
||||
blockDebugShowData(blocks, flag);
|
||||
taosArrayDestroy(blocks);
|
||||
if (pBlock == NULL){
|
||||
qDebug("======printDataBlock Block is Null");
|
||||
return;
|
||||
}
|
||||
char *pBuf = NULL;
|
||||
qDebug("%s", dumpBlockData(pBlock, flag, &pBuf));
|
||||
taosMemoryFree(pBuf);
|
||||
}
|
||||
|
||||
typedef int64_t (*__get_value_fn_t)(void* data, int32_t index);
|
||||
|
@ -1217,30 +1230,40 @@ void doClearWindowImpl(SResultRowPosition* p1, SDiskbasedBuf* pResultBuf, SExprS
|
|||
}
|
||||
}
|
||||
|
||||
void doClearWindow(SAggSupporter* pAggSup, SExprSupp* pSup, char* pData, int16_t bytes, uint64_t groupId,
|
||||
bool doClearWindow(SAggSupporter* pAggSup, SExprSupp* pSup, char* pData, int16_t bytes, uint64_t groupId,
|
||||
int32_t numOfOutput) {
|
||||
SET_RES_WINDOW_KEY(pAggSup->keyBuf, pData, bytes, groupId);
|
||||
SResultRowPosition* p1 =
|
||||
(SResultRowPosition*)taosHashGet(pAggSup->pResultRowHashTable, pAggSup->keyBuf, GET_RES_WINDOW_KEY_LEN(bytes));
|
||||
if (!p1) {
|
||||
// window has been closed
|
||||
return;
|
||||
return false;
|
||||
}
|
||||
doClearWindowImpl(p1, pAggSup->pResultBuf, pSup, numOfOutput);
|
||||
return true;
|
||||
}
|
||||
|
||||
static void doClearWindows(SAggSupporter* pAggSup, SExprSupp* pSup1, SInterval* pInterval, int32_t tsIndex,
|
||||
int32_t numOfOutput, SSDataBlock* pBlock, SArray* pUpWins) {
|
||||
SColumnInfoData* pColDataInfo = taosArrayGet(pBlock->pDataBlock, tsIndex);
|
||||
TSKEY* tsCols = (TSKEY*)pColDataInfo->pData;
|
||||
SColumnInfoData* pTsCol = taosArrayGet(pBlock->pDataBlock, tsIndex);
|
||||
TSKEY* tsCols = (TSKEY*)pTsCol->pData;
|
||||
uint64_t* pGpDatas = NULL;
|
||||
if (pBlock->info.type == STREAM_RETRIEVE) {
|
||||
SColumnInfoData* pGpCol = taosArrayGet(pBlock->pDataBlock, 2);
|
||||
pGpDatas = (uint64_t*)pGpCol->pData;
|
||||
}
|
||||
int32_t step = 0;
|
||||
for (int32_t i = 0; i < pBlock->info.rows; i += step) {
|
||||
SResultRowInfo dumyInfo;
|
||||
dumyInfo.cur.pageId = -1;
|
||||
STimeWindow win = getActiveTimeWindow(NULL, &dumyInfo, tsCols[i], pInterval, pInterval->precision, NULL);
|
||||
step = getNumOfRowsInTimeWindow(&pBlock->info, tsCols, i, win.ekey, binarySearchForKey, NULL, TSDB_ORDER_ASC);
|
||||
doClearWindow(pAggSup, pSup1, (char*)&win.skey, sizeof(TKEY), pBlock->info.groupId, numOfOutput);
|
||||
if (pUpWins) {
|
||||
uint64_t groupId = pBlock->info.groupId;
|
||||
if (pGpDatas) {
|
||||
groupId = pGpDatas[i];
|
||||
}
|
||||
bool res = doClearWindow(pAggSup, pSup1, (char*)&win.skey, sizeof(TKEY), groupId, numOfOutput);
|
||||
if (pUpWins && res) {
|
||||
taosArrayPush(pUpWins, &win);
|
||||
}
|
||||
}
|
||||
|
@ -1268,8 +1291,8 @@ bool isCloseWindow(STimeWindow* pWin, STimeWindowAggSupp* pSup) {
|
|||
return pSup->maxTs != INT64_MIN && pWin->ekey < pSup->maxTs - pSup->waterMark;
|
||||
}
|
||||
|
||||
static int32_t closeIntervalWindow(SHashObj* pHashMap, STimeWindowAggSupp* pSup, SInterval* pInterval,
|
||||
SArray* closeWins) {
|
||||
static int32_t closeIntervalWindow(SHashObj* pHashMap, STimeWindowAggSupp* pSup,
|
||||
SInterval* pInterval, SHashObj* pPullDataMap, SArray* closeWins) {
|
||||
void* pIte = NULL;
|
||||
size_t keyLen = 0;
|
||||
while ((pIte = taosHashIterate(pHashMap, pIte)) != NULL) {
|
||||
|
@ -1280,10 +1303,20 @@ static int32_t closeIntervalWindow(SHashObj* pHashMap, STimeWindowAggSupp* pSup,
|
|||
SResultRowInfo dumyInfo;
|
||||
dumyInfo.cur.pageId = -1;
|
||||
STimeWindow win = getActiveTimeWindow(NULL, &dumyInfo, ts, pInterval, pInterval->precision, NULL);
|
||||
SWinRes winRe = {.ts = win.skey, .groupId = groupId,};
|
||||
void* chIds = taosHashGet(pPullDataMap, &winRe, sizeof(SWinRes));
|
||||
if (isCloseWindow(&win, pSup)) {
|
||||
char keyBuf[GET_RES_WINDOW_KEY_LEN(sizeof(TSKEY))];
|
||||
SET_RES_WINDOW_KEY(keyBuf, &ts, sizeof(TSKEY), groupId);
|
||||
taosHashRemove(pHashMap, keyBuf, keyLen);
|
||||
if (chIds && pPullDataMap) {
|
||||
SArray* chAy = *(SArray**) chIds;
|
||||
int32_t size = taosArrayGetSize(chAy);
|
||||
qInfo("======window %ld wait child size:%d", win.skey ,size);
|
||||
for (int32_t i = 0; i < size; i++) {
|
||||
qInfo("======window %ld wait chid id:%d", win.skey ,*(int32_t*)taosArrayGet(chAy, i));
|
||||
}
|
||||
continue;
|
||||
} else if (pPullDataMap) {
|
||||
qInfo("======close window %ld", win.skey);
|
||||
}
|
||||
SResultRowPosition* pPos = (SResultRowPosition*)pIte;
|
||||
if (pSup->calTrigger == STREAM_TRIGGER_WINDOW_CLOSE) {
|
||||
int32_t code = saveResult(ts, pPos->pageId, pPos->offset, groupId, closeWins);
|
||||
|
@ -1291,11 +1324,25 @@ static int32_t closeIntervalWindow(SHashObj* pHashMap, STimeWindowAggSupp* pSup,
|
|||
return code;
|
||||
}
|
||||
}
|
||||
char keyBuf[GET_RES_WINDOW_KEY_LEN(sizeof(TSKEY))];
|
||||
SET_RES_WINDOW_KEY(keyBuf, &ts, sizeof(TSKEY), groupId);
|
||||
taosHashRemove(pHashMap, keyBuf, keyLen);
|
||||
}
|
||||
}
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
static void closeChildIntervalWindow(SArray* pChildren, TSKEY maxTs) {
|
||||
int32_t size = taosArrayGetSize(pChildren);
|
||||
for (int32_t i = 0; i < size; i++) {
|
||||
SOperatorInfo* pChildOp = taosArrayGetP(pChildren, i);
|
||||
SStreamFinalIntervalOperatorInfo* pChInfo = pChildOp->info;
|
||||
ASSERT(pChInfo->twAggSup.calTrigger == STREAM_TRIGGER_AT_ONCE);
|
||||
pChInfo->twAggSup.maxTs = TMAX(pChInfo->twAggSup.maxTs, maxTs);
|
||||
closeIntervalWindow(pChInfo->aggSup.pResultRowHashTable, &pChInfo->twAggSup, &pChInfo->interval, NULL, NULL);
|
||||
}
|
||||
}
|
||||
|
||||
static SSDataBlock* doStreamIntervalAgg(SOperatorInfo* pOperator) {
|
||||
SIntervalAggOperatorInfo* pInfo = pOperator->info;
|
||||
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
|
||||
|
@ -1324,7 +1371,7 @@ static SSDataBlock* doStreamIntervalAgg(SOperatorInfo* pOperator) {
|
|||
break;
|
||||
}
|
||||
|
||||
if (pBlock->info.type == STREAM_REPROCESS) {
|
||||
if (pBlock->info.type == STREAM_CLEAR) {
|
||||
doClearWindows(&pInfo->aggSup, &pOperator->exprSupp, &pInfo->interval, 0, pOperator->exprSupp.numOfExprs, pBlock,
|
||||
NULL);
|
||||
qDebug("%s clear existed time window results for updates checked", GET_TASKID(pTaskInfo));
|
||||
|
@ -1345,7 +1392,7 @@ static SSDataBlock* doStreamIntervalAgg(SOperatorInfo* pOperator) {
|
|||
pInfo->twAggSup.maxTs = TMAX(pInfo->twAggSup.maxTs, pBlock->info.window.ekey);
|
||||
hashIntervalAgg(pOperator, &pInfo->binfo.resultRowInfo, pBlock, MAIN_SCAN, pUpdated);
|
||||
}
|
||||
closeIntervalWindow(pInfo->aggSup.pResultRowHashTable, &pInfo->twAggSup, &pInfo->interval, pUpdated);
|
||||
closeIntervalWindow(pInfo->aggSup.pResultRowHashTable, &pInfo->twAggSup, &pInfo->interval, NULL, pUpdated);
|
||||
|
||||
finalizeUpdatedResult(pOperator->exprSupp.numOfExprs, pInfo->aggSup.pResultBuf, pUpdated, pSup->rowEntryInfoOffset);
|
||||
initMultiResInfoFromArrayList(&pInfo->groupResInfo, pUpdated);
|
||||
|
@ -1373,6 +1420,11 @@ void destroyStreamFinalIntervalOperatorInfo(void* param, int32_t numOfOutput) {
|
|||
SStreamFinalIntervalOperatorInfo* pInfo = (SStreamFinalIntervalOperatorInfo*)param;
|
||||
cleanupBasicInfo(&pInfo->binfo);
|
||||
cleanupAggSup(&pInfo->aggSup);
|
||||
//it should be empty.
|
||||
taosHashCleanup(pInfo->pPullDataMap);
|
||||
taosArrayDestroy(pInfo->pPullWins);
|
||||
blockDataDestroy(pInfo->pPullDataRes);
|
||||
|
||||
if (pInfo->pChildren) {
|
||||
int32_t size = taosArrayGetSize(pInfo->pChildren);
|
||||
for (int32_t i = 0; i < size; i++) {
|
||||
|
@ -2164,6 +2216,24 @@ bool isDeletedWindow(STimeWindow* pWin, uint64_t groupId, SAggSupporter* pSup) {
|
|||
return p1 == NULL;
|
||||
}
|
||||
|
||||
int32_t getNexWindowPos(SInterval* pInterval, SDataBlockInfo* pBlockInfo, TSKEY* tsCols,
|
||||
int32_t startPos, TSKEY eKey, STimeWindow* pNextWin) {
|
||||
int32_t forwardRows = getNumOfRowsInTimeWindow(pBlockInfo, tsCols, startPos,
|
||||
eKey, binarySearchForKey, NULL, TSDB_ORDER_ASC);
|
||||
int32_t prevEndPos = forwardRows - 1 + startPos;
|
||||
return getNextQualifiedWindow(pInterval, pNextWin, pBlockInfo, tsCols, prevEndPos, TSDB_ORDER_ASC);
|
||||
}
|
||||
|
||||
void addPullWindow(SHashObj* pMap, SWinRes* pWinRes, int32_t size) {
|
||||
SArray* childIds = taosArrayInit(8, sizeof(int32_t));
|
||||
for (int32_t i = 0; i < size; i++) {
|
||||
taosArrayPush(childIds, &i);
|
||||
}
|
||||
taosHashPut(pMap, pWinRes, sizeof(SWinRes), &childIds, sizeof(void*));
|
||||
}
|
||||
|
||||
static int32_t getChildIndex(SSDataBlock* pBlock) { return pBlock->info.childId; }
|
||||
|
||||
static void doHashInterval(SOperatorInfo* pOperatorInfo, SSDataBlock* pSDataBlock, uint64_t tableGroupId,
|
||||
SArray* pUpdated) {
|
||||
SStreamFinalIntervalOperatorInfo* pInfo = (SStreamFinalIntervalOperatorInfo*)pOperatorInfo->info;
|
||||
|
@ -2177,35 +2247,59 @@ static void doHashInterval(SOperatorInfo* pOperatorInfo, SSDataBlock* pSDataBloc
|
|||
SResultRow* pResult = NULL;
|
||||
int32_t forwardRows = 0;
|
||||
|
||||
if (pSDataBlock->pDataBlock != NULL) {
|
||||
SColumnInfoData* pColDataInfo = taosArrayGet(pSDataBlock->pDataBlock, pInfo->primaryTsIndex);
|
||||
tsCols = (int64_t*)pColDataInfo->pData;
|
||||
} else {
|
||||
return;
|
||||
}
|
||||
ASSERT(pSDataBlock->pDataBlock != NULL);
|
||||
SColumnInfoData* pColDataInfo = taosArrayGet(pSDataBlock->pDataBlock, pInfo->primaryTsIndex);
|
||||
tsCols = (int64_t*)pColDataInfo->pData;
|
||||
|
||||
int32_t startPos = ascScan ? 0 : (pSDataBlock->info.rows - 1);
|
||||
TSKEY ts = getStartTsKey(&pSDataBlock->info.window, tsCols);
|
||||
STimeWindow nextWin = getActiveTimeWindow(pInfo->aggSup.pResultBuf, pResultRowInfo, ts, &pInfo->interval,
|
||||
pInfo->interval.precision, NULL);
|
||||
while (1) {
|
||||
if (IS_FINAL_OP(pInfo) && isCloseWindow(&nextWin, &pInfo->twAggSup) &&
|
||||
isDeletedWindow(&nextWin, tableGroupId, &pInfo->aggSup)) {
|
||||
SArray* pUpWins = taosArrayInit(8, sizeof(STimeWindow));
|
||||
taosArrayPush(pUpWins, &nextWin);
|
||||
rebuildIntervalWindow(pInfo, pSup, pUpWins, pInfo->binfo.pRes->info.groupId, pSup->numOfExprs,
|
||||
pOperatorInfo->pTaskInfo);
|
||||
taosArrayDestroy(pUpWins);
|
||||
if (IS_FINAL_OP(pInfo) && isCloseWindow(&nextWin, &pInfo->twAggSup) && pInfo->pChildren) {
|
||||
bool ignore = true;
|
||||
SWinRes winRes = {.ts = nextWin.skey, .groupId = tableGroupId,};
|
||||
void* chIds = taosHashGet(pInfo->pPullDataMap, &winRes, sizeof(SWinRes));
|
||||
if (isDeletedWindow(&nextWin, tableGroupId, &pInfo->aggSup) && !chIds) {
|
||||
SPullWindowInfo pull = {.window = nextWin, .groupId = tableGroupId};
|
||||
// add pull data request
|
||||
taosArrayPush(pInfo->pPullWins, &pull);
|
||||
addPullWindow(pInfo->pPullDataMap, &winRes, taosArrayGetSize(pInfo->pChildren));
|
||||
} else {
|
||||
int32_t index = -1;
|
||||
SArray* chArray = NULL;
|
||||
if (chIds) {
|
||||
chArray = *(void**) chIds;
|
||||
int32_t chId = getChildIndex(pSDataBlock);
|
||||
index = taosArraySearchIdx(chArray, &chId, compareInt32Val, TD_EQ);
|
||||
}
|
||||
if (index != -1 && pSDataBlock->info.type == STREAM_PUSH_DATA) {
|
||||
taosArrayRemove(chArray, index);
|
||||
if (taosArrayGetSize(chArray) == 0) {
|
||||
// pull data is over
|
||||
taosHashRemove(pInfo->pPullDataMap, &winRes, sizeof(SWinRes));
|
||||
}
|
||||
}
|
||||
if (index == -1 || pSDataBlock->info.type == STREAM_PUSH_DATA) {
|
||||
ignore = false;
|
||||
}
|
||||
}
|
||||
|
||||
if (ignore) {
|
||||
startPos = getNexWindowPos(&pInfo->interval, &pSDataBlock->info, tsCols, startPos, nextWin.ekey, &nextWin);
|
||||
if (startPos < 0) {
|
||||
break;
|
||||
}
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
int32_t code = setTimeWindowOutputBuf(pResultRowInfo, &nextWin, true, &pResult, tableGroupId, pSup->pCtx,
|
||||
numOfOutput, pSup->rowEntryInfoOffset, &pInfo->aggSup, pTaskInfo);
|
||||
if (code != TSDB_CODE_SUCCESS || pResult == NULL) {
|
||||
longjmp(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
|
||||
}
|
||||
SResKeyPos* pos = taosMemoryMalloc(sizeof(SResKeyPos) + sizeof(uint64_t));
|
||||
pos->groupId = tableGroupId;
|
||||
pos->pos = (SResultRowPosition){.pageId = pResult->pageId, .offset = pResult->offset};
|
||||
*(int64_t*)pos->key = pResult->win.skey;
|
||||
|
||||
forwardRows = getNumOfRowsInTimeWindow(&pSDataBlock->info, tsCols, startPos, nextWin.ekey, binarySearchForKey, NULL,
|
||||
TSDB_ORDER_ASC);
|
||||
if (pInfo->twAggSup.calTrigger == STREAM_TRIGGER_AT_ONCE && pUpdated) {
|
||||
|
@ -2230,14 +2324,17 @@ static void clearStreamIntervalOperator(SStreamFinalIntervalOperatorInfo* pInfo)
|
|||
initResultRowInfo(&pInfo->binfo.resultRowInfo);
|
||||
}
|
||||
|
||||
static void clearUpdateDataBlock(SSDataBlock* pBlock) {
|
||||
static void clearSpecialDataBlock(SSDataBlock* pBlock) {
|
||||
if (pBlock->info.rows <= 0) {
|
||||
return;
|
||||
}
|
||||
blockDataCleanup(pBlock);
|
||||
}
|
||||
|
||||
void copyUpdateDataBlock(SSDataBlock* pDest, SSDataBlock* pSource, int32_t tsColIndex) {
|
||||
// ASSERT(pDest->info.capacity >= pSource->info.rows);
|
||||
blockDataEnsureCapacity(pDest, pSource->info.rows);
|
||||
clearUpdateDataBlock(pDest);
|
||||
clearSpecialDataBlock(pDest);
|
||||
SColumnInfoData* pDestCol = taosArrayGet(pDest->pDataBlock, 0);
|
||||
SColumnInfoData* pSourceCol = taosArrayGet(pSource->pDataBlock, tsColIndex);
|
||||
|
||||
|
@ -2254,7 +2351,63 @@ void copyUpdateDataBlock(SSDataBlock* pDest, SSDataBlock* pSource, int32_t tsCol
|
|||
blockDataUpdateTsWindow(pDest, 0);
|
||||
}
|
||||
|
||||
static int32_t getChildIndex(SSDataBlock* pBlock) { return pBlock->info.childId; }
|
||||
static bool needBreak(SStreamFinalIntervalOperatorInfo* pInfo) {
|
||||
int32_t size = taosArrayGetSize(pInfo->pPullWins);
|
||||
if (pInfo->pullIndex < size) {
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
static void doBuildPullDataBlock(SArray* array, int32_t* pIndex, SSDataBlock* pBlock) {
|
||||
clearSpecialDataBlock(pBlock);
|
||||
int32_t size = taosArrayGetSize(array);
|
||||
if (size - (*pIndex) == 0) {
|
||||
return;
|
||||
}
|
||||
blockDataEnsureCapacity(pBlock, size - (*pIndex) );
|
||||
ASSERT(3 <= taosArrayGetSize(pBlock->pDataBlock));
|
||||
for (; (*pIndex) < size; (*pIndex)++) {
|
||||
SPullWindowInfo* pWin = taosArrayGet(array, (*pIndex) );
|
||||
SColumnInfoData* pStartTs = (SColumnInfoData*) taosArrayGet(pBlock->pDataBlock, 0);
|
||||
colDataAppend(pStartTs, pBlock->info.rows, (const char*)&pWin->window.skey, false);
|
||||
|
||||
SColumnInfoData* pEndTs = (SColumnInfoData*) taosArrayGet(pBlock->pDataBlock, 1);
|
||||
colDataAppend(pEndTs, pBlock->info.rows, (const char*)&pWin->window.ekey, false);
|
||||
|
||||
SColumnInfoData* pGroupId = (SColumnInfoData*) taosArrayGet(pBlock->pDataBlock, 2);
|
||||
colDataAppend(pGroupId, pBlock->info.rows, (const char*)&pWin->groupId, false);
|
||||
pBlock->info.rows++;
|
||||
}
|
||||
if ((*pIndex) == size) {
|
||||
*pIndex = 0;
|
||||
taosArrayClear(array);
|
||||
}
|
||||
blockDataUpdateTsWindow(pBlock, 0);
|
||||
}
|
||||
|
||||
void processPushEmpty(SSDataBlock* pBlock, SHashObj* pMap) {
|
||||
SColumnInfoData* pStartCol = taosArrayGet(pBlock->pDataBlock, 0);
|
||||
TSKEY* tsData = (TSKEY*)pStartCol->pData;
|
||||
SColumnInfoData* pGroupCol = taosArrayGet(pBlock->pDataBlock, 2);
|
||||
uint64_t* groupIdData = (uint64_t*)pGroupCol->pData;
|
||||
int32_t chId = getChildIndex(pBlock);
|
||||
for (int32_t i = 0; i < pBlock->info.rows; i++) {
|
||||
SWinRes winRes = {.ts = tsData[i], .groupId = groupIdData[i]};
|
||||
void* chIds = taosHashGet(pMap, &winRes, sizeof(SWinRes));
|
||||
if (chIds) {
|
||||
SArray* chArray = *(SArray**) chIds;
|
||||
int32_t index = taosArraySearchIdx(chArray, &chId, compareInt32Val, TD_EQ);
|
||||
if (index != -1) {
|
||||
taosArrayRemove(chArray, index);
|
||||
if (taosArrayGetSize(chArray) == 0) {
|
||||
// pull data is over
|
||||
taosHashRemove(pMap, &winRes, sizeof(SWinRes));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) {
|
||||
SStreamFinalIntervalOperatorInfo* pInfo = pOperator->info;
|
||||
|
@ -2270,28 +2423,50 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) {
|
|||
doBuildResultDatablock(pOperator, &pInfo->binfo, &pInfo->groupResInfo, pInfo->aggSup.pResultBuf);
|
||||
if (pInfo->binfo.pRes->info.rows == 0) {
|
||||
pOperator->status = OP_EXEC_DONE;
|
||||
if (IS_FINAL_OP(pInfo) || pInfo->pUpdateRes->info.rows == 0) {
|
||||
if (!IS_FINAL_OP(pInfo)) {
|
||||
// semi interval operator clear disk buffer
|
||||
clearStreamIntervalOperator(pInfo);
|
||||
}
|
||||
return NULL;
|
||||
if (!IS_FINAL_OP(pInfo)) {
|
||||
// semi interval operator clear disk buffer
|
||||
clearStreamIntervalOperator(pInfo);
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
printDataBlock(pInfo->binfo.pRes, IS_FINAL_OP(pInfo) ? "interval Final" : "interval Semi");
|
||||
return pInfo->binfo.pRes;
|
||||
} else {
|
||||
doBuildResultDatablock(pOperator, &pInfo->binfo, &pInfo->groupResInfo, pInfo->aggSup.pResultBuf);
|
||||
if (pInfo->binfo.pRes->info.rows != 0) {
|
||||
printDataBlock(pInfo->binfo.pRes, IS_FINAL_OP(pInfo) ? "interval Final" : "interval Semi");
|
||||
return pInfo->binfo.pRes;
|
||||
}
|
||||
if (pInfo->pUpdateRes->info.rows != 0 && pInfo->returnUpdate) {
|
||||
pInfo->returnUpdate = false;
|
||||
ASSERT(!IS_FINAL_OP(pInfo));
|
||||
printDataBlock(pInfo->pUpdateRes, IS_FINAL_OP(pInfo) ? "interval Final" : "interval Semi");
|
||||
// process the rest of the data
|
||||
pOperator->status = OP_OPENED;
|
||||
return pInfo->pUpdateRes;
|
||||
}
|
||||
return pInfo->binfo.pRes;
|
||||
doBuildPullDataBlock(pInfo->pPullWins, &pInfo->pullIndex, pInfo->pPullDataRes);
|
||||
if (pInfo->pPullDataRes->info.rows != 0) {
|
||||
// process the rest of the data
|
||||
ASSERT(IS_FINAL_OP(pInfo));
|
||||
printDataBlock(pInfo->pPullDataRes, IS_FINAL_OP(pInfo) ? "interval Final" : "interval Semi");
|
||||
return pInfo->pPullDataRes;
|
||||
}
|
||||
}
|
||||
|
||||
while (1) {
|
||||
SSDataBlock* pBlock = downstream->fpSet.getNextFn(downstream);
|
||||
if (pBlock == NULL) {
|
||||
clearUpdateDataBlock(pInfo->pUpdateRes);
|
||||
clearSpecialDataBlock(pInfo->pUpdateRes);
|
||||
pOperator->status = OP_RES_TO_RETURN;
|
||||
qInfo("Stream Final Interval return data");
|
||||
break;
|
||||
}
|
||||
printDataBlock(pBlock, IS_FINAL_OP(pInfo) ? "interval Final recv" : "interval Semi recv");
|
||||
maxTs = TMAX(maxTs, pBlock->info.window.ekey);
|
||||
|
||||
if (pBlock->info.type == STREAM_REPROCESS) {
|
||||
if (pBlock->info.type == STREAM_NORMAL || pBlock->info.type == STREAM_PUSH_DATA || pBlock->info.type == STREAM_INVALID) {
|
||||
pInfo->binfo.pRes->info.type = pBlock->info.type;
|
||||
} else if (pBlock->info.type == STREAM_CLEAR) {
|
||||
SArray* pUpWins = taosArrayInit(8, sizeof(STimeWindow));
|
||||
doClearWindows(&pInfo->aggSup, pSup, &pInfo->interval, pInfo->primaryTsIndex, pOperator->exprSupp.numOfExprs,
|
||||
pBlock, pUpWins);
|
||||
|
@ -2310,11 +2485,25 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) {
|
|||
}
|
||||
removeResults(pUpWins, pUpdated);
|
||||
copyUpdateDataBlock(pInfo->pUpdateRes, pBlock, pInfo->primaryTsIndex);
|
||||
pInfo->returnUpdate = true;
|
||||
taosArrayDestroy(pUpWins);
|
||||
break;
|
||||
} else if (pBlock->info.type == STREAM_GET_ALL && IS_FINAL_OP(pInfo)) {
|
||||
getAllIntervalWindow(pInfo->aggSup.pResultRowHashTable, pUpdated);
|
||||
continue;
|
||||
} else if (pBlock->info.type == STREAM_RETRIEVE && !IS_FINAL_OP(pInfo)) {
|
||||
SArray* pUpWins = taosArrayInit(8, sizeof(STimeWindow));
|
||||
doClearWindows(&pInfo->aggSup, pSup, &pInfo->interval, 0, pOperator->exprSupp.numOfExprs,
|
||||
pBlock, pUpWins);
|
||||
removeResults(pUpWins, pUpdated);
|
||||
taosArrayDestroy(pUpWins);
|
||||
if (taosArrayGetSize(pUpdated) > 0) {
|
||||
break;
|
||||
}
|
||||
continue;
|
||||
} else if (pBlock->info.type == STREAM_PUSH_EMPTY && IS_FINAL_OP(pInfo)) {
|
||||
processPushEmpty(pBlock, pInfo->pPullDataMap);
|
||||
continue;
|
||||
}
|
||||
|
||||
setInputDataBlock(pOperator, pSup->pCtx, pBlock, pInfo->order, MAIN_SCAN, true);
|
||||
|
@ -2334,31 +2523,70 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) {
|
|||
SStreamFinalIntervalOperatorInfo* pChInfo = pChildOp->info;
|
||||
setInputDataBlock(pChildOp, pChildOp->exprSupp.pCtx, pBlock, pChInfo->order, MAIN_SCAN, true);
|
||||
doHashInterval(pChildOp, pBlock, pBlock->info.groupId, NULL);
|
||||
pChInfo->twAggSup.maxTs = TMAX(pChInfo->twAggSup.maxTs, pBlock->info.window.ekey);
|
||||
|
||||
if (needBreak(pInfo)) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
maxTs = TMAX(maxTs, pBlock->info.window.ekey);
|
||||
}
|
||||
|
||||
pInfo->twAggSup.maxTs = TMAX(pInfo->twAggSup.maxTs, maxTs);
|
||||
if (IS_FINAL_OP(pInfo)) {
|
||||
closeIntervalWindow(pInfo->aggSup.pResultRowHashTable, &pInfo->twAggSup, &pInfo->interval, pUpdated);
|
||||
closeIntervalWindow(pInfo->aggSup.pResultRowHashTable, &pInfo->twAggSup,
|
||||
&pInfo->interval, pInfo->pPullDataMap, pUpdated);
|
||||
closeChildIntervalWindow(pInfo->pChildren, pInfo->twAggSup.maxTs);
|
||||
}
|
||||
|
||||
finalizeUpdatedResult(pOperator->exprSupp.numOfExprs, pInfo->aggSup.pResultBuf, pUpdated, pSup->rowEntryInfoOffset);
|
||||
initMultiResInfoFromArrayList(&pInfo->groupResInfo, pUpdated);
|
||||
blockDataEnsureCapacity(pInfo->binfo.pRes, pOperator->resultInfo.capacity);
|
||||
doBuildResultDatablock(pOperator, &pInfo->binfo, &pInfo->groupResInfo, pInfo->aggSup.pResultBuf);
|
||||
pOperator->status = OP_RES_TO_RETURN;
|
||||
if (pInfo->binfo.pRes->info.rows == 0) {
|
||||
pOperator->status = OP_EXEC_DONE;
|
||||
if (pInfo->pUpdateRes->info.rows == 0) {
|
||||
return NULL;
|
||||
}
|
||||
if (pInfo->binfo.pRes->info.rows != 0) {
|
||||
printDataBlock(pInfo->binfo.pRes, IS_FINAL_OP(pInfo) ? "interval Final" : "interval Semi");
|
||||
return pInfo->binfo.pRes;
|
||||
}
|
||||
|
||||
if (pInfo->pUpdateRes->info.rows != 0 && pInfo->returnUpdate) {
|
||||
pInfo->returnUpdate = false;
|
||||
ASSERT(!IS_FINAL_OP(pInfo));
|
||||
printDataBlock(pInfo->pUpdateRes, IS_FINAL_OP(pInfo) ? "interval Final" : "interval Semi");
|
||||
// process the rest of the data
|
||||
pOperator->status = OP_OPENED;
|
||||
return pInfo->pUpdateRes;
|
||||
}
|
||||
return pInfo->binfo.pRes;
|
||||
|
||||
doBuildPullDataBlock(pInfo->pPullWins, &pInfo->pullIndex, pInfo->pPullDataRes);
|
||||
if (pInfo->pPullDataRes->info.rows != 0) {
|
||||
// process the rest of the data
|
||||
ASSERT(IS_FINAL_OP(pInfo));
|
||||
printDataBlock(pInfo->pPullDataRes, IS_FINAL_OP(pInfo) ? "interval Final" : "interval Semi");
|
||||
return pInfo->pPullDataRes;
|
||||
}
|
||||
// ASSERT(false);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
SSDataBlock* createPullDataBlock() {
|
||||
SSDataBlock* pBlock = taosMemoryCalloc(1, sizeof(SSDataBlock));
|
||||
pBlock->info.hasVarCol = false;
|
||||
pBlock->info.groupId = 0;
|
||||
pBlock->info.rows = 0;
|
||||
pBlock->info.type = STREAM_RETRIEVE;
|
||||
pBlock->info.rowSize = sizeof(TSKEY) + sizeof(TSKEY) + sizeof(uint64_t);
|
||||
|
||||
pBlock->pDataBlock = taosArrayInit(3, sizeof(SColumnInfoData));
|
||||
SColumnInfoData infoData = {0};
|
||||
infoData.info.type = TSDB_DATA_TYPE_TIMESTAMP;
|
||||
infoData.info.bytes = sizeof(TSKEY);
|
||||
// window start ts
|
||||
taosArrayPush(pBlock->pDataBlock, &infoData);
|
||||
// window end ts
|
||||
taosArrayPush(pBlock->pDataBlock, &infoData);
|
||||
|
||||
infoData.info.type = TSDB_DATA_TYPE_UBIGINT;
|
||||
infoData.info.bytes = sizeof(uint64_t);
|
||||
taosArrayPush(pBlock->pDataBlock, &infoData);
|
||||
|
||||
return pBlock;
|
||||
}
|
||||
|
||||
SOperatorInfo* createStreamFinalIntervalOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pPhyNode,
|
||||
|
@ -2412,23 +2640,30 @@ SOperatorInfo* createStreamFinalIntervalOperatorInfo(SOperatorInfo* downstream,
|
|||
goto _error;
|
||||
}
|
||||
}
|
||||
// semi interval operator does not cache results
|
||||
pInfo->pUpdateRes = createResDataBlock(pPhyNode->pOutputDataBlockDesc);
|
||||
pInfo->pUpdateRes->info.type = STREAM_REPROCESS;
|
||||
pInfo->pUpdateRes->info.type = STREAM_CLEAR;
|
||||
blockDataEnsureCapacity(pInfo->pUpdateRes, 128);
|
||||
pInfo->returnUpdate = false;
|
||||
|
||||
pInfo->pPhyNode = (SPhysiNode*)nodesCloneNode((SNode*)pPhyNode);
|
||||
|
||||
if (pPhyNode->type == QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_INTERVAL) {
|
||||
pInfo->isFinal = true;
|
||||
pOperator->name = "StreamFinalIntervalOperator";
|
||||
} else {
|
||||
// semi interval operator does not cache results
|
||||
pInfo->isFinal = false;
|
||||
pOperator->name = "StreamSemiIntervalOperator";
|
||||
}
|
||||
|
||||
if (!IS_FINAL_OP(pInfo)) {
|
||||
if (!IS_FINAL_OP(pInfo) || numOfChild == 0) {
|
||||
pInfo->twAggSup.calTrigger = STREAM_TRIGGER_AT_ONCE;
|
||||
}
|
||||
pInfo->pPullWins = taosArrayInit(8, sizeof(SPullWindowInfo));
|
||||
pInfo->pullIndex = 0;
|
||||
_hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY);
|
||||
pInfo->pPullDataMap = taosHashInit(64, hashFn, false, HASH_NO_LOCK);
|
||||
pInfo->pPullDataRes = createPullDataBlock();
|
||||
|
||||
pOperator->operatorType = pPhyNode->type;
|
||||
pOperator->blocking = true;
|
||||
|
@ -2811,11 +3046,6 @@ void compactTimeWindow(SStreamSessionAggOperatorInfo* pInfo, int32_t startIndex,
|
|||
}
|
||||
}
|
||||
|
||||
typedef struct SWinRes {
|
||||
TSKEY ts;
|
||||
uint64_t groupId;
|
||||
} SWinRes;
|
||||
|
||||
static void doStreamSessionAggImpl(SOperatorInfo* pOperator, SSDataBlock* pSDataBlock, SHashObj* pStUpdated,
|
||||
SHashObj* pStDeleted, bool hasEndTs) {
|
||||
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
|
||||
|
@ -3027,14 +3257,14 @@ static SSDataBlock* doStreamSessionAgg(SOperatorInfo* pOperator) {
|
|||
} else if (pOperator->status == OP_RES_TO_RETURN) {
|
||||
doBuildDeleteDataBlock(pInfo->pStDeleted, pInfo->pDelRes, &pInfo->pDelIterator);
|
||||
if (pInfo->pDelRes->info.rows > 0) {
|
||||
/*printDataBlock(pInfo->pDelRes, "session del");*/
|
||||
printDataBlock(pInfo->pDelRes, IS_FINAL_OP(pInfo)? "Final Session" : "Single Session");
|
||||
return pInfo->pDelRes;
|
||||
}
|
||||
doBuildResultDatablock(pOperator, pBInfo, &pInfo->groupResInfo, pInfo->streamAggSup.pResultBuf);
|
||||
if (pBInfo->pRes->info.rows == 0 || !hasDataInGroupInfo(&pInfo->groupResInfo)) {
|
||||
doSetOperatorCompleted(pOperator);
|
||||
}
|
||||
/*printDataBlock(pBInfo->pRes, "session insert");*/
|
||||
printDataBlock(pBInfo->pRes, IS_FINAL_OP(pInfo)? "Final Session" : "Single Session");
|
||||
return pBInfo->pRes->info.rows == 0 ? NULL : pBInfo->pRes;
|
||||
}
|
||||
|
||||
|
@ -3048,7 +3278,7 @@ static SSDataBlock* doStreamSessionAgg(SOperatorInfo* pOperator) {
|
|||
break;
|
||||
}
|
||||
|
||||
if (pBlock->info.type == STREAM_REPROCESS) {
|
||||
if (pBlock->info.type == STREAM_CLEAR) {
|
||||
SArray* pWins = taosArrayInit(16, sizeof(SResultWindowInfo));
|
||||
doClearSessionWindows(&pInfo->streamAggSup, &pOperator->exprSupp, pBlock, 0, pOperator->exprSupp.numOfExprs,
|
||||
pInfo->gap, pWins);
|
||||
|
@ -3102,11 +3332,11 @@ static SSDataBlock* doStreamSessionAgg(SOperatorInfo* pOperator) {
|
|||
blockDataEnsureCapacity(pInfo->binfo.pRes, pOperator->resultInfo.capacity);
|
||||
doBuildDeleteDataBlock(pInfo->pStDeleted, pInfo->pDelRes, &pInfo->pDelIterator);
|
||||
if (pInfo->pDelRes->info.rows > 0) {
|
||||
/*printDataBlock(pInfo->pDelRes, "session del");*/
|
||||
printDataBlock(pInfo->pDelRes, IS_FINAL_OP(pInfo)? "Final Session" : "Single Session");
|
||||
return pInfo->pDelRes;
|
||||
}
|
||||
doBuildResultDatablock(pOperator, &pInfo->binfo, &pInfo->groupResInfo, pInfo->streamAggSup.pResultBuf);
|
||||
/*printDataBlock(pBInfo->pRes, "session insert");*/
|
||||
printDataBlock(pBInfo->pRes, IS_FINAL_OP(pInfo)? "Final Session" : "Single Session");
|
||||
return pBInfo->pRes->info.rows == 0 ? NULL : pBInfo->pRes;
|
||||
}
|
||||
|
||||
|
@ -3169,11 +3399,11 @@ static SSDataBlock* doStreamSessionSemiAgg(SOperatorInfo* pOperator) {
|
|||
while (1) {
|
||||
SSDataBlock* pBlock = downstream->fpSet.getNextFn(downstream);
|
||||
if (pBlock == NULL) {
|
||||
clearUpdateDataBlock(pInfo->pUpdateRes);
|
||||
clearSpecialDataBlock(pInfo->pUpdateRes);
|
||||
break;
|
||||
}
|
||||
|
||||
if (pBlock->info.type == STREAM_REPROCESS) {
|
||||
if (pBlock->info.type == STREAM_CLEAR) {
|
||||
SArray* pWins = taosArrayInit(16, sizeof(SResultWindowInfo));
|
||||
doClearSessionWindows(&pInfo->streamAggSup, pSup, pBlock, 0, pSup->numOfExprs, pInfo->gap, pWins);
|
||||
removeSessionResults(pStUpdated, pWins);
|
||||
|
@ -3236,7 +3466,7 @@ SOperatorInfo* createStreamFinalSessionAggOperatorInfo(SOperatorInfo* downstream
|
|||
} else {
|
||||
pInfo->isFinal = false;
|
||||
pInfo->pUpdateRes = createResDataBlock(pPhyNode->pOutputDataBlockDesc);
|
||||
pInfo->pUpdateRes->info.type = STREAM_REPROCESS;
|
||||
pInfo->pUpdateRes->info.type = STREAM_CLEAR;
|
||||
blockDataEnsureCapacity(pInfo->pUpdateRes, 128);
|
||||
pOperator->name = "StreamSessionSemiAggOperator";
|
||||
pOperator->fpSet =
|
||||
|
@ -3554,7 +3784,7 @@ static SSDataBlock* doStreamStateAgg(SOperatorInfo* pOperator) {
|
|||
break;
|
||||
}
|
||||
|
||||
if (pBlock->info.type == STREAM_REPROCESS) {
|
||||
if (pBlock->info.type == STREAM_CLEAR) {
|
||||
doClearStateWindows(&pInfo->streamAggSup, pBlock, pInfo->primaryTsIndex, &pInfo->stateCol, pInfo->stateCol.slotId,
|
||||
pSeUpdated, pInfo->pSeDeleted);
|
||||
continue;
|
||||
|
@ -3900,18 +4130,22 @@ _error:
|
|||
// merge interval operator
|
||||
typedef struct SMergeIntervalAggOperatorInfo {
|
||||
SIntervalAggOperatorInfo intervalAggOperatorInfo;
|
||||
|
||||
SHashObj* groupIntervalHash;
|
||||
void* groupIntervalIter;
|
||||
SList* groupIntervals;
|
||||
SListIter groupIntervalsIter;
|
||||
bool hasGroupId;
|
||||
uint64_t groupId;
|
||||
SSDataBlock* prefetchedBlock;
|
||||
bool inputBlocksFinished;
|
||||
} SMergeIntervalAggOperatorInfo;
|
||||
|
||||
typedef struct SGroupTimeWindow {
|
||||
uint64_t groupId;
|
||||
STimeWindow window;
|
||||
} SGroupTimeWindow;
|
||||
|
||||
void destroyMergeIntervalOperatorInfo(void* param, int32_t numOfOutput) {
|
||||
SMergeIntervalAggOperatorInfo* miaInfo = (SMergeIntervalAggOperatorInfo*)param;
|
||||
taosHashCleanup(miaInfo->groupIntervalHash);
|
||||
tdListFree(miaInfo->groupIntervals);
|
||||
destroyIntervalOperatorInfo(&miaInfo->intervalAggOperatorInfo, numOfOutput);
|
||||
}
|
||||
|
||||
|
@ -3940,15 +4174,22 @@ static int32_t outputPrevIntervalResult(SOperatorInfo* pOperatorInfo, uint64_t t
|
|||
bool ascScan = (iaInfo->order == TSDB_ORDER_ASC);
|
||||
SExprSupp* pExprSup = &pOperatorInfo->exprSupp;
|
||||
|
||||
STimeWindow* prevWin = taosHashGet(miaInfo->groupIntervalHash, &tableGroupId, sizeof(tableGroupId));
|
||||
if (prevWin == NULL) {
|
||||
taosHashPut(miaInfo->groupIntervalHash, &tableGroupId, sizeof(tableGroupId), newWin, sizeof(STimeWindow));
|
||||
return 0;
|
||||
}
|
||||
SGroupTimeWindow groupTimeWindow = {.groupId = tableGroupId, .window = *newWin};
|
||||
tdListAppend(miaInfo->groupIntervals, &groupTimeWindow);
|
||||
|
||||
if ((ascScan && newWin->skey > prevWin->skey || (!ascScan) && newWin->skey < prevWin->skey)) {
|
||||
finalizeWindowResult(pOperatorInfo, tableGroupId, prevWin, pResultBlock);
|
||||
taosHashPut(miaInfo->groupIntervalHash, &tableGroupId, sizeof(tableGroupId), newWin, sizeof(STimeWindow));
|
||||
SListIter iter = {0};
|
||||
tdListInitIter(miaInfo->groupIntervals, &iter, TD_LIST_FORWARD);
|
||||
SListNode* listNode = NULL;
|
||||
while ((listNode = tdListNext(&iter)) != NULL) {
|
||||
SGroupTimeWindow* prevGrpWin = (SGroupTimeWindow*)listNode->data;
|
||||
if (prevGrpWin->groupId != tableGroupId ) {
|
||||
continue;
|
||||
}
|
||||
STimeWindow* prevWin = &prevGrpWin->window;
|
||||
if ((ascScan && newWin->skey > prevWin->ekey || (!ascScan) && newWin->skey < prevWin->ekey)) {
|
||||
finalizeWindowResult(pOperatorInfo, tableGroupId, prevWin, pResultBlock);
|
||||
tdListPopNode(miaInfo->groupIntervals, listNode);
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
@ -4075,6 +4316,7 @@ static SSDataBlock* doMergeIntervalAgg(SOperatorInfo* pOperator) {
|
|||
}
|
||||
|
||||
if (pBlock == NULL) {
|
||||
tdListInitIter(miaInfo->groupIntervals, &miaInfo->groupIntervalsIter, TD_LIST_FORWARD);
|
||||
miaInfo->inputBlocksFinished = true;
|
||||
break;
|
||||
}
|
||||
|
@ -4100,14 +4342,12 @@ static SSDataBlock* doMergeIntervalAgg(SOperatorInfo* pOperator) {
|
|||
}
|
||||
|
||||
if (miaInfo->inputBlocksFinished) {
|
||||
void* win = taosHashIterate(miaInfo->groupIntervalHash, miaInfo->groupIntervalIter);
|
||||
if (win != NULL) {
|
||||
miaInfo->groupIntervalIter = win;
|
||||
SListNode* listNode = tdListNext(&miaInfo->groupIntervalsIter);
|
||||
|
||||
size_t len = 0;
|
||||
uint64_t* pTableGroupId = taosHashGetKey(win, &len);
|
||||
finalizeWindowResult(pOperator, *pTableGroupId, win, pRes);
|
||||
pRes->info.groupId = *pTableGroupId;
|
||||
if (listNode != NULL) {
|
||||
SGroupTimeWindow* grpWin = (SGroupTimeWindow*)(listNode->data);
|
||||
finalizeWindowResult(pOperator, grpWin->groupId, &grpWin->window, pRes);
|
||||
pRes->info.groupId = grpWin->groupId;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -4129,8 +4369,7 @@ SOperatorInfo* createMergeIntervalOperatorInfo(SOperatorInfo* downstream, SExprI
|
|||
goto _error;
|
||||
}
|
||||
|
||||
miaInfo->groupIntervalHash = taosHashInit(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_UBIGINT), true, HASH_NO_LOCK);
|
||||
miaInfo->groupIntervalIter = NULL;
|
||||
miaInfo->groupIntervals = tdListNew(sizeof(SGroupTimeWindow));
|
||||
|
||||
SIntervalAggOperatorInfo* iaInfo = &miaInfo->intervalAggOperatorInfo;
|
||||
|
||||
|
|
|
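The final interval operator above tracks, for each window that is closed but not yet complete, which child tasks still owe data: addPullWindow registers every child id under a (ts, groupId) key, each retrieve response removes its own child id, and closeIntervalWindow skips the window while the list is non-empty. Below is a simplified standalone sketch of that bookkeeping; it replaces the taosHash map with a tiny fixed-size table and is only an illustration of the flow, not the actual API.

#include <stdbool.h>
#include <stdio.h>
#include <stdint.h>

#define MAX_PULL  16
#define MAX_CHILD 8

typedef struct { int64_t ts; uint64_t groupId; } WinKey;  // stands in for SWinRes
typedef struct { WinKey key; int child[MAX_CHILD]; int num; bool used; } PullEntry;

static PullEntry g_pull[MAX_PULL];

static PullEntry *findPull(WinKey k) {
  for (int i = 0; i < MAX_PULL; i++)
    if (g_pull[i].used && g_pull[i].key.ts == k.ts && g_pull[i].key.groupId == k.groupId)
      return &g_pull[i];
  return NULL;
}

// mirrors addPullWindow: remember that every child still owes data for this window
static void addPullWindow(WinKey k, int numChild) {
  for (int i = 0; i < MAX_PULL; i++) {
    if (!g_pull[i].used) {
      g_pull[i].used = true;
      g_pull[i].key = k;
      g_pull[i].num = numChild;
      for (int c = 0; c < numChild; c++) g_pull[i].child[c] = c;
      return;
    }
  }
}

// mirrors the retrieve-response path: drop the child id; once the list is empty
// the window no longer blocks closeIntervalWindow
static void ackChild(WinKey k, int childId) {
  PullEntry *e = findPull(k);
  if (e == NULL) return;
  for (int c = 0; c < e->num; c++) {
    if (e->child[c] == childId) {
      e->child[c] = e->child[--e->num];
      break;
    }
  }
  if (e->num == 0) e->used = false;  // pull data is complete
}

static bool windowCanClose(WinKey k) { return findPull(k) == NULL; }

int main(void) {
  WinKey w = {.ts = 1648791210000, .groupId = 1};
  addPullWindow(w, 2);
  printf("can close: %d\n", windowCanClose(w));  // 0: children 0 and 1 still owe data
  ackChild(w, 0);
  ackChild(w, 1);
  printf("can close: %d\n", windowCanClose(w));  // 1
  return 0;
}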
@ -115,6 +115,7 @@ int32_t streamBroadcastToChildren(SStreamTask* pTask, const SSDataBlock* pBlock)
|
|||
.srcNodeId = pTask->nodeId,
|
||||
.srcTaskId = pTask->taskId,
|
||||
.pRetrieve = pRetrieve,
|
||||
.retrieveLen = dataStrLen,
|
||||
};
|
||||
|
||||
int32_t sz = taosArrayGetSize(pTask->childEpInfo);
|
||||
|
@ -146,7 +147,7 @@ int32_t streamBroadcastToChildren(SStreamTask* pTask, const SSDataBlock* pBlock)
|
|||
.code = 0,
|
||||
.msgType = TDMT_STREAM_RETRIEVE,
|
||||
.pCont = buf,
|
||||
.contLen = len,
|
||||
.contLen = sizeof(SMsgHead) + len,
|
||||
};
|
||||
|
||||
if (tmsgSendReq(&pEpInfo->epSet, &rpcMsg) < 0) {
|
||||
|
|
|
@ -45,11 +45,16 @@ static int32_t streamTaskExecImpl(SStreamTask* pTask, void* data, SArray* pRes)
|
|||
ASSERT(false);
|
||||
}
|
||||
if (output == NULL) {
|
||||
if (pItem->type == STREAM_INPUT__DATA_RETRIEVE && !hasData) {
|
||||
SSDataBlock block = {0};
|
||||
block.info.type = STREAM_PUSH_DATA;
|
||||
block.info.childId = pTask->selfChildId;
|
||||
taosArrayPush(pRes, &block);
|
||||
if (pItem->type == STREAM_INPUT__DATA_RETRIEVE) {
|
||||
//SSDataBlock block = {0};
|
||||
//block.info.type = STREAM_PUSH_EMPTY;
|
||||
//block.info.childId = pTask->selfChildId;
|
||||
SStreamDataBlock* pRetrieveBlock = (SStreamDataBlock*)data;
|
||||
ASSERT(taosArrayGetSize(pRetrieveBlock->blocks) == 1);
|
||||
SSDataBlock* pBlock = createOneDataBlock(taosArrayGet(pRetrieveBlock->blocks, 0), true);
|
||||
pBlock->info.type = STREAM_PUSH_EMPTY;
|
||||
pBlock->info.childId = pTask->selfChildId;
|
||||
taosArrayPush(pRes, pBlock);
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
@ -109,7 +114,7 @@ static SArray* streamExecForQall(SStreamTask* pTask, SArray* pRes) {
|
|||
if (type == STREAM_INPUT__TRIGGER) {
|
||||
blockDataDestroy(((SStreamTrigger*)data)->pBlock);
|
||||
taosFreeQitem(data);
|
||||
} else if (type == STREAM_INPUT__DATA_BLOCK) {
|
||||
} else if (type == STREAM_INPUT__DATA_BLOCK || type == STREAM_INPUT__DATA_RETRIEVE) {
|
||||
taosArrayDestroyEx(((SStreamDataBlock*)data)->blocks, (FDelete)tDeleteSSDataBlock);
|
||||
taosFreeQitem(data);
|
||||
} else if (type == STREAM_INPUT__DATA_SUBMIT) {
|
||||
|
|
|
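In streamTaskExecImpl above, a retrieve request that produces no output is now answered with a copy of the retrieve block re-typed as STREAM_PUSH_EMPTY and stamped with the task's selfChildId, so the parent operator can clear the matching pending-pull entry (see processPushEmpty). The following rough sketch shows only that shape of acknowledgement, with a hypothetical plain struct in place of SSDataBlock.

#include <stdio.h>
#include <stdint.h>

enum { BLOCK_RETRIEVE = 1, BLOCK_PUSH_EMPTY = 2 };

typedef struct {
  int      type;
  int      childId;
  int64_t  winStart;   // window key carried by the retrieve request
  uint64_t groupId;
} MiniBlock;

// Build the "nothing to send back" acknowledgement: copy the window key from
// the retrieve block, mark it PUSH_EMPTY, and tag it with this task's child id.
static MiniBlock makeEmptyAck(const MiniBlock *retrieve, int selfChildId) {
  MiniBlock ack = *retrieve;
  ack.type = BLOCK_PUSH_EMPTY;
  ack.childId = selfChildId;
  return ack;
}

int main(void) {
  MiniBlock req = {.type = BLOCK_RETRIEVE, .childId = -1, .winStart = 1648791210000, .groupId = 7};
  MiniBlock ack = makeEmptyAck(&req, 3);
  printf("ack: type=%d childId=%d winStart=%lld groupId=%llu\n",
         ack.type, ack.childId, (long long)ack.winStart, (unsigned long long)ack.groupId);
  return 0;
}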
@ -119,6 +119,7 @@ SUpdateInfo *updateInfoInit(int64_t interval, int32_t precision, int64_t waterma
|
|||
taosArrayPush(pInfo->pTsBuckets, &dumy);
|
||||
}
|
||||
pInfo->numBuckets = DEFAULT_BUCKET_SIZE;
|
||||
pInfo->pCloseWinSBF = NULL;
|
||||
return pInfo;
|
||||
}
|
||||
|
||||
|
@ -154,6 +155,9 @@ bool updateInfoIsUpdated(SUpdateInfo *pInfo, tb_uid_t tableId, TSKEY ts) {
|
|||
TSKEY maxTs = *(TSKEY *)taosArrayGet(pInfo->pTsBuckets, index);
|
||||
if (ts < maxTs - pInfo->watermark) {
|
||||
// this window has been closed.
|
||||
if (pInfo->pCloseWinSBF) {
|
||||
return tScalableBfPut(pInfo->pCloseWinSBF, &ts, sizeof(TSKEY));
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
|
@ -193,3 +197,19 @@ void updateInfoDestroy(SUpdateInfo *pInfo) {
|
|||
taosArrayDestroy(pInfo->pTsSBFs);
|
||||
taosMemoryFree(pInfo);
|
||||
}
|
||||
|
||||
void updateInfoAddCloseWindowSBF(SUpdateInfo *pInfo) {
|
||||
if (pInfo->pCloseWinSBF) {
|
||||
return;
|
||||
}
|
||||
int64_t rows = adjustExpEntries(pInfo->interval * ROWS_PER_MILLISECOND);
|
||||
pInfo->pCloseWinSBF = tScalableBfInit(rows, DEFAULT_FALSE_POSITIVE);
|
||||
}
|
||||
|
||||
void updateInfoDestoryColseWinSBF(SUpdateInfo *pInfo) {
|
||||
if (!pInfo || !pInfo->pCloseWinSBF) {
|
||||
return;
|
||||
}
|
||||
tScalableBfDestroy(pInfo->pCloseWinSBF);
|
||||
pInfo->pCloseWinSBF = NULL;
|
||||
}
|
||||
|
|
|
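updateInfoIsUpdated above treats a row whose timestamp falls behind maxTs - watermark as belonging to an already-closed window and, when the optional close-window filter is attached, returns the result of tScalableBfPut directly; the sketch below assumes that call reports whether the timestamp is new, so only the first late arrival per timestamp is flagged. It is a self-contained simplification that uses a trivial linear "seen set" in place of the scalable Bloom filter.

#include <stdbool.h>
#include <stdio.h>
#include <stdint.h>

typedef int64_t TsKey;

#define MAX_SEEN 64

typedef struct {
  TsKey   maxTs;
  int64_t watermark;
  bool    hasCloseFilter;   // stands in for pCloseWinSBF != NULL
  TsKey   seen[MAX_SEEN];   // stands in for the scalable Bloom filter
  int     numSeen;
} MiniUpdateInfo;

// returns true only the first time a given closed-window timestamp is seen
static bool closeFilterPut(MiniUpdateInfo *info, TsKey ts) {
  for (int i = 0; i < info->numSeen; i++)
    if (info->seen[i] == ts) return false;
  if (info->numSeen < MAX_SEEN) info->seen[info->numSeen++] = ts;
  return true;
}

static bool isUpdated(MiniUpdateInfo *info, TsKey ts) {
  if (ts < info->maxTs - info->watermark) {
    // the window of this row has already been closed
    if (info->hasCloseFilter) return closeFilterPut(info, ts);
    return true;              // without the filter every late row is reported
  }
  if (ts > info->maxTs) info->maxTs = ts;
  return false;               // in-order data goes through the regular update path
}

int main(void) {
  MiniUpdateInfo info = {.maxTs = 2000, .watermark = 100, .hasCloseFilter = true};
  printf("%d\n", isUpdated(&info, 1500));  // 1: first late row for ts=1500
  printf("%d\n", isUpdated(&info, 1500));  // 0: duplicate late row is suppressed
  printf("%d\n", isUpdated(&info, 2100));  // 0: in-order row, maxTs advances
  return 0;
}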
@ -914,6 +914,9 @@ void syncNodeStart(SSyncNode* pSyncNode) {
|
|||
syncNodeBecomeLeader(pSyncNode, "one replica start");
|
||||
|
||||
// Raft 3.6.2 Committing entries from previous terms
|
||||
syncNodeAppendNoop(pSyncNode);
|
||||
syncMaybeAdvanceCommitIndex(pSyncNode);
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -1662,6 +1665,12 @@ void syncNodeDoConfigChange(SSyncNode* pSyncNode, SSyncCfg* pNewConfig, SyncInde
|
|||
// change isStandBy to normal (election timeout)
|
||||
if (pSyncNode->state == TAOS_SYNC_STATE_LEADER) {
|
||||
syncNodeBecomeLeader(pSyncNode, tmpbuf);
|
||||
|
||||
// Raft 3.6.2 Committing entries from previous terms
|
||||
syncNodeReplicate(pSyncNode);
|
||||
syncNodeAppendNoop(pSyncNode);
|
||||
syncMaybeAdvanceCommitIndex(pSyncNode);
|
||||
|
||||
} else {
|
||||
syncNodeBecomeFollower(pSyncNode, tmpbuf);
|
||||
}
|
||||
|
@ -1807,16 +1816,9 @@ void syncNodeBecomeLeader(SSyncNode* pSyncNode, const char* debugStr) {
|
|||
// stop elect timer
|
||||
syncNodeStopElectTimer(pSyncNode);
|
||||
|
||||
// start replicate right now!
|
||||
syncNodeReplicate(pSyncNode);
|
||||
|
||||
// start heartbeat timer
|
||||
syncNodeStartHeartbeatTimer(pSyncNode);
|
||||
|
||||
// append noop
|
||||
syncNodeAppendNoop(pSyncNode);
|
||||
syncMaybeAdvanceCommitIndex(pSyncNode); // maybe only one replica
|
||||
|
||||
// trace log
|
||||
do {
|
||||
int32_t debugStrLen = strlen(debugStr);
|
||||
|
@ -1841,9 +1843,9 @@ void syncNodeCandidate2Leader(SSyncNode* pSyncNode) {
|
|||
syncNodeLog2("==state change syncNodeCandidate2Leader==", pSyncNode);
|
||||
|
||||
// Raft 3.6.2 Committing entries from previous terms
|
||||
|
||||
// do not use this
|
||||
// syncNodeEqNoop(pSyncNode);
|
||||
syncNodeReplicate(pSyncNode);
|
||||
syncNodeAppendNoop(pSyncNode);
|
||||
syncMaybeAdvanceCommitIndex(pSyncNode);
|
||||
}
|
||||
|
||||
void syncNodeFollower2Candidate(SSyncNode* pSyncNode) {
|
||||
|
|
|
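The sync changes above move the "committing entries from previous terms" step (Raft 3.6.2) out of syncNodeBecomeLeader and into its callers: after winning an election the node replicates, appends a no-op entry in its own term, and then tries to advance the commit index. The compact sketch below only illustrates why the no-op matters, under a toy model where commit advances solely over majority-replicated entries of the current term; it is not the TDengine sync code.

#include <stdio.h>

#define LOG_CAP 8

typedef struct { int term; } Entry;

// Raft 3.6.2: a leader only advances commitIndex over an index that is
// (a) replicated on a majority and (b) holds an entry of the leader's CURRENT
// term.  Appending a no-op right after election gives the leader such an entry,
// and committing it also commits every older entry before it.
static int advanceCommit(const Entry *log, int lastIndex, int commitIndex,
                         int majorityIndex, int currentTerm) {
  for (int n = lastIndex; n > commitIndex; n--) {
    if (n <= majorityIndex && log[n].term == currentTerm) {
      return n;  // everything up to n, including old-term entries, is now committed
    }
  }
  return commitIndex;
}

int main(void) {
  Entry log[LOG_CAP] = {{0}, {1}, {1}, {2}};  // index 0 unused; entries from terms 1 and 2
  int   lastIndex = 3, commitIndex = 0, currentTerm = 3;

  // Before the no-op: even if index 3 is on a majority, its term (2) is old.
  printf("commit = %d\n", advanceCommit(log, lastIndex, commitIndex, 3, currentTerm));  // 0

  // Leader appends a no-op in term 3 and replicates it to a majority.
  log[4].term = currentTerm;
  lastIndex = 4;
  printf("commit = %d\n", advanceCommit(log, lastIndex, commitIndex, 4, currentTerm));  // 4
  return 0;
}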
@ -104,6 +104,8 @@ static int32_t walReadChangeFile(SWalReadHandle *pRead, int64_t fileFirstVer) {
|
|||
return -1;
|
||||
}
|
||||
|
||||
pRead->pReadLogTFile = pLogTFile;
|
||||
|
||||
walBuildIdxName(pRead->pWal, fileFirstVer, fnameStr);
|
||||
TdFilePtr pIdxTFile = taosOpenFile(fnameStr, TD_FILE_READ);
|
||||
if (pIdxTFile == NULL) {
|
||||
|
@ -112,7 +114,6 @@ static int32_t walReadChangeFile(SWalReadHandle *pRead, int64_t fileFirstVer) {
|
|||
return -1;
|
||||
}
|
||||
|
||||
pRead->pReadLogTFile = pLogTFile;
|
||||
pRead->pReadIdxTFile = pIdxTFile;
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -671,7 +671,7 @@ void taosFprintfFile(TdFilePtr pFile, const char *format, ...) {
|
|||
fflush(pFile->fp);
|
||||
}
|
||||
|
||||
bool taosValidFile(TdFilePtr pFile) { return pFile != NULL; }
|
||||
bool taosValidFile(TdFilePtr pFile) { return pFile != NULL && pFile->fd > 0; }
|
||||
|
||||
int32_t taosUmaskFile(int32_t maskVal) {
|
||||
#ifdef WINDOWS
|
||||
|
|
|
@ -24,7 +24,7 @@ class ClusterDnodes(TDDnodes):
|
|||
|
||||
|
||||
class ConfigureyCluster:
|
||||
"""This will create defined number of dnodes and create a cluset.
|
||||
"""This will create defined number of dnodes and create a cluster.
|
||||
at the same time, it will return TDDnodes list: dnodes, """
|
||||
hostname= socket.gethostname()
|
||||
|
||||
|
@ -85,8 +85,8 @@ class ConfigureyCluster:
|
|||
count+=1
|
||||
time.sleep(1)
|
||||
else:
|
||||
tdLog.debug("create cluster with %d dnode but check dnode not ready within 5s ! "%self.dnodeNums)
|
||||
return -1
|
||||
tdLog.exit("create cluster with %d dnode but check dnode not ready within 5s ! "%self.dnodeNums)
|
||||
|
||||
|
||||
|
||||
cluster = ConfigureyCluster()
|
|
@ -508,7 +508,6 @@ class TDDnode:
|
|||
|
||||
def stoptaosd(self):
|
||||
if (not self.remoteIP == ""):
|
||||
print("123")
|
||||
self.remoteExec(self.cfgDict, "tdDnodes.dnodes[%d].running=1\ntdDnodes.dnodes[%d].stop()"%(self.index-1,self.index-1))
|
||||
tdLog.info("stop dnode%d"%self.index)
|
||||
return
|
||||
|
@ -518,18 +517,21 @@ class TDDnode:
|
|||
toBeKilled = "valgrind.bin"
|
||||
|
||||
if self.running != 0:
|
||||
psCmd = "ps -ef|grep -w %s| grep dnode%d|grep -v grep | awk '{print $2}'" % (toBeKilled,self.index)
|
||||
processID = subprocess.check_output(
|
||||
psCmd, shell=True).decode("utf-8")
|
||||
|
||||
while(processID):
|
||||
killCmd = "kill -INT %s > /dev/null 2>&1" % processID
|
||||
os.system(killCmd)
|
||||
time.sleep(1)
|
||||
if platform.system().lower() == 'windows':
|
||||
os.system("wmic process where \"name='taosd.exe' and CommandLine like '%%dnode%d%%'\" get processId | xargs echo | awk '{print $2}' | xargs taskkill -f -pid"%self.index)
|
||||
else:
|
||||
psCmd = "ps -ef|grep -w %s| grep dnode%d|grep -v grep | awk '{print $2}'" % (toBeKilled,self.index)
|
||||
processID = subprocess.check_output(
|
||||
psCmd, shell=True).decode("utf-8")
|
||||
if self.valgrind:
|
||||
time.sleep(2)
|
||||
|
||||
while(processID):
|
||||
killCmd = "kill -INT %s > /dev/null 2>&1" % processID
|
||||
os.system(killCmd)
|
||||
time.sleep(1)
|
||||
processID = subprocess.check_output(
|
||||
psCmd, shell=True).decode("utf-8")
|
||||
if self.valgrind:
|
||||
time.sleep(2)
|
||||
|
||||
self.running = 0
|
||||
tdLog.debug("dnode:%d is stopped by kill -INT" % (self.index))
|
||||
|
|
|
@ -0,0 +1,58 @@
|
|||
@echo off
|
||||
SETLOCAL EnableDelayedExpansion
|
||||
for /F "tokens=1,2 delims=#" %%a in ('"prompt #$H#$E# & echo on & for %%b in (1) do rem"') do ( set "DEL=%%a")
|
||||
set /a a=0
|
||||
echo Windows Taosd Full Test
|
||||
set /a exitNum=0
|
||||
rm -rf failed.txt
|
||||
set caseFile="jenkins\\basic.txt"
|
||||
if not "%2" == "" (
|
||||
set caseFile="%2"
|
||||
)
|
||||
for /F "usebackq tokens=*" %%i in (!caseFile!) do (
|
||||
set line=%%i
|
||||
if "!line:~,9!" == "./test.sh" (
|
||||
set /a a+=1
|
||||
echo !a! Processing %%i
|
||||
call :GetTimeSeconds !time!
|
||||
set time1=!_timeTemp!
|
||||
echo Start at !time!
|
||||
call !line:./test.sh=wtest.bat! > result_!a!.txt 2>error_!a!.txt
|
||||
if errorlevel 1 ( call :colorEcho 0c "failed" &echo. && set /a exitNum=8 && echo %%i >>failed.txt ) else ( call :colorEcho 0a "Success" &echo. )
|
||||
)
|
||||
)
|
||||
exit !exitNum!
|
||||
|
||||
:colorEcho
|
||||
set timeNow=%time%
|
||||
call :GetTimeSeconds %timeNow%
|
||||
set time2=%_timeTemp%
|
||||
set /a interTime=%time2% - %time1%
|
||||
echo End at %timeNow% , cast %interTime%s
|
||||
echo off
|
||||
<nul set /p ".=%DEL%" > "%~2"
|
||||
findstr /v /a:%1 /R "^$" "%~2" nul
|
||||
del "%~2" > nul 2>&1i
|
||||
goto :eof
|
||||
|
||||
:GetTimeSeconds
|
||||
set tt=%1
|
||||
set tt=%tt:.= %
|
||||
set tt=%tt::= %
|
||||
set tt=%tt: 0= %
|
||||
set /a index=1
|
||||
for %%a in (%tt%) do (
|
||||
if !index! EQU 1 (
|
||||
set /a hh=%%a
|
||||
)^
|
||||
else if !index! EQU 2 (
|
||||
set /a mm=%%a
|
||||
|
||||
)^
|
||||
else if !index! EQU 3 (
|
||||
set /a ss=%%a
|
||||
)
|
||||
set /a index=index+1
|
||||
)
|
||||
set /a _timeTemp=(%hh%*60+%mm%)*60+%ss%
|
||||
goto :eof
|
|
@ -80,17 +80,17 @@ endi
|
|||
|
||||
if $data03 != 4 then
|
||||
print ======$data03
|
||||
return -1
|
||||
goto loop1
|
||||
endi
|
||||
|
||||
if $data04 != 52 then
|
||||
print ======$data04
|
||||
return -1
|
||||
goto loop1
|
||||
endi
|
||||
|
||||
if $data05 != 13 then
|
||||
print ======$data05
|
||||
return -1
|
||||
goto loop1
|
||||
endi
|
||||
|
||||
# row 1
|
||||
|
@ -179,7 +179,7 @@ sql use test1;
|
|||
sql create stable st(ts timestamp, a int, b int , c int) tags(ta int,tb int,tc int);
|
||||
sql create table ts1 using st tags(1,1,1);
|
||||
sql create table ts2 using st tags(2,2,2);
|
||||
sql create stream stream_t2 trigger at_once into streamtST1 as select _wstartts, count(*) c1, count(a) c2 , sum(a) c3 , max(b) c5, min(c) c6 from st interval(10s) ;
|
||||
sql create stream stream_t2 trigger at_once watermark 20s into streamtST1 as select _wstartts, count(*) c1, count(a) c2 , sum(a) c3 , max(b) c5, min(c) c6 from st interval(10s) ;
|
||||
|
||||
sql insert into ts1 values(1648791211000,1,2,3);
|
||||
sql insert into ts1 values(1648791222001,2,2,3);
|
||||
|
|
|
@ -74,7 +74,7 @@ sql create stable st(ts timestamp,a int,b int,c int,id int) tags(ta int,tb int,t
|
|||
sql create table ts1 using st tags(1,1,1);
|
||||
sql create table ts2 using st tags(2,2,2);
|
||||
|
||||
sql create stream stream_t2 trigger at_once into streamtST as select _wstartts, count(*) c1, count(a) c2 , sum(a) c3 , max(b) c5, min(c) c6, max(id) c7 from st partition by ta interval(10s) ;
|
||||
sql create stream stream_t2 trigger at_once watermark 20s into streamtST as select _wstartts, count(*) c1, count(a) c2 , sum(a) c3 , max(b) c5, min(c) c6, max(id) c7 from st partition by ta interval(10s) ;
|
||||
sql insert into ts1 values(1648791211000,1,2,3,1);
|
||||
sql insert into ts1 values(1648791222001,2,2,3,2);
|
||||
sql insert into ts2 values(1648791211000,1,2,3,3);
|
||||
|
@ -83,16 +83,16 @@ sql insert into ts2 values(1648791222001,2,2,3,4);
|
|||
sql insert into ts2 values(1648791222002,2,2,3,5);
|
||||
sql insert into ts2 values(1648791222002,2,2,3,6);
|
||||
|
||||
sql insert into ts1 values(1648791211000,1,2,3,1);
|
||||
sql insert into ts1 values(1648791222001,2,2,3,2);
|
||||
sql insert into ts2 values(1648791211000,1,2,3,3);
|
||||
sql insert into ts2 values(1648791222001,2,2,3,4);
|
||||
sql insert into ts1 values(1648791211000,1,2,3,7);
|
||||
sql insert into ts1 values(1648791222001,2,2,3,8);
|
||||
sql insert into ts2 values(1648791211000,1,2,3,9);
|
||||
sql insert into ts2 values(1648791222001,2,2,3,10);
|
||||
|
||||
$loop_count = 0
|
||||
|
||||
loop2:
|
||||
sleep 300
|
||||
sql select * from streamtST;
|
||||
sql select * from streamtST order by c7 asc;
|
||||
|
||||
$loop_count = $loop_count + 1
|
||||
if $loop_count == 10 then
|
||||
|
@ -104,8 +104,18 @@ print =====data01=$data01
|
|||
goto loop2
|
||||
endi
|
||||
|
||||
if $data02 != 1 then
|
||||
print =====data02=$data02
|
||||
if $data11 != 1 then
|
||||
print =====data11=$data11
|
||||
goto loop2
|
||||
endi
|
||||
|
||||
if $data21 != 1 then
|
||||
print =====data21=$data21
|
||||
goto loop2
|
||||
endi
|
||||
|
||||
if $data31 != 2 then
|
||||
print =====data31=$data31
|
||||
goto loop2
|
||||
endi
|
||||
|
||||
|
@ -114,8 +124,18 @@ print =====data03=$data03
|
|||
goto loop2
|
||||
endi
|
||||
|
||||
if $data04 != 2 then
|
||||
print =====data04=$data04
|
||||
if $data13 != 2 then
|
||||
print =====data13=$data13
|
||||
goto loop2
|
||||
endi
|
||||
|
||||
if $data23 != 1 then
|
||||
print =====data23=$data23
|
||||
goto loop2
|
||||
endi
|
||||
|
||||
if $data33 != 4 then
|
||||
print =====data33=$data33
|
||||
goto loop2
|
||||
endi
|
||||
|
||||
|
|
|
@ -79,17 +79,17 @@ endi
|
|||
|
||||
if $data03 != 4 then
|
||||
print ======$data03
|
||||
return -1
|
||||
goto loop1
|
||||
endi
|
||||
|
||||
if $data04 != 52 then
|
||||
print ======$data04
|
||||
return -1
|
||||
goto loop1
|
||||
endi
|
||||
|
||||
if $data05 != 13 then
|
||||
print ======$data05
|
||||
return -1
|
||||
goto loop1
|
||||
endi
|
||||
|
||||
# row 1
|
||||
|
|
|
@ -13,9 +13,6 @@ print user sysinfo0 login
|
|||
sql close
|
||||
sql connect sysinfo0
|
||||
|
||||
system sh/exec.sh -n dnode1 -s stop
|
||||
return
|
||||
|
||||
print =============== check oper
|
||||
sql_error create user u1 pass 'u1'
|
||||
sql_error drop user sysinfo1
|
||||
|
|
|
@ -0,0 +1,79 @@
|
|||
###################################################################
|
||||
# Copyright (c) 2016 by TAOS Technologies, Inc.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This file is proprietary and confidential to TAOS Technologies.
|
||||
# No part of this file may be reproduced, stored, transmitted,
|
||||
# disclosed or used in any form or by any means other than as
|
||||
# expressly provided by the written permission from Jianhui Tao
|
||||
#
|
||||
###################################################################
|
||||
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
from util.log import *
|
||||
from util.cases import *
|
||||
from util.sql import *
|
||||
from util.common import *
|
||||
|
||||
class TDTestCase:
|
||||
updatecfgDict = {'ttlUnit':5,'ttlPushInterval':3}
|
||||
def init(self, conn, logSql):
|
||||
tdLog.debug("start to execute %s" % __file__)
|
||||
tdSql.init(conn.cursor())
|
||||
self.ntbname = 'ntb'
|
||||
self.stbname = 'stb'
|
||||
self.tbnum = 10
|
||||
self.ttl_param = 1
|
||||
self.default_ttl = 100
|
||||
self.modify_ttl = 1
|
||||
def ttl_check_ntb(self):
|
||||
tdSql.prepare()
|
||||
|
||||
for i in range(self.tbnum):
|
||||
tdSql.execute(f'create table {self.ntbname}_{i} (ts timestamp,c0 int) ttl {self.ttl_param}')
|
||||
tdSql.query(f'show tables')
|
||||
tdSql.checkRows(self.tbnum)
|
||||
sleep(self.updatecfgDict['ttlUnit']*self.ttl_param+self.updatecfgDict['ttlPushInterval'])
|
||||
tdSql.query(f'show tables')
|
||||
tdSql.checkRows(0)
|
||||
for i in range(self.tbnum):
|
||||
tdSql.execute(f'create table {self.ntbname}_{i} (ts timestamp,c0 int) ttl {self.default_ttl}')
|
||||
for i in range(int(self.tbnum/2)):
|
||||
tdSql.execute(f'alter table {self.ntbname}_{i} ttl {self.modify_ttl}')
|
||||
sleep(self.updatecfgDict['ttlUnit']*self.modify_ttl+self.updatecfgDict['ttlPushInterval'])
|
||||
tdSql.query(f'show tables')
|
||||
tdSql.checkRows(self.tbnum - int(self.tbnum/2))
|
||||
tdSql.execute('drop database db')
|
||||
def ttl_check_ctb(self):
|
||||
tdSql.prepare()
|
||||
tdSql.execute(f'create table {self.stbname} (ts timestamp,c0 int) tags(t0 int)')
|
||||
|
||||
for i in range(self.tbnum):
|
||||
tdSql.execute(f'create table {self.stbname}_{i} using {self.stbname} tags({i}) ttl {self.ttl_param}')
|
||||
tdSql.query(f'show tables')
|
||||
tdSql.checkRows(self.tbnum)
|
||||
sleep(self.updatecfgDict['ttlUnit']*self.ttl_param+self.updatecfgDict['ttlPushInterval'])
|
||||
tdSql.query(f'show tables')
|
||||
tdSql.checkRows(0)
|
||||
for i in range(self.tbnum):
|
||||
tdSql.execute(f'create table {self.stbname}_{i} using {self.stbname} tags({i}) ttl {self.default_ttl}')
|
||||
tdSql.query(f'show tables')
|
||||
tdSql.checkRows(self.tbnum)
|
||||
for i in range(int(self.tbnum/2)):
|
||||
tdSql.execute(f'alter table {self.stbname}_{i} ttl {self.modify_ttl}')
|
||||
sleep(self.updatecfgDict['ttlUnit']*self.modify_ttl+self.updatecfgDict['ttlPushInterval'])
|
||||
tdSql.query(f'show tables')
|
||||
tdSql.checkRows(self.tbnum - int(self.tbnum/2))
|
||||
tdSql.execute('drop database db')
|
||||
|
||||
def run(self):
|
||||
self.ttl_check_ntb()
|
||||
self.ttl_check_ctb()
|
||||
|
||||
def stop(self):
|
||||
tdSql.close()
|
||||
tdLog.success("%s successfully executed" % __file__)
|
||||
|
||||
tdCases.addWindows(__file__, TDTestCase())
|
||||
tdCases.addLinux(__file__, TDTestCase())
|
|
@ -17,100 +17,140 @@ from util.log import *
|
|||
from util.cases import *
|
||||
from util.sql import *
|
||||
from util.common import *
|
||||
|
||||
from util.sqlset import *
|
||||
|
||||
|
||||
class TDTestCase:
|
||||
def init(self, conn, logSql):
|
||||
tdLog.debug("start to execute %s" % __file__)
|
||||
tdSql.init(conn.cursor())
|
||||
|
||||
self.dbname = 'db_test'
|
||||
self.setsql = TDSetSql()
|
||||
self.ntbname = 'ntb'
|
||||
self.rowNum = 10
|
||||
self.tbnum = 20
|
||||
self.ts = 1537146000000
|
||||
self.binary_str = 'taosdata'
|
||||
self.nchar_str = '涛思数据'
|
||||
def bottom_check_base(self):
|
||||
tdSql.prepare()
|
||||
tdSql.execute('''create table stb(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 tinyint unsigned, col6 smallint unsigned,
|
||||
col7 int unsigned, col8 bigint unsigned, col9 float, col10 double, col11 bool, col12 binary(20), col13 nchar(20)) tags(loc nchar(20))''')
|
||||
tdSql.execute("create table stb_1 using stb tags('beijing')")
|
||||
column_list = ['col1','col2','col3','col4','col5','col6','col7','col8']
|
||||
error_column_list = ['col11','col12','col13']
|
||||
error_param_list = [0,101]
|
||||
for i in range(self.rowNum):
|
||||
tdSql.execute(f"insert into stb_1 values(%d, %d, %d, %d, %d, %d, %d, %d, %d, %f, %f, %d, '{self.binary_str}%d', '{self.nchar_str}%d')"
|
||||
% (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1))
|
||||
self.column_dict = {
|
||||
'ts' : 'timestamp',
|
||||
'col1': 'tinyint',
|
||||
'col2': 'smallint',
|
||||
'col3': 'int',
|
||||
'col4': 'bigint',
|
||||
'col5': 'tinyint unsigned',
|
||||
'col6': 'smallint unsigned',
|
||||
'col7': 'int unsigned',
|
||||
'col8': 'bigint unsigned',
|
||||
'col9': 'float',
|
||||
'col10': 'double',
|
||||
'col11': 'bool',
|
||||
'col12': 'binary(20)',
|
||||
'col13': 'nchar(20)'
|
||||
}
|
||||
|
||||
for i in column_list:
|
||||
tdSql.query(f'select bottom({i},2) from stb_1')
|
||||
tdSql.checkRows(2)
|
||||
tdSql.checkEqual(tdSql.queryResult,[(2,),(1,)])
|
||||
for j in error_param_list:
|
||||
tdSql.error(f'select bottom({i},{j}) from stb_1')
|
||||
for i in error_column_list:
|
||||
tdSql.error(f'select bottom({i},10) from stb_1')
|
||||
tdSql.query("select ts,bottom(col1, 2),ts from stb_1 group by tbname")
|
||||
tdSql.checkRows(2)
|
||||
tdSql.query('select bottom(col2,1) from stb_1 interval(1y) order by col2')
|
||||
tdSql.checkData(0,0,1)
|
||||
|
||||
tdSql.error('select * from stb_1 where bottom(col2,1)=1')
|
||||
tdSql.execute('drop database db')
|
||||
def bottom_check_distribute(self):
|
||||
# prepare data for vgroup 4
|
||||
dbname = tdCom.getLongName(5, "letters")
|
||||
self.param_list = [1,100]
|
||||
def insert_data(self,column_dict,tbname,row_num):
|
||||
sql = ''
|
||||
for k, v in column_dict.items():
|
||||
if v.lower() == 'timestamp' or v.lower() == 'tinyint' or v.lower() == 'smallint' or v.lower() == 'int' or v.lower() == 'bigint' or \
|
||||
v.lower() == 'tinyint unsigned' or v.lower() == 'smallint unsigned' or v.lower() == 'int unsigned' or v.lower() == 'bigint unsigned' or v.lower() == 'bool':
|
||||
sql += '%d,'
|
||||
elif v.lower() == 'float' or v.lower() == 'double':
|
||||
sql += '%f,'
|
||||
elif 'binary' in v.lower():
|
||||
sql += f'"{self.binary_str}%d",'
|
||||
elif 'nchar' in v.lower():
|
||||
sql += f'"{self.nchar_str}%d",'
|
||||
insert_sql = f'insert into {tbname} values({sql[:-1]})'
|
||||
for i in range(row_num):
|
||||
insert_list = []
|
||||
for k, v in column_dict.items():
|
||||
if v.lower() in[ 'tinyint' , 'smallint' , 'int', 'bigint' , 'tinyint unsigned' , 'smallint unsigned' , 'int unsigned' , 'bigint unsigned'] or\
|
||||
'binary' in v.lower() or 'nchar' in v.lower():
|
||||
insert_list.append(0 + i)
|
||||
elif v.lower() == 'float' or v.lower() == 'double':
|
||||
insert_list.append(0.1 + i)
|
||||
elif v.lower() == 'bool':
|
||||
insert_list.append(i % 2)
|
||||
elif v.lower() == 'timestamp':
|
||||
insert_list.append(self.ts + i)
|
||||
tdSql.execute(insert_sql%(tuple(insert_list)))
|
||||
def bottom_check_data(self,tbname,tb_type):
|
||||
new_column_dict = {}
|
||||
for param in self.param_list:
|
||||
for k,v in self.column_dict.items():
|
||||
if v.lower() in ['tinyint','smallint','int','bigint','tinyint unsigned','smallint unsigned','int unsigned','bigint unsigned']:
|
||||
tdSql.query(f'select bottom({k},{param}) from {tbname} order by {k}')
|
||||
if param >= self.rowNum:
|
||||
if tb_type in ['normal_table','child_table']:
|
||||
tdSql.checkRows(self.rowNum)
|
||||
values_list = []
|
||||
for i in range(self.rowNum):
|
||||
tp = (i,)
|
||||
values_list.append(tp)
|
||||
tdSql.checkEqual(tdSql.queryResult,values_list)
|
||||
elif tb_type == 'stable':
|
||||
tdSql.checkRows(param)
|
||||
elif param < self.rowNum:
|
||||
if tb_type in ['normal_table','child_table']:
|
||||
tdSql.checkRows(param)
|
||||
values_list = []
|
||||
for i in range(param):
|
||||
tp = (i,)
|
||||
values_list.append(tp)
|
||||
tdSql.checkEqual(tdSql.queryResult,values_list)
|
||||
elif tb_type == 'stable':
|
||||
tdSql.checkRows(param)
|
||||
for i in [self.param_list[0]-1,self.param_list[-1]+1]:
|
||||
tdSql.error(f'select top({k},{i}) from {tbname}')
|
||||
new_column_dict.update({k:v})
|
||||
elif v.lower() == 'bool' or 'binary' in v.lower() or 'nchar' in v.lower():
|
||||
tdSql.error(f'select top({k},{param}) from {tbname}')
|
||||
tdSql.error(f'select * from {tbname} where top({k},{param})=1')
|
||||
pass
|
||||
def bottom_check_ntb(self):
|
||||
tdSql.execute(f'create database if not exists {self.dbname} vgroups 1')
|
||||
tdSql.execute(f'use {self.dbname}')
|
||||
tdSql.execute(self.setsql.set_create_normaltable_sql(self.ntbname,self.column_dict))
|
||||
self.insert_data(self.column_dict,self.ntbname,self.rowNum)
|
||||
self.bottom_check_data(self.ntbname,'normal_table')
|
||||
tdSql.execute(f'drop database {self.dbname}')
|
||||
def bottom_check_stb(self):
|
||||
stbname = tdCom.getLongName(5, "letters")
|
||||
vgroup_num = 2
|
||||
child_table_num = 20
|
||||
tdSql.execute(f"create database if not exists {dbname} vgroups {vgroup_num}")
|
||||
tdSql.execute(f'use {dbname}')
|
||||
# build 20 child tables,every table insert 10 rows
|
||||
tdSql.execute(f'''create table {stbname}(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 tinyint unsigned, col6 smallint unsigned,
|
||||
col7 int unsigned, col8 bigint unsigned, col9 float, col10 double, col11 bool, col12 binary(20), col13 nchar(20)) tags(loc nchar(20))''')
|
||||
for i in range(child_table_num):
|
||||
tdSql.execute(f"create table {stbname}_{i} using {stbname} tags('beijing')")
|
||||
tdSql.execute(f"insert into {stbname}_{i}(ts) values(%d)" % (self.ts - 1-i))
|
||||
column_list = ['col1','col2','col3','col4','col5','col6','col7','col8']
|
||||
error_column_list = ['col11','col12','col13']
|
||||
error_param_list = [0,101]
|
||||
for i in [f'{stbname}', f'{dbname}.{stbname}']:
|
||||
for j in column_list:
|
||||
tdSql.query(f"select bottom({j},1) from {i}")
|
||||
tdSql.checkRows(0)
|
||||
tag_dict = {
|
||||
't0':'int'
|
||||
}
|
||||
tag_values = [
|
||||
f'1'
|
||||
]
|
||||
tdSql.execute(f"create database if not exists {self.dbname} vgroups 2")
|
||||
tdSql.execute(f'use {self.dbname}')
|
||||
tdSql.execute(self.setsql.set_create_stable_sql(stbname,self.column_dict,tag_dict))
|
||||
for i in range(self.tbnum):
|
||||
tdSql.execute(f"create table {stbname}_{i} using {stbname} tags({tag_values[0]})")
|
||||
tdSql.execute(self.insert_data(self.column_dict,f'{stbname}_{i}',self.rowNum))
|
||||
tdSql.query('show tables')
|
||||
vgroup_list = []
|
||||
for i in range(len(tdSql.queryResult)):
|
||||
vgroup_list.append(tdSql.queryResult[i][6])
|
||||
vgroup_list_set = set(vgroup_list)
|
||||
|
||||
for i in vgroup_list_set:
|
||||
vgroups_num = vgroup_list.count(i)
|
||||
if vgroups_num >=2:
|
||||
if vgroups_num >= 2:
|
||||
tdLog.info(f'This scene with {vgroups_num} vgroups is ok!')
|
||||
continue
|
||||
else:
|
||||
tdLog.exit(f'This scene does not meet the requirements with {vgroups_num} vgroup!\n')
|
||||
for i in range(self.rowNum):
|
||||
for j in range(child_table_num):
|
||||
tdSql.execute(f"insert into {stbname}_{j} values(%d, %d, %d, %d, %d, %d, %d, %d, %d, %f, %f, %d, '{self.binary_str}%d', '{self.nchar_str}%d')"
|
||||
% (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1))
|
||||
for i in column_list:
|
||||
tdSql.query(f'select bottom({i},2) from {stbname}')
|
||||
tdSql.checkRows(2)
|
||||
tdSql.checkEqual(tdSql.queryResult,[(1,),(1,)])
|
||||
for j in error_param_list:
|
||||
tdSql.error(f'select bottom({i},{j}) from {stbname}')
|
||||
for i in error_column_list:
|
||||
tdSql.error(f'select bottom({i},10) from {stbname}')
|
||||
|
||||
tdSql.execute(f'drop database {dbname}')
|
||||
tdLog.exit(
|
||||
'This scene does not meet the requirements with {vgroups_num} vgroup!\n')
|
||||
for i in range(self.tbnum):
|
||||
self.bottom_check_data(f'{stbname}_{i}','child_table')
|
||||
self.bottom_check_data(f'{stbname}','stable')
|
||||
tdSql.execute(f'drop database {self.dbname}')
|
||||
|
||||
def run(self):
|
||||
|
||||
self.bottom_check_base()
|
||||
self.bottom_check_distribute()
|
||||
self.bottom_check_ntb()
|
||||
self.bottom_check_stb()
|
||||
|
||||
|
||||
def stop(self):
|
||||
tdSql.close()
|
||||
tdLog.success("%s successfully executed" % __file__)
@ -11,12 +11,14 @@
|
|||
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
import imp
|
||||
import sys
|
||||
import taos
|
||||
from util.log import tdLog
|
||||
from util.cases import tdCases
|
||||
from util.sql import tdSql
|
||||
import json
|
||||
import os
|
||||
|
||||
|
||||
class TDTestCase:
|
||||
|
@ -29,6 +31,9 @@ class TDTestCase:
|
|||
return
|
||||
|
||||
def init(self, conn, logSql):
|
||||
self.testcasePath = os.path.split(__file__)[0]
|
||||
self.testcaseFilename = os.path.split(__file__)[-1]
|
||||
os.system("rm -rf %s/%s.sql" % (self.testcasePath,self.testcaseFilename))
|
||||
tdLog.debug("start to execute %s" % __file__)
|
||||
tdSql.init(conn.cursor(), logSql)
|
||||
|
||||
|
@ -557,6 +562,103 @@ class TDTestCase:
|
|||
tdSql.checkRows(3)
|
||||
tdSql.query("select round(dataint) from jsons1 where jtag->'tag1'>1")
|
||||
tdSql.checkRows(3)
|
||||
|
||||
#math function
|
||||
tdSql.query("select sin(dataint) from jsons1 where jtag->'tag1'>1;")
|
||||
tdSql.checkRows(3)
|
||||
tdSql.query("select cos(dataint) from jsons1 where jtag->'tag1'>1;")
|
||||
tdSql.checkRows(3)
|
||||
tdSql.query("select tan(dataint) from jsons1 where jtag->'tag1'>1;")
|
||||
tdSql.checkRows(3)
|
||||
tdSql.query("select asin(dataint) from jsons1 where jtag->'tag1'>1;")
|
||||
tdSql.checkRows(3)
|
||||
tdSql.query("select acos(dataint) from jsons1 where jtag->'tag1'>1;")
|
||||
tdSql.checkRows(3)
|
||||
tdSql.query("select atan(dataint) from jsons1 where jtag->'tag1'>1;")
|
||||
tdSql.checkRows(3)
|
||||
tdSql.query("select ceil(dataint) from jsons1 where jtag->'tag1'>1;")
|
||||
tdSql.checkRows(3)
|
||||
tdSql.query("select floor(dataint) from jsons1 where jtag->'tag1'>1;")
|
||||
tdSql.checkRows(3)
|
||||
tdSql.query("select round(dataint) from jsons1 where jtag->'tag1'>1;")
|
||||
tdSql.checkRows(3)
|
||||
tdSql.query("select abs(dataint) from jsons1 where jtag->'tag1'>1;")
|
||||
tdSql.checkRows(3)
|
||||
tdSql.query("select pow(dataint,5) from jsons1 where jtag->'tag1'>1;")
|
||||
tdSql.checkRows(3)
|
||||
tdSql.query("select log(dataint,10) from jsons1 where jtag->'tag1'>1;")
|
||||
tdSql.checkRows(3)
|
||||
tdSql.query("select sqrt(dataint) from jsons1 where jtag->'tag1'>1;")
|
||||
tdSql.checkRows(3)
|
||||
tdSql.query("select HISTOGRAM(dataint,'user_input','[1, 33, 555, 7777]',1) from jsons1 where jtag->'tag1'>1;")
|
||||
tdSql.checkRows(3)
|
||||
tdSql.query("select csum(dataint) from jsons1 where jtag->'tag1'>1;")
|
||||
tdSql.checkRows(3)
|
||||
tdSql.query("select mavg(dataint,1) from jsons1 where jtag->'tag1'>1;")
|
||||
tdSql.checkRows(3)
|
||||
tdSql.query("select statecount(dataint,'GE',10) from jsons1 where jtag->'tag1'>1;")
|
||||
tdSql.checkRows(3)
|
||||
tdSql.query("select stateduration(dataint,'GE',0) from jsons1 where jtag->'tag1'>1;")
|
||||
tdSql.checkRows(3)
|
||||
tdSql.query("select sample(dataint,3) from jsons1 where jtag->'tag1'>1;")
|
||||
tdSql.checkRows(3)
|
||||
tdSql.query("select HYPERLOGLOG(dataint) from jsons1 where jtag->'tag1'>1;")
|
||||
tdSql.checkRows(1)
|
||||
tdSql.query("select twa(dataint) from jsons1 where jtag->'tag1'>1;")
|
||||
tdSql.checkRows(1)
|
||||
|
||||
# function not ready
|
||||
# tdSql.query("select tail(dataint,1) from jsons1 where jtag->'tag1'>1;")
|
||||
# tdSql.checkRows(3)
|
||||
# tdSql.query("select unique(dataint) from jsons1 where jtag->'tag1'>1;")
|
||||
# tdSql.checkRows(3)
|
||||
# tdSql.query("select mode(dataint) from jsons1 where jtag->'tag1'>1;")
|
||||
# tdSql.checkRows(3)
|
||||
# tdSql.query("select irate(dataint) from jsons1 where jtag->'tag1'>1;")
|
||||
# tdSql.checkRows(1)
|
||||
|
||||
#str function
|
||||
tdSql.query("select upper(dataStr) from jsons1 where jtag->'tag1'>1;")
|
||||
tdSql.checkRows(3)
|
||||
tdSql.query("select ltrim(dataStr) from jsons1 where jtag->'tag1'>1;")
|
||||
tdSql.checkRows(3)
|
||||
tdSql.query("select lower(dataStr) from jsons1 where jtag->'tag1'>1;")
|
||||
tdSql.checkRows(3)
|
||||
tdSql.query("select rtrim(dataStr) from jsons1 where jtag->'tag1'>1;")
|
||||
tdSql.checkRows(3)
|
||||
tdSql.query("select LENGTH(dataStr) from jsons1 where jtag->'tag1'>1;")
|
||||
tdSql.checkRows(3)
|
||||
tdSql.query("select CHAR_LENGTH(dataStr) from jsons1 where jtag->'tag1'>1;")
|
||||
tdSql.checkRows(3)
|
||||
tdSql.query("select SUBSTR(dataStr,5) from jsons1 where jtag->'tag1'>1;")
|
||||
tdSql.checkRows(3)
|
||||
tdSql.query("select CONCAT(dataStr,dataStrBin) from jsons1 where jtag->'tag1'>1;")
|
||||
tdSql.checkRows(3)
|
||||
tdSql.query("select CONCAT_ws('adad!@!@%$^$%$^$%^a',dataStr,dataStrBin) from jsons1 where jtag->'tag1'>1;")
|
||||
tdSql.checkRows(3)
|
||||
tdSql.query("select CAST(dataStr as bigint) from jsons1 where jtag->'tag1'>1;")
|
||||
tdSql.checkRows(3)
|
||||
|
||||
#time function
|
||||
tdSql.query("select now() from jsons1 where jtag->'tag1'>1;")
|
||||
tdSql.checkRows(3)
|
||||
tdSql.query("select today() from jsons1 where jtag->'tag1'>1;")
|
||||
tdSql.checkRows(3)
|
||||
tdSql.query("select TIMEZONE() from jsons1 where jtag->'tag1'>1;")
|
||||
tdSql.checkRows(3)
|
||||
tdSql.query("select TO_ISO8601(ts) from jsons1 where jtag->'tag1'>1;")
|
||||
tdSql.checkRows(3)
|
||||
tdSql.query("select TO_UNIXTIMESTAMP(datastr) from jsons1 where jtag->'tag1'>1;")
|
||||
tdSql.checkRows(3)
|
||||
tdSql.query("select TIMETRUNCATE(ts,1u) from jsons1 where jtag->'tag1'>1;")
|
||||
tdSql.checkRows(3)
|
||||
tdSql.query("select TIMEDIFF(ts,_c0) from jsons1 where jtag->'tag1'>1;")
|
||||
tdSql.checkRows(3)
|
||||
tdSql.query("select TIMEDIFF(ts,1u) from jsons1 where jtag->'tag1'>1;")
|
||||
tdSql.checkRows(3)
|
||||
tdSql.query("select ELAPSED(ts,1h) from jsons1 where jtag->'tag1'>1;")
|
||||
tdSql.checkRows(1)
|
||||
|
||||
#
|
||||
# #test TD-12077
|
||||
tdSql.execute("insert into jsons1_16 using jsons1 tags('{\"tag1\":\"收到货\",\"tag2\":\"\",\"tag3\":-2.111}') values(1591062628000, 2, NULL, '你就会', 'dws')")
|
||||
|
|
|
@ -20,7 +20,7 @@ class MyDnodes(TDDnodes):
        self.simDeployed = False

class TDTestCase:

    noConn = True
    def init(self,conn ,logSql):
        tdLog.debug(f"start to execute {__file__}")
        self.TDDnodes = None

@ -40,7 +40,7 @@ class TDTestCase:
            projPath = selfPath[:selfPath.find("tests")]

        for root, dirs, files in os.walk(projPath):
            if ("taosd" in files):
            if ("taosd" in files or "taosd.exe" in files):
                rootRealPath = os.path.dirname(os.path.realpath(root))
                if ("packaging" not in rootRealPath):
                    buildPath = root[:len(root) - len("/build/bin")]

@ -81,7 +81,7 @@ class TDTestCase:
            dnode_id = dnode.cfgDict["fqdn"] + ":" +dnode.cfgDict["serverPort"]
            dnode_first_host = dnode.cfgDict["firstEp"].split(":")[0]
            dnode_first_port = dnode.cfgDict["firstEp"].split(":")[-1]
            cmd = f" taos -h {dnode_first_host} -P {dnode_first_port} -s ' create dnode \"{dnode_id} \" ' ;"
            cmd = f"{self.getBuildPath()}/build/bin/taos -h {dnode_first_host} -P {dnode_first_port} -s \"create dnode \\\"{dnode_id}\\\"\""
            print(cmd)
            os.system(cmd)

@ -20,6 +20,7 @@ class MyDnodes(TDDnodes):
        self.simDeployed = False

class TDTestCase:
    noConn = True

    def init(self,conn ,logSql):
        tdLog.debug(f"start to execute {__file__}")

@ -40,7 +41,7 @@ class TDTestCase:
            projPath = selfPath[:selfPath.find("tests")]

        for root, dirs, files in os.walk(projPath):
            if ("taosd" in files):
            if ("taosd" in files or "taosd.exe" in files):
                rootRealPath = os.path.dirname(os.path.realpath(root))
                if ("packaging" not in rootRealPath):
                    buildPath = root[:len(root) - len("/build/bin")]

@ -85,7 +86,7 @@ class TDTestCase:
            dnode_id = dnode.cfgDict["fqdn"] + ":" +dnode.cfgDict["serverPort"]
            dnode_first_host = dnode.cfgDict["firstEp"].split(":")[0]
            dnode_first_port = dnode.cfgDict["firstEp"].split(":")[-1]
            cmd = f" taos -h {dnode_first_host} -P {dnode_first_port} -s ' create dnode \"{dnode_id} \" ' ;"
            cmd = f"{self.getBuildPath()}/build/bin/taos -h {dnode_first_host} -P {dnode_first_port} -s \"create dnode \\\"{dnode_id}\\\"\""
            print(cmd)
            os.system(cmd)

@ -0,0 +1,181 @@
|
|||
from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
|
||||
import taos
|
||||
import sys
|
||||
import time
|
||||
import os
|
||||
|
||||
from util.log import *
|
||||
from util.sql import *
|
||||
from util.cases import *
|
||||
from util.dnodes import TDDnodes
|
||||
from util.dnodes import TDDnode
|
||||
from util.cluster import *
|
||||
sys.path.append("./6-cluster")
|
||||
from clusterCommonCreate import *
|
||||
from clusterCommonCheck import clusterComCheck
|
||||
|
||||
import time
|
||||
import socket
|
||||
import subprocess
|
||||
from multiprocessing import Process
|
||||
import threading
|
||||
import time
|
||||
import inspect
|
||||
import ctypes
|
||||
|
||||
class TDTestCase:
|
||||
|
||||
def init(self,conn ,logSql):
|
||||
tdLog.debug(f"start to excute {__file__}")
|
||||
self.TDDnodes = None
|
||||
tdSql.init(conn.cursor())
|
||||
self.host = socket.gethostname()
|
||||
|
||||
|
||||
def getBuildPath(self):
|
||||
selfPath = os.path.dirname(os.path.realpath(__file__))
|
||||
|
||||
if ("community" in selfPath):
|
||||
projPath = selfPath[:selfPath.find("community")]
|
||||
else:
|
||||
projPath = selfPath[:selfPath.find("tests")]
|
||||
|
||||
for root, dirs, files in os.walk(projPath):
|
||||
if ("taosd" in files):
|
||||
rootRealPath = os.path.dirname(os.path.realpath(root))
|
||||
if ("packaging" not in rootRealPath):
|
||||
buildPath = root[:len(root) - len("/build/bin")]
|
||||
break
|
||||
return buildPath
|
||||
|
||||
def _async_raise(self, tid, exctype):
|
||||
"""raises the exception, performs cleanup if needed"""
|
||||
if not inspect.isclass(exctype):
|
||||
exctype = type(exctype)
|
||||
res = ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, ctypes.py_object(exctype))
|
||||
if res == 0:
|
||||
raise ValueError("invalid thread id")
|
||||
elif res != 1:
|
||||
# """if it returns a number greater than one, you're in trouble,
|
||||
# and you should call it again with exc=NULL to revert the effect"""
|
||||
ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)
|
||||
raise SystemError("PyThreadState_SetAsyncExc failed")
|
||||
|
||||
def stopThread(self,thread):
|
||||
self._async_raise(thread.ident, SystemExit)
|
||||
|
||||
|
||||
def insertData(self,countstart,countstop):
|
||||
# first add data : db\stable\childtable\general table
|
||||
|
||||
for couti in range(countstart,countstop):
|
||||
tdLog.debug("drop database if exists db%d" %couti)
|
||||
tdSql.execute("drop database if exists db%d" %couti)
|
||||
print("create database if not exists db%d replica 1 duration 300" %couti)
|
||||
tdSql.execute("create database if not exists db%d replica 1 duration 300" %couti)
|
||||
tdSql.execute("use db%d" %couti)
|
||||
tdSql.execute(
|
||||
'''create table stb1
|
||||
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
|
||||
tags (t1 int)
|
||||
'''
|
||||
)
|
||||
tdSql.execute(
|
||||
'''
|
||||
create table t1
|
||||
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
|
||||
'''
|
||||
)
|
||||
for i in range(4):
|
||||
tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
|
||||
|
||||
|
||||
def fiveDnodeThreeMnode(self,dnodenumbers,mnodeNums,restartNumber):
|
||||
tdLog.printNoPrefix("======== test case 1: ")
|
||||
paraDict = {'dbName': 'db',
|
||||
'dropFlag': 1,
|
||||
'event': '',
|
||||
'vgroups': 4,
|
||||
'replica': 1,
|
||||
'stbName': 'stb',
|
||||
'colPrefix': 'c',
|
||||
'tagPrefix': 't',
|
||||
'colSchema': [{'type': 'INT', 'count':1}, {'type': 'binary', 'len':20, 'count':1}],
|
||||
'tagSchema': [{'type': 'INT', 'count':1}, {'type': 'binary', 'len':20, 'count':1}],
|
||||
'ctbPrefix': 'ctb',
|
||||
'ctbNum': 1,
|
||||
'rowsPerTbl': 10000,
|
||||
'batchNum': 10,
|
||||
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
|
||||
'pollDelay': 10,
|
||||
'showMsg': 1,
|
||||
'showRow': 1}
|
||||
dnodenumbers=int(dnodenumbers)
|
||||
mnodeNums=int(mnodeNums)
|
||||
dbNumbers = int(dnodenumbers * restartNumber)
|
||||
|
||||
tdLog.info("first check dnode and mnode")
|
||||
tdSql.query("show dnodes;")
|
||||
tdSql.checkData(0,1,'%s:6030'%self.host)
|
||||
tdSql.checkData(4,1,'%s:6430'%self.host)
|
||||
clusterComCheck.checkDnodes(dnodenumbers)
|
||||
clusterComCheck.checkMnodeStatus(1)
|
||||
|
||||
# first add three mnodes;
|
||||
tdLog.info("fisr add three mnodes and check mnode status")
|
||||
tdSql.execute("create mnode on dnode 2")
|
||||
clusterComCheck.checkMnodeStatus(2)
|
||||
tdSql.execute("create mnode on dnode 3")
|
||||
clusterComCheck.checkMnodeStatus(3)
|
||||
|
||||
# add some error operations and
|
||||
tdLog.info("Confirm the status of the dnode again")
|
||||
tdSql.error("create mnode on dnode 2")
|
||||
tdSql.query("show dnodes;")
|
||||
print(tdSql.queryResult)
|
||||
clusterComCheck.checkDnodes(dnodenumbers)
|
||||
|
||||
tdLog.info("Take turns stopping all dnodes ")
|
||||
# separate vnode and mnode in different dnodes.
|
||||
# create database and stable
|
||||
tdDnodes=cluster.dnodes
|
||||
stopcount =0
|
||||
while stopcount < restartNumber:
|
||||
for i in range(dnodenumbers):
|
||||
# threads=[]
|
||||
# threads = MyThreadFunc(self.insert_data(i*2,i*2+2))
|
||||
paraDict["dbName"]= 'db%d%d'%(stopcount,i)
|
||||
threads=threading.Thread(target=clusterComCreate.create_database, args=(tdSql, paraDict["dbName"],paraDict["dropFlag"], paraDict["vgroups"],paraDict['replica']))
|
||||
threads.start()
|
||||
tdDnodes[i].stoptaosd()
|
||||
# sleep(10)
|
||||
tdDnodes[i].starttaosd()
|
||||
# sleep(10)
|
||||
|
||||
if clusterComCheck.checkDnodes(dnodenumbers):
|
||||
# threads.join()
|
||||
tdLog.info("first restart loop")
|
||||
else:
|
||||
print("456")
|
||||
threads.join()
|
||||
self.stopThread(threads)
|
||||
tdLog.exit("one or more of dnodes failed to start ")
|
||||
# self.check3mnode()
|
||||
stopcount+=1
|
||||
threads.join()
|
||||
clusterComCheck.checkDnodes(dnodenumbers)
|
||||
clusterComCheck.checkDbRows(dbNumbers)
|
||||
for i in range(restartNumber):
|
||||
clusterComCheck.checkDb(dnodenumbers,'db%d'%i)
|
||||
|
||||
|
||||
def run(self):
|
||||
# print(self.master_dnode.cfgDict)
|
||||
self.fiveDnodeThreeMnode(5,3,1)
|
||||
|
||||
def stop(self):
|
||||
tdSql.close()
|
||||
tdLog.success(f"{__file__} successfully executed")
|
||||
|
||||
tdCases.addLinux(__file__, TDTestCase())
|
||||
tdCases.addWindows(__file__, TDTestCase())
|
|
@ -0,0 +1,181 @@
|
|||
from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
|
||||
import taos
|
||||
import sys
|
||||
import time
|
||||
import os
|
||||
|
||||
from util.log import *
|
||||
from util.sql import *
|
||||
from util.cases import *
|
||||
from util.dnodes import TDDnodes
|
||||
from util.dnodes import TDDnode
|
||||
from util.cluster import *
|
||||
sys.path.append("./6-cluster")
|
||||
from clusterCommonCreate import *
|
||||
from clusterCommonCheck import clusterComCheck
|
||||
|
||||
import time
|
||||
import socket
|
||||
import subprocess
|
||||
from multiprocessing import Process
|
||||
import threading
|
||||
import time
|
||||
import inspect
|
||||
import ctypes
|
||||
|
||||
class TDTestCase:
|
||||
|
||||
def init(self,conn ,logSql):
|
||||
tdLog.debug(f"start to excute {__file__}")
|
||||
self.TDDnodes = None
|
||||
tdSql.init(conn.cursor())
|
||||
self.host = socket.gethostname()
|
||||
|
||||
|
||||
def getBuildPath(self):
|
||||
selfPath = os.path.dirname(os.path.realpath(__file__))
|
||||
|
||||
if ("community" in selfPath):
|
||||
projPath = selfPath[:selfPath.find("community")]
|
||||
else:
|
||||
projPath = selfPath[:selfPath.find("tests")]
|
||||
|
||||
for root, dirs, files in os.walk(projPath):
|
||||
if ("taosd" in files):
|
||||
rootRealPath = os.path.dirname(os.path.realpath(root))
|
||||
if ("packaging" not in rootRealPath):
|
||||
buildPath = root[:len(root) - len("/build/bin")]
|
||||
break
|
||||
return buildPath
|
||||
|
||||
def _async_raise(self, tid, exctype):
|
||||
"""raises the exception, performs cleanup if needed"""
|
||||
if not inspect.isclass(exctype):
|
||||
exctype = type(exctype)
|
||||
res = ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, ctypes.py_object(exctype))
|
||||
if res == 0:
|
||||
raise ValueError("invalid thread id")
|
||||
elif res != 1:
|
||||
# """if it returns a number greater than one, you're in trouble,
|
||||
# and you should call it again with exc=NULL to revert the effect"""
|
||||
ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)
|
||||
raise SystemError("PyThreadState_SetAsyncExc failed")
|
||||
|
||||
def stopThread(self,thread):
|
||||
self._async_raise(thread.ident, SystemExit)
|
||||
|
||||
|
||||
def insertData(self,countstart,countstop):
|
||||
# first add data : db\stable\childtable\general table
|
||||
|
||||
for couti in range(countstart,countstop):
|
||||
tdLog.debug("drop database if exists db%d" %couti)
|
||||
tdSql.execute("drop database if exists db%d" %couti)
|
||||
print("create database if not exists db%d replica 1 duration 300" %couti)
|
||||
tdSql.execute("create database if not exists db%d replica 1 duration 300" %couti)
|
||||
tdSql.execute("use db%d" %couti)
|
||||
tdSql.execute(
|
||||
'''create table stb1
|
||||
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
|
||||
tags (t1 int)
|
||||
'''
|
||||
)
|
||||
tdSql.execute(
|
||||
'''
|
||||
create table t1
|
||||
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
|
||||
'''
|
||||
)
|
||||
for i in range(4):
|
||||
tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
|
||||
|
||||
|
||||
def fiveDnodeThreeMnode(self,dnodenumbers,mnodeNums,restartNumber):
|
||||
tdLog.printNoPrefix("======== test case 1: ")
|
||||
paraDict = {'dbName': 'db',
|
||||
'dropFlag': 1,
|
||||
'event': '',
|
||||
'vgroups': 4,
|
||||
'replica': 1,
|
||||
'stbName': 'stb',
|
||||
'colPrefix': 'c',
|
||||
'tagPrefix': 't',
|
||||
'colSchema': [{'type': 'INT', 'count':1}, {'type': 'binary', 'len':20, 'count':1}],
|
||||
'tagSchema': [{'type': 'INT', 'count':1}, {'type': 'binary', 'len':20, 'count':1}],
|
||||
'ctbPrefix': 'ctb',
|
||||
'ctbNum': 1,
|
||||
'rowsPerTbl': 10000,
|
||||
'batchNum': 10,
|
||||
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
|
||||
'pollDelay': 10,
|
||||
'showMsg': 1,
|
||||
'showRow': 1}
|
||||
dnodenumbers=int(dnodenumbers)
|
||||
mnodeNums=int(mnodeNums)
|
||||
dbNumbers = int(mnodeNums * restartNumber)
|
||||
tdLog.info("first check dnode and mnode")
|
||||
tdSql.query("show dnodes;")
|
||||
tdSql.checkData(0,1,'%s:6030'%self.host)
|
||||
tdSql.checkData(4,1,'%s:6430'%self.host)
|
||||
clusterComCheck.checkDnodes(dnodenumbers)
|
||||
clusterComCheck.checkMnodeStatus(1)
|
||||
|
||||
# first add three mnodes;
|
||||
tdLog.info("fisr add three mnodes and check mnode status")
|
||||
tdSql.execute("create mnode on dnode 2")
|
||||
clusterComCheck.checkMnodeStatus(2)
|
||||
tdSql.execute("create mnode on dnode 3")
|
||||
clusterComCheck.checkMnodeStatus(3)
|
||||
|
||||
# add some error operations and
|
||||
tdLog.info("Confirm the status of the dnode again")
|
||||
tdSql.error("create mnode on dnode 2")
|
||||
tdSql.query("show dnodes;")
|
||||
print(tdSql.queryResult)
|
||||
clusterComCheck.checkDnodes(dnodenumbers)
|
||||
|
||||
tdLog.info("Take turns stopping Mnodes ")
|
||||
# separate vnode and mnode in different dnodes.
|
||||
# create database and stable
|
||||
tdDnodes=cluster.dnodes
|
||||
stopcount =0
|
||||
while stopcount < restartNumber:
|
||||
tdLog.info("first restart loop")
|
||||
for i in range(mnodeNums):
|
||||
# threads=[]
|
||||
# threads = MyThreadFunc(self.insert_data(i*2,i*2+2))
|
||||
paraDict["dbName"]= 'db%d%d'%(stopcount,i)
|
||||
threads=threading.Thread(target=clusterComCreate.create_database, args=(tdSql, paraDict["dbName"],paraDict["dropFlag"], paraDict["vgroups"],paraDict['replica']))
|
||||
threads.start()
|
||||
tdDnodes[i].stoptaosd()
|
||||
# sleep(10)
|
||||
tdDnodes[i].starttaosd()
|
||||
# sleep(10)
|
||||
|
||||
if clusterComCheck.checkDnodes(dnodenumbers):
|
||||
# threads.join()
|
||||
tdLog.info("123")
|
||||
else:
|
||||
print("456")
|
||||
threads.join()
|
||||
self.stopThread(threads)
|
||||
tdLog.exit("one or more of dnodes failed to start ")
|
||||
# self.check3mnode()
|
||||
stopcount+=1
|
||||
threads.join()
|
||||
clusterComCheck.checkDnodes(dnodenumbers)
|
||||
clusterComCheck.checkDbRows(dbNumbers)
|
||||
for i in range(restartNumber):
|
||||
clusterComCheck.checkDb(mnodeNums,'db%d'%i)
|
||||
|
||||
|
||||
def run(self):
|
||||
# print(self.master_dnode.cfgDict)
|
||||
self.fiveDnodeThreeMnode(5,3,1)
|
||||
|
||||
def stop(self):
|
||||
tdSql.close()
|
||||
tdLog.success(f"{__file__} successfully executed")
|
||||
|
||||
tdCases.addLinux(__file__, TDTestCase())
|
||||
tdCases.addWindows(__file__, TDTestCase())
|
|
@ -0,0 +1,181 @@
|
|||
from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
|
||||
import taos
|
||||
import sys
|
||||
import time
|
||||
import os
|
||||
|
||||
from util.log import *
|
||||
from util.sql import *
|
||||
from util.cases import *
|
||||
from util.dnodes import TDDnodes
|
||||
from util.dnodes import TDDnode
|
||||
from util.cluster import *
|
||||
sys.path.append("./6-cluster")
|
||||
from clusterCommonCreate import *
|
||||
from clusterCommonCheck import clusterComCheck
|
||||
|
||||
import time
|
||||
import socket
|
||||
import subprocess
|
||||
from multiprocessing import Process
|
||||
import threading
|
||||
import time
|
||||
import inspect
|
||||
import ctypes
|
||||
|
||||
class TDTestCase:
|
||||
|
||||
def init(self,conn ,logSql):
|
||||
tdLog.debug(f"start to excute {__file__}")
|
||||
self.TDDnodes = None
|
||||
tdSql.init(conn.cursor())
|
||||
self.host = socket.gethostname()
|
||||
|
||||
|
||||
def getBuildPath(self):
|
||||
selfPath = os.path.dirname(os.path.realpath(__file__))
|
||||
|
||||
if ("community" in selfPath):
|
||||
projPath = selfPath[:selfPath.find("community")]
|
||||
else:
|
||||
projPath = selfPath[:selfPath.find("tests")]
|
||||
|
||||
for root, dirs, files in os.walk(projPath):
|
||||
if ("taosd" in files):
|
||||
rootRealPath = os.path.dirname(os.path.realpath(root))
|
||||
if ("packaging" not in rootRealPath):
|
||||
buildPath = root[:len(root) - len("/build/bin")]
|
||||
break
|
||||
return buildPath
|
||||
|
||||
def _async_raise(self, tid, exctype):
|
||||
"""raises the exception, performs cleanup if needed"""
|
||||
if not inspect.isclass(exctype):
|
||||
exctype = type(exctype)
|
||||
res = ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, ctypes.py_object(exctype))
|
||||
if res == 0:
|
||||
raise ValueError("invalid thread id")
|
||||
elif res != 1:
|
||||
# """if it returns a number greater than one, you're in trouble,
|
||||
# and you should call it again with exc=NULL to revert the effect"""
|
||||
ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)
|
||||
raise SystemError("PyThreadState_SetAsyncExc failed")
|
||||
|
||||
def stopThread(self,thread):
|
||||
self._async_raise(thread.ident, SystemExit)
|
||||
|
||||
|
||||
def insertData(self,countstart,countstop):
|
||||
# first add data : db\stable\childtable\general table
|
||||
|
||||
for couti in range(countstart,countstop):
|
||||
tdLog.debug("drop database if exists db%d" %couti)
|
||||
tdSql.execute("drop database if exists db%d" %couti)
|
||||
print("create database if not exists db%d replica 1 duration 300" %couti)
|
||||
tdSql.execute("create database if not exists db%d replica 1 duration 300" %couti)
|
||||
tdSql.execute("use db%d" %couti)
|
||||
tdSql.execute(
|
||||
'''create table stb1
|
||||
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
|
||||
tags (t1 int)
|
||||
'''
|
||||
)
|
||||
tdSql.execute(
|
||||
'''
|
||||
create table t1
|
||||
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
|
||||
'''
|
||||
)
|
||||
for i in range(4):
|
||||
tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
|
||||
|
||||
|
||||
def fiveDnodeThreeMnode(self,dnodenumbers,mnodeNums,restartNumber):
|
||||
tdLog.printNoPrefix("======== test case 1: ")
|
||||
paraDict = {'dbName': 'db',
|
||||
'dropFlag': 1,
|
||||
'event': '',
|
||||
'vgroups': 4,
|
||||
'replica': 1,
|
||||
'stbName': 'stb',
|
||||
'colPrefix': 'c',
|
||||
'tagPrefix': 't',
|
||||
'colSchema': [{'type': 'INT', 'count':1}, {'type': 'binary', 'len':20, 'count':1}],
|
||||
'tagSchema': [{'type': 'INT', 'count':1}, {'type': 'binary', 'len':20, 'count':1}],
|
||||
'ctbPrefix': 'ctb',
|
||||
'ctbNum': 1,
|
||||
'rowsPerTbl': 10000,
|
||||
'batchNum': 10,
|
||||
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
|
||||
'pollDelay': 10,
|
||||
'showMsg': 1,
|
||||
'showRow': 1}
|
||||
dnodenumbers=int(dnodenumbers)
|
||||
mnodeNums=int(mnodeNums)
|
||||
vnodeNumbers = int(dnodenumbers-mnodeNums)
|
||||
dbNumbers = int(vnodeNumbers * restartNumber)
|
||||
tdLog.info("first check dnode and mnode")
|
||||
tdSql.query("show dnodes;")
|
||||
tdSql.checkData(0,1,'%s:6030'%self.host)
|
||||
tdSql.checkData(4,1,'%s:6430'%self.host)
|
||||
clusterComCheck.checkDnodes(dnodenumbers)
|
||||
clusterComCheck.checkMnodeStatus(1)
|
||||
|
||||
# first add three mnodes;
|
||||
tdLog.info("fisr add three mnodes and check mnode status")
|
||||
tdSql.execute("create mnode on dnode 2")
|
||||
clusterComCheck.checkMnodeStatus(2)
|
||||
tdSql.execute("create mnode on dnode 3")
|
||||
clusterComCheck.checkMnodeStatus(3)
|
||||
|
||||
# add some error operations and
|
||||
tdLog.info("Confirm the status of the dnode again")
|
||||
tdSql.error("create mnode on dnode 2")
|
||||
tdSql.query("show dnodes;")
|
||||
print(tdSql.queryResult)
|
||||
clusterComCheck.checkDnodes(dnodenumbers)
|
||||
|
||||
tdLog.info("Take turns stopping Vnodes ")
|
||||
# separate vnode and mnode in different dnodes.
|
||||
# create database and stable
|
||||
tdDnodes=cluster.dnodes
|
||||
stopcount =0
|
||||
while stopcount < restartNumber:
|
||||
for i in range(vnodeNumbers):
|
||||
# threads=[]
|
||||
# threads = MyThreadFunc(self.insert_data(i*2,i*2+2))
|
||||
paraDict["dbName"]= 'db%d%d'%(stopcount,i)
|
||||
threads=threading.Thread(target=clusterComCreate.create_database, args=(tdSql, paraDict["dbName"],paraDict["dropFlag"], paraDict["vgroups"],paraDict['replica']))
|
||||
threads.start()
|
||||
tdDnodes[mnodeNums+i].stoptaosd()
|
||||
# sleep(10)
|
||||
tdDnodes[mnodeNums+i].starttaosd()
|
||||
# sleep(10)
|
||||
|
||||
if clusterComCheck.checkDnodes(vnodeNumbers):
|
||||
# threads.join()
|
||||
tdLog.info("first restart loop")
|
||||
else:
|
||||
print("456")
|
||||
threads.join()
|
||||
self.stopThread(threads)
|
||||
tdLog.exit("one or more of dnodes failed to start ")
|
||||
# self.check3mnode()
|
||||
stopcount+=1
|
||||
threads.join()
|
||||
clusterComCheck.checkDnodes(dnodenumbers)
|
||||
clusterComCheck.checkDbRows(dbNumbers)
|
||||
for i in range(restartNumber):
|
||||
clusterComCheck.checkDb(vnodeNumbers,'db%d'%i)
|
||||
|
||||
|
||||
def run(self):
|
||||
# print(self.master_dnode.cfgDict)
|
||||
self.fiveDnodeThreeMnode(5,3,1)
|
||||
|
||||
def stop(self):
|
||||
tdSql.close()
|
||||
tdLog.success(f"{__file__} successfully executed")
|
||||
|
||||
tdCases.addLinux(__file__, TDTestCase())
|
||||
tdCases.addWindows(__file__, TDTestCase())
|
|
@ -9,6 +9,11 @@ from util.sql import *
|
|||
from util.cases import *
|
||||
from util.dnodes import TDDnodes
|
||||
from util.dnodes import TDDnode
|
||||
from util.cluster import *
|
||||
from util.common import *
|
||||
sys.path.append("./7-tmq")
|
||||
from tmqCommon import *
|
||||
|
||||
import time
|
||||
import socket
|
||||
import subprocess
|
||||
|
@ -17,26 +22,13 @@ import threading
|
|||
import time
|
||||
import inspect
|
||||
import ctypes
|
||||
class MyDnodes(TDDnodes):
|
||||
def __init__(self ,dnodes_lists):
|
||||
super(MyDnodes,self).__init__()
|
||||
self.dnodes = dnodes_lists # dnode must be TDDnode instance
|
||||
self.simDeployed = False
|
||||
|
||||
|
||||
class TDTestCase:
|
||||
|
||||
def init(self,conn ,logSql):
|
||||
tdLog.debug(f"start to excute {__file__}")
|
||||
self.TDDnodes = None
|
||||
|
||||
def buildcluster(self,dnodenumber):
|
||||
self.depoly_cluster(dnodenumber)
|
||||
self.master_dnode = self.TDDnodes.dnodes[0]
|
||||
self.host=self.master_dnode.cfgDict["fqdn"]
|
||||
conn1 = taos.connect(self.master_dnode.cfgDict["fqdn"] , config=self.master_dnode.cfgDir)
|
||||
tdSql.init(conn1.cursor())
|
||||
|
||||
# tdSql.init(conn.cursor())
|
||||
# self.host = socket.gethostname()
|
||||
|
||||
def getBuildPath(self):
|
||||
selfPath = os.path.dirname(os.path.realpath(__file__))
|
||||
|
@ -106,52 +98,6 @@ class TDTestCase:
|
|||
tdSql.checkData(0,0,rowsPerSTable)
|
||||
return
|
||||
|
||||
def depoly_cluster(self ,dnodes_nums=5,independent=True):
|
||||
|
||||
testCluster = False
|
||||
valgrind = 0
|
||||
hostname = socket.gethostname()
|
||||
dnodes = []
|
||||
start_port = 6030
|
||||
start_port_sec = 6130
|
||||
for num in range(1, dnodes_nums+1):
|
||||
dnode = TDDnode(num)
|
||||
dnode.addExtraCfg("firstEp", f"{hostname}:{start_port}")
|
||||
dnode.addExtraCfg("fqdn", f"{hostname}")
|
||||
dnode.addExtraCfg("serverPort", f"{start_port + (num-1)*100}")
|
||||
dnode.addExtraCfg("monitorFqdn", hostname)
|
||||
dnode.addExtraCfg("monitorPort", 7043)
|
||||
dnode.addExtraCfg("secondEp", f"{hostname}:{start_port_sec}")
|
||||
# configure the first three dnodes to not support vnodes
|
||||
if independent and (num < 4):
|
||||
dnode.addExtraCfg("supportVnodes", 0)
|
||||
|
||||
dnodes.append(dnode)
|
||||
|
||||
self.TDDnodes = MyDnodes(dnodes)
|
||||
self.TDDnodes.init("")
|
||||
self.TDDnodes.setTestCluster(testCluster)
|
||||
self.TDDnodes.setValgrind(valgrind)
|
||||
self.TDDnodes.stopAll()
|
||||
for dnode in self.TDDnodes.dnodes:
|
||||
self.TDDnodes.deploy(dnode.index,{})
|
||||
|
||||
for dnode in self.TDDnodes.dnodes:
|
||||
self.TDDnodes.starttaosd(dnode.index)
|
||||
|
||||
# create cluster
|
||||
for dnode in self.TDDnodes.dnodes[1:]:
|
||||
# print(dnode.cfgDict)
|
||||
dnode_id = dnode.cfgDict["fqdn"] + ":" +dnode.cfgDict["serverPort"]
|
||||
dnode_first_host = dnode.cfgDict["firstEp"].split(":")[0]
|
||||
dnode_first_port = dnode.cfgDict["firstEp"].split(":")[-1]
|
||||
cmd = f" taos -h {dnode_first_host} -P {dnode_first_port} -s ' create dnode \"{dnode_id} \" ' ;"
|
||||
print(cmd)
|
||||
os.system(cmd)
|
||||
|
||||
time.sleep(2)
|
||||
tdLog.info(" create cluster with %d dnode done! " %dnodes_nums)
|
||||
|
||||
def checkdnodes(self,dnodenumber):
|
||||
count=0
|
||||
while count < 100:
|
||||
|
@ -305,6 +251,14 @@ class TDTestCase:
|
|||
tdSql.checkData(2,2,'offline')
|
||||
tdSql.checkData(2,3,'ready')
|
||||
|
||||
|
||||
def check5dnode(self):
|
||||
tdSql.query("show dnodes;")
|
||||
tdSql.checkData(0,1,'%s:6030'%self.host)
|
||||
tdSql.checkData(4,1,'%s:6430'%self.host)
|
||||
tdSql.checkData(0,4,'ready')
|
||||
tdSql.checkData(4,4,'ready')
|
||||
|
||||
def five_dnode_three_mnode(self,dnodenumber):
|
||||
tdSql.query("show dnodes;")
|
||||
tdSql.checkData(0,1,'%s:6030'%self.host)
|
||||
|
@ -346,6 +300,7 @@ class TDTestCase:
|
|||
threads.join()
|
||||
else:
|
||||
print("456")
|
||||
threads.join()
|
||||
self.stop_thread(threads)
|
||||
assert 1 == 2 ,"some dnode started failed"
|
||||
return False
|
||||
|
@ -357,16 +312,8 @@ class TDTestCase:
|
|||
self.check3mnode()
|
||||
|
||||
|
||||
def getConnection(self, dnode):
|
||||
host = dnode.cfgDict["fqdn"]
|
||||
port = dnode.cfgDict["serverPort"]
|
||||
config_dir = dnode.cfgDir
|
||||
return taos.connect(host=host, port=int(port), config=config_dir)
|
||||
|
||||
|
||||
def run(self):
|
||||
# print(self.master_dnode.cfgDict)
|
||||
self.buildcluster(5)
|
||||
self.five_dnode_three_mnode(5)
|
||||
|
||||
def stop(self):
|
||||
|
|
|
@ -12,7 +12,10 @@ from util.dnodes import TDDnodes
|
|||
from util.dnodes import TDDnode
|
||||
from util.cluster import *
|
||||
from test import tdDnodes
|
||||
sys.path.append("./6-cluster")
|
||||
|
||||
from clusterCommonCreate import *
|
||||
from clusterCommonCheck import *
|
||||
import time
|
||||
import socket
|
||||
import subprocess
|
||||
|
@ -216,64 +219,88 @@ class TDTestCase:
|
|||
else:
|
||||
tdLog.exit("create cluster with %d dnode but check dnode not ready within 5s ! "%dnodeNumbers)
|
||||
|
||||
def five_dnode_three_mnode(self,dnodenumber):
|
||||
self.check_dnodes_status(5)
|
||||
tdSql.query("show mnodes;")
|
||||
tdLog.debug(self.host)
|
||||
tdSql.checkRows(1)
|
||||
def fiveDnodeThreeMnode(self,dnodenumbers,mnodeNums,restartNumber):
|
||||
tdLog.printNoPrefix("======== test case 1: ")
|
||||
paraDict = {'dbName': 'db',
|
||||
'dropFlag': 1,
|
||||
'event': '',
|
||||
'vgroups': 4,
|
||||
'replica': 1,
|
||||
'stbName': 'stb',
|
||||
'colPrefix': 'c',
|
||||
'tagPrefix': 't',
|
||||
'colSchema': [{'type': 'INT', 'count':1}, {'type': 'binary', 'len':20, 'count':1}],
|
||||
'tagSchema': [{'type': 'INT', 'count':1}, {'type': 'binary', 'len':20, 'count':1}],
|
||||
'ctbPrefix': 'ctb',
|
||||
'ctbNum': 1,
|
||||
'rowsPerTbl': 10000,
|
||||
'batchNum': 10,
|
||||
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
|
||||
'pollDelay': 10,
|
||||
'showMsg': 1,
|
||||
'showRow': 1}
|
||||
dnodenumbers=int(dnodenumbers)
|
||||
mnodeNums=int(mnodeNums)
|
||||
dbNumbers = int(dnodenumbers * restartNumber)
|
||||
|
||||
tdLog.info("first check dnode and mnode")
|
||||
tdSql.query("show dnodes;")
|
||||
tdSql.checkData(0,1,'%s:6030'%self.host)
|
||||
tdSql.checkData(0,2,'leader')
|
||||
tdSql.checkData(0,3,'ready')
|
||||
tdSql.checkData(4,1,'%s:6430'%self.host)
|
||||
clusterComCheck.checkDnodes(dnodenumbers)
|
||||
clusterComCheck.checkMnodeStatus(1)
|
||||
|
||||
# first add three mnodes;
|
||||
tdLog.info("fisr add three mnodes and check mnode status")
|
||||
tdSql.execute("create mnode on dnode 2")
|
||||
time.sleep(10)
|
||||
clusterComCheck.checkMnodeStatus(2)
|
||||
tdSql.execute("create mnode on dnode 3")
|
||||
clusterComCheck.checkMnodeStatus(3)
|
||||
|
||||
# first check that the status is ready
|
||||
self.check3mnode()
|
||||
|
||||
|
||||
# add some error operations and
|
||||
tdLog.info("Confirm the status of the dnode again")
|
||||
tdSql.error("create mnode on dnode 2")
|
||||
|
||||
tdSql.query("show dnodes;")
|
||||
# tdLog.debug(tdSql.queryResult)
|
||||
|
||||
tdLog.debug("stop and follower of mnode")
|
||||
print(tdSql.queryResult)
|
||||
clusterComCheck.checkDnodes(dnodenumbers)
|
||||
# restart all taosd
|
||||
tdDnodes=cluster.dnodes
|
||||
# tdLog.debug(tdDnodes[0])
|
||||
|
||||
tdDnodes[1].stoptaosd()
|
||||
self.check3mnode2off()
|
||||
clusterComCheck.check3mnodeoff(2,3)
|
||||
tdDnodes[1].starttaosd()
|
||||
self.check3mnode()
|
||||
clusterComCheck.checkMnodeStatus(3)
|
||||
|
||||
tdDnodes[2].stoptaosd()
|
||||
self.check3mnode3off()
|
||||
clusterComCheck.check3mnodeoff(3,3)
|
||||
tdDnodes[2].starttaosd()
|
||||
self.check3mnode()
|
||||
clusterComCheck.checkMnodeStatus(3)
|
||||
|
||||
tdDnodes[0].stoptaosd()
|
||||
self.check3mnode1off()
|
||||
clusterComCheck.check3mnodeoff(1,3)
|
||||
tdDnodes[0].starttaosd()
|
||||
self.check3mnode()
|
||||
clusterComCheck.checkMnodeStatus(3)
|
||||
|
||||
self.check3mnode()
|
||||
tdLog.info("Take turns stopping all dnodes ")
|
||||
# separate vnode and mnode in different dnodes.
|
||||
# create database and stable
|
||||
stopcount =0
|
||||
while stopcount <= 2:
|
||||
for i in range(dnodenumber):
|
||||
tdLog.info("first restart loop")
|
||||
for i in range(dnodenumbers):
|
||||
tdDnodes[i].stoptaosd()
|
||||
tdDnodes[i].starttaosd()
|
||||
# self.check3mnode()
|
||||
stopcount+=1
|
||||
self.check3mnode()
|
||||
clusterComCheck.checkDnodes(dnodenumbers)
|
||||
clusterComCheck.checkMnodeStatus(3)
|
||||
|
||||
def run(self):
|
||||
self.five_dnode_three_mnode(5)
|
||||
# print(self.master_dnode.cfgDict)
|
||||
self.fiveDnodeThreeMnode(5,3,1)
|
||||
|
||||
def stop(self):
|
||||
tdSql.close()
|
||||
tdLog.success(f"{__file__} successfully executed")
|
||||
|
||||
tdCases.addLinux(__file__, TDTestCase())
|
||||
tdCases.addWindows(__file__, TDTestCase())
|
||||
tdCases.addWindows(__file__, TDTestCase())
@ -0,0 +1,211 @@
|
|||
###################################################################
|
||||
# Copyright (c) 2016 by TAOS Technologies, Inc.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This file is proprietary and confidential to TAOS Technologies.
|
||||
# No part of this file may be reproduced, stored, transmitted,
|
||||
# disclosed or used in any form or by any means other than as
|
||||
# expressly provided by the written permission from Jianhui Tao
|
||||
#
|
||||
###################################################################
|
||||
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
from collections import defaultdict
|
||||
import random
|
||||
import string
|
||||
import threading
|
||||
import requests
|
||||
import time
|
||||
# import socketfrom
|
||||
|
||||
import taos
|
||||
from util.log import *
|
||||
from util.sql import *
|
||||
from util.cases import *
|
||||
from util.dnodes import *
|
||||
from util.common import *
|
||||
|
||||
# class actionType(Enum):
|
||||
# CREATE_DATABASE = 0
|
||||
# CREATE_STABLE = 1
|
||||
# CREATE_CTABLE = 2
|
||||
# INSERT_DATA = 3
|
||||
|
||||
class ClusterComCheck:
|
||||
def init(self, conn, logSql):
|
||||
tdSql.init(conn.cursor())
|
||||
# tdSql.init(conn.cursor(), logSql) # output sql.txt file
|
||||
|
||||
def checkDnodes(self,dnodeNumbers):
|
||||
count=0
|
||||
while count < 5:
|
||||
tdSql.query("show dnodes")
|
||||
# tdLog.debug(tdSql.queryResult)
|
||||
status=0
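# count how many of the checked dnodes report 'ready' (column 4 of the `show dnodes` output)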
|
||||
for i in range(dnodeNumbers):
|
||||
if tdSql.queryResult[i][4] == "ready":
|
||||
status+=1
|
||||
tdLog.info(status)
|
||||
|
||||
if status == dnodeNumbers:
|
||||
tdLog.success("it find cluster with %d dnodes and check that all cluster dnodes are ready within 5s! " %dnodeNumbers)
|
||||
return True
|
||||
count+=1
|
||||
time.sleep(1)
|
||||
else:
|
||||
tdLog.debug(tdSql.queryResult)
|
||||
tdLog.exit("it find cluster with %d dnodes but check that there dnodes are not ready within 5s ! "%dnodeNumbers)
|
||||
|
||||
def checkDbRows(self,dbNumbers):
|
||||
dbNumbers=int(dbNumbers)
|
||||
count=0
|
||||
while count < 5:
|
||||
tdSql.query("show databases;")
|
||||
if tdSql.checkRows(dbNumbers+2):
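# dbNumbers+2: `show databases` also lists the two system databases (information_schema, performance_schema)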
|
||||
tdLog.success("we find %d databases and expect %d in clusters! " %(tdSql.queryRows,dbNumbers+2))
|
||||
return True
|
||||
else:
|
||||
continue
|
||||
else :
|
||||
tdLog.debug(tdSql.queryResult)
|
||||
tdLog.exit("we find %d databases but expect %d in clusters! " %(tdSql.queryRows,dbNumbers))
|
||||
|
||||
def checkDb(self,dbNumbers,dbindex):
|
||||
count=0
|
||||
while count < 5:
|
||||
query_status=0
|
||||
for i in range(dbNumbers):
|
||||
for j in range(dbNumbers):
|
||||
tdSql.query("show databases;")
|
||||
if "%s%d"%(dbindex,j) == tdSql.queryResult[i+2][0] :
|
||||
if tdSql.queryResult[i+2][19] == "ready":
|
||||
query_status+=1
|
||||
else:
|
||||
continue
|
||||
# print(query_status)
|
||||
count+=1
|
||||
if query_status == dbNumbers:
|
||||
tdLog.success("we find cluster with %d dnode and check all databases are ready within 5s! " %dbNumbers)
|
||||
return True
|
||||
else:
|
||||
tdLog.debug(tdSql.queryResult)
|
||||
tdLog.exit("database is not ready within 5s")
|
||||
|
||||
def checkData(self,dbname,stbname,stableCount,CtableCount,rowsPerSTable,):
|
||||
tdSql.execute("use %s"%dbname)
|
||||
tdSql.query("show stables")
|
||||
tdSql.checkRows(stableCount)
|
||||
tdSql.query("show tables")
|
||||
tdSql.checkRows(CtableCount)
|
||||
for i in range(stableCount):
|
||||
tdSql.query("select count(*) from %s%d"%(stbname,i))
|
||||
tdSql.checkData(0,0,rowsPerSTable)
|
||||
return
|
||||
|
||||
def checkMnodeStatus(self,mnodeNums):
|
||||
self.mnodeNums=int(mnodeNums)
|
||||
# self.leaderDnode=int(leaderDnode)
|
||||
|
||||
count=0
|
||||
|
||||
while count < 10:
|
||||
time.sleep(1)
|
||||
tdSql.query("show mnodes;")
|
||||
if tdSql.checkRows(self.mnodeNums) :
|
||||
tdLog.success("cluster has %d mnodes" %self.mnodeNums )
|
||||
|
||||
if self.mnodeNums == 1:
|
||||
if tdSql.queryResult[0][2]== 'leader' and tdSql.queryResult[0][3]== 'ready' :
|
||||
tdLog.success("%d mnodes is ready in 10s"%self.mnodeNums)
|
||||
return True
|
||||
count+=1
|
||||
elif self.mnodeNums == 3 :
|
||||
if tdSql.queryResult[0][2]=='leader' and tdSql.queryResult[0][3]== 'ready' :
|
||||
if tdSql.queryResult[1][2]=='follower' and tdSql.queryResult[1][3]== 'ready' :
|
||||
if tdSql.queryResult[2][2]=='follower' and tdSql.queryResult[2][3]== 'ready' :
|
||||
tdLog.success("%d mnodes is ready in 10s"%self.mnodeNums)
|
||||
return True
|
||||
elif tdSql.queryResult[1][2]=='leader' and tdSql.queryResult[1][3]== 'ready' :
|
||||
if tdSql.queryResult[0][2]=='follower' and tdSql.queryResult[0][3]== 'ready' :
|
||||
if tdSql.queryResult[2][2]=='follower' and tdSql.queryResult[2][3]== 'ready' :
|
||||
tdLog.success("%d mnodes is ready in 10s"%self.mnodeNums)
|
||||
return True
|
||||
elif tdSql.queryResult[2][2]=='leader' and tdSql.queryResult[2][3]== 'ready' :
|
||||
if tdSql.queryResult[0][2]=='follower' and tdSql.queryResult[0][3]== 'ready' :
|
||||
if tdSql.queryResult[1][2]=='follower' and tdSql.queryResult[1][3]== 'ready' :
|
||||
tdLog.success("%d mnodes is ready in 10s"%self.mnodeNums)
|
||||
return True
|
||||
count+=1
|
||||
elif self.mnodeNums == 2 :
|
||||
if tdSql.queryResult[0][2]=='leader' and tdSql.queryResult[0][3]== 'ready' :
|
||||
if tdSql.queryResult[1][2]=='follower' and tdSql.queryResult[1][3]== 'ready' :
|
||||
tdLog.success("%d mnodes is ready in 10s"%self.mnodeNums)
|
||||
return True
|
||||
elif tdSql.queryResult[1][2]=='leader' and tdSql.queryResult[1][3]== 'ready' :
|
||||
if tdSql.queryResult[0][2]=='follower' and tdSql.queryResult[0][3]== 'ready' :
|
||||
tdLog.success("%d mnodes is ready in 10s"%self.mnodeNums)
|
||||
return True
|
||||
count+=1
|
||||
else:
|
||||
tdLog.debug(tdSql.queryResult)
|
||||
tdLog.exit("cluster of %d mnodes is not ready in 10s " %self.mnodeNums)
|
||||
|
||||
|
||||
|
||||
|
||||
def check3mnodeoff(self,offlineDnodeNo,mnodeNums=3):
|
||||
count=0
|
||||
while count < 10:
|
||||
time.sleep(1)
|
||||
tdSql.query("show mnodes;")
|
||||
if tdSql.checkRows(mnodeNums) :
|
||||
tdLog.success("cluster has %d mnodes" %self.mnodeNums )
|
||||
else:
|
||||
tdLog.exit("mnode number is correct")
|
||||
if offlineDnodeNo == 1:
|
||||
if tdSql.queryResult[0][2]=='offline' :
|
||||
if tdSql.queryResult[1][2]=='leader' and tdSql.queryResult[1][3]== 'ready' :
|
||||
if tdSql.queryResult[2][2]=='follower' and tdSql.queryResult[2][3]== 'ready' :
|
||||
tdLog.success("stop mnodes on dnode %d successfully in 10s" %offlineDnodeNo)
|
||||
return True
|
||||
elif tdSql.queryResult[1][2]=='follower' and tdSql.queryResult[1][3]== 'ready' :
|
||||
if tdSql.queryResult[2][2]=='leader' and tdSql.queryResult[2][3]== 'ready' :
|
||||
tdLog.debug("stop mnodes on dnode %d successfully in 10s" %offlineDnodeNo)
|
||||
return True
|
||||
count+=1
|
||||
elif offlineDnodeNo == 2:
|
||||
if tdSql.queryResult[1][2]=='offline' :
|
||||
if tdSql.queryResult[0][2]=='leader' and tdSql.queryResult[0][3]== 'ready' :
|
||||
if tdSql.queryResult[2][2]=='follower' and tdSql.queryResult[2][3]== 'ready' :
|
||||
tdLog.debug("stop mnodes on dnode %d successfully in 10s" %offlineDnodeNo)
|
||||
return True
|
||||
elif tdSql.queryResult[0][2]=='follower' and tdSql.queryResult[0][3]== 'ready' :
|
||||
if tdSql.queryResult[2][2]=='leader' and tdSql.queryResult[2][3]== 'ready' :
|
||||
tdLog.debug("stop mnodes on dnode %d successfully in 10s" %offlineDnodeNo)
|
||||
return True
|
||||
count+=1
|
||||
elif offlineDnodeNo == 3:
|
||||
if tdSql.queryResult[2][2]=='offline' :
|
||||
if tdSql.queryResult[0][2]=='leader' and tdSql.queryResult[0][3]== 'ready' :
|
||||
if tdSql.queryResult[1][2]=='follower' and tdSql.queryResult[1][3]== 'ready' :
|
||||
tdLog.debug("stop mnodes on dnode %d successfully in 10s" %offlineDnodeNo)
|
||||
return True
|
||||
elif tdSql.queryResult[0][2]=='follower' and tdSql.queryResult[0][3]== 'ready' :
|
||||
if tdSql.queryResult[1][2]=='leader' and tdSql.queryResult[1][3]== 'ready' :
|
||||
tdLog.debug("stop mnodes on dnode %d successfully in 10s" %offlineDnodeNo)
|
||||
return True
|
||||
count+=1
|
||||
else:
|
||||
tdLog.debug(tdSql.queryResult)
|
||||
tdLog.exit("stop mnodes on dnode %d failed in 10s ")
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
def close(self):
|
||||
self.cursor.close()
|
||||
|
||||
clusterComCheck = ClusterComCheck()
|
|
@ -0,0 +1,298 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################

# -*- coding: utf-8 -*-

from collections import defaultdict
import random
import string
import threading
import requests
import time
import os        # needed by startTmqSimProcess
import platform  # needed by startTmqSimProcess
# import socketfrom

import taos
from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import *
from util.common import *

# class actionType(Enum):
#     CREATE_DATABASE = 0
#     CREATE_STABLE   = 1
#     CREATE_CTABLE   = 2
#     INSERT_DATA     = 3

class ClusterComCreate:
    def init(self, conn, logSql):
        tdSql.init(conn.cursor())
        # tdSql.init(conn.cursor(), logSql)  # output sql.txt file

    def initConsumerTable(self, cdbName='cdb'):
        tdLog.info("create consume database, and consume info table, and consume result table")
        tdSql.query("create database if not exists %s vgroups 1"%(cdbName))
        tdSql.query("drop table if exists %s.consumeinfo "%(cdbName))
        tdSql.query("drop table if exists %s.consumeresult "%(cdbName))
        tdSql.query("drop table if exists %s.notifyinfo "%(cdbName))

        tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName)
        tdSql.query("create table %s.consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)"%cdbName)
        tdSql.query("create table %s.notifyinfo (ts timestamp, cmdid int, consumerid int)"%cdbName)

    def initConsumerInfoTable(self, cdbName='cdb'):
        tdLog.info("drop consumeinfo table")
        tdSql.query("drop table if exists %s.consumeinfo "%(cdbName))
        tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName)

    def insertConsumerInfo(self, consumerId, expectrowcnt, topicList, keyList, ifcheckdata, ifmanualcommit, cdbName='cdb'):
        sql = "insert into %s.consumeinfo values "%cdbName
        sql += "(now, %d, '%s', '%s', %d, %d, %d)"%(consumerId, topicList, keyList, expectrowcnt, ifcheckdata, ifmanualcommit)
        tdLog.info("consume info sql: %s"%sql)
        tdSql.query(sql)

    def selectConsumeResult(self, expectRows, cdbName='cdb'):
        resultList = []
        while 1:
            tdSql.query("select * from %s.consumeresult"%cdbName)
            #tdLog.info("row: %d, %l64d, %l64d"%(tdSql.getData(0, 1),tdSql.getData(0, 2),tdSql.getData(0, 3))
            if tdSql.getRows() == expectRows:
                break
            else:
                time.sleep(5)

        for i in range(expectRows):
            tdLog.info("consume id: %d, consume msgs: %d, consume rows: %d"%(tdSql.getData(i , 1), tdSql.getData(i , 2), tdSql.getData(i , 3)))
            resultList.append(tdSql.getData(i , 3))

        return resultList

    def startTmqSimProcess(self, pollDelay, dbName, showMsg=1, showRow=1, cdbName='cdb', valgrind=0):
        buildPath = tdCom.getBuildPath()
        cfgPath = tdCom.getClientCfgPath()
        if valgrind == 1:
            logFile = cfgPath + '/../log/valgrind-tmq.log'
            shellCmd = 'nohup valgrind --log-file=' + logFile
            shellCmd += ' --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all --num-callers=20 -v --workaround-gcc296-bugs=yes '

        if (platform.system().lower() == 'windows'):
            shellCmd = 'mintty -h never -w hide ' + buildPath + '\\build\\bin\\tmq_sim.exe -c ' + cfgPath
            shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName)
            shellCmd += "> nul 2>&1 &"
        else:
            shellCmd = 'nohup ' + buildPath + '/build/bin/tmq_sim -c ' + cfgPath
            shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName)
            shellCmd += "> /dev/null 2>&1 &"
        tdLog.info(shellCmd)
        os.system(shellCmd)

    def getStartConsumeNotifyFromTmqsim(self, cdbName='cdb'):
        while 1:
            tdSql.query("select * from %s.notifyinfo"%cdbName)
            #tdLog.info("row: %d, %l64d, %l64d"%(tdSql.getData(0, 1),tdSql.getData(0, 2),tdSql.getData(0, 3))
            if (tdSql.getRows() == 1) and (tdSql.getData(0, 1) == 0):
                break
            else:
                time.sleep(0.1)
        return

    def getStartCommitNotifyFromTmqsim(self, cdbName='cdb'):
        while 1:
            tdSql.query("select * from %s.notifyinfo"%cdbName)
            #tdLog.info("row: %d, %l64d, %l64d"%(tdSql.getData(0, 1),tdSql.getData(0, 2),tdSql.getData(0, 3))
            if tdSql.getRows() == 2:
                print(tdSql.getData(0, 1), tdSql.getData(1, 1))
                if tdSql.getData(1, 1) == 1:
                    break
            time.sleep(0.1)
        return

    def create_database(self, tsql, dbName, dropFlag=1, vgroups=4, replica=1):
        if dropFlag == 1:
            tsql.execute("drop database if exists %s"%(dbName))

        tsql.execute("create database if not exists %s vgroups %d replica %d"%(dbName, vgroups, replica))
        tdLog.debug("complete to create database %s"%(dbName))
        return

    def create_stable(self, tsql, dbName, stbName):
        tsql.execute("create table if not exists %s.%s (ts timestamp, c1 int, c2 int, c3 binary(16)) tags(t1 int, t2 binary(32))"%(dbName, stbName))
        tdLog.debug("complete to create %s.%s" %(dbName, stbName))
        return

    def create_ctable(self, tsql=None, dbName='dbx', stbName='stb', ctbPrefix='ctb', ctbNum=1):
        tsql.execute("use %s" %dbName)
        pre_create = "create table"
        sql = pre_create
        #tdLog.debug("doing create one stable %s and %d child table in %s ..." %(stbname, count ,dbname))
        for i in range(ctbNum):
            tagValue = 'beijing'
            if (i % 2 == 0):
                tagValue = 'shanghai'

            sql += " %s%d using %s tags(%d, '%s')"%(ctbPrefix,i,stbName,i+1, tagValue)
            if (i > 0) and (i%100 == 0):
                tsql.execute(sql)
                sql = pre_create
        if sql != pre_create:
            tsql.execute(sql)

        tdLog.debug("complete to create %d child tables in %s.%s" %(ctbNum, dbName, stbName))
        return

    def insert_data(self, tsql, dbName, stbName, ctbNum, rowsPerTbl, batchNum, startTs=None):
        tdLog.debug("start to insert data ............")
        tsql.execute("use %s" %dbName)
        pre_insert = "insert into "
        sql = pre_insert

        if startTs is None:
            t = time.time()
            startTs = int(round(t * 1000))
        #tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows))
        for i in range(ctbNum):
            sql += " %s%d values "%(stbName,i)
            for j in range(rowsPerTbl):
                sql += "(%d, %d, 'tmqrow_%d') "%(startTs + j, j, j)
                if (j > 0) and ((j%batchNum == 0) or (j == rowsPerTbl - 1)):
                    tsql.execute(sql)
                    if j < rowsPerTbl - 1:
                        sql = "insert into %s%d values " %(stbName,i)
                    else:
                        sql = "insert into "
        #end sql
        if sql != pre_insert:
            #print("insert sql:%s"%sql)
            tsql.execute(sql)
        tdLog.debug("insert data ............ [OK]")
        return

    def insert_data_1(self, tsql, dbName, ctbPrefix, ctbNum, rowsPerTbl, batchNum, startTs):
        tdLog.debug("start to insert data ............")
        tsql.execute("use %s" %dbName)
        pre_insert = "insert into "
        sql = pre_insert

        t = time.time()
        startTs = int(round(t * 1000))
        #tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows))
        for i in range(ctbNum):
            sql += " %s%d values "%(ctbPrefix,i)
            for j in range(rowsPerTbl):
                if (j % 2 == 0):
                    sql += "(%d, %d, %d, 'tmqrow_%d') "%(startTs + j, j, j, j)
                else:
                    sql += "(%d, %d, %d, 'tmqrow_%d') "%(startTs + j, j, -j, j)
                if (j > 0) and ((j%batchNum == 0) or (j == rowsPerTbl - 1)):
                    tsql.execute(sql)
                    if j < rowsPerTbl - 1:
                        sql = "insert into %s%d values " %(ctbPrefix,i)
                    else:
                        sql = "insert into "
        #end sql
        if sql != pre_insert:
            #print("insert sql:%s"%sql)
            tsql.execute(sql)
        tdLog.debug("insert data ............ [OK]")
        return

    def insert_data_interlaceByMultiTbl(self, tsql, dbName, ctbPrefix, ctbNum, rowsPerTbl, batchNum, startTs=0):
        tdLog.debug("start to insert data ............")
        tsql.execute("use %s" %dbName)
        pre_insert = "insert into "
        sql = pre_insert

        if startTs == 0:
            t = time.time()
            startTs = int(round(t * 1000))

        ctbDict = {}
        for i in range(ctbNum):
            ctbDict[i] = 0

        #tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows))
        rowsOfCtb = 0
        while rowsOfCtb < rowsPerTbl:
            for i in range(ctbNum):
                sql += " %s.%s_%d values "%(dbName,ctbPrefix,i)
                for k in range(batchNum):
                    sql += "(%d, %d, 'tmqrow_%d') "%(startTs + ctbDict[i], ctbDict[i], ctbDict[i])
                    ctbDict[i] += 1
                    if (0 == ctbDict[i]%batchNum) or (ctbDict[i] == rowsPerTbl):
                        tsql.execute(sql)
                        sql = "insert into "
                        break
            rowsOfCtb = ctbDict[0]

        tdLog.debug("insert data ............ [OK]")
        return

    def insert_data_with_autoCreateTbl(self, tsql, dbName, stbName, ctbPrefix, ctbNum, rowsPerTbl, batchNum, startTs=0):
        tdLog.debug("start to insert data with auto create child table ............")
        tsql.execute("use %s" %dbName)
        pre_insert = "insert into "
        sql = pre_insert

        if startTs == 0:
            t = time.time()
            startTs = int(round(t * 1000))

        #tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows))
        rowsOfSql = 0
        for i in range(ctbNum):
            sql += " %s.%s_%d using %s.%s tags (%d) values "%(dbName,ctbPrefix,i,dbName,stbName,i)
            for j in range(rowsPerTbl):
                sql += "(%d, %d, 'tmqrow_%d') "%(startTs + j, j, j)
                rowsOfSql += 1
                if (j > 0) and ((rowsOfSql == batchNum) or (j == rowsPerTbl - 1)):
                    tsql.execute(sql)
                    rowsOfSql = 0
                    if j < rowsPerTbl - 1:
                        sql = "insert into %s.%s_%d using %s.%s tags (%d) values " %(dbName,ctbPrefix,i,dbName,stbName,i)
                    else:
                        sql = "insert into "
        #end sql
        if sql != pre_insert:
            #print("insert sql:%s"%sql)
            tsql.execute(sql)
        tdLog.debug("insert data ............ [OK]")
        return

    def syncCreateDbStbCtbInsertData(self, tsql, paraDict):
        tdCom.create_database(tsql, paraDict["dbName"],paraDict["dropFlag"])
        tdCom.create_stable(tsql, dbname=paraDict["dbName"],stbname=paraDict["stbName"], column_elm_list=paraDict['colSchema'], tag_elm_list=paraDict['tagSchema'])
        tdCom.create_ctable(tsql, dbname=paraDict["dbName"],stbname=paraDict["stbName"],tag_elm_list=paraDict['tagSchema'],count=paraDict["ctbNum"], default_ctbname_prefix=paraDict['ctbPrefix'])
        if "event" in paraDict and type(paraDict['event']) == type(threading.Event()):
            paraDict["event"].set()

        ctbPrefix = paraDict['ctbPrefix']
        ctbNum = paraDict["ctbNum"]
        for i in range(ctbNum):
            tbName = '%s%s'%(ctbPrefix,i)
            tdCom.insert_rows(tsql,dbname=paraDict["dbName"],tbname=tbName,start_ts_value=paraDict['startTs'],count=paraDict['rowsPerTbl'])
        return

    def threadFunction(self, **paraDict):
        # create new connector for new tdSql instance in my thread
        newTdSql = tdCom.newTdSql()
        self.syncCreateDbStbCtbInsertData(newTdSql, paraDict)
        return

    def asyncCreateDbStbCtbInsertData(self, paraDict):
        pThread = threading.Thread(target=self.threadFunction, kwargs=paraDict)
        pThread.start()
        return pThread


    def close(self):
        self.cursor.close()

clusterComCreate = ClusterComCreate()
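For reference, a minimal usage sketch of the helper class above (the database, topic, and group names here are illustrative, not part of the committed tests): it prepares data, registers a consumer row, starts tmq_sim, and waits for its result row in cdb.consumeresult.

def example_consume_flow():
    # Prepare the consume bookkeeping tables and the test data.
    clusterComCreate.initConsumerTable()
    clusterComCreate.create_database(tdSql, 'db_demo', dropFlag=1, vgroups=4, replica=1)
    clusterComCreate.create_stable(tdSql, 'db_demo', 'stb')
    clusterComCreate.create_ctable(tdSql, dbName='db_demo', stbName='stb', ctbPrefix='ctb', ctbNum=4)
    clusterComCreate.insert_data(tdSql, 'db_demo', 'ctb', 4, 1000, 100)
    # Create a topic and describe the expected consumption to tmq_sim.
    tdSql.execute("create topic topic_demo as select * from db_demo.stb")
    keyList = 'group.id:cgrp_demo, enable.auto.commit:false, auto.offset.reset:earliest'
    clusterComCreate.insertConsumerInfo(0, 4 * 1000, 'topic_demo', keyList, 0, 0)
    # Launch the consumer simulator and block until it reports one result row.
    clusterComCreate.startTmqSimProcess(pollDelay=10, dbName='db_demo')
    return clusterComCreate.selectConsumeResult(expectRows=1)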
@ -86,7 +86,13 @@ class TMQCom:
            shellCmd += ' --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all --num-callers=20 -v --workaround-gcc296-bugs=yes '

        if (platform.system().lower() == 'windows'):
            shellCmd = 'mintty -h never -w hide ' + buildPath + '\\build\\bin\\tmq_sim.exe -c ' + cfgPath
            processorName = buildPath + '\\build\\bin\\tmq_sim.exe'
            if alias != 0:
                processorNameNew = buildPath + '\\build\\bin\\tmq_sim_new.exe'
                shellCmd = 'cp %s %s'%(processorName, processorNameNew)
                os.system(shellCmd)
                processorName = processorNameNew
            shellCmd = 'mintty -h never ' + processorName + ' -c ' + cfgPath
            shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName)
            shellCmd += "> nul 2>&1 &"
        else:
@ -288,7 +288,10 @@ class TDTestCase:
            tdLog.exit("tmq consume rows error!")

        tdSql.query("drop topic %s"%topicFromStb1)
        os.system('pkill tmq_sim')
        if (platform.system().lower() == 'windows'):
            os.system("TASKKILL /F /IM tmq_sim.exe")
        else:
            os.system('pkill tmq_sim')

        tdLog.printNoPrefix("======== test case 1 end ...... ")

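The platform check above is the pattern the tests use to stop a lingering tmq_sim process; a small reusable sketch of the same cleanup (the function name is hypothetical, the commands are taken from the hunk above):

import os
import platform

def kill_tmq_sim():
    # Stop any lingering tmq_sim consumer process, using the Windows task killer
    # on Windows and pkill elsewhere.
    if platform.system().lower() == 'windows':
        os.system("TASKKILL /F /IM tmq_sim.exe")
    else:
        os.system('pkill tmq_sim')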
@ -0,0 +1,158 @@

import taos
import sys
import time
import socket
import os
import threading

from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import *
from util.common import *
sys.path.append("./7-tmq")
from tmqCommon import *

class TDTestCase:
    def init(self, conn, logSql):
        tdLog.debug(f"start to execute {__file__}")
        tdSql.init(conn.cursor())
        #tdSql.init(conn.cursor(), logSql)  # output sql.txt file

    def tmqCase1(self):
        tdLog.printNoPrefix("======== test case 1: ")
        paraDict = {'dbName':     'db1',
                    'dropFlag':   1,
                    'event':      '',
                    'vgroups':    4,
                    'stbName':    'stb',
                    'colPrefix':  'c',
                    'tagPrefix':  't',
                    'colSchema':  [{'type': 'INT', 'count':2}, {'type': 'binary', 'len':20, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
                    'tagSchema':  [{'type': 'INT', 'count':1}, {'type': 'binary', 'len':20, 'count':1}],
                    'ctbPrefix':  'ctb',
                    'ctbNum':     10,
                    'rowsPerTbl': 4000,
                    'batchNum':   15,
                    'startTs':    1640966400000,  # 2022-01-01 00:00:00.000
                    'pollDelay':  20,
                    'showMsg':    1,
                    'showRow':    1}

        topicNameList = ['topic1', 'topic2', 'topic3', 'topic4']
        consumeGroupIdList = ['cgrp1', 'cgrp1', 'cgrp3', 'cgrp4']
        consumerIdList = [0, 1, 2, 3]
        tmqCom.initConsumerTable()
        tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict['vgroups'],replica=1)
        tdLog.info("create stb")
        tdCom.create_stable(tdSql, dbname=paraDict["dbName"],stbname=paraDict["stbName"], column_elm_list=paraDict['colSchema'], tag_elm_list=paraDict['tagSchema'])
        tdLog.info("create ctb")
        tdCom.create_ctable(tdSql, dbname=paraDict["dbName"],stbname=paraDict["stbName"],tag_elm_list=paraDict['tagSchema'],count=paraDict["ctbNum"], default_ctbname_prefix=paraDict['ctbPrefix'])
        # tdLog.info("insert data")
        # tmqCom.insert_data(tdSql,paraDict["dbName"],paraDict["ctbPrefix"],paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"],paraDict["startTs"])

        tdLog.info("create 4 topics")
        sqlString = "create topic %s as database %s" %(topicNameList[0], paraDict['dbName'])
        tdLog.info("create topic sql: %s"%sqlString)
        tdSql.execute(sqlString)

        sqlString = "create topic %s as stable %s.%s" %(topicNameList[1], paraDict['dbName'], paraDict['stbName'])
        tdLog.info("create topic sql: %s"%sqlString)
        tdSql.execute(sqlString)

        queryString = "select * from %s.%s where c1 %% 7 == 0" %(paraDict['dbName'], paraDict['stbName'])
        sqlString = "create topic %s as %s" %(topicNameList[2], queryString)
        tdLog.info("create topic sql: %s"%sqlString)
        tdSql.execute(sqlString)

        queryString = "select ts, log(c1), ceil(pow(c1,3)) from %s.%s where c1 %% 7 == 0" %(paraDict['dbName'], paraDict['stbName'])
        sqlString = "create topic %s as %s " %(topicNameList[3], queryString)
        tdLog.info("create topic sql: %s"%sqlString)
        tdSql.execute(sqlString)

        tdSql.query("show topics")
        tdLog.debug(tdSql.queryResult)
        rows = tdSql.getRows()
        if rows != len(consumerIdList):
            tdLog.exit("topic rows error")

        for i in range(rows):
            topicName = tdSql.getData(i,0)
            matchFlag = 0
            while matchFlag == 0:
                for j in range(len(topicNameList)):
                    if topicName == topicNameList[j]:
                        matchFlag = 1
                        break
                if matchFlag == 0:
                    tdLog.exit("topic name: %s is error" % topicName)

        # init consume info, and start tmq_sim, then check consume result
        tdLog.info("insert consume info to consume processor")
        expectrowcnt = paraDict["rowsPerTbl"] * paraDict["ctbNum"]
        topicList = topicNameList[0]
        ifcheckdata = 0
        ifManualCommit = 0
        keyList = 'group.id:%s, enable.auto.commit:false, auto.commit.interval.ms:6000, auto.offset.reset:earliest'%consumeGroupIdList[0]
        tmqCom.insertConsumerInfo(consumerIdList[0], expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)

        topicList = topicNameList[1]
        keyList = 'group.id:%s, enable.auto.commit:false, auto.commit.interval.ms:6000, auto.offset.reset:earliest'%consumeGroupIdList[1]
        tmqCom.insertConsumerInfo(consumerIdList[1], expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)

        topicList = topicNameList[2]
        keyList = 'group.id:%s, enable.auto.commit:false, auto.commit.interval.ms:6000, auto.offset.reset:earliest'%consumeGroupIdList[2]
        tmqCom.insertConsumerInfo(consumerIdList[2], expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)

        topicList = topicNameList[3]
        keyList = 'group.id:%s, enable.auto.commit:false, auto.commit.interval.ms:6000, auto.offset.reset:earliest'%consumeGroupIdList[3]
        tmqCom.insertConsumerInfo(consumerIdList[3], expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)

        tdLog.info("start consume processor")
        tmqCom.startTmqSimProcess(paraDict['pollDelay'],paraDict["dbName"],paraDict['showMsg'], paraDict['showRow'])

        tdLog.info("async insert data")
        pThread = tmqCom.asyncInsertData(paraDict)

        time.sleep(5)
        tdLog.info("check show consumers")
        tdSql.query("show consumers")
        # tdLog.info(tdSql.queryResult)
        rows = tdSql.getRows()
        tdLog.info("show consumers rows: %d"%rows)
        if rows != len(topicNameList):
            tdLog.exit("show consumers rows error")

        tdLog.info("check show subscriptions")
        tdSql.query("show subscriptions")
        # tdLog.debug(tdSql.queryResult)
        rows = tdSql.getRows()
        tdLog.info("show subscriptions rows: %d"%rows)
        if rows != paraDict['vgroups'] * len(topicNameList):
            tdLog.exit("show subscriptions rows error")

        pThread.join()

        tdLog.info("insert process end, and start to check consume result")
        expectRows = len(consumerIdList)
        _ = tmqCom.selectConsumeResult(expectRows)

        time.sleep(10)
        for i in range(len(topicNameList)):
            tdSql.query("drop topic %s"%topicNameList[i])

        tdLog.printNoPrefix("======== test case 1 end ...... ")

    def run(self):
        tdSql.prepare()
        self.tmqCase1()

    def stop(self):
        tdSql.close()
        tdLog.success(f"{__file__} successfully executed")

event = threading.Event()

tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
@ -0,0 +1,186 @@

import taos
import sys
import time
import socket
import os
import threading

from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import *
from util.common import *
sys.path.append("./7-tmq")
from tmqCommon import *

class TDTestCase:
    def init(self, conn, logSql):
        tdLog.debug(f"start to execute {__file__}")
        tdSql.init(conn.cursor())
        #tdSql.init(conn.cursor(), logSql)  # output sql.txt file

    def checkFileContent(self, consumerId, queryString):
        buildPath = tdCom.getBuildPath()
        cfgPath = tdCom.getClientCfgPath()
        dstFile = '%s/../log/dstrows_%d.txt'%(cfgPath, consumerId)
        cmdStr = '%s/build/bin/taos -c %s -s "%s >> %s"'%(buildPath, cfgPath, queryString, dstFile)
        tdLog.info(cmdStr)
        os.system(cmdStr)

        consumeRowsFile = '%s/../log/consumerid_%d.txt'%(cfgPath, consumerId)
        tdLog.info("rows file: %s, %s"%(consumeRowsFile, dstFile))

        consumeFile = open(consumeRowsFile, mode='r')
        queryFile = open(dstFile, mode='r')

        # skip the first line: it is the schema header
        queryFile.readline()

        while True:
            dst = queryFile.readline()
            src = consumeFile.readline()

            if dst:
                if dst != src:
                    tdLog.exit("consumerId %d consumed rows do not match the rows from direct query"%consumerId)
            else:
                break
        return

    def tmqCase1(self):
        tdLog.printNoPrefix("======== test case 1: ")
        paraDict = {'dbName':     'db1',
                    'dropFlag':   1,
                    'event':      '',
                    'vgroups':    4,
                    'stbName':    'stb',
                    'colPrefix':  'c',
                    'tagPrefix':  't',
                    'colSchema':  [{'type': 'INT', 'count':1}, {'type': 'binary', 'len':20, 'count':1}],
                    'tagSchema':  [{'type': 'INT', 'count':1}, {'type': 'binary', 'len':20, 'count':1}],
                    'ctbPrefix':  'ctb',
                    'ctbNum':     1,
                    'rowsPerTbl': 10000,
                    'batchNum':   10,
                    'startTs':    1640966400000,  # 2022-01-01 00:00:00.000
                    'pollDelay':  10,
                    'showMsg':    1,
                    'showRow':    1}

        topicNameList = ['topic1', 'topic2', 'topic3']
        expectRowsList = []
        tmqCom.initConsumerTable()
        tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=4,replica=1)
        tdLog.info("create stb")
        tdCom.create_stable(tdSql, dbname=paraDict["dbName"],stbname=paraDict["stbName"], column_elm_list=paraDict['colSchema'], tag_elm_list=paraDict['tagSchema'])
        tdLog.info("create ctb")
        tdCom.create_ctable(tdSql, dbname=paraDict["dbName"],stbname=paraDict["stbName"],tag_elm_list=paraDict['tagSchema'],count=paraDict["ctbNum"], default_ctbname_prefix=paraDict['ctbPrefix'])
        tdLog.info("insert data")
        tmqCom.insert_data(tdSql,paraDict["dbName"],paraDict["ctbPrefix"],paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"],paraDict["startTs"])

        tdLog.info("create topics from stb with filter")
        # queryString = "select ts, log(c1), ceil(pow(c1,3)) from %s.%s where c1 %% 7 == 0" %(paraDict['dbName'], paraDict['stbName'])
        # queryString = "select ts, c1, c2 from %s.%s" %(paraDict['dbName'], paraDict['stbName'])
        queryString = "select * from %s.%s" %(paraDict['dbName'], paraDict['stbName'])
        sqlString = "create topic %s as stable %s.%s" %(topicNameList[0], paraDict["dbName"],paraDict["stbName"])
        # sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
        tdLog.info("create topic sql: %s"%sqlString)
        tdSql.execute(sqlString)
        # queryString = 'select * from %s.%s'%(paraDict["dbName"],paraDict["stbName"])
        tdSql.query(queryString)
        expectRowsList.append(tdSql.getRows())

        # init consume info, and start tmq_sim, then check consume result
        tdLog.info("insert consume info to consume processor")
        consumerId = 0
        expectrowcnt = paraDict["rowsPerTbl"] * paraDict["ctbNum"]
        topicList = topicNameList[0]
        ifcheckdata = 1
        ifManualCommit = 1
        keyList = 'group.id:cgrp1, enable.auto.commit:false, auto.commit.interval.ms:6000, auto.offset.reset:earliest'
        tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)

        tdLog.info("start consume processor")
        tmqCom.startTmqSimProcess(paraDict['pollDelay'],paraDict["dbName"],paraDict['showMsg'], paraDict['showRow'])

        tdLog.info("wait the consume result")
        expectRows = 1
        resultList = tmqCom.selectConsumeResult(expectRows)

        if expectRowsList[0] != resultList[0]:
            tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[0], resultList[0]))
            tdLog.exit("0 tmq consume rows error!")

        self.checkFileContent(consumerId, queryString)

        # reinit consume info, and start tmq_sim, then check consume result
        tmqCom.initConsumerTable()

        queryString = "select ts, log(c1), cos(c1) from %s.%s where c1 > 3169" %(paraDict['dbName'], paraDict['stbName'])
        sqlString = "create topic %s as %s" %(topicNameList[1], queryString)
        tdLog.info("create topic sql: %s"%sqlString)
        tdSql.execute(sqlString)
        tdSql.query(queryString)
        expectRowsList.append(tdSql.getRows())

        consumerId = 1
        topicList = topicNameList[1]
        tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)

        tdLog.info("start consume processor")
        tmqCom.startTmqSimProcess(paraDict['pollDelay'],paraDict["dbName"],paraDict['showMsg'], paraDict['showRow'])

        tdLog.info("wait the consume result")
        expectRows = 1
        resultList = tmqCom.selectConsumeResult(expectRows)
        if expectRowsList[1] != resultList[0]:
            tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[1], resultList[0]))
            tdLog.exit("1 tmq consume rows error!")

        self.checkFileContent(consumerId, queryString)

        # reinit consume info, and start tmq_sim, then check consume result
        tmqCom.initConsumerTable()

        queryString = "select ts, log(c1), atan(c1) from %s.%s where ts >= %d" %(paraDict['dbName'], paraDict['stbName'], paraDict["startTs"]+6137)
        sqlString = "create topic %s as %s" %(topicNameList[2], queryString)
        tdLog.info("create topic sql: %s"%sqlString)
        tdSql.execute(sqlString)
        tdSql.query(queryString)
        expectRowsList.append(tdSql.getRows())

        consumerId = 2
        topicList = topicNameList[2]
        tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)

        tdLog.info("start consume processor")
        tmqCom.startTmqSimProcess(paraDict['pollDelay'],paraDict["dbName"],paraDict['showMsg'], paraDict['showRow'])

        tdLog.info("wait the consume result")
        expectRows = 1
        resultList = tmqCom.selectConsumeResult(expectRows)
        # if expectRowsList[2] != resultList[0]:
        #     tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[2], resultList[0]))
        #     tdLog.exit("2 tmq consume rows error!")

        # self.checkFileContent(consumerId, queryString)

        time.sleep(10)
        for i in range(len(topicNameList)):
            tdSql.query("drop topic %s"%topicNameList[i])

        tdLog.printNoPrefix("======== test case 1 end ...... ")

    def run(self):
        tdSql.prepare()
        self.tmqCase1()

    def stop(self):
        tdSql.close()
        tdLog.success(f"{__file__} successfully executed")

event = threading.Event()

tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
@ -23,6 +23,7 @@ python3 ./test.py -f 1-insert/alter_stable.py
python3 ./test.py -f 1-insert/alter_table.py
python3 ./test.py -f 1-insert/insertWithMoreVgroup.py
python3 ./test.py -f 1-insert/table_comment.py
python3 ./test.py -f 1-insert/table_param_ttl.py
python3 ./test.py -f 2-query/between.py
python3 ./test.py -f 2-query/distinct.py
python3 ./test.py -f 2-query/varchar.py
@ -116,6 +117,10 @@ python3 ./test.py -f 2-query/function_null.py
python3 ./test.py -f 6-cluster/5dnode1mnode.py
python3 ./test.py -f 6-cluster/5dnode2mnode.py
python3 ./test.py -f 6-cluster/5dnode3mnodeStop.py -N 5 -M 3
python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopVnodeCreateDb.py -N 5 -M 3
# BUG python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopCreateDb.py -N 5 -M 3
# BUG python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopMnodeCreateDb.py -N 5 -M 3

# python3 ./test.py -f 6-cluster/5dnode3mnodeDrop.py -N 5
# BUG python3 ./test.py -f 6-cluster/5dnode3mnodeStopInsert.py

@ -139,3 +144,4 @@ python3 ./test.py -f 7-tmq/tmqCheckData.py
python3 ./test.py -f 7-tmq/tmqUdf.py
#python3 ./test.py -f 7-tmq/tmq3mnodeSwitch.py -N 5
python3 ./test.py -f 7-tmq/tmqConsumerGroup.py
python3 ./test.py -f 7-tmq/tmqShow.py
@ -2,7 +2,7 @@
SETLOCAL EnableDelayedExpansion
for /F "tokens=1,2 delims=#" %%a in ('"prompt #$H#$E# & echo on & for %%b in (1) do rem"') do ( set "DEL=%%a")
set /a a=0
if %1 == full (
if "%1" == "full" (
  echo Windows Taosd Full Test
  set /a exitNum=0
  del /Q /F failed.txt
@ -22,8 +22,6 @@ import json
import platform
import socket
import threading
from distutils.log import warn as printf
from fabric2 import Connection
sys.path.append("../pytest")
from util.log import *
from util.dnodes import *
@ -187,9 +185,9 @@ if __name__ == "__main__":

    tdLog.info("Procedures for tdengine deployed in %s" % (host))
    if platform.system().lower() == 'windows':
        fileName = fileName.replace("/", os.sep)
        if (masterIp == "" and not fileName[0:12] == "0-others\\udf"):
            threading.Thread(target=checkRunTimeError,daemon=True).start()
        tdCases.logSql(logSql)
        tdLog.info("Procedures for testing self-deployment")
        tdDnodes.init(deployPath, masterIp)
        tdDnodes.setTestCluster(testCluster)
@ -208,18 +206,46 @@ if __name__ == "__main__":
        uModule = importlib.import_module(moduleName)
        try:
            ucase = uModule.TDTestCase()
            if ((json.dumps(updateCfgDict) == '{}') and (ucase.updatecfgDict is not None)):
            if ((json.dumps(updateCfgDict) == '{}') and hasattr(ucase, 'updatecfgDict')):
                updateCfgDict = ucase.updatecfgDict
                updateCfgDictStr = "-d %s"%base64.b64encode(json.dumps(updateCfgDict).encode()).decode()
        except Exception as r:
            print(r)
        else:
            pass
        tdDnodes.deploy(1,updateCfgDict)
        tdDnodes.start(1)
        conn = taos.connect(
            host="%s"%(host),
            config=tdDnodes.sim.getCfgDir())
        if dnodeNums == 1:
            tdDnodes.deploy(1,updateCfgDict)
            tdDnodes.start(1)
            tdCases.logSql(logSql)
        else:
            tdLog.debug("create a cluster with %s nodes and make %s dnodes independent mnodes"%(dnodeNums,mnodeNums))
            dnodeslist = cluster.configure_cluster(dnodeNums=dnodeNums,mnodeNums=mnodeNums)
            tdDnodes = ClusterDnodes(dnodeslist)
            tdDnodes.init(deployPath, masterIp)
            tdDnodes.setTestCluster(testCluster)
            tdDnodes.setValgrind(valgrind)
            tdDnodes.stopAll()
            for dnode in tdDnodes.dnodes:
                tdDnodes.deploy(dnode.index,{})
            for dnode in tdDnodes.dnodes:
                tdDnodes.starttaosd(dnode.index)
            tdCases.logSql(logSql)
            conn = taos.connect(
                host,
                config=tdDnodes.getSimCfgPath())
            print(tdDnodes.getSimCfgPath(),host)
            cluster.create_dnode(conn)
            try:
                if cluster.check_dnode(conn):
                    print("check dnode ready")
            except Exception as r:
                print(r)
        if ucase is not None and hasattr(ucase, 'noConn') and ucase.noConn == True:
            conn = None
        else:
            conn = taos.connect(
                host="%s"%(host),
                config=tdDnodes.sim.getCfgDir())
        if is_test_framework:
            tdCases.runOneWindows(conn, fileName)
        else:
@ -307,4 +333,5 @@ if __name__ == "__main__":
                tdCases.runOneLinux(conn, sp[0] + "_" + "restart.py")
            else:
                tdLog.info("no need to query")
    conn.close()
    if conn is not None:
        conn.close()
@ -635,8 +635,9 @@ void loop_consume(SThreadInfo* pInfo) {
    }
  }

  int32_t consumeDelay = g_stConfInfo.consumeDelay == -1 ? -1 : (g_stConfInfo.consumeDelay * 1000);
  while (running) {
    TAOS_RES* tmqMsg = tmq_consumer_poll(pInfo->tmq, g_stConfInfo.consumeDelay * 1000);
    TAOS_RES* tmqMsg = tmq_consumer_poll(pInfo->tmq, consumeDelay);
    if (tmqMsg) {
      if (0 != g_stConfInfo.showMsgFlag) {
        totalRows += msg_process(tmqMsg, pInfo, totalMsgs);
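The hunk above routes the configured delay through a local variable so that a value of -1 reaches tmq_consumer_poll unchanged instead of being multiplied into -1000. A minimal Python sketch of the same guard, for illustration only (the function name is hypothetical):

def poll_timeout_ms(consume_delay):
    # Mirror of the C change above: treat -1 as a sentinel and pass it through,
    # otherwise convert the configured delay from seconds to milliseconds.
    return -1 if consume_delay == -1 else consume_delay * 1000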
@ -156,6 +156,7 @@ void shellRunSingleCommandImp(char *command) {
    }

    fname = sptr + 2;
    while (*fname == ' ') fname++;
    *sptr = '\0';
  }
