commit a6570a4327
Merge branch 'enh/rocksdbSstate' of https://github.com/taosdata/TDengine into enh/rocksdbSstate
@@ -352,4 +352,4 @@ TDengine provides a rich set of application development interfaces, including C/C++, Java

 # Join the Technical Discussion Group

-The official TDengine community, the "IoT Big Data Group", is open to the public; you are welcome to join the discussion. Search for the WeChat ID "tdengine", add Little T as a friend, and you will be added to the group.
+The official TDengine community, the "IoT Big Data Group", is open to the public; you are welcome to join the discussion. Search for the WeChat ID "tdengine1", add Little T as a friend, and you will be added to the group.
@@ -113,14 +113,14 @@ typedef struct {
   int64_t ver;
   int32_t* dataRef;
   SPackedData submit;
-} SStreamDataSubmit2;
+} SStreamDataSubmit;

 typedef struct {
   int8_t type;
   int64_t ver;
   SArray* dataRefs; // SArray<int32_t*>
   SArray* submits;  // SArray<SPackedSubmit>
-} SStreamMergedSubmit2;
+} SStreamMergedSubmit;

 typedef struct {
   int8_t type;
@@ -209,10 +209,10 @@ static FORCE_INLINE void streamQueueProcessFail(SStreamQueue* queue) {

 void* streamQueueNextItem(SStreamQueue* queue);

-SStreamDataSubmit2* streamDataSubmitNew(SPackedData submit, int32_t type);
-void streamDataSubmitDestroy(SStreamDataSubmit2* pDataSubmit);
+SStreamDataSubmit* streamDataSubmitNew(SPackedData* pData, int32_t type);
+void streamDataSubmitDestroy(SStreamDataSubmit* pDataSubmit);

-SStreamDataSubmit2* streamSubmitBlockClone(SStreamDataSubmit2* pSubmit);
+SStreamDataSubmit* streamSubmitBlockClone(SStreamDataSubmit* pSubmit);

 typedef struct {
   char* qmsg;
@@ -239,6 +239,7 @@ typedef struct {
   void* vnode; // not available to encoder and decoder
   FTbSink* tbSinkFunc;
   STSchema* pTSchema;
+  SSHashObj* pTblInfo;
 } STaskSinkTb;

 typedef void FSmaSink(void* vnode, int64_t smaId, const SArray* data);
@@ -55,7 +55,7 @@ else
   exit $?
 fi
 while true; do
-  es=$(taos -h $FIRST_EP_HOST -P $FIRST_EP_PORT --check)
+  es=$(taos -h $FIRST_EP_HOST -P $FIRST_EP_PORT --check | grep "^[0-9]*:")
   echo ${es}
   if [ "${es%%:*}" -eq 2 ]; then
     echo "execute create dnode"
@@ -1,5 +1,5 @@
 #!/bin/sh
-es=$(taos --check)
+es=$(taos --check | grep "^[0-9]*:")
 code=${es%%:*}
 if [ "$code" -ne "0" ] && [ "$code" -ne "4" ]; then
   exit 0
@@ -1351,7 +1351,7 @@ static int32_t smlInsertData(SSmlHandle *info) {
     }
     taosArrayPush(info->pRequest->tableList, &pName);

-    tstrncpy(pName.tname, tableData->childTableName, strlen(tableData->childTableName) + 1);
+    strcpy(pName.tname, tableData->childTableName);

     SRequestConnInfo conn = {0};
     conn.pTrans = info->taos->pAppInfo->pTransporter;
@@ -84,7 +84,7 @@ bool tsMonitorComp = false;
 // telem
 bool tsEnableTelem = true;
 int32_t tsTelemInterval = 43200;
-char tsTelemServer[TSDB_FQDN_LEN] = "telemetry.taosdata.com";
+char tsTelemServer[TSDB_FQDN_LEN] = "telemetry.tdengine.com";
 uint16_t tsTelemPort = 80;
 char *tsTelemUri = "/report";

@@ -87,18 +87,6 @@ static void dmStopDnode(int signum, void *sigInfo, void *context) {
 }

 void dmLogCrash(int signum, void *sigInfo, void *context) {
-  taosIgnSignal(SIGTERM);
-  taosIgnSignal(SIGHUP);
-  taosIgnSignal(SIGINT);
-  taosIgnSignal(SIGBREAK);
-
-#ifndef WINDOWS
-  taosIgnSignal(SIGBUS);
-#endif
-  taosIgnSignal(SIGABRT);
-  taosIgnSignal(SIGFPE);
-  taosIgnSignal(SIGSEGV);
-
   char *pMsg = NULL;
   const char *flags = "UTL FATAL ";
   ELogLevel level = DEBUG_FATAL;
@@ -256,11 +256,14 @@ static int32_t mndProcessConnectReq(SRpcMsg *pReq) {
     snprintf(db, TSDB_DB_FNAME_LEN, "%d%s%s", pUser->acctId, TS_PATH_DELIMITER, connReq.db);
     pDb = mndAcquireDb(pMnode, db);
     if (pDb == NULL) {
+      if (0 != strcmp(connReq.db, TSDB_INFORMATION_SCHEMA_DB) &&
+          (0 != strcmp(connReq.db, TSDB_PERFORMANCE_SCHEMA_DB))) {
       terrno = TSDB_CODE_MND_INVALID_DB;
       mGError("user:%s, failed to login from %s while use db:%s since %s", pReq->info.conn.user, ip, connReq.db,
               terrstr());
       goto _OVER;
     }
+      }

   if (mndCheckDbPrivilege(pMnode, pReq->info.conn.user, MND_OPER_READ_OR_WRITE_DB, pDb) != 0) {
     goto _OVER;
@@ -101,6 +101,8 @@ typedef struct {
   STqPushHandle pushHandle; // push
   STqExecHandle execHandle; // exec
   SRpcMsg* msg;
+  int32_t noDataPollCnt;
+  int8_t exec;
 } STqHandle;

 typedef struct {
@@ -181,7 +183,7 @@ int32_t tqStreamTasksScanWal(STQ* pTq);

 // tq util
 char* createStreamTaskIdStr(int64_t streamId, int32_t taskId);
-int32_t tqAddInputBlockNLaunchTask(SStreamTask* pTask, SStreamQueueItem* pQueueItem, int64_t ver);
+int32_t tqAddBlockNLaunchTask(SStreamTask* pTask, SPackedData* pData);
 int32_t tqExtractDataForMq(STQ* pTq, STqHandle* pHandle, const SMqPollReq* pRequest, SRpcMsg* pMsg);

 #ifdef __cplusplus
@@ -572,6 +572,10 @@ end:
   return ret;
 }

+void freePtr(void *ptr) {
+  taosMemoryFree(*(void**)ptr);
+}
+
 int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask, int64_t ver) {
   int32_t vgId = TD_VID(pTq->pVnode);
   pTask->id.idStr = createStreamTaskIdStr(pTask->id.streamId, pTask->id.taskId);
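The freePtr helper is easy to misread: when a hash map stores a pointer by value, the registered free callback receives a pointer to the stored slot, not the object itself, so it must dereference once before freeing. A minimal standalone sketch of that convention (plain libc stand-ins; only the double dereference is the point):

#include <stdlib.h>
#include <string.h>

typedef struct { unsigned long uid; } Info;

// The map owns a copy of the pointer value, so the free callback
// gets an Info** in disguise and must dereference once.
static void freeSlot(void *slot) {
  free(*(void **)slot);  // frees the Info object the slot points to
}

int main(void) {
  Info *p = malloc(sizeof(Info));
  p->uid = 42;
  void *slot = malloc(sizeof(void *)); // stand-in for the hash's value slot
  memcpy(slot, &p, sizeof(void *));
  freeSlot(slot);                      // releases the Info, not the slot
  free(slot);
  return 0;
}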
@@ -646,6 +650,8 @@ int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask, int64_t ver) {
     if (pTask->tbSink.pTSchema == NULL) {
       return -1;
     }
+    pTask->tbSink.pTblInfo = tSimpleHashInit(10240, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT));
+    tSimpleHashSetFreeFp(pTask->tbSink.pTblInfo, freePtr);
   }

   if (pTask->taskLevel == TASK_LEVEL__SOURCE) {
@@ -55,6 +55,7 @@ int32_t tqRegisterPushHandle(STQ* pTq, void* handle, SRpcMsg* pMsg) {
     memcpy(pHandle->msg, pMsg, sizeof(SRpcMsg));
     pHandle->msg->pCont = rpcMallocCont(pMsg->contLen);
   } else {
+    tqPushDataRsp(pTq, pHandle);
     void* tmp = pHandle->msg->pCont;
     memcpy(pHandle->msg, pMsg, sizeof(SRpcMsg));
     pHandle->msg->pCont = tmp;
@@ -312,7 +312,6 @@ int32_t extractSubmitMsgFromWal(SWalReader* pReader, SPackedData* pPackedData) {

   void* data = taosMemoryMalloc(len);
   if (data == NULL) {
-    // todo: for all stream in this vnode, keep this offset in the offset files, and wait for a moment, and then retry
     terrno = TSDB_CODE_OUT_OF_MEMORY;
     tqError("vgId:%d, failed to copy submit data for stream processing, since out of memory", 0);
     return -1;
@@ -120,8 +120,6 @@ int32_t createStreamRunReq(SStreamMeta* pStreamMeta, bool* pScanIdle) {
       continue;
     }

-
-
     // append the data for the stream
     tqDebug("vgId:%d s-task:%s wal reader seek to ver:%" PRId64, vgId, pTask->id.idStr, pTask->chkInfo.currentVer);
   } else {
@@ -145,17 +143,9 @@ int32_t createStreamRunReq(SStreamMeta* pStreamMeta, bool* pScanIdle) {
        continue;
      }

-      SStreamDataSubmit2* p = streamDataSubmitNew(packData, STREAM_INPUT__DATA_SUBMIT);
-      if (p == NULL) {
-        terrno = TSDB_CODE_OUT_OF_MEMORY;
-        tqError("%s failed to create data submit for stream since out of memory", pTask->id.idStr);
-        streamMetaReleaseTask(pStreamMeta, pTask);
-        continue;
-      }
-
       noNewDataInWal = false;

-      code = tqAddInputBlockNLaunchTask(pTask, (SStreamQueueItem*)p, packData.ver);
+      code = tqAddBlockNLaunchTask(pTask, &packData);
       if (code == TSDB_CODE_SUCCESS) {
         pTask->chkInfo.currentVer = walReaderGetCurrentVer(pTask->exec.pWalReader);
         tqDebug("s-task:%s set the ver:%" PRId64 " from WALReader after extract block from WAL", pTask->id.idStr,
@@ -164,8 +154,6 @@ int32_t createStreamRunReq(SStreamMeta* pStreamMeta, bool* pScanIdle) {
         tqError("s-task:%s append input queue failed, ver:%" PRId64, pTask->id.idStr, pTask->chkInfo.currentVer);
       }

-      streamDataSubmitDestroy(p);
-      taosFreeQitem(p);
       streamMetaReleaseTask(pStreamMeta, pTask);
     }

@@ -17,6 +17,13 @@
 #include "tmsg.h"
 #include "tq.h"

+#define MAX_CACHE_TABLE_INFO_NUM 10240
+
+typedef struct STableSinkInfo {
+  uint64_t uid;
+  char tbName[TSDB_TABLE_NAME_LEN];
+} STableSinkInfo;
+
 int32_t tqBuildDeleteReq(const char* stbFullName, const SSDataBlock* pDataBlock, SBatchDeleteReq* deleteReq,
                          const char* pIdStr) {
   int32_t totalRows = pDataBlock->info.rows;
@@ -90,6 +97,24 @@ end:
   return ret;
 }

+static int32_t tqGetTableInfo(SSHashObj* pTableInfoMap, uint64_t groupId, STableSinkInfo** pInfo) {
+  void* pVal = tSimpleHashGet(pTableInfoMap, &groupId, sizeof(uint64_t));
+  if (pVal) {
+    *pInfo = *(STableSinkInfo**)pVal;
+    return TSDB_CODE_SUCCESS;
+  }
+
+  return TSDB_CODE_FAILED;
+}
+
+int32_t tqPutTableInfo(SSHashObj* tblInfo, uint64_t groupId, STableSinkInfo* pTbl) {
+  if (tSimpleHashGetSize(tblInfo) > MAX_CACHE_TABLE_INFO_NUM) {
+    return TSDB_CODE_FAILED;
+  }
+
+  return tSimpleHashPut(tblInfo, &groupId, sizeof(uint64_t), &pTbl, POINTER_BYTES);
+}
+
 int32_t tqPutReqToQueue(SVnode* pVnode, SVCreateTbBatchReq* pReqs) {
   void* buf = NULL;
   int32_t tlen = 0;
@@ -251,7 +276,7 @@ void tqSinkToTablePipeline(SStreamTask* pTask, void* vnode, int64_t ver, void* d
       crTblArray = NULL;
     } else {
       SSubmitTbData tbData = {0};
-      tqDebug("tq sink pipe, convert block1 %d, rows: %d", i, rows);
+      tqDebug("tq sink pipe, convert block:%d, rows:%d", i, rows);

       if (!(tbData.aRowP = taosArrayInit(rows, sizeof(SRow*)))) {
         goto _end;
@@ -261,24 +286,39 @@ void tqSinkToTablePipeline(SStreamTask* pTask, void* vnode, int64_t ver, void* d
       tbData.uid = 0; // uid is assigned by vnode
       tbData.sver = pTSchema->version;

-      char* ctbName = NULL;
-      tqDebug("vgId:%d, stream write into %s, table auto created", TD_VID(pVnode), pDataBlock->info.parTbName);
-      if (pDataBlock->info.parTbName[0]) {
-        ctbName = taosStrdup(pDataBlock->info.parTbName);
-      } else {
-        ctbName = buildCtbNameByGroupId(stbFullName, pDataBlock->info.id.groupId);
+      STableSinkInfo* pTableSinkInfo = NULL;
+      int32_t res = tqGetTableInfo(pTask->tbSink.pTblInfo, pDataBlock->info.id.groupId, &pTableSinkInfo);
+      if (res != TSDB_CODE_SUCCESS) {
+        pTableSinkInfo = taosMemoryCalloc(1, sizeof(STableSinkInfo));
       }

+      char* ctbName = pDataBlock->info.parTbName;
+      if (!ctbName[0]) {
+        if (res == TSDB_CODE_SUCCESS) {
+          memcpy(ctbName, pTableSinkInfo->tbName, strlen(pTableSinkInfo->tbName));
+        } else {
+          char* tmp = buildCtbNameByGroupId(stbFullName, pDataBlock->info.id.groupId);
+          memcpy(ctbName, tmp, strlen(tmp));
+          memcpy(pTableSinkInfo->tbName, tmp, strlen(tmp));
+          taosMemoryFree(tmp);
+          tqDebug("vgId:%d, groupId:%" PRIu64 " datablock table name is null", TD_VID(pVnode),
+                  pDataBlock->info.id.groupId);
+        }
+      }
+
+      if (res == TSDB_CODE_SUCCESS) {
+        tbData.uid = pTableSinkInfo->uid;
+      } else {
        SMetaReader mr = {0};
        metaReaderInit(&mr, pVnode->pMeta, 0);
        if (metaGetTableEntryByName(&mr, ctbName) < 0) {
          metaReaderClear(&mr);
+          taosMemoryFree(pTableSinkInfo);
          tqDebug("vgId:%d, stream write into %s, table auto created", TD_VID(pVnode), ctbName);

          SVCreateTbReq* pCreateTbReq = NULL;

          if (!(pCreateTbReq = taosMemoryCalloc(1, sizeof(SVCreateStbReq)))) {
-            taosMemoryFree(ctbName);
            goto _end;
          };

@@ -295,7 +335,6 @@ void tqSinkToTablePipeline(SStreamTask* pTask, void* vnode, int64_t ver, void* d
          // set tag content
          tagArray = taosArrayInit(1, sizeof(STagVal));
          if (!tagArray) {
-            taosMemoryFree(ctbName);
            tdDestroySVCreateTbReq(pCreateTbReq);
            goto _end;
          }
@@ -311,11 +350,8 @@ void tqSinkToTablePipeline(SStreamTask* pTask, void* vnode, int64_t ver, void* d
          tTagNew(tagArray, 1, false, &pTag);
          tagArray = taosArrayDestroy(tagArray);
          if (pTag == NULL) {
-            taosMemoryFree(ctbName);
            tdDestroySVCreateTbReq(pCreateTbReq);
            terrno = TSDB_CODE_OUT_OF_MEMORY;
-            taosMemoryFree(ctbName);
-            tdDestroySVCreateTbReq(pCreateTbReq);
            goto _end;
          }
          pCreateTbReq->ctb.pTag = (uint8_t*)pTag;
@@ -328,8 +364,7 @@ void tqSinkToTablePipeline(SStreamTask* pTask, void* vnode, int64_t ver, void* d
          pCreateTbReq->ctb.tagName = tagName;

          // set table name
-          pCreateTbReq->name = ctbName;
-          ctbName = NULL;
+          pCreateTbReq->name = taosStrdup(ctbName);

          tbData.pCreateTbReq = pCreateTbReq;
          tbData.flags = SUBMIT_REQ_AUTO_CREATE_TABLE;
@@ -338,7 +373,7 @@ void tqSinkToTablePipeline(SStreamTask* pTask, void* vnode, int64_t ver, void* d
            tqError("vgId:%d, failed to write into %s, since table type incorrect, type %d", TD_VID(pVnode), ctbName,
                    mr.me.type);
            metaReaderClear(&mr);
-            taosMemoryFree(ctbName);
+            taosMemoryFree(pTableSinkInfo);
            continue;
          }

@@ -347,13 +382,18 @@ void tqSinkToTablePipeline(SStreamTask* pTask, void* vnode, int64_t ver, void* d
                    ", actual suid %" PRId64 "",
                    TD_VID(pVnode), ctbName, suid, mr.me.ctbEntry.suid);
            metaReaderClear(&mr);
-            taosMemoryFree(ctbName);
+            taosMemoryFree(pTableSinkInfo);
            continue;
          }

          tbData.uid = mr.me.uid;
+          pTableSinkInfo->uid = mr.me.uid;
+          int32_t code = tqPutTableInfo(pTask->tbSink.pTblInfo, pDataBlock->info.id.groupId, pTableSinkInfo);
+          if (code != TSDB_CODE_SUCCESS) {
+            taosMemoryFreeClear(pTableSinkInfo);
+          }
          metaReaderClear(&mr);
-          taosMemoryFreeClear(ctbName);
+          }
        }

        // rows
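Taken together, the sink hunks replace a per-block metaGetTableEntryByName lookup with a cache keyed by group id: on a hit the cached uid is reused, otherwise the uid is resolved once through SMetaReader and remembered. Roughly, as a sketch assembled from the diff (error paths trimmed):

STableSinkInfo* pTableSinkInfo = NULL;
int32_t res = tqGetTableInfo(pTask->tbSink.pTblInfo, groupId, &pTableSinkInfo);
if (res == TSDB_CODE_SUCCESS) {
  tbData.uid = pTableSinkInfo->uid;            // cache hit: skip the meta lookup
} else {
  pTableSinkInfo = taosMemoryCalloc(1, sizeof(STableSinkInfo));
  // ... resolve mr.me.uid via metaGetTableEntryByName(&mr, ctbName) ...
  pTableSinkInfo->uid = mr.me.uid;
  if (tqPutTableInfo(pTask->tbSink.pTblInfo, groupId, pTableSinkInfo) != TSDB_CODE_SUCCESS) {
    taosMemoryFreeClear(pTableSinkInfo);       // cache full: drop the entry
  }
}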
@@ -16,6 +16,7 @@
 #include "tq.h"

 #define IS_OFFSET_RESET_TYPE(_t) ((_t) < 0)
+#define NO_POLL_CNT 5

 static int32_t tqSendMetaPollRsp(STQ* pTq, const SRpcMsg* pMsg, const SMqPollReq* pReq, const SMqMetaRsp* pRsp);

@@ -25,10 +26,15 @@ char* createStreamTaskIdStr(int64_t streamId, int32_t taskId) {
   return taosStrdup(buf);
 }

-int32_t tqAddInputBlockNLaunchTask(SStreamTask* pTask, SStreamQueueItem* pQueueItem, int64_t ver) {
-  int32_t code = tAppendDataToInputQueue(pTask, pQueueItem);
+int32_t tqAddBlockNLaunchTask(SStreamTask* pTask, SPackedData *pPackedData) {
+  SStreamDataSubmit* p = streamDataSubmitNew(pPackedData, STREAM_INPUT__DATA_SUBMIT);

+  int32_t code = tAppendDataToInputQueue(pTask, (SStreamQueueItem*) p);
+  streamDataSubmitDestroy(p);
+  taosFreeQitem(p);
+
   if (code < 0) {
-    tqError("s-task:%s failed to put into queue, too many, next start ver:%" PRId64, pTask->id.idStr, ver);
+    tqError("s-task:%s failed to put into queue, too many, next start ver:%" PRId64, pTask->id.idStr, pPackedData->ver);
     return -1;
   }

@@ -161,6 +167,10 @@ static int32_t extractResetOffsetVal(STqOffsetVal* pOffsetVal, STQ* pTq, STqHand
   return 0;
 }

+static bool isHandleExecuting(STqHandle* pHandle) {
+  return 1 == atomic_load_8(&pHandle->exec);
+}
+
 static int32_t extractDataAndRspForNormalSubscribe(STQ* pTq, STqHandle* pHandle, const SMqPollReq* pRequest,
                                                    SRpcMsg* pMsg, STqOffsetVal* pOffset) {
   uint64_t consumerId = pRequest->consumerId;
@@ -176,6 +186,12 @@ static int32_t extractDataAndRspForNormalSubscribe(STQ* pTq, STqHandle* pHandle,
     return code;
   }

+  while (isHandleExecuting(pHandle)) {
+    tqInfo("sub is executing, pHandle:%p", pHandle);
+    taosMsleep(5);
+  }
+  atomic_store_8(&pHandle->exec, 1);
+
   qSetTaskId(pHandle->execHandle.task, consumerId, pRequest->reqId);
   code = tqScanData(pTq, pHandle, &dataRsp, pOffset);
   if (code != 0) {
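The new exec flag serializes pollers on the same handle: a request spins until the previous one clears the flag, then marks the handle busy for the duration of the scan. It is a busy-wait rather than a mutex, so every exit path must store 0 back, which is exactly what the later hunks add. The shape of the pattern:

while (isHandleExecuting(pHandle)) {   // wait for the current poller
  taosMsleep(5);
}
atomic_store_8(&pHandle->exec, 1);     // acquire

// ... scan data, build the response ...

atomic_store_8(&pHandle->exec, 0);     // release on every exit path

Note that the test and the store are separate steps, so two concurrent waiters could in principle both observe 0; the scheme appears to assume polls for one handle are already serialized upstream.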
@@ -185,17 +201,23 @@ static int32_t extractDataAndRspForNormalSubscribe(STQ* pTq, STqHandle* pHandle,
   // till now, all data has been transferred to consumer, new data needs to push client once arrived.
   if (dataRsp.blockNum == 0 && dataRsp.reqOffset.type == TMQ_OFFSET__LOG &&
       dataRsp.reqOffset.version == dataRsp.rspOffset.version && pHandle->consumerId == pRequest->consumerId) {
+    if (pHandle->noDataPollCnt >= NO_POLL_CNT) { // send poll result to client if no data 5 times to avoid lost data
+      pHandle->noDataPollCnt = 0;
     // lock
     taosWLockLatch(&pTq->lock);
     code = tqRegisterPushHandle(pTq, pHandle, pMsg);
     taosWUnLockLatch(&pTq->lock);
     tDeleteSMqDataRsp(&dataRsp);
+    atomic_store_8(&pHandle->exec, 0);
     return code;
   }
+    else {
+      pHandle->noDataPollCnt++;
+    }
+  }

   code = tqSendDataRsp(pTq, pMsg, pRequest, (SMqDataRsp*)&dataRsp, TMQ_MSG_TYPE__POLL_RSP);

   // NOTE: this pHandle->consumerId may have been changed already.

 end:
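The noDataPollCnt counter delays the switch into push mode: only after NO_POLL_CNT consecutive empty polls does the handle get parked for push notification; earlier empty polls simply return an empty response. In outline (a sketch of the control flow above, conditions abbreviated):

if (dataRsp.blockNum == 0 /* && offset unchanged */) {
  if (pHandle->noDataPollCnt >= NO_POLL_CNT) {
    pHandle->noDataPollCnt = 0;
    code = tqRegisterPushHandle(pTq, pHandle, pMsg);  // park the request server-side
  } else {
    pHandle->noDataPollCnt++;                          // reply empty for now
  }
}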
@@ -207,6 +229,8 @@ static int32_t extractDataAndRspForNormalSubscribe(STQ* pTq, STqHandle* pHandle,
   //   taosWUnLockLatch(&pTq->lock);
     tDeleteSMqDataRsp(&dataRsp);
   }
+  atomic_store_8(&pHandle->exec, 0);
+
   return code;
 }
@@ -225,10 +249,16 @@ static int32_t extractDataAndRspForDbStbSubscribe(STQ* pTq, STqHandle* pHandle,
     return code;
   }

+  while (isHandleExecuting(pHandle)) {
+    tqInfo("sub is executing, pHandle:%p", pHandle);
+    taosMsleep(5);
+  }
+  atomic_store_8(&pHandle->exec, 1);
+
   if (offset->type != TMQ_OFFSET__LOG) {
     if (tqScanTaosx(pTq, pHandle, &taosxRsp, &metaRsp, offset) < 0) {
-      tDeleteSTaosxRsp(&taosxRsp);
-      return -1;
+      code = -1;
+      goto end;
     }

     if (metaRsp.metaRspLen > 0) {
@@ -236,16 +266,14 @@ static int32_t extractDataAndRspForDbStbSubscribe(STQ* pTq, STqHandle* pHandle,
       tqDebug("tmq poll: consumer:0x%" PRIx64 " subkey:%s vgId:%d, send meta offset type:%d,uid:%" PRId64 ",ts:%" PRId64,
               pRequest->consumerId, pHandle->subKey, vgId, metaRsp.rspOffset.type, metaRsp.rspOffset.uid, metaRsp.rspOffset.ts);
       taosMemoryFree(metaRsp.metaRsp);
-      tDeleteSTaosxRsp(&taosxRsp);
-      return code;
+      goto end;
     }

     tqDebug("taosx poll: consumer:0x%" PRIx64 " subkey:%s vgId:%d, send data blockNum:%d, offset type:%d,uid:%" PRId64
             ",ts:%" PRId64, pRequest->consumerId, pHandle->subKey, vgId, taosxRsp.blockNum, taosxRsp.rspOffset.type, taosxRsp.rspOffset.uid, taosxRsp.rspOffset.ts);
     if (taosxRsp.blockNum > 0) {
       code = tqSendDataRsp(pTq, pMsg, pRequest, (SMqDataRsp*)&taosxRsp, TMQ_MSG_TYPE__TAOSX_RSP);
-      tDeleteSTaosxRsp(&taosxRsp);
-      return code;
+      goto end;
     } else {
       *offset = taosxRsp.rspOffset;
     }
@ -257,9 +285,9 @@ static int32_t extractDataAndRspForDbStbSubscribe(STQ* pTq, STqHandle* pHandle,
|
||||||
int64_t fetchVer = offset->version + 1;
|
int64_t fetchVer = offset->version + 1;
|
||||||
pCkHead = taosMemoryMalloc(sizeof(SWalCkHead) + 2048);
|
pCkHead = taosMemoryMalloc(sizeof(SWalCkHead) + 2048);
|
||||||
if (pCkHead == NULL) {
|
if (pCkHead == NULL) {
|
||||||
tDeleteSTaosxRsp(&taosxRsp);
|
|
||||||
terrno = TSDB_CODE_OUT_OF_MEMORY;
|
terrno = TSDB_CODE_OUT_OF_MEMORY;
|
||||||
return -1;
|
code = -1;
|
||||||
|
goto end;
|
||||||
}
|
}
|
||||||
walSetReaderCapacity(pHandle->pWalReader, 2048);
|
walSetReaderCapacity(pHandle->pWalReader, 2048);
|
||||||
int totalRows = 0;
|
int totalRows = 0;
|
||||||
|
@@ -274,9 +302,7 @@ static int32_t extractDataAndRspForDbStbSubscribe(STQ* pTq, STqHandle* pHandle,
       if (tqFetchLog(pTq, pHandle, &fetchVer, &pCkHead, pRequest->reqId) < 0) {
         tqOffsetResetToLog(&taosxRsp.rspOffset, fetchVer);
         code = tqSendDataRsp(pTq, pMsg, pRequest, (SMqDataRsp*)&taosxRsp, TMQ_MSG_TYPE__TAOSX_RSP);
-        tDeleteSTaosxRsp(&taosxRsp);
-        taosMemoryFreeClear(pCkHead);
-        return code;
+        goto end;
       }

       SWalCont* pHead = &pCkHead->head;
@@ -288,9 +314,7 @@ static int32_t extractDataAndRspForDbStbSubscribe(STQ* pTq, STqHandle* pHandle,
       if (totalRows > 0) {
         tqOffsetResetToLog(&taosxRsp.rspOffset, fetchVer - 1);
         code = tqSendDataRsp(pTq, pMsg, pRequest, (SMqDataRsp*)&taosxRsp, TMQ_MSG_TYPE__TAOSX_RSP);
-        tDeleteSTaosxRsp(&taosxRsp);
-        taosMemoryFreeClear(pCkHead);
-        return code;
+        goto end;
       }

       tqDebug("fetch meta msg, ver:%" PRId64 ", type:%s", pHead->version, TMSG_INFO(pHead->msgType));
@@ -298,17 +322,8 @@ static int32_t extractDataAndRspForDbStbSubscribe(STQ* pTq, STqHandle* pHandle,
         metaRsp.resMsgType = pHead->msgType;
         metaRsp.metaRspLen = pHead->bodyLen;
         metaRsp.metaRsp = pHead->body;
-        if (tqSendMetaPollRsp(pTq, pMsg, pRequest, &metaRsp) < 0) {
-          code = -1;
-          taosMemoryFreeClear(pCkHead);
-          tDeleteSTaosxRsp(&taosxRsp);
-          return code;
-        }
-
-        code = 0;
-        taosMemoryFreeClear(pCkHead);
-        tDeleteSTaosxRsp(&taosxRsp);
-        return code;
+        code = tqSendMetaPollRsp(pTq, pMsg, pRequest, &metaRsp);
+        goto end;
       }

       // process data
@@ -318,29 +333,28 @@ static int32_t extractDataAndRspForDbStbSubscribe(STQ* pTq, STqHandle* pHandle,
             .ver = pHead->version,
         };

-        if (tqTaosxScanLog(pTq, pHandle, submit, &taosxRsp, &totalRows) < 0) {
-          tqError("tmq poll: tqTaosxScanLog error %" PRId64 ", in vgId:%d, subkey %s", pRequest->consumerId, vgId,
-                  pRequest->subKey);
-          taosMemoryFreeClear(pCkHead);
-          tDeleteSTaosxRsp(&taosxRsp);
-          return -1;
+        code = tqTaosxScanLog(pTq, pHandle, submit, &taosxRsp, &totalRows);
+        if (code < 0) {
+          tqError("tmq poll: tqTaosxScanLog error %" PRId64 ", in vgId:%d, subkey %s", pRequest->consumerId, vgId, pRequest->subKey);
+          goto end;
         }

         if (totalRows >= 4096 || taosxRsp.createTableNum > 0) {
           tqOffsetResetToLog(&taosxRsp.rspOffset, fetchVer);
           code = tqSendDataRsp(pTq, pMsg, pRequest, (SMqDataRsp*)&taosxRsp, TMQ_MSG_TYPE__TAOSX_RSP);
-          tDeleteSTaosxRsp(&taosxRsp);
-          taosMemoryFreeClear(pCkHead);
-          return code;
+          goto end;
         } else {
           fetchVer++;
         }
       }
     }

+end:
+  atomic_store_8(&pHandle->exec, 0);
+
   tDeleteSTaosxRsp(&taosxRsp);
   taosMemoryFreeClear(pCkHead);
-  return 0;
+  return code;
 }

 int32_t tqExtractDataForMq(STQ* pTq, STqHandle* pHandle, const SMqPollReq* pRequest, SRpcMsg* pMsg) {
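The string of hunks above converts extractDataAndRspForDbStbSubscribe from scattered early returns, each repeating tDeleteSTaosxRsp and taosMemoryFreeClear, into a single-exit function: every path sets code and jumps to end, where the exec flag is cleared and the resources are freed exactly once. The skeleton of the pattern:

int32_t code = 0;
// ... many branches; any failure or early completion does:
//       code = -1;   (or keeps the send result)
//       goto end;    // no per-branch cleanup needed
end:
  atomic_store_8(&pHandle->exec, 0);
  tDeleteSTaosxRsp(&taosxRsp);
  taosMemoryFreeClear(pCkHead);
  return code;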
@@ -1437,12 +1437,12 @@ _return:
     SMetaRes* pRes = taosArrayGet(ctx->pResList, pFetch->resIdx);
     pRes->code = code;
     pRes->pRes = NULL;
+    ctgTaskError("Get table %d.%s.%s meta failed with error %s", pName->acctId, pName->dbname, pName->tname,
+                 tstrerror(code));
     if (0 == atomic_sub_fetch_32(&ctx->fetchNum, 1)) {
       TSWAP(pTask->res, ctx->pResList);
       taskDone = true;
     }
-    ctgTaskError("Get table %d.%s.%s meta failed with error %s", pName->acctId, pName->dbname, pName->tname,
-                 tstrerror(code));
   }

   if (pTask->res && taskDone) {
@@ -1484,14 +1484,23 @@ static int32_t setSelectValueColumnInfo(SqlFunctionCtx* pCtx, int32_t numOfOutpu
     return TSDB_CODE_OUT_OF_MEMORY;
   }

+  SHashObj *pSelectFuncs = taosHashInit(8, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_ENTRY_LOCK);
   for (int32_t i = 0; i < numOfOutput; ++i) {
     const char* pName = pCtx[i].pExpr->pExpr->_function.functionName;
     if ((strcmp(pName, "_select_value") == 0) || (strcmp(pName, "_group_key") == 0)) {
       pValCtx[num++] = &pCtx[i];
     } else if (fmIsSelectFunc(pCtx[i].functionId)) {
+      void* data = taosHashGet(pSelectFuncs, pName, strlen(pName));
+      if (taosHashGetSize(pSelectFuncs) != 0 && data == NULL) {
+        p = NULL;
+        break;
+      } else {
+        taosHashPut(pSelectFuncs, pName, strlen(pName), &num, sizeof(num));
        p = &pCtx[i];
      }
    }
+  }
+  taosHashCleanup(pSelectFuncs);

   if (p != NULL) {
     p->subsidiaries.pCtx = pValCtx;
@@ -1583,7 +1583,7 @@ static int32_t setBlockIntoRes(SStreamScanInfo* pInfo, const SSDataBlock* pBlock
   // currently only the tbname pseudo column
   if (pInfo->numOfPseudoExpr > 0) {
     int32_t code = addTagPseudoColumnData(&pInfo->readHandle, pInfo->pPseudoExpr, pInfo->numOfPseudoExpr, pInfo->pRes,
-                                          pInfo->pRes->info.rows, GET_TASKID(pTaskInfo), NULL);
+                                          pInfo->pRes->info.rows, GET_TASKID(pTaskInfo), &pTableScanInfo->base.metaCache);
     // ignore the table not exists error, since this table may have been dropped during the scan procedure.
     if (code != TSDB_CODE_SUCCESS && code != TSDB_CODE_PAR_TABLE_NOT_EXIST) {
       blockDataFreeRes((SSDataBlock*)pBlock);
@@ -388,6 +388,9 @@ static bool isSetUselessCol(SSetOperator* pSetOp, int32_t index, SExprNode* pPro
 }

 static int32_t calcConstSetOpProjections(SCalcConstContext* pCxt, SSetOperator* pSetOp, bool subquery) {
+  if (subquery && pSetOp->opType == SET_OP_TYPE_UNION) {
+    return TSDB_CODE_SUCCESS;
+  }
   int32_t index = 0;
   SNode* pProj = NULL;
   WHERE_EACH(pProj, pSetOp->pProjectionList) {
@@ -5330,7 +5330,8 @@ static int32_t checkAlterSuperTableBySchema(STranslateContext* pCxt, SAlterTable
   }

   if (TSDB_ALTER_TABLE_UPDATE_COLUMN_BYTES == pStmt->alterType) {
-    if (calcTypeBytes(pStmt->dataType) > TSDB_MAX_FIELD_LEN) {
+    if ((TSDB_DATA_TYPE_VARCHAR == pStmt->dataType.type && calcTypeBytes(pStmt->dataType) > TSDB_MAX_BINARY_LEN) ||
+        (TSDB_DATA_TYPE_NCHAR == pStmt->dataType.type && calcTypeBytes(pStmt->dataType) > TSDB_MAX_NCHAR_LEN)) {
       return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_VAR_COLUMN_LEN);
     }

@@ -5355,6 +5356,11 @@ static int32_t checkAlterSuperTableBySchema(STranslateContext* pCxt, SAlterTable
     return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_TOO_MANY_COLUMNS);
   }

+  if ((TSDB_DATA_TYPE_VARCHAR == pStmt->dataType.type && calcTypeBytes(pStmt->dataType) > TSDB_MAX_BINARY_LEN) ||
+      (TSDB_DATA_TYPE_NCHAR == pStmt->dataType.type && calcTypeBytes(pStmt->dataType) > TSDB_MAX_NCHAR_LEN)) {
+    return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_VAR_COLUMN_LEN);
+  }
+
   if (pTableMeta->tableInfo.rowSize + calcTypeBytes(pStmt->dataType) > TSDB_MAX_BYTES_PER_ROW) {
     return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_ROW_LENGTH, TSDB_MAX_BYTES_PER_ROW);
   }
@@ -8359,6 +8365,11 @@ static int32_t buildUpdateColReq(STranslateContext* pCxt, SAlterTableStmt* pStmt
     return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_MODIFY_COL);
   }

+  if ((TSDB_DATA_TYPE_VARCHAR == pStmt->dataType.type && calcTypeBytes(pStmt->dataType) > TSDB_MAX_BINARY_LEN) ||
+      (TSDB_DATA_TYPE_NCHAR == pStmt->dataType.type && calcTypeBytes(pStmt->dataType) > TSDB_MAX_NCHAR_LEN)) {
+    return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_VAR_COLUMN_LEN);
+  }
+
   if (pTableMeta->tableInfo.rowSize + pReq->colModBytes - pSchema->bytes > TSDB_MAX_BYTES_PER_ROW) {
     return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_ROW_LENGTH, TSDB_MAX_BYTES_PER_ROW);
   }
@@ -19,6 +19,14 @@
 #include "scalar.h"
 #include "tglobal.h"

+static void debugPrintNode(SNode* pNode) {
+  char* pStr = NULL;
+  nodesNodeToString(pNode, false, &pStr, NULL);
+  printf("%s\n", pStr);
+  taosMemoryFree(pStr);
+  return;
+}
+
 static void dumpQueryPlan(SQueryPlan* pPlan) {
   if (!tsQueryPlannerTrace) {
     return;
@@ -18,6 +18,9 @@

 #define STREAM_TASK_INPUT_QUEUEU_CAPACITY 20480
 #define STREAM_TASK_INPUT_QUEUEU_CAPACITY_IN_SIZE (100)
+#define ONE_MB_F (1048576.0)
+
+#define QUEUE_MEM_SIZE_IN_MB(_q) (taosQueueMemorySize(_q)/ONE_MB_F)

 int32_t streamInit() {
   int8_t old;
@@ -281,45 +284,36 @@ int32_t streamProcessRetrieveReq(SStreamTask* pTask, SStreamRetrieveReq* pReq, S
 }

 bool tInputQueueIsFull(const SStreamTask* pTask) {
-  return taosQueueItemSize((pTask->inputQueue->queue)) >= STREAM_TASK_INPUT_QUEUEU_CAPACITY;
+  bool isFull = taosQueueItemSize((pTask->inputQueue->queue)) >= STREAM_TASK_INPUT_QUEUEU_CAPACITY;
+  double size = QUEUE_MEM_SIZE_IN_MB(pTask->inputQueue->queue);
+  return (isFull || size >= STREAM_TASK_INPUT_QUEUEU_CAPACITY_IN_SIZE);
 }

 int32_t tAppendDataToInputQueue(SStreamTask* pTask, SStreamQueueItem* pItem) {
   int8_t type = pItem->type;

   if (type == STREAM_INPUT__DATA_SUBMIT) {
-    SStreamDataSubmit2* pSubmitBlock = streamSubmitBlockClone((SStreamDataSubmit2*)pItem);
-    if (pSubmitBlock == NULL) {
-      qDebug("task %d %p submit enqueue failed since out of memory", pTask->id.taskId, pTask);
-      terrno = TSDB_CODE_OUT_OF_MEMORY;
-      atomic_store_8(&pTask->inputStatus, TASK_INPUT_STATUS__FAILED);
-      return -1;
-    }
-
     int32_t numOfBlocks = taosQueueItemSize(pTask->inputQueue->queue) + 1;
-    double size = taosQueueMemorySize(pTask->inputQueue->queue) / 1048576.0;
+    double size = QUEUE_MEM_SIZE_IN_MB(pTask->inputQueue->queue);

-    qDebug("s-task:%s submit enqueue %p %p msgLen:%d ver:%" PRId64 ", total in queue:%d, size:%.2fMiB", pTask->id.idStr,
-           pItem, pSubmitBlock->submit.msgStr, pSubmitBlock->submit.msgLen,
-           pSubmitBlock->submit.ver, numOfBlocks, size);
+    SStreamDataSubmit* px = (SStreamDataSubmit*)pItem;
+    qDebug("s-task:%s submit enqueue msgLen:%d ver:%" PRId64 ", total in queue:%d, size:%.2fMiB", pTask->id.idStr,
+           px->submit.msgLen, px->submit.ver, numOfBlocks, size);

-    if ((pTask->taskLevel == TASK_LEVEL__SOURCE) &&
-        (numOfBlocks > STREAM_TASK_INPUT_QUEUEU_CAPACITY || (size >= STREAM_TASK_INPUT_QUEUEU_CAPACITY_IN_SIZE))) {
+    if ((pTask->taskLevel == TASK_LEVEL__SOURCE) && tInputQueueIsFull(pTask)) {
       qError("s-task:%s input queue is full, capacity(size:%d num:%dMiB), current(blocks:%d, size:%.2fMiB) abort", pTask->id.idStr,
             STREAM_TASK_INPUT_QUEUEU_CAPACITY, STREAM_TASK_INPUT_QUEUEU_CAPACITY_IN_SIZE,
             numOfBlocks, size);
-      streamDataSubmitDestroy(pSubmitBlock);
       return -1;
     }

-    taosWriteQitem(pTask->inputQueue->queue, pSubmitBlock);
+    taosWriteQitem(pTask->inputQueue->queue, pItem);
   } else if (type == STREAM_INPUT__DATA_BLOCK || type == STREAM_INPUT__DATA_RETRIEVE ||
              type == STREAM_INPUT__REF_DATA_BLOCK) {
     int32_t numOfBlocks = taosQueueItemSize(pTask->inputQueue->queue) + 1;
-    double size = taosQueueMemorySize(pTask->inputQueue->queue) / 1048576.0;
+    double size = QUEUE_MEM_SIZE_IN_MB(pTask->inputQueue->queue);

-    if ((pTask->taskLevel == TASK_LEVEL__SOURCE) &&
-        (numOfBlocks > STREAM_TASK_INPUT_QUEUEU_CAPACITY || (size >= STREAM_TASK_INPUT_QUEUEU_CAPACITY_IN_SIZE))) {
+    if ((pTask->taskLevel == TASK_LEVEL__SOURCE) && (tInputQueueIsFull(pTask))) {
       qError("s-task:%s input queue is full, capacity:%d size:%d MiB, current(blocks:%d, size:%.2fMiB) abort",
              pTask->id.idStr, STREAM_TASK_INPUT_QUEUEU_CAPACITY, STREAM_TASK_INPUT_QUEUEU_CAPACITY_IN_SIZE, numOfBlocks,
              size);
@@ -338,10 +332,6 @@ int32_t tAppendDataToInputQueue(SStreamTask* pTask, SStreamQueueItem* pItem) {
     atomic_val_compare_exchange_8(&pTask->triggerStatus, TASK_TRIGGER_STATUS__INACTIVE, TASK_TRIGGER_STATUS__ACTIVE);
   }

-#if 0
-  atomic_store_8(&pTask->inputStatus, TASK_INPUT_STATUS__NORMAL);
-#endif
-
   return 0;
 }
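With ONE_MB_F and QUEUE_MEM_SIZE_IN_MB in place, tInputQueueIsFull becomes the single backpressure predicate: the queue is full when either the item count exceeds 20480 blocks or the buffered bytes exceed 100 MiB, and both call sites in tAppendDataToInputQueue defer to it instead of duplicating the condition. A back-of-the-envelope check of how the two limits interact (standalone, values copied from the defines above):

#include <stdio.h>

int main(void) {
  const double capBlocks = 20480; // STREAM_TASK_INPUT_QUEUEU_CAPACITY
  const double capMiB    = 100;   // STREAM_TASK_INPUT_QUEUEU_CAPACITY_IN_SIZE
  // Average block size at which both limits trip simultaneously;
  // larger blocks hit the 100 MiB cap first, smaller ones the count cap.
  printf("crossover: %.1f KiB per block\n", capMiB * 1024.0 / capBlocks);
  return 0;
}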
@@ -67,8 +67,8 @@ int32_t streamRetrieveReqToData(const SStreamRetrieveReq* pReq, SStreamDataBlock
   return 0;
 }

-SStreamDataSubmit2* streamDataSubmitNew(SPackedData submit, int32_t type) {
-  SStreamDataSubmit2* pDataSubmit = (SStreamDataSubmit2*)taosAllocateQitem(sizeof(SStreamDataSubmit2), DEF_QITEM, submit.msgLen);
+SStreamDataSubmit* streamDataSubmitNew(SPackedData* pData, int32_t type) {
+  SStreamDataSubmit* pDataSubmit = (SStreamDataSubmit*)taosAllocateQitem(sizeof(SStreamDataSubmit), DEF_QITEM, pData->msgLen);
   if (pDataSubmit == NULL) {
     return NULL;
   }
@@ -79,14 +79,14 @@ SStreamDataSubmit2* streamDataSubmitNew(SPackedData submit, int32_t type) {
     return NULL;
   }

-  pDataSubmit->submit = submit;
+  pDataSubmit->submit = *pData;
   *pDataSubmit->dataRef = 1; // initialize the reference count to be 1
   pDataSubmit->type = type;

   return pDataSubmit;
 }

-void streamDataSubmitDestroy(SStreamDataSubmit2* pDataSubmit) {
+void streamDataSubmitDestroy(SStreamDataSubmit* pDataSubmit) {
   int32_t ref = atomic_sub_fetch_32(pDataSubmit->dataRef, 1);
   ASSERT(ref >= 0 && pDataSubmit->type == STREAM_INPUT__DATA_SUBMIT);
|
@ -96,8 +96,8 @@ void streamDataSubmitDestroy(SStreamDataSubmit2* pDataSubmit) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
SStreamMergedSubmit2* streamMergedSubmitNew() {
|
SStreamMergedSubmit* streamMergedSubmitNew() {
|
||||||
SStreamMergedSubmit2* pMerged = (SStreamMergedSubmit2*)taosAllocateQitem(sizeof(SStreamMergedSubmit2), DEF_QITEM, 0);
|
SStreamMergedSubmit* pMerged = (SStreamMergedSubmit*)taosAllocateQitem(sizeof(SStreamMergedSubmit), DEF_QITEM, 0);
|
||||||
if (pMerged == NULL) {
|
if (pMerged == NULL) {
|
||||||
return NULL;
|
return NULL;
|
||||||
}
|
}
|
||||||
|
@ -116,30 +116,30 @@ SStreamMergedSubmit2* streamMergedSubmitNew() {
|
||||||
return pMerged;
|
return pMerged;
|
||||||
}
|
}
|
||||||
|
|
||||||
int32_t streamMergeSubmit(SStreamMergedSubmit2* pMerged, SStreamDataSubmit2* pSubmit) {
|
int32_t streamMergeSubmit(SStreamMergedSubmit* pMerged, SStreamDataSubmit* pSubmit) {
|
||||||
taosArrayPush(pMerged->dataRefs, &pSubmit->dataRef);
|
taosArrayPush(pMerged->dataRefs, &pSubmit->dataRef);
|
||||||
taosArrayPush(pMerged->submits, &pSubmit->submit);
|
taosArrayPush(pMerged->submits, &pSubmit->submit);
|
||||||
pMerged->ver = pSubmit->ver;
|
pMerged->ver = pSubmit->ver;
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static FORCE_INLINE void streamDataSubmitRefInc(SStreamDataSubmit2* pDataSubmit) {
|
static FORCE_INLINE void streamDataSubmitRefInc(SStreamDataSubmit* pDataSubmit) {
|
||||||
atomic_add_fetch_32(pDataSubmit->dataRef, 1);
|
atomic_add_fetch_32(pDataSubmit->dataRef, 1);
|
||||||
}
|
}
|
||||||
|
|
||||||
SStreamDataSubmit2* streamSubmitBlockClone(SStreamDataSubmit2* pSubmit) {
|
SStreamDataSubmit* streamSubmitBlockClone(SStreamDataSubmit* pSubmit) {
|
||||||
int32_t len = 0;
|
int32_t len = 0;
|
||||||
if (pSubmit->type == STREAM_INPUT__DATA_SUBMIT) {
|
if (pSubmit->type == STREAM_INPUT__DATA_SUBMIT) {
|
||||||
len = pSubmit->submit.msgLen;
|
len = pSubmit->submit.msgLen;
|
||||||
}
|
}
|
||||||
|
|
||||||
SStreamDataSubmit2* pSubmitClone = taosAllocateQitem(sizeof(SStreamDataSubmit2), DEF_QITEM, len);
|
SStreamDataSubmit* pSubmitClone = taosAllocateQitem(sizeof(SStreamDataSubmit), DEF_QITEM, len);
|
||||||
if (pSubmitClone == NULL) {
|
if (pSubmitClone == NULL) {
|
||||||
return NULL;
|
return NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
streamDataSubmitRefInc(pSubmit);
|
streamDataSubmitRefInc(pSubmit);
|
||||||
memcpy(pSubmitClone, pSubmit, sizeof(SStreamDataSubmit2));
|
memcpy(pSubmitClone, pSubmit, sizeof(SStreamDataSubmit));
|
||||||
return pSubmitClone;
|
return pSubmitClone;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
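The rename from SStreamDataSubmit2 keeps the reference-counting scheme intact: dataRef is heap-allocated so every clone shares it, streamSubmitBlockClone bumps the count, and streamDataSubmitDestroy decrements it and releases the payload only at zero. The lifecycle, as a sketch built from the functions above (this is also the shape tqAddBlockNLaunchTask follows: new, append, destroy, free):

SStreamDataSubmit* p = streamDataSubmitNew(&packData, STREAM_INPUT__DATA_SUBMIT);  // *dataRef == 1
SStreamDataSubmit* q = streamSubmitBlockClone(p);                                  // *dataRef == 2, payload shared

streamDataSubmitDestroy(q);  // *dataRef == 1, payload kept alive
taosFreeQitem(q);            // frees only the queue-item wrapper
streamDataSubmitDestroy(p);  // *dataRef == 0, payload released
taosFreeQitem(p);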
@@ -152,17 +152,17 @@ SStreamQueueItem* streamMergeQueueItem(SStreamQueueItem* dst, SStreamQueueItem*
     taosFreeQitem(pElem);
     return dst;
   } else if (dst->type == STREAM_INPUT__MERGED_SUBMIT && pElem->type == STREAM_INPUT__DATA_SUBMIT) {
-    SStreamMergedSubmit2* pMerged = (SStreamMergedSubmit2*)dst;
-    SStreamDataSubmit2* pBlockSrc = (SStreamDataSubmit2*)pElem;
+    SStreamMergedSubmit* pMerged = (SStreamMergedSubmit*)dst;
+    SStreamDataSubmit* pBlockSrc = (SStreamDataSubmit*)pElem;
     streamMergeSubmit(pMerged, pBlockSrc);
     taosFreeQitem(pElem);
     return dst;
   } else if (dst->type == STREAM_INPUT__DATA_SUBMIT && pElem->type == STREAM_INPUT__DATA_SUBMIT) {
-    SStreamMergedSubmit2* pMerged = streamMergedSubmitNew();
+    SStreamMergedSubmit* pMerged = streamMergedSubmitNew();
     // todo handle error

-    streamMergeSubmit(pMerged, (SStreamDataSubmit2*)dst);
-    streamMergeSubmit(pMerged, (SStreamDataSubmit2*)pElem);
+    streamMergeSubmit(pMerged, (SStreamDataSubmit*)dst);
+    streamMergeSubmit(pMerged, (SStreamDataSubmit*)pElem);
     taosFreeQitem(dst);
     taosFreeQitem(pElem);
     return (SStreamQueueItem*)pMerged;
@@ -180,10 +180,10 @@ void streamFreeQitem(SStreamQueueItem* data) {
     taosArrayDestroyEx(((SStreamDataBlock*)data)->blocks, (FDelete)blockDataFreeRes);
     taosFreeQitem(data);
   } else if (type == STREAM_INPUT__DATA_SUBMIT) {
-    streamDataSubmitDestroy((SStreamDataSubmit2*)data);
+    streamDataSubmitDestroy((SStreamDataSubmit*)data);
     taosFreeQitem(data);
   } else if (type == STREAM_INPUT__MERGED_SUBMIT) {
-    SStreamMergedSubmit2* pMerge = (SStreamMergedSubmit2*)data;
+    SStreamMergedSubmit* pMerge = (SStreamMergedSubmit*)data;
     int32_t sz = taosArrayGetSize(pMerge->submits);
     for (int32_t i = 0; i < sz; i++) {
       int32_t* pRef = taosArrayGetP(pMerge->dataRefs, i);
@@ -51,7 +51,7 @@ static int32_t streamTaskExecImpl(SStreamTask* pTask, const void* data, SArray*
     qSetMultiStreamInput(pExecutor, pTrigger->pBlock, 1, STREAM_INPUT__DATA_BLOCK);
   } else if (pItem->type == STREAM_INPUT__DATA_SUBMIT) {
     ASSERT(pTask->taskLevel == TASK_LEVEL__SOURCE);
-    const SStreamDataSubmit2* pSubmit = (const SStreamDataSubmit2*)data;
+    const SStreamDataSubmit* pSubmit = (const SStreamDataSubmit*)data;
     qSetMultiStreamInput(pExecutor, &pSubmit->submit, 1, STREAM_INPUT__DATA_SUBMIT);
     qDebug("s-task:%s set submit blocks as source block completed, %p %p len:%d ver:%" PRId64, pTask->id.idStr, pSubmit, pSubmit->submit.msgStr,
            pSubmit->submit.msgLen, pSubmit->submit.ver);
@@ -63,7 +63,7 @@ static int32_t streamTaskExecImpl(SStreamTask* pTask, const void* data, SArray*
     qDebug("s-task:%s set sdata blocks as input num:%d, ver:%"PRId64, pTask->id.idStr, numOfBlocks, pBlock->sourceVer);
     qSetMultiStreamInput(pExecutor, pBlockList->pData, numOfBlocks, STREAM_INPUT__DATA_BLOCK);
   } else if (pItem->type == STREAM_INPUT__MERGED_SUBMIT) {
-    const SStreamMergedSubmit2* pMerged = (const SStreamMergedSubmit2*)data;
+    const SStreamMergedSubmit* pMerged = (const SStreamMergedSubmit*)data;

     SArray* pBlockList = pMerged->submits;
     int32_t numOfBlocks = taosArrayGetSize(pBlockList);
@@ -367,11 +367,11 @@ int32_t streamExecForAll(SStreamTask* pTask) {
       qRes->blocks = pRes;

       if (((SStreamQueueItem*)pInput)->type == STREAM_INPUT__DATA_SUBMIT) {
-        SStreamDataSubmit2* pSubmit = (SStreamDataSubmit2*)pInput;
+        SStreamDataSubmit* pSubmit = (SStreamDataSubmit*)pInput;
         qRes->childId = pTask->selfChildId;
         qRes->sourceVer = pSubmit->ver;
       } else if (((SStreamQueueItem*)pInput)->type == STREAM_INPUT__MERGED_SUBMIT) {
-        SStreamMergedSubmit2* pMerged = (SStreamMergedSubmit2*)pInput;
+        SStreamMergedSubmit* pMerged = (SStreamMergedSubmit*)pInput;
         qRes->childId = pTask->selfChildId;
         qRes->sourceVer = pMerged->ver;
       }
@ -105,3 +105,61 @@ SStreamQueueRes streamQueueGetRes(SStreamQueue1* pQueue) {
|
||||||
return (SStreamQueueRes){0};
|
return (SStreamQueueRes){0};
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
|
#define MAX_STREAM_EXEC_BATCH_NUM 128
|
||||||
|
#define MIN_STREAM_EXEC_BATCH_NUM 16
|
||||||
|
|
||||||
|
// todo refactor:
|
||||||
|
// read data from input queue
|
||||||
|
typedef struct SQueueReader {
|
||||||
|
SStreamQueue* pQueue;
|
||||||
|
int32_t taskLevel;
|
||||||
|
int32_t maxBlocks; // maximum block in one batch
|
||||||
|
int32_t waitDuration; // maximum wait time to form several blocks into a batch to process, unit: ms
|
||||||
|
} SQueueReader;
|
||||||
|
|
||||||
|
SStreamQueueItem* doReadMultiBlocksFromQueue(SQueueReader* pReader, const char* idstr) {
|
||||||
|
int32_t numOfBlocks = 0;
|
||||||
|
int32_t tryCount = 0;
|
||||||
|
SStreamQueueItem* pRet = NULL;
|
||||||
|
|
||||||
|
while (1) {
|
||||||
|
SStreamQueueItem* qItem = streamQueueNextItem(pReader->pQueue);
|
||||||
|
if (qItem == NULL) {
|
||||||
|
if (pReader->taskLevel == TASK_LEVEL__SOURCE && numOfBlocks < MIN_STREAM_EXEC_BATCH_NUM && tryCount < pReader->waitDuration) {
|
||||||
|
tryCount++;
|
||||||
|
taosMsleep(1);
|
||||||
|
qDebug("===stream===try again batchSize:%d", numOfBlocks);
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
qDebug("===stream===break batchSize:%d", numOfBlocks);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (pRet == NULL) {
|
||||||
|
pRet = qItem;
|
||||||
|
streamQueueProcessSuccess(pReader->pQueue);
|
||||||
|
if (pReader->taskLevel == TASK_LEVEL__SINK) {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// todo: we need to sort the data blocks instead of just appending them to the array list.
|
||||||
|
void* newRet = NULL;
|
||||||
|
if ((newRet = streamMergeQueueItem(pRet, qItem)) == NULL) {
|
||||||
|
streamQueueProcessFail(pReader->pQueue);
|
||||||
|
break;
|
||||||
|
} else {
|
||||||
|
numOfBlocks++;
|
||||||
|
pRet = newRet;
|
||||||
|
streamQueueProcessSuccess(pReader->pQueue);
|
||||||
|
if (numOfBlocks > pReader->maxBlocks) {
|
||||||
|
qDebug("maximum blocks limit:%d reached, processing, %s", pReader->maxBlocks, idstr);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return pRet;
|
||||||
|
}
|
||||||
|
|
|
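doReadMultiBlocksFromQueue accumulates queue items until MIN_STREAM_EXEC_BATCH_NUM is reached, the wait budget runs out, or maxBlocks is exceeded; sink tasks take a single item and return early. A hedged usage sketch follows; the task fields, the 40 ms budget, and the processBatch/freeBatch helpers are assumptions for illustration, not part of this patch.

SQueueReader reader = {
    .pQueue       = pTask->inputQueue,           // assumed task field
    .taskLevel    = pTask->taskLevel,
    .maxBlocks    = MAX_STREAM_EXEC_BATCH_NUM,
    .waitDuration = 40,                          // illustrative value, in ms
};

SStreamQueueItem* pBatch = NULL;
while ((pBatch = doReadMultiBlocksFromQueue(&reader, pTask->id.idStr)) != NULL) {
  processBatch(pTask, pBatch);  // placeholder for the executor call
  freeBatch(pBatch);            // placeholder for the matching release
}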
@ -91,7 +91,6 @@ int stateKeyCmpr(const void* pKey1, int kLen1, const void* pKey2, int kLen2) {
|
||||||
}
|
}
|
||||||
|
|
||||||
SStreamState* streamStateOpen(char* path, SStreamTask* pTask, bool specPath, int32_t szPage, int32_t pages) {
|
SStreamState* streamStateOpen(char* path, SStreamTask* pTask, bool specPath, int32_t szPage, int32_t pages) {
|
||||||
qWarn("open stream state, %s", path);
|
|
||||||
SStreamState* pState = taosMemoryCalloc(1, sizeof(SStreamState));
|
SStreamState* pState = taosMemoryCalloc(1, sizeof(SStreamState));
|
||||||
if (pState == NULL) {
|
if (pState == NULL) {
|
||||||
terrno = TSDB_CODE_OUT_OF_MEMORY;
|
terrno = TSDB_CODE_OUT_OF_MEMORY;
|
||||||
|
@ -112,13 +111,11 @@ SStreamState* streamStateOpen(char* path, SStreamTask* pTask, bool specPath, int
|
||||||
tstrncpy(statePath, path, 1024);
|
tstrncpy(statePath, path, 1024);
|
||||||
}
|
}
|
||||||
#ifdef USE_ROCKSDB
|
#ifdef USE_ROCKSDB
|
||||||
qWarn("open stream state1");
|
|
||||||
int code = streamInitBackend(pState, statePath);
|
int code = streamInitBackend(pState, statePath);
|
||||||
if (code == -1) {
|
if (code == -1) {
|
||||||
taosMemoryFree(pState);
|
taosMemoryFree(pState);
|
||||||
pState = NULL;
|
pState = NULL;
|
||||||
}
|
}
|
||||||
qWarn("open stream state2, %s", statePath);
|
|
||||||
pState->pTdbState->pOwner = pTask;
|
pState->pTdbState->pOwner = pTask;
|
||||||
pState->pFileState = NULL;
|
pState->pFileState = NULL;
|
||||||
_hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT);
|
_hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT);
|
||||||
|
|
|
@ -195,6 +195,7 @@ void tFreeStreamTask(SStreamTask* pTask) {
|
||||||
if (pTask->outputType == TASK_OUTPUT__TABLE) {
|
if (pTask->outputType == TASK_OUTPUT__TABLE) {
|
||||||
tDeleteSchemaWrapper(pTask->tbSink.pSchemaWrapper);
|
tDeleteSchemaWrapper(pTask->tbSink.pSchemaWrapper);
|
||||||
taosMemoryFree(pTask->tbSink.pTSchema);
|
taosMemoryFree(pTask->tbSink.pTSchema);
|
||||||
|
tSimpleHashCleanup(pTask->tbSink.pTblInfo);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (pTask->outputType == TASK_OUTPUT__SHUFFLE_DISPATCH) {
|
if (pTask->outputType == TASK_OUTPUT__SHUFFLE_DISPATCH) {
|
||||||
|
|
|
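The tSimpleHashCleanup added above releases the sink's pTblInfo cache when the task is freed. Its creation site is outside this diff; a plausible init counterpart is sketched below, assuming the cache is keyed by table uid (BIGINT hash) and sized with an illustrative capacity.

// Assumed, not shown in this patch: create the table-info cache before
// the sink task starts writing, mirroring the cleanup in tFreeStreamTask.
pTask->tbSink.pTblInfo = tSimpleHashInit(10240, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT));
if (pTask->tbSink.pTblInfo == NULL) {
  terrno = TSDB_CODE_OUT_OF_MEMORY;
  return -1;
}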
@ -295,6 +295,36 @@ void walAlignVersions(SWal* pWal) {
|
||||||
wInfo("vgId:%d, reset commitVer to %" PRId64, pWal->cfg.vgId, pWal->vers.commitVer);
|
wInfo("vgId:%d, reset commitVer to %" PRId64, pWal->cfg.vgId, pWal->vers.commitVer);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
int walRepairLogFileTs(SWal* pWal, bool* updateMeta) {
|
||||||
|
int32_t sz = taosArrayGetSize(pWal->fileInfoSet);
|
||||||
|
int32_t fileIdx = -1;
|
||||||
|
int32_t lastCloseTs = 0;
|
||||||
|
char fnameStr[WAL_FILE_LEN] = {0};
|
||||||
|
|
||||||
|
while (++fileIdx < sz - 1) {
|
||||||
|
SWalFileInfo* pFileInfo = taosArrayGet(pWal->fileInfoSet, fileIdx);
|
||||||
|
if (pFileInfo->closeTs != -1) {
|
||||||
|
lastCloseTs = pFileInfo->closeTs;
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
walBuildLogName(pWal, pFileInfo->firstVer, fnameStr);
|
||||||
|
int32_t mtime = 0;
|
||||||
|
if (taosStatFile(fnameStr, NULL, &mtime) < 0) {
|
||||||
|
terrno = TAOS_SYSTEM_ERROR(errno);
|
||||||
|
wError("vgId:%d, failed to stat file due to %s, file:%s", pWal->cfg.vgId, strerror(errno), fnameStr);
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (updateMeta != NULL) *updateMeta = true;
|
||||||
|
if (pFileInfo->createTs == -1) pFileInfo->createTs = lastCloseTs;
|
||||||
|
pFileInfo->closeTs = mtime;
|
||||||
|
lastCloseTs = pFileInfo->closeTs;
|
||||||
|
}
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
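walRepairLogFileTs back-fills missing timestamps for every file except the last one, which is still being written: a closeTs of -1 is replaced by the log file's on-disk mtime, and a missing createTs inherits the previous file's close time. A worked example, with invented values:

// Files F0..F1, where F1 lost its metadata:
//   F0: createTs = 100, closeTs = 110
//   F1: createTs = -1,  closeTs = -1, on-disk mtime = 125
// After repair: F1.createTs = 110 (F0.closeTs), F1.closeTs = 125 (mtime),
// and *updateMeta is set so the caller persists the fix via walSaveMeta().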
|
|
||||||
bool walLogEntriesComplete(const SWal* pWal) {
|
bool walLogEntriesComplete(const SWal* pWal) {
|
||||||
int32_t sz = taosArrayGetSize(pWal->fileInfoSet);
|
int32_t sz = taosArrayGetSize(pWal->fileInfoSet);
|
||||||
bool complete = true;
|
bool complete = true;
|
||||||
|
@ -433,15 +463,8 @@ int walCheckAndRepairMeta(SWal* pWal) {
|
||||||
wError("failed to scan wal last ver since %s", terrstr());
|
wError("failed to scan wal last ver since %s", terrstr());
|
||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
// remove the empty wal log, and its idx
|
// empty log file
|
||||||
wInfo("vgId:%d, wal remove empty file %s", pWal->cfg.vgId, fnameStr);
|
lastVer = pFileInfo->firstVer - 1;
|
||||||
taosRemoveFile(fnameStr);
|
|
||||||
walBuildIdxName(pWal, pFileInfo->firstVer, fnameStr);
|
|
||||||
wInfo("vgId:%d, wal remove empty file %s", pWal->cfg.vgId, fnameStr);
|
|
||||||
taosRemoveFile(fnameStr);
|
|
||||||
// remove its meta entry
|
|
||||||
taosArrayRemove(pWal->fileInfoSet, fileIdx);
|
|
||||||
continue;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// update lastVer
|
// update lastVer
|
||||||
|
@ -460,6 +483,11 @@ int walCheckAndRepairMeta(SWal* pWal) {
|
||||||
}
|
}
|
||||||
(void)walAlignVersions(pWal);
|
(void)walAlignVersions(pWal);
|
||||||
|
|
||||||
|
// repair ts of files
|
||||||
|
if (walRepairLogFileTs(pWal, &updateMeta) < 0) {
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
|
||||||
// update meta file
|
// update meta file
|
||||||
if (updateMeta) {
|
if (updateMeta) {
|
||||||
(void)walSaveMeta(pWal);
|
(void)walSaveMeta(pWal);
|
||||||
|
|
|
@ -74,18 +74,17 @@ int32_t walNextValidMsg(SWalReader *pReader) {
|
||||||
int64_t lastVer = walGetLastVer(pReader->pWal);
|
int64_t lastVer = walGetLastVer(pReader->pWal);
|
||||||
int64_t committedVer = walGetCommittedVer(pReader->pWal);
|
int64_t committedVer = walGetCommittedVer(pReader->pWal);
|
||||||
int64_t appliedVer = walGetAppliedVer(pReader->pWal);
|
int64_t appliedVer = walGetAppliedVer(pReader->pWal);
|
||||||
while(appliedVer < committedVer){ // wait apply ver equal to commit ver, otherwise may lost data when consume data [TD-24010]
|
if(appliedVer < committedVer){ // wait apply ver equal to commit ver, otherwise may lost data when consume data [TD-24010]
|
||||||
wDebug("vgId:%d, wal apply ver:%"PRId64" smaller than commit ver:%"PRId64", so sleep 1ms", pReader->pWal->cfg.vgId, appliedVer, committedVer);
|
wDebug("vgId:%d, wal apply ver:%"PRId64" smaller than commit ver:%"PRId64, pReader->pWal->cfg.vgId, appliedVer, committedVer);
|
||||||
taosMsleep(1);
|
// taosMsleep(10);
|
||||||
appliedVer = walGetAppliedVer(pReader->pWal);
|
|
||||||
}
|
}
|
||||||
// int64_t endVer = pReader->cond.scanUncommited ? lastVer : committedVer;
|
// int64_t endVer = pReader->cond.scanUncommited ? lastVer : committedVer;
|
||||||
// endVer = TMIN(appliedVer, endVer);
|
int64_t endVer = TMIN(appliedVer, committedVer);
|
||||||
|
|
||||||
wDebug("vgId:%d, wal start to fetch, index:%" PRId64 ", last index:%" PRId64 " commit index:%" PRId64
|
wDebug("vgId:%d, wal start to fetch, index:%" PRId64 ", last index:%" PRId64 " commit index:%" PRId64
|
||||||
", applied index:%" PRId64,
|
", applied index:%" PRId64", end index:%" PRId64,
|
||||||
pReader->pWal->cfg.vgId, fetchVer, lastVer, committedVer, appliedVer);
|
pReader->pWal->cfg.vgId, fetchVer, lastVer, committedVer, appliedVer, endVer);
|
||||||
while (fetchVer <= committedVer) {
|
while (fetchVer <= endVer) {
|
||||||
if (walFetchHeadNew(pReader, fetchVer) < 0) {
|
if (walFetchHeadNew(pReader, fetchVer) < 0) {
|
||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
|
|
|
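The reader change above reduces the 1 ms sleep loop to a debug log (the sleep is left commented out) and instead gets correctness from clamping the scan range: fetching now stops at endVer rather than committedVer, so records that are committed but not yet applied are no longer handed to consumers (the data-loss scenario in TD-24010). The bound in effect, restated as a sketch:

// Effective upper bound for WAL consumption after this change:
int64_t endVer = TMIN(appliedVer, committedVer);
// fetchVer only advances while fetchVer <= endVer, so nothing past the
// applied point reaches the consumer.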
@ -284,15 +284,15 @@ int32_t walEndSnapshot(SWal *pWal) {
|
||||||
if (ver == -1) {
|
if (ver == -1) {
|
||||||
code = -1;
|
code = -1;
|
||||||
goto END;
|
goto END;
|
||||||
};
|
}
|
||||||
|
|
||||||
pWal->vers.snapshotVer = ver;
|
pWal->vers.snapshotVer = ver;
|
||||||
int ts = taosGetTimestampSec();
|
int ts = taosGetTimestampSec();
|
||||||
|
|
||||||
ver = TMAX(ver - pWal->vers.logRetention, pWal->vers.firstVer - 1);
|
ver = TMAX(ver - pWal->vers.logRetention, pWal->vers.firstVer - 1);
|
||||||
|
|
||||||
|
// compatible mode for refVer
|
||||||
bool hasTopic = false;
|
bool hasTopic = false;
|
||||||
int64_t refVer = ver;
|
int64_t refVer = INT64_MAX;
|
||||||
void *pIter = NULL;
|
void *pIter = NULL;
|
||||||
while (1) {
|
while (1) {
|
||||||
pIter = taosHashIterate(pWal->pRefHash, pIter);
|
pIter = taosHashIterate(pWal->pRefHash, pIter);
|
||||||
|
@ -300,54 +300,40 @@ int32_t walEndSnapshot(SWal *pWal) {
|
||||||
SWalRef *pRef = *(SWalRef **)pIter;
|
SWalRef *pRef = *(SWalRef **)pIter;
|
||||||
if (pRef->refVer == -1) continue;
|
if (pRef->refVer == -1) continue;
|
||||||
refVer = TMIN(refVer, pRef->refVer - 1);
|
refVer = TMIN(refVer, pRef->refVer - 1);
|
||||||
wDebug("vgId:%d, wal found ref %" PRId64 ", refId %" PRId64, pWal->cfg.vgId, pRef->refVer, pRef->refId);
|
|
||||||
hasTopic = true;
|
hasTopic = true;
|
||||||
}
|
}
|
||||||
// compatible mode
|
|
||||||
if (pWal->cfg.retentionPeriod == 0 && hasTopic) {
|
if (pWal->cfg.retentionPeriod == 0 && hasTopic) {
|
||||||
|
wInfo("vgId:%d, wal found refVer:%" PRId64 " in compatible mode, ver:%" PRId64, pWal->cfg.vgId, refVer, ver);
|
||||||
ver = TMIN(ver, refVer);
|
ver = TMIN(ver, refVer);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// find files safe to delete
|
||||||
int deleteCnt = 0;
|
int deleteCnt = 0;
|
||||||
int64_t newTotSize = pWal->totSize;
|
int64_t newTotSize = pWal->totSize;
|
||||||
SWalFileInfo tmp;
|
SWalFileInfo tmp = {0};
|
||||||
tmp.firstVer = ver;
|
tmp.firstVer = ver;
|
||||||
// find files safe to delete
|
|
||||||
SWalFileInfo *pInfo = taosArraySearch(pWal->fileInfoSet, &tmp, compareWalFileInfo, TD_LE);
|
SWalFileInfo *pInfo = taosArraySearch(pWal->fileInfoSet, &tmp, compareWalFileInfo, TD_LE);
|
||||||
|
|
||||||
if (pInfo) {
|
if (pInfo) {
|
||||||
SWalFileInfo *pLastFileInfo = taosArrayGetLast(pWal->fileInfoSet);
|
wDebug("vgId:%d, wal search found file info. ver:%" PRId64 ", first:%" PRId64 " last:%" PRId64, pWal->cfg.vgId, ver,
|
||||||
wDebug("vgId:%d, wal search found file info: first:%" PRId64 " last:%" PRId64, pWal->cfg.vgId, pInfo->firstVer,
|
pInfo->firstVer, pInfo->lastVer);
|
||||||
pInfo->lastVer);
|
ASSERT(ver <= pInfo->lastVer);
|
||||||
if (ver >= pInfo->lastVer) {
|
if (ver == pInfo->lastVer) {
|
||||||
pInfo++;
|
pInfo++;
|
||||||
wDebug("vgId:%d, wal remove advance one file: first:%" PRId64 " last:%" PRId64, pWal->cfg.vgId, pInfo->firstVer,
|
|
||||||
pInfo->lastVer);
|
|
||||||
}
|
|
||||||
if (pInfo <= pLastFileInfo) {
|
|
||||||
wDebug("vgId:%d, wal end remove for first:%" PRId64 " last:%" PRId64, pWal->cfg.vgId, pInfo->firstVer,
|
|
||||||
pInfo->lastVer);
|
|
||||||
} else {
|
|
||||||
wDebug("vgId:%d, wal no remove", pWal->cfg.vgId);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// iterate files, until the searched result
|
// iterate files, until the searched result
|
||||||
for (SWalFileInfo *iter = pWal->fileInfoSet->pData; iter < pInfo; iter++) {
|
|
||||||
wDebug("vgId:%d, wal check remove file %" PRId64 "(file size %" PRId64 " close ts %" PRId64
|
|
||||||
"), new tot size %" PRId64,
|
|
||||||
pWal->cfg.vgId, iter->firstVer, iter->fileSize, iter->closeTs, newTotSize);
|
|
||||||
if ((pWal->cfg.retentionSize != -1 && pWal->cfg.retentionSize != 0 && newTotSize > pWal->cfg.retentionSize) ||
|
|
||||||
((pWal->cfg.retentionPeriod == 0) || (pWal->cfg.retentionPeriod != -1 && iter->closeTs != -1 &&
|
|
||||||
iter->closeTs + pWal->cfg.retentionPeriod < ts))) {
|
|
||||||
// delete according to file size or close time
|
// delete according to file size or close time
|
||||||
wDebug("vgId:%d, check pass", pWal->cfg.vgId);
|
for (SWalFileInfo *iter = pWal->fileInfoSet->pData; iter < pInfo; iter++) {
|
||||||
|
if ((pWal->cfg.retentionSize > 0 && newTotSize > pWal->cfg.retentionSize) ||
|
||||||
|
(pWal->cfg.retentionPeriod == 0 ||
|
||||||
|
(pWal->cfg.retentionPeriod > 0 && iter->closeTs >= 0 && iter->closeTs + pWal->cfg.retentionPeriod < ts))) {
|
||||||
deleteCnt++;
|
deleteCnt++;
|
||||||
newTotSize -= iter->fileSize;
|
newTotSize -= iter->fileSize;
|
||||||
taosArrayPush(pWal->toDeleteFiles, iter);
|
taosArrayPush(pWal->toDeleteFiles, iter);
|
||||||
}
|
}
|
||||||
wDebug("vgId:%d, check not pass", pWal->cfg.vgId);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
UPDATE_META:
|
|
||||||
// make new array, remove files
|
// make new array, remove files
|
||||||
taosArrayPopFrontBatch(pWal->fileInfoSet, deleteCnt);
|
taosArrayPopFrontBatch(pWal->fileInfoSet, deleteCnt);
|
||||||
if (taosArrayGetSize(pWal->fileInfoSet) == 0) {
|
if (taosArrayGetSize(pWal->fileInfoSet) == 0) {
|
||||||
|
@ -357,11 +343,12 @@ int32_t walEndSnapshot(SWal *pWal) {
|
||||||
pWal->vers.firstVer = ((SWalFileInfo *)taosArrayGet(pWal->fileInfoSet, 0))->firstVer;
|
pWal->vers.firstVer = ((SWalFileInfo *)taosArrayGet(pWal->fileInfoSet, 0))->firstVer;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// update meta
|
||||||
pWal->writeCur = taosArrayGetSize(pWal->fileInfoSet) - 1;
|
pWal->writeCur = taosArrayGetSize(pWal->fileInfoSet) - 1;
|
||||||
pWal->totSize = newTotSize;
|
pWal->totSize = newTotSize;
|
||||||
pWal->vers.verInSnapshotting = -1;
|
pWal->vers.verInSnapshotting = -1;
|
||||||
|
|
||||||
// save snapshot ver, commit ver
|
|
||||||
code = walSaveMeta(pWal);
|
code = walSaveMeta(pWal);
|
||||||
if (code < 0) {
|
if (code < 0) {
|
||||||
goto END;
|
goto END;
|
||||||
|
@ -369,23 +356,27 @@ int32_t walEndSnapshot(SWal *pWal) {
|
||||||
|
|
||||||
// delete files
|
// delete files
|
||||||
deleteCnt = taosArrayGetSize(pWal->toDeleteFiles);
|
deleteCnt = taosArrayGetSize(pWal->toDeleteFiles);
|
||||||
wDebug("vgId:%d, wal should delete %d files", pWal->cfg.vgId, deleteCnt);
|
char fnameStr[WAL_FILE_LEN] = {0};
|
||||||
char fnameStr[WAL_FILE_LEN];
|
pInfo = NULL;
|
||||||
|
|
||||||
for (int i = 0; i < deleteCnt; i++) {
|
for (int i = 0; i < deleteCnt; i++) {
|
||||||
pInfo = taosArrayGet(pWal->toDeleteFiles, i);
|
pInfo = taosArrayGet(pWal->toDeleteFiles, i);
|
||||||
|
|
||||||
walBuildLogName(pWal, pInfo->firstVer, fnameStr);
|
walBuildLogName(pWal, pInfo->firstVer, fnameStr);
|
||||||
wDebug("vgId:%d, wal remove file %s", pWal->cfg.vgId, fnameStr);
|
|
||||||
if (taosRemoveFile(fnameStr) < 0 && errno != ENOENT) {
|
if (taosRemoveFile(fnameStr) < 0 && errno != ENOENT) {
|
||||||
wError("vgId:%d, failed to remove log file %s due to %s", pWal->cfg.vgId, fnameStr, strerror(errno));
|
wError("vgId:%d, failed to remove log file %s due to %s", pWal->cfg.vgId, fnameStr, strerror(errno));
|
||||||
goto END;
|
goto END;
|
||||||
}
|
}
|
||||||
walBuildIdxName(pWal, pInfo->firstVer, fnameStr);
|
walBuildIdxName(pWal, pInfo->firstVer, fnameStr);
|
||||||
wDebug("vgId:%d, wal remove file %s", pWal->cfg.vgId, fnameStr);
|
|
||||||
if (taosRemoveFile(fnameStr) < 0 && errno != ENOENT) {
|
if (taosRemoveFile(fnameStr) < 0 && errno != ENOENT) {
|
||||||
wError("vgId:%d, failed to remove idx file %s due to %s", pWal->cfg.vgId, fnameStr, strerror(errno));
|
wError("vgId:%d, failed to remove idx file %s due to %s", pWal->cfg.vgId, fnameStr, strerror(errno));
|
||||||
goto END;
|
goto END;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
if (pInfo != NULL) {
|
||||||
|
wInfo("vgId:%d, wal log files recycled. count:%d, until ver:%" PRId64 ", closeTs:%" PRId64, pWal->cfg.vgId,
|
||||||
|
deleteCnt, pInfo->lastVer, pInfo->closeTs);
|
||||||
|
}
|
||||||
taosArrayClear(pWal->toDeleteFiles);
|
taosArrayClear(pWal->toDeleteFiles);
|
||||||
|
|
||||||
END:
|
END:
|
||||||
|
|
|
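The rewritten deletion pass above folds everything into one predicate per file: delete when the accumulated size still exceeds retentionSize, when retentionPeriod is 0, or when the file closed more than retentionPeriod seconds ago; in compatible mode the cut-off ver is first clamped to the smallest referenced version. A numeric sketch, with all values invented:

// Suppose retentionSize = 100 MB, retentionPeriod = 3600 s.
// For a file with fileSize = 30 MB and closeTs = ts - 7200:
//   size rule: newTotSize (e.g. 120 MB) > 100 MB        -> deletable
//   age  rule: closeTs + 3600 < ts (file is 7200 s old) -> deletable
// If a topic still holds refVer on an older file, ver = TMIN(ver, refVer)
// keeps that file and everything after it out of toDeleteFiles.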
@ -52,8 +52,9 @@ class ConfigureyCluster:
|
||||||
dnode.addExtraCfg("secondEp", f"{hostname}:{startPort_sec}")
|
dnode.addExtraCfg("secondEp", f"{hostname}:{startPort_sec}")
|
||||||
|
|
||||||
# configure dnode of independent mnodes
|
# configure dnode of independent mnodes
|
||||||
if num <= self.mnodeNums and self.mnodeNums != 0 and independentMnode == True :
|
if num <= self.mnodeNums and self.mnodeNums != 0 and independentMnode == "True" :
|
||||||
dnode.addExtraCfg("supportVnodes", 1024)
|
tdLog.info("set mnode supportVnodes 0")
|
||||||
|
dnode.addExtraCfg("supportVnodes", 0)
|
||||||
# print(dnode)
|
# print(dnode)
|
||||||
self.dnodes.append(dnode)
|
self.dnodes.append(dnode)
|
||||||
return self.dnodes
|
return self.dnodes
|
||||||
|
@ -71,6 +72,7 @@ class ConfigureyCluster:
|
||||||
tdSql.init(conn.cursor())
|
tdSql.init(conn.cursor())
|
||||||
mnodeNums=int(mnodeNums)
|
mnodeNums=int(mnodeNums)
|
||||||
for i in range(2,mnodeNums+1):
|
for i in range(2,mnodeNums+1):
|
||||||
|
tdLog.info("create mnode on dnode %d"%i)
|
||||||
tdSql.execute(" create mnode on dnode %d;"%i)
|
tdSql.execute(" create mnode on dnode %d;"%i)
|
||||||
|
|
||||||
|
|
||||||
|
|
|
@ -657,17 +657,34 @@ if $data20 != null then
|
||||||
return -1
|
return -1
|
||||||
endi
|
endi
|
||||||
|
|
||||||
print =============== error
|
print =============== error for normal table
|
||||||
sql create table tb2023(ts timestamp, f int);
|
sql create table tb2023(ts timestamp, f int);
|
||||||
sql_error alter table tb2023 add column v varchar(16375);
|
sql_error alter table tb2023 add column v varchar(16375);
|
||||||
sql_error alter table tb2023 add column v varchar(16385);
|
sql_error alter table tb2023 add column v varchar(16385);
|
||||||
sql_error alter table tb2023 add column v varchar(33100);
|
sql_error alter table tb2023 add column v varchar(33100);
|
||||||
sql alter table tb2023 add column v varchar(16374);
|
sql alter table tb2023 add column v varchar(16374);
|
||||||
|
sql_error alter table tb2023 modify column v varchar(16375);
|
||||||
sql desc tb2023
|
sql desc tb2023
|
||||||
sql alter table tb2023 drop column v
|
sql alter table tb2023 drop column v
|
||||||
sql_error alter table tb2023 add column v nchar(4094);
|
sql_error alter table tb2023 add column v nchar(4094);
|
||||||
sql alter table tb2023 add column v nchar(4093);
|
sql alter table tb2023 add column v nchar(4093);
|
||||||
|
sql_error alter table tb2023 modify column v nchar(4094);
|
||||||
sql desc tb2023
|
sql desc tb2023
|
||||||
|
|
||||||
|
print =============== error for super table
|
||||||
|
sql create table stb2023(ts timestamp, f int) tags(t1 int);
|
||||||
|
sql_error alter table stb2023 add column v varchar(16375);
|
||||||
|
sql_error alter table stb2023 add column v varchar(16385);
|
||||||
|
sql_error alter table stb2023 add column v varchar(33100);
|
||||||
|
sql alter table stb2023 add column v varchar(16374);
|
||||||
|
sql_error alter table stb2023 modify column v varchar(16375);
|
||||||
|
sql desc stb2023
|
||||||
|
sql alter table stb2023 drop column v
|
||||||
|
sql_error alter table stb2023 add column v nchar(4094);
|
||||||
|
sql alter table stb2023 add column v nchar(4093);
|
||||||
|
sql_error alter table stb2023 modify column v nchar(4094);
|
||||||
|
sql desc stb2023
|
||||||
|
|
||||||
print ======= over
|
print ======= over
|
||||||
sql drop database d1
|
sql drop database d1
|
||||||
sql select * from information_schema.ins_databases
|
sql select * from information_schema.ins_databases
|
||||||
|
|
|
@ -48,7 +48,7 @@ sql_error alter table tb modify column c2 binary(10);
|
||||||
sql_error alter table tb modify column c2 binary(9);
|
sql_error alter table tb modify column c2 binary(9);
|
||||||
sql_error alter table tb modify column c2 binary(-9);
|
sql_error alter table tb modify column c2 binary(-9);
|
||||||
sql_error alter table tb modify column c2 binary(0);
|
sql_error alter table tb modify column c2 binary(0);
|
||||||
sql alter table tb modify column c2 binary(17000);
|
sql_error alter table tb modify column c2 binary(17000);
|
||||||
sql_error alter table tb modify column c2 nchar(30);
|
sql_error alter table tb modify column c2 nchar(30);
|
||||||
sql_error alter table tb modify column c3 double;
|
sql_error alter table tb modify column c3 double;
|
||||||
sql_error alter table tb modify column c3 nchar(10);
|
sql_error alter table tb modify column c3 nchar(10);
|
||||||
|
|
|
@ -25,4 +25,21 @@ if $data05 != @0021001@ then
|
||||||
return -1
|
return -1
|
||||||
endi
|
endi
|
||||||
|
|
||||||
|
sql create table st (ts timestamp, f int) tags (t int);
|
||||||
|
sql insert into ct1 using st tags(1) values(now, 1)(now+1s, 2)
|
||||||
|
sql insert into ct2 using st tags(2) values(now+2s, 3)(now+3s, 4)
|
||||||
|
sql select count(*) from (select * from ct1 union all select * from ct2)
|
||||||
|
if $rows != 1 then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data00 != 4 then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
sql select count(*) from (select * from ct1 union select * from ct2)
|
||||||
|
if $rows != 1 then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
|
if $data00 != 4 then
|
||||||
|
return -1
|
||||||
|
endi
|
||||||
system sh/exec.sh -n dnode1 -s stop -x SIGINT
|
system sh/exec.sh -n dnode1 -s stop -x SIGINT
|
||||||
|
|
|
@ -29,6 +29,7 @@ class TDTestCase:
|
||||||
self.stbname = 'stb'
|
self.stbname = 'stb'
|
||||||
self.binary_length = 20 # the length of binary for column_dict
|
self.binary_length = 20 # the length of binary for column_dict
|
||||||
self.nchar_length = 20 # the length of nchar for column_dict
|
self.nchar_length = 20 # the length of nchar for column_dict
|
||||||
|
self.dbnames = ['db1', 'db2']
|
||||||
self.column_dict = {
|
self.column_dict = {
|
||||||
'ts': 'timestamp',
|
'ts': 'timestamp',
|
||||||
'col1': 'float',
|
'col1': 'float',
|
||||||
|
@ -57,21 +58,25 @@ class TDTestCase:
|
||||||
def create_user(self):
|
def create_user(self):
|
||||||
user_name = 'test'
|
user_name = 'test'
|
||||||
tdSql.execute(f'create user {user_name} pass "test"')
|
tdSql.execute(f'create user {user_name} pass "test"')
|
||||||
tdSql.execute(f'grant read on db.stb with t2 = "Beijing" to {user_name}')
|
tdSql.execute(f'grant read on {self.dbnames[0]}.{self.stbname} with t2 = "Beijing" to {user_name}')
|
||||||
|
tdSql.execute(f'grant write on {self.dbnames[1]}.{self.stbname} with t1 = 2 to {user_name}')
|
||||||
|
|
||||||
def prepare_data(self):
|
def prepare_data(self):
|
||||||
|
for db in self.dbnames:
|
||||||
|
tdSql.execute(f"create database {db}")
|
||||||
|
tdSql.execute(f"use {db}")
|
||||||
tdSql.execute(self.setsql.set_create_stable_sql(self.stbname, self.column_dict, self.tag_dict))
|
tdSql.execute(self.setsql.set_create_stable_sql(self.stbname, self.column_dict, self.tag_dict))
|
||||||
for i in range(self.tbnum):
|
for i in range(self.tbnum):
|
||||||
tdSql.execute(f'create table {self.stbname}_{i} using {self.stbname} tags({self.tag_list[i]})')
|
tdSql.execute(f'create table {self.stbname}_{i} using {self.stbname} tags({self.tag_list[i]})')
|
||||||
for j in self.values_list:
|
for j in self.values_list:
|
||||||
tdSql.execute(f'insert into {self.stbname}_{i} values({j})')
|
tdSql.execute(f'insert into {self.stbname}_{i} values({j})')
|
||||||
|
|
||||||
def user_privilege_check(self):
|
def user_read_privilege_check(self, dbname):
|
||||||
testconn = taos.connect(user='test', password='test')
|
testconn = taos.connect(user='test', password='test')
|
||||||
expectErrNotOccured = False
|
expectErrNotOccured = False
|
||||||
|
|
||||||
try:
|
try:
|
||||||
sql = "select count(*) from db.stb where t2 = 'Beijing'"
|
sql = f"select count(*) from {dbname}.stb where t2 = 'Beijing'"
|
||||||
res = testconn.query(sql)
|
res = testconn.query(sql)
|
||||||
data = res.fetch_all()
|
data = res.fetch_all()
|
||||||
count = data[0][0]
|
count = data[0][0]
|
||||||
|
@ -85,11 +90,30 @@ class TDTestCase:
|
||||||
tdLog.exit(f"{sql}, expect result doesn't match")
|
tdLog.exit(f"{sql}, expect result doesn't match")
|
||||||
pass
|
pass
|
||||||
|
|
||||||
|
def user_write_privilege_check(self, dbname):
|
||||||
|
testconn = taos.connect(user='test', password='test')
|
||||||
|
expectErrNotOccured = False
|
||||||
|
|
||||||
|
try:
|
||||||
|
sql = f"insert into {dbname}.stb_1 values(now, 1.1, 200, 0.3)"
|
||||||
|
testconn.execute(sql)
|
||||||
|
except BaseException:
|
||||||
|
expectErrNotOccured = True
|
||||||
|
|
||||||
|
if expectErrNotOccured:
|
||||||
|
caller = inspect.getframeinfo(inspect.stack()[1][0])
|
||||||
|
tdLog.exit(f"{caller.filename}({caller.lineno}) failed: sql:{sql}, expect error not occured")
|
||||||
|
else:
|
||||||
|
pass
|
||||||
|
|
||||||
def user_privilege_error_check(self):
|
def user_privilege_error_check(self):
|
||||||
testconn = taos.connect(user='test', password='test')
|
testconn = taos.connect(user='test', password='test')
|
||||||
expectErrNotOccured = False
|
expectErrNotOccured = False
|
||||||
|
|
||||||
sql_list = ["alter talbe db.stb_1 set t2 = 'Wuhan'", "drop table db.stb_1"]
|
sql_list = [f"alter talbe {self.dbnames[0]}.stb_1 set t2 = 'Wuhan'",
|
||||||
|
f"insert into {self.dbnames[0]}.stb_1 values(now, 1.1, 200, 0.3)",
|
||||||
|
f"drop table {self.dbnames[0]}.stb_1",
|
||||||
|
f"select count(*) from {self.dbnames[1]}.stb"]
|
||||||
|
|
||||||
for sql in sql_list:
|
for sql in sql_list:
|
||||||
try:
|
try:
|
||||||
|
@ -105,10 +129,10 @@ class TDTestCase:
|
||||||
pass
|
pass
|
||||||
|
|
||||||
def run(self):
|
def run(self):
|
||||||
tdSql.prepare()
|
|
||||||
self.prepare_data()
|
self.prepare_data()
|
||||||
self.create_user()
|
self.create_user()
|
||||||
self.user_privilege_check()
|
self.user_read_privilege_check(self.dbnames[0])
|
||||||
|
self.user_write_privilege_check(self.dbnames[1])
|
||||||
self.user_privilege_error_check()
|
self.user_privilege_error_check()
|
||||||
|
|
||||||
def stop(self):
|
def stop(self):
|
||||||
|
|
|
@ -207,7 +207,7 @@ class ClusterComCheck:
|
||||||
count+=1
|
count+=1
|
||||||
else:
|
else:
|
||||||
tdLog.debug(tdSql.queryResult)
|
tdLog.debug(tdSql.queryResult)
|
||||||
tdLog.exit("stop mnodes on dnode %d failed in 10s ")
|
tdLog.exit(f"stop mnodes on dnode {offlineDnodeNo} failed in 10s ")
|
||||||
|
|
||||||
def check3mnode2off(self,mnodeNums=3):
|
def check3mnode2off(self,mnodeNums=3):
|
||||||
count=0
|
count=0
|
||||||
|
@ -226,7 +226,45 @@ class ClusterComCheck:
|
||||||
count+=1
|
count+=1
|
||||||
else:
|
else:
|
||||||
tdLog.debug(tdSql.queryResult)
|
tdLog.debug(tdSql.queryResult)
|
||||||
tdLog.exit("stop mnodes on dnode %d failed in 10s ")
|
tdLog.exit("stop mnodes on dnode 2 or 3 failed in 10s")
|
||||||
|
|
||||||
|
def check_vgroups_status(self,vgroup_numbers=2,db_replica=3,count_number=10,db_name="db"):
|
||||||
|
""" check vgroups status in 10s after db vgroups status is changed """
|
||||||
|
vgroup_numbers = int(vgroup_numbers)
|
||||||
|
self.db_replica = int(db_replica)
|
||||||
|
tdLog.debug("start to check status of vgroups")
|
||||||
|
count=0
|
||||||
|
last_number=vgroup_numbers-1
|
||||||
|
while count < count_number:
|
||||||
|
time.sleep(1)
|
||||||
|
tdSql.query(f"show {db_name}.vgroups;")
|
||||||
|
if count == 0 :
|
||||||
|
if tdSql.checkRows(vgroup_numbers) :
|
||||||
|
tdLog.success(f"{db_name} has {vgroup_numbers} vgroups" )
|
||||||
|
else:
|
||||||
|
tdLog.exit(f"vgroup number of {db_name} is not correct")
|
||||||
|
if self.db_replica == 1 :
|
||||||
|
if tdSql.queryResult[0][4] == 'leader' and tdSql.queryResult[1][4] == 'leader' and tdSql.queryResult[last_number][4] == 'leader':
|
||||||
|
ready_time= (count + 1)
|
||||||
|
tdLog.success(f"all vgroups of {db_name} are leaders in {count + 1} s")
|
||||||
|
return True
|
||||||
|
count+=1
|
||||||
|
elif self.db_replica == 3 :
|
||||||
|
vgroup_status_first=[tdSql.queryResult[0][4],tdSql.queryResult[0][6],tdSql.queryResult[0][8]]
|
||||||
|
|
||||||
|
vgroup_status_last=[tdSql.queryResult[last_number][4],tdSql.queryResult[last_number][6],tdSql.queryResult[last_number][8]]
|
||||||
|
if vgroup_status_first.count('leader') == 1 and vgroup_status_first.count('follower') == 2:
|
||||||
|
if vgroup_status_last.count('leader') == 1 and vgroup_status_last.count('follower') == 2:
|
||||||
|
ready_time= (count + 1)
|
||||||
|
tdLog.success(f"all vgroups of {db_name} are ready in {ready_time} s")
|
||||||
|
return True
|
||||||
|
count+=1
|
||||||
|
else:
|
||||||
|
tdLog.debug(tdSql.queryResult)
|
||||||
|
tdLog.notice(f"all vgroups leader of {db_name} is selected {count}s ")
|
||||||
|
caller = inspect.getframeinfo(inspect.stack()[1][0])
|
||||||
|
args = (caller.filename, caller.lineno)
|
||||||
|
tdLog.exit("%s(%d) failed " % args)
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
|
@ -0,0 +1,206 @@
|
||||||
|
import taos
|
||||||
|
import sys
|
||||||
|
import time
|
||||||
|
import os
|
||||||
|
|
||||||
|
from util.log import *
|
||||||
|
from util.sql import *
|
||||||
|
from util.cases import *
|
||||||
|
from util.dnodes import TDDnodes
|
||||||
|
from util.dnodes import TDDnode
|
||||||
|
from util.cluster import *
|
||||||
|
sys.path.append("./6-cluster")
|
||||||
|
from clusterCommonCreate import *
|
||||||
|
from clusterCommonCheck import clusterComCheck
|
||||||
|
|
||||||
|
import time
|
||||||
|
import socket
|
||||||
|
import subprocess
|
||||||
|
from multiprocessing import Process
|
||||||
|
import threading
|
||||||
|
import time
|
||||||
|
import inspect
|
||||||
|
import ctypes
|
||||||
|
|
||||||
|
class TDTestCase:
|
||||||
|
|
||||||
|
def init(self, conn, logSql, replicaVar=1):
|
||||||
|
tdLog.debug(f"start to excute {__file__}")
|
||||||
|
self.TDDnodes = None
|
||||||
|
tdSql.init(conn.cursor())
|
||||||
|
self.host = socket.gethostname()
|
||||||
|
|
||||||
|
|
||||||
|
def getBuildPath(self):
|
||||||
|
selfPath = os.path.dirname(os.path.realpath(__file__))
|
||||||
|
|
||||||
|
if ("community" in selfPath):
|
||||||
|
projPath = selfPath[:selfPath.find("community")]
|
||||||
|
else:
|
||||||
|
projPath = selfPath[:selfPath.find("tests")]
|
||||||
|
|
||||||
|
for root, dirs, files in os.walk(projPath):
|
||||||
|
if ("taosd" in files):
|
||||||
|
rootRealPath = os.path.dirname(os.path.realpath(root))
|
||||||
|
if ("packaging" not in rootRealPath):
|
||||||
|
buildPath = root[:len(root) - len("/build/bin")]
|
||||||
|
break
|
||||||
|
return buildPath
|
||||||
|
|
||||||
|
def _async_raise(self, tid, exctype):
|
||||||
|
"""raises the exception, performs cleanup if needed"""
|
||||||
|
if not inspect.isclass(exctype):
|
||||||
|
exctype = type(exctype)
|
||||||
|
res = ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, ctypes.py_object(exctype))
|
||||||
|
if res == 0:
|
||||||
|
raise ValueError("invalid thread id")
|
||||||
|
elif res != 1:
|
||||||
|
# """if it returns a number greater than one, you're in trouble,
|
||||||
|
# and you should call it again with exc=NULL to revert the effect"""
|
||||||
|
ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)
|
||||||
|
raise SystemError("PyThreadState_SetAsyncExc failed")
|
||||||
|
|
||||||
|
def stopThread(self,thread):
|
||||||
|
self._async_raise(thread.ident, SystemExit)
|
||||||
|
|
||||||
|
|
||||||
|
def fiveDnodeThreeMnode(self,dnodeNumbers,mnodeNums,restartNumbers,stopRole):
|
||||||
|
tdLog.printNoPrefix("======== test case 1: ")
|
||||||
|
paraDict = {'dbName': 'db0_0',
|
||||||
|
'dropFlag': 1,
|
||||||
|
'event': '',
|
||||||
|
'vgroups': 4,
|
||||||
|
'replica': 1,
|
||||||
|
'stbName': 'stb',
|
||||||
|
'stbNumbers': 2,
|
||||||
|
'colPrefix': 'c',
|
||||||
|
'tagPrefix': 't',
|
||||||
|
'colSchema': [{'type': 'INT', 'count':1}, {'type': 'binary', 'len':20, 'count':1}],
|
||||||
|
'tagSchema': [{'type': 'INT', 'count':1}, {'type': 'binary', 'len':20, 'count':1}],
|
||||||
|
'ctbPrefix': 'ctb',
|
||||||
|
'ctbNum': 200,
|
||||||
|
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
|
||||||
|
"rowsPerTbl": 1000,
|
||||||
|
"batchNum": 5000
|
||||||
|
}
|
||||||
|
|
||||||
|
dnodeNumbers=int(dnodeNumbers)
|
||||||
|
mnodeNums=int(mnodeNums)
|
||||||
|
vnodeNumbers = int(dnodeNumbers-mnodeNums)
|
||||||
|
allctbNumbers=(paraDict['stbNumbers']*paraDict["ctbNum"])
|
||||||
|
rowsPerStb=paraDict["ctbNum"]*paraDict["rowsPerTbl"]
|
||||||
|
rowsall=rowsPerStb*paraDict['stbNumbers']
|
||||||
|
dbNumbers = 1
|
||||||
|
|
||||||
|
tdLog.info("first check dnode and mnode")
|
||||||
|
tdSql.query("select * from information_schema.ins_dnodes;")
|
||||||
|
tdSql.checkData(0,1,'%s:6030'%self.host)
|
||||||
|
tdSql.checkData(4,1,'%s:6430'%self.host)
|
||||||
|
clusterComCheck.checkDnodes(dnodeNumbers)
|
||||||
|
|
||||||
|
#check mnode status
|
||||||
|
tdLog.info("check mnode status")
|
||||||
|
clusterComCheck.checkMnodeStatus(mnodeNums)
|
||||||
|
|
||||||
|
# add some error operations and confirm the dnode status again
|
||||||
|
tdLog.info("Confirm the status of the dnode again")
|
||||||
|
tdSql.error("create mnode on dnode 2")
|
||||||
|
tdSql.query("select * from information_schema.ins_dnodes;")
|
||||||
|
print(tdSql.queryResult)
|
||||||
|
clusterComCheck.checkDnodes(dnodeNumbers)
|
||||||
|
|
||||||
|
# create database and stable
|
||||||
|
clusterComCreate.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], paraDict["vgroups"],paraDict['replica'])
|
||||||
|
tdLog.info("Take turns stopping Mnodes ")
|
||||||
|
|
||||||
|
tdDnodes=cluster.dnodes
|
||||||
|
stopcount =0
|
||||||
|
threads=[]
|
||||||
|
|
||||||
|
# create stable:stb_0
|
||||||
|
stableName= paraDict['stbName']
|
||||||
|
newTdSql=tdCom.newTdSql()
|
||||||
|
clusterComCreate.create_stables(newTdSql, paraDict["dbName"],stableName,paraDict['stbNumbers'])
|
||||||
|
#create child table:ctb_0
|
||||||
|
for i in range(paraDict['stbNumbers']):
|
||||||
|
stableName= '%s_%d'%(paraDict['stbName'],i)
|
||||||
|
newTdSql=tdCom.newTdSql()
|
||||||
|
clusterComCreate.create_ctable(newTdSql, paraDict["dbName"],stableName,stableName, paraDict['ctbNum'])
|
||||||
|
#insert data
|
||||||
|
for i in range(paraDict['stbNumbers']):
|
||||||
|
stableName= '%s_%d'%(paraDict['stbName'],i)
|
||||||
|
newTdSql=tdCom.newTdSql()
|
||||||
|
threads.append(threading.Thread(target=clusterComCreate.insert_data, args=(newTdSql, paraDict["dbName"],stableName,paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"],paraDict["startTs"])))
|
||||||
|
for tr in threads:
|
||||||
|
tr.start()
|
||||||
|
for tr in threads:
|
||||||
|
tr.join()
|
||||||
|
|
||||||
|
while stopcount < restartNumbers:
|
||||||
|
tdLog.info(" restart loop: %d"%stopcount )
|
||||||
|
if stopRole == "mnode":
|
||||||
|
for i in range(mnodeNums):
|
||||||
|
tdDnodes[i].stoptaosd()
|
||||||
|
# sleep(10)
|
||||||
|
tdDnodes[i].starttaosd()
|
||||||
|
# sleep(10)
|
||||||
|
elif stopRole == "vnode":
|
||||||
|
for i in range(vnodeNumbers):
|
||||||
|
tdDnodes[i+mnodeNums].stoptaosd()
|
||||||
|
# sleep(10)
|
||||||
|
tdDnodes[i+mnodeNums].starttaosd()
|
||||||
|
# sleep(10)
|
||||||
|
elif stopRole == "dnode":
|
||||||
|
for i in range(dnodeNumbers):
|
||||||
|
if i == 0 :
|
||||||
|
stableName= '%s_%d'%(paraDict['stbName'],0)
|
||||||
|
newTdSql=tdCom.newTdSql()
|
||||||
|
# newTdSql.execute('alter database db0_0 replica 3')
|
||||||
|
clusterComCreate.alterStbMetaData(newTdSql, paraDict["dbName"],stableName,paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"])
|
||||||
|
tdDnodes[i].stoptaosd()
|
||||||
|
clusterComCheck.checkDbRows(dbNumbers)
|
||||||
|
# sleep(10)
|
||||||
|
tdDnodes[i].starttaosd()
|
||||||
|
if i == 3 :
|
||||||
|
TdSqlEx=tdCom.newTdSql()
|
||||||
|
tdLog.info("alter database db0_0 replica 3")
|
||||||
|
TdSqlEx.execute('alter database db0_0 replica 3')
|
||||||
|
|
||||||
|
|
||||||
|
# dnodeNumbers doesn't include the schema database
|
||||||
|
if clusterComCheck.checkDnodes(dnodeNumbers):
|
||||||
|
tdLog.info("123")
|
||||||
|
else:
|
||||||
|
print("456")
|
||||||
|
|
||||||
|
self.stopThread(threads)
|
||||||
|
tdLog.exit("one or more of dnodes failed to start ")
|
||||||
|
# self.check3mnode()
|
||||||
|
stopcount+=1
|
||||||
|
|
||||||
|
|
||||||
|
clusterComCheck.checkDnodes(dnodeNumbers)
|
||||||
|
clusterComCheck.checkDbRows(dbNumbers)
|
||||||
|
# clusterComCheck.checkDb(dbNumbers,1,paraDict["dbName"])
|
||||||
|
|
||||||
|
# tdSql.execute("use %s" %(paraDict["dbName"]))
|
||||||
|
tdSql.query("show %s.stables"%(paraDict["dbName"]))
|
||||||
|
tdSql.checkRows(paraDict["stbNumbers"])
|
||||||
|
for i in range(paraDict['stbNumbers']):
|
||||||
|
stableName= '%s.%s_%d'%(paraDict["dbName"],paraDict['stbName'],i)
|
||||||
|
tdSql.query("select count(*) from %s"%stableName)
|
||||||
|
if i == 0 :
|
||||||
|
tdSql.checkData(0,0,rowsPerStb*2)
|
||||||
|
else:
|
||||||
|
tdSql.checkData(0,0,rowsPerStb)
|
||||||
|
clusterComCheck.check_vgroups_status(vgroup_numbers=paraDict["vgroups"],db_replica=3,db_name=paraDict["dbName"],count_number=150)
|
||||||
|
def run(self):
|
||||||
|
# print(self.master_dnode.cfgDict)
|
||||||
|
self.fiveDnodeThreeMnode(dnodeNumbers=6,mnodeNums=3,restartNumbers=1,stopRole='dnode')
|
||||||
|
|
||||||
|
def stop(self):
|
||||||
|
tdSql.close()
|
||||||
|
tdLog.success(f"{__file__} successfully executed")
|
||||||
|
|
||||||
|
tdCases.addLinux(__file__, TDTestCase())
|
||||||
|
tdCases.addWindows(__file__, TDTestCase())
|
|
@ -0,0 +1,206 @@
|
||||||
|
import taos
|
||||||
|
import sys
|
||||||
|
import time
|
||||||
|
import os
|
||||||
|
|
||||||
|
from util.log import *
|
||||||
|
from util.sql import *
|
||||||
|
from util.cases import *
|
||||||
|
from util.dnodes import TDDnodes
|
||||||
|
from util.dnodes import TDDnode
|
||||||
|
from util.cluster import *
|
||||||
|
sys.path.append("./6-cluster")
|
||||||
|
from clusterCommonCreate import *
|
||||||
|
from clusterCommonCheck import clusterComCheck
|
||||||
|
|
||||||
|
import time
|
||||||
|
import socket
|
||||||
|
import subprocess
|
||||||
|
from multiprocessing import Process
|
||||||
|
import threading
|
||||||
|
import time
|
||||||
|
import inspect
|
||||||
|
import ctypes
|
||||||
|
|
||||||
|
class TDTestCase:
|
||||||
|
|
||||||
|
def init(self, conn, logSql, replicaVar=1):
|
||||||
|
tdLog.debug(f"start to excute {__file__}")
|
||||||
|
self.TDDnodes = None
|
||||||
|
tdSql.init(conn.cursor())
|
||||||
|
self.host = socket.gethostname()
|
||||||
|
|
||||||
|
|
||||||
|
def getBuildPath(self):
|
||||||
|
selfPath = os.path.dirname(os.path.realpath(__file__))
|
||||||
|
|
||||||
|
if ("community" in selfPath):
|
||||||
|
projPath = selfPath[:selfPath.find("community")]
|
||||||
|
else:
|
||||||
|
projPath = selfPath[:selfPath.find("tests")]
|
||||||
|
|
||||||
|
for root, dirs, files in os.walk(projPath):
|
||||||
|
if ("taosd" in files):
|
||||||
|
rootRealPath = os.path.dirname(os.path.realpath(root))
|
||||||
|
if ("packaging" not in rootRealPath):
|
||||||
|
buildPath = root[:len(root) - len("/build/bin")]
|
||||||
|
break
|
||||||
|
return buildPath
|
||||||
|
|
||||||
|
def _async_raise(self, tid, exctype):
|
||||||
|
"""raises the exception, performs cleanup if needed"""
|
||||||
|
if not inspect.isclass(exctype):
|
||||||
|
exctype = type(exctype)
|
||||||
|
res = ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, ctypes.py_object(exctype))
|
||||||
|
if res == 0:
|
||||||
|
raise ValueError("invalid thread id")
|
||||||
|
elif res != 1:
|
||||||
|
# """if it returns a number greater than one, you're in trouble,
|
||||||
|
# and you should call it again with exc=NULL to revert the effect"""
|
||||||
|
ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)
|
||||||
|
raise SystemError("PyThreadState_SetAsyncExc failed")
|
||||||
|
|
||||||
|
def stopThread(self,thread):
|
||||||
|
self._async_raise(thread.ident, SystemExit)
|
||||||
|
|
||||||
|
|
||||||
|
def fiveDnodeThreeMnode(self,dnodeNumbers,mnodeNums,restartNumbers,stopRole):
|
||||||
|
tdLog.printNoPrefix("======== test case 1: ")
|
||||||
|
paraDict = {'dbName': 'db0_0',
|
||||||
|
'dropFlag': 1,
|
||||||
|
'event': '',
|
||||||
|
'vgroups': 4,
|
||||||
|
'replica': 3,
|
||||||
|
'stbName': 'stb',
|
||||||
|
'stbNumbers': 2,
|
||||||
|
'colPrefix': 'c',
|
||||||
|
'tagPrefix': 't',
|
||||||
|
'colSchema': [{'type': 'INT', 'count':1}, {'type': 'binary', 'len':20, 'count':1}],
|
||||||
|
'tagSchema': [{'type': 'INT', 'count':1}, {'type': 'binary', 'len':20, 'count':1}],
|
||||||
|
'ctbPrefix': 'ctb',
|
||||||
|
'ctbNum': 200,
|
||||||
|
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
|
||||||
|
"rowsPerTbl": 1000,
|
||||||
|
"batchNum": 5000
|
||||||
|
}
|
||||||
|
|
||||||
|
dnodeNumbers=int(dnodeNumbers)
|
||||||
|
mnodeNums=int(mnodeNums)
|
||||||
|
vnodeNumbers = int(dnodeNumbers-mnodeNums)
|
||||||
|
allctbNumbers=(paraDict['stbNumbers']*paraDict["ctbNum"])
|
||||||
|
rowsPerStb=paraDict["ctbNum"]*paraDict["rowsPerTbl"]
|
||||||
|
rowsall=rowsPerStb*paraDict['stbNumbers']
|
||||||
|
dbNumbers = 1
|
||||||
|
|
||||||
|
tdLog.info("first check dnode and mnode")
|
||||||
|
tdSql.query("select * from information_schema.ins_dnodes;")
|
||||||
|
tdSql.checkData(0,1,'%s:6030'%self.host)
|
||||||
|
tdSql.checkData(4,1,'%s:6430'%self.host)
|
||||||
|
clusterComCheck.checkDnodes(dnodeNumbers)
|
||||||
|
|
||||||
|
#check mnode status
|
||||||
|
tdLog.info("check mnode status")
|
||||||
|
clusterComCheck.checkMnodeStatus(mnodeNums)
|
||||||
|
|
||||||
|
# add some error operations and confirm the dnode status again
|
||||||
|
tdLog.info("Confirm the status of the dnode again")
|
||||||
|
tdSql.error("create mnode on dnode 2")
|
||||||
|
tdSql.query("select * from information_schema.ins_dnodes;")
|
||||||
|
print(tdSql.queryResult)
|
||||||
|
clusterComCheck.checkDnodes(dnodeNumbers)
|
||||||
|
|
||||||
|
# create database and stable
|
||||||
|
clusterComCreate.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], paraDict["vgroups"],paraDict['replica'])
|
||||||
|
tdLog.info("Take turns stopping Mnodes ")
|
||||||
|
|
||||||
|
tdDnodes=cluster.dnodes
|
||||||
|
stopcount =0
|
||||||
|
threads=[]
|
||||||
|
|
||||||
|
# create stable:stb_0
|
||||||
|
stableName= paraDict['stbName']
|
||||||
|
newTdSql=tdCom.newTdSql()
|
||||||
|
clusterComCreate.create_stables(newTdSql, paraDict["dbName"],stableName,paraDict['stbNumbers'])
|
||||||
|
#create child table:ctb_0
|
||||||
|
for i in range(paraDict['stbNumbers']):
|
||||||
|
stableName= '%s_%d'%(paraDict['stbName'],i)
|
||||||
|
newTdSql=tdCom.newTdSql()
|
||||||
|
clusterComCreate.create_ctable(newTdSql, paraDict["dbName"],stableName,stableName, paraDict['ctbNum'])
|
||||||
|
#insert data
|
||||||
|
for i in range(paraDict['stbNumbers']):
|
||||||
|
stableName= '%s_%d'%(paraDict['stbName'],i)
|
||||||
|
newTdSql=tdCom.newTdSql()
|
||||||
|
threads.append(threading.Thread(target=clusterComCreate.insert_data, args=(newTdSql, paraDict["dbName"],stableName,paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"],paraDict["startTs"])))
|
||||||
|
for tr in threads:
|
||||||
|
tr.start()
|
||||||
|
for tr in threads:
|
||||||
|
tr.join()
|
||||||
|
|
||||||
|
while stopcount < restartNumbers:
|
||||||
|
tdLog.info(" restart loop: %d"%stopcount )
|
||||||
|
if stopRole == "mnode":
|
||||||
|
for i in range(mnodeNums):
|
||||||
|
tdDnodes[i].stoptaosd()
|
||||||
|
# sleep(10)
|
||||||
|
tdDnodes[i].starttaosd()
|
||||||
|
# sleep(10)
|
||||||
|
elif stopRole == "vnode":
|
||||||
|
for i in range(vnodeNumbers):
|
||||||
|
tdDnodes[i+mnodeNums].stoptaosd()
|
||||||
|
# sleep(10)
|
||||||
|
tdDnodes[i+mnodeNums].starttaosd()
|
||||||
|
# sleep(10)
|
||||||
|
elif stopRole == "dnode":
|
||||||
|
for i in range(dnodeNumbers):
|
||||||
|
tdDnodes[i].stoptaosd()
|
||||||
|
clusterComCheck.checkDbRows(dbNumbers)
|
||||||
|
if i == 0 :
|
||||||
|
stableName= '%s_%d'%(paraDict['stbName'],0)
|
||||||
|
newTdSql=tdCom.newTdSql()
|
||||||
|
# newTdSql.execute('alter database db0_0 replica 3')
|
||||||
|
clusterComCreate.alterStbMetaData(newTdSql, paraDict["dbName"],stableName,paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"])
|
||||||
|
# sleep(10)
|
||||||
|
tdDnodes[i].starttaosd()
|
||||||
|
if i == 3 :
|
||||||
|
TdSqlEx=tdCom.newTdSql()
|
||||||
|
tdLog.info("alter database db0_0 replica 1")
|
||||||
|
TdSqlEx.execute('alter database db0_0 replica 1')
|
||||||
|
|
||||||
|
|
||||||
|
# dnodeNumbers doesn't include the schema database
|
||||||
|
if clusterComCheck.checkDnodes(dnodeNumbers):
|
||||||
|
tdLog.info("123")
|
||||||
|
else:
|
||||||
|
print("456")
|
||||||
|
|
||||||
|
self.stopThread(threads)
|
||||||
|
tdLog.exit("one or more of dnodes failed to start ")
|
||||||
|
# self.check3mnode()
|
||||||
|
stopcount+=1
|
||||||
|
|
||||||
|
|
||||||
|
clusterComCheck.checkDnodes(dnodeNumbers)
|
||||||
|
clusterComCheck.checkDbRows(dbNumbers)
|
||||||
|
# clusterComCheck.checkDb(dbNumbers,1,paraDict["dbName"])
|
||||||
|
|
||||||
|
# tdSql.execute("use %s" %(paraDict["dbName"]))
|
||||||
|
tdSql.query("show %s.stables"%(paraDict["dbName"]))
|
||||||
|
tdSql.checkRows(paraDict["stbNumbers"])
|
||||||
|
for i in range(paraDict['stbNumbers']):
|
||||||
|
stableName= '%s.%s_%d'%(paraDict["dbName"],paraDict['stbName'],i)
|
||||||
|
tdSql.query("select count(*) from %s"%stableName)
|
||||||
|
if i == 0 :
|
||||||
|
tdSql.checkData(0,0,rowsPerStb*2)
|
||||||
|
else:
|
||||||
|
tdSql.checkData(0,0,rowsPerStb)
|
||||||
|
clusterComCheck.check_vgroups_status(vgroup_numbers=paraDict["vgroups"],db_replica=1,db_name=paraDict["dbName"],count_number=150)
|
||||||
|
def run(self):
|
||||||
|
# print(self.master_dnode.cfgDict)
|
||||||
|
self.fiveDnodeThreeMnode(dnodeNumbers=6,mnodeNums=3,restartNumbers=1,stopRole='dnode')
|
||||||
|
|
||||||
|
def stop(self):
|
||||||
|
tdSql.close()
|
||||||
|
tdLog.success(f"{__file__} successfully executed")
|
||||||
|
|
||||||
|
tdCases.addLinux(__file__, TDTestCase())
|
||||||
|
tdCases.addWindows(__file__, TDTestCase())
|
|
@ -0,0 +1,222 @@
|
||||||
|
import taos
|
||||||
|
import sys
|
||||||
|
import time
|
||||||
|
import os
|
||||||
|
|
||||||
|
from util.log import *
|
||||||
|
from util.sql import *
|
||||||
|
from util.cases import *
|
||||||
|
from util.dnodes import TDDnodes
|
||||||
|
from util.dnodes import TDDnode
|
||||||
|
from util.cluster import *
|
||||||
|
sys.path.append("./6-cluster")
|
||||||
|
from clusterCommonCreate import *
|
||||||
|
from clusterCommonCheck import clusterComCheck
|
||||||
|
|
||||||
|
import time
|
||||||
|
import socket
|
||||||
|
import subprocess
|
||||||
|
from multiprocessing import Process
|
||||||
|
import threading
|
||||||
|
import time
|
||||||
|
import inspect
|
||||||
|
import ctypes
|
||||||
|
|
||||||
|
class TDTestCase:
|
||||||
|
|
||||||
|
def init(self, conn, logSql, replicaVar=1):
|
||||||
|
tdLog.debug(f"start to excute {__file__}")
|
||||||
|
self.TDDnodes = None
|
||||||
|
tdSql.init(conn.cursor())
|
||||||
|
self.host = socket.gethostname()
|
||||||
|
|
||||||
|
|
||||||
|
def getBuildPath(self):
|
||||||
|
selfPath = os.path.dirname(os.path.realpath(__file__))
|
||||||
|
|
||||||
|
if ("community" in selfPath):
|
||||||
|
projPath = selfPath[:selfPath.find("community")]
|
||||||
|
else:
|
||||||
|
projPath = selfPath[:selfPath.find("tests")]
|
||||||
|
|
||||||
|
for root, dirs, files in os.walk(projPath):
|
||||||
|
if ("taosd" in files):
|
||||||
|
rootRealPath = os.path.dirname(os.path.realpath(root))
|
||||||
|
if ("packaging" not in rootRealPath):
|
||||||
|
buildPath = root[:len(root) - len("/build/bin")]
|
||||||
|
break
|
||||||
|
return buildPath
|
||||||
|
|
||||||
|
def _async_raise(self, tid, exctype):
|
||||||
|
"""raises the exception, performs cleanup if needed"""
|
||||||
|
if not inspect.isclass(exctype):
|
||||||
|
exctype = type(exctype)
|
||||||
|
res = ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, ctypes.py_object(exctype))
|
||||||
|
if res == 0:
|
||||||
|
raise ValueError("invalid thread id")
|
||||||
|
elif res != 1:
|
||||||
|
# """if it returns a number greater than one, you're in trouble,
|
||||||
|
# and you should call it again with exc=NULL to revert the effect"""
|
||||||
|
ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)
|
||||||
|
raise SystemError("PyThreadState_SetAsyncExc failed")
|
||||||
|
|
||||||
|
def stopThread(self,thread):
|
||||||
|
self._async_raise(thread.ident, SystemExit)
|
||||||
|
|
||||||
|
|
||||||
|
def insertData(self,countstart,countstop):
|
||||||
|
# first add data: db, stable, child table, general table
|
||||||
|
|
||||||
|
for couti in range(countstart,countstop):
|
||||||
|
tdLog.debug("drop database if exists db%d" %couti)
|
||||||
|
tdSql.execute("drop database if exists db%d" %couti)
|
||||||
|
print("create database if not exists db%d replica 1 duration 300" %couti)
|
||||||
|
tdSql.execute("create database if not exists db%d replica 1 duration 300" %couti)
|
||||||
|
tdSql.execute("use db%d" %couti)
|
||||||
|
tdSql.execute(
|
||||||
|
'''create table stb1
|
||||||
|
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
|
||||||
|
tags (t1 int)
|
||||||
|
'''
|
||||||
|
)
|
||||||
|
tdSql.execute(
|
||||||
|
'''
|
||||||
|
create table t1
|
||||||
|
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
|
||||||
|
'''
|
||||||
|
)
|
||||||
|
for i in range(4):
|
||||||
|
tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
|
||||||
|
|
||||||
|
|
||||||
|
def fiveDnodeThreeMnode(self,dnodeNumbers,mnodeNums,restartNumbers,stopRole):
|
||||||
|
tdLog.printNoPrefix("======== test case 1: ")
|
||||||
|
paraDict = {'dbName': 'db0_0',
|
||||||
|
'dropFlag': 1,
|
||||||
|
'event': '',
|
||||||
|
'vgroups': 4,
|
||||||
|
'replica': 1,
|
||||||
|
'stbName': 'stb',
|
||||||
|
'stbNumbers': 2,
|
||||||
|
'colPrefix': 'c',
|
||||||
|
'tagPrefix': 't',
|
||||||
|
'colSchema': [{'type': 'INT', 'count':1}, {'type': 'binary', 'len':20, 'count':1}],
|
||||||
|
'tagSchema': [{'type': 'INT', 'count':1}, {'type': 'binary', 'len':20, 'count':1}],
|
||||||
|
'ctbPrefix': 'ctb',
|
||||||
|
'ctbNum': 1000,
|
||||||
|
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
|
||||||
|
"rowsPerTbl": 100,
|
||||||
|
"batchNum": 5000
|
||||||
|
}
|
||||||
|
|
||||||
|
dnodeNumbers = int(dnodeNumbers)
|
||||||
|
mnodeNums = int(mnodeNums)
|
||||||
|
vnodeNumbers = int(dnodeNumbers-mnodeNums)
|
||||||
|
allctbNumbers = (paraDict['stbNumbers']*paraDict["ctbNum"])
|
||||||
|
rowsPerStb = paraDict["ctbNum"]*paraDict["rowsPerTbl"]
|
||||||
|
rowsall = rowsPerStb*paraDict['stbNumbers']
|
||||||
|
dbNumbers = 1
|
||||||
|
replica3 = 3
|
||||||
|
tdLog.info("first check dnode and mnode")
|
||||||
|
tdSql.query("select * from information_schema.ins_dnodes;")
|
||||||
|
tdSql.checkData(0,1,'%s:6030'%self.host)
|
||||||
|
tdSql.checkData(4,1,'%s:6430'%self.host)
|
||||||
|
clusterComCheck.checkDnodes(dnodeNumbers)
|
||||||
|
|
||||||
|
#check mnode status
|
||||||
|
tdLog.info("check mnode status")
|
||||||
|
clusterComCheck.checkMnodeStatus(mnodeNums)
|
||||||
|
|
||||||
|
# add some error operations and confirm the dnode status again
|
||||||
|
tdLog.info("Confirm the status of the dnode again")
|
||||||
|
tdSql.error("create mnode on dnode 2")
|
||||||
|
tdSql.query("select * from information_schema.ins_dnodes;")
|
||||||
|
print(tdSql.queryResult)
|
||||||
|
clusterComCheck.checkDnodes(dnodeNumbers)
|
||||||
|
|
||||||
|
# create database and stable
|
||||||
|
clusterComCreate.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], paraDict["vgroups"],paraDict['replica'])
|
||||||
|
tdLog.info("Take turns stopping Mnodes ")
|
||||||
|
|
||||||
|
tdDnodes=cluster.dnodes
|
||||||
|
stopcount =0
|
||||||
|
threads=[]
|
||||||
|
|
||||||
|
# create stable:stb_0
|
||||||
|
stableName= paraDict['stbName']
|
||||||
|
newTdSql=tdCom.newTdSql()
|
||||||
|
clusterComCreate.create_stables(newTdSql, paraDict["dbName"],stableName,paraDict['stbNumbers'])
|
||||||
|
#create child table:ctb_0
|
||||||
|
for i in range(paraDict['stbNumbers']):
|
||||||
|
stableName= '%s_%d'%(paraDict['stbName'],i)
|
||||||
|
newTdSql=tdCom.newTdSql()
|
||||||
|
clusterComCreate.create_ctable(newTdSql, paraDict["dbName"],stableName,stableName, paraDict['ctbNum'])
|
||||||
|
#insert data
|
||||||
|
for i in range(paraDict['stbNumbers']):
|
||||||
|
stableName= '%s_%d'%(paraDict['stbName'],i)
|
||||||
|
newTdSql=tdCom.newTdSql()
|
||||||
|
threads.append(threading.Thread(target=clusterComCreate.insert_data, args=(newTdSql, paraDict["dbName"],stableName,paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"],paraDict["startTs"])))
|
||||||
|
for tr in threads:
|
||||||
|
tr.start()
|
||||||
|
TdSqlEx=tdCom.newTdSql()
|
||||||
|
tdLog.info("alter database db0_0 replica 3")
|
||||||
|
TdSqlEx.execute('alter database db0_0 replica 3')
|
||||||
|
while stopcount < restartNumbers:
|
||||||
|
tdLog.info(" restart loop: %d"%stopcount )
|
||||||
|
if stopRole == "mnode":
|
||||||
|
for i in range(mnodeNums):
|
||||||
|
tdDnodes[i].stoptaosd()
|
||||||
|
# sleep(10)
|
||||||
|
tdDnodes[i].starttaosd()
|
||||||
|
# sleep(10)
|
||||||
|
elif stopRole == "vnode":
|
||||||
|
for i in range(vnodeNumbers):
|
||||||
|
tdDnodes[i+mnodeNums].stoptaosd()
|
||||||
|
# sleep(10)
|
||||||
|
tdDnodes[i+mnodeNums].starttaosd()
|
||||||
|
# sleep(10)
|
||||||
|
elif stopRole == "dnode":
|
||||||
|
for i in range(dnodeNumbers):
|
||||||
|
tdDnodes[i].stoptaosd()
|
||||||
|
# tdLog.info('select cast(c2 as nchar(10)) from db0_0.stb_1;')
|
||||||
|
# TdSqlEx.execute('select cast(c2 as nchar(10)) from db0_0.stb_1;')
|
||||||
|
# tdLog.info('select avg(c1) from db0_0.stb_0 interval(10s);')
|
||||||
|
# TdSqlEx.execute('select avg(c1) from db0_0.stb_0 interval(10s);')
|
||||||
|
# sleep(10)
|
||||||
|
tdDnodes[i].starttaosd()
|
||||||
|
# sleep(10)
|
||||||
|
# dnodeNumbers doesn't include the schema database
|
||||||
|
if clusterComCheck.checkDnodes(dnodeNumbers):
|
||||||
|
tdLog.info("123")
|
||||||
|
else:
|
||||||
|
print("456")
|
||||||
|
|
||||||
|
self.stopThread(threads)
|
||||||
|
tdLog.exit("one or more of dnodes failed to start ")
|
||||||
|
# self.check3mnode()
|
||||||
|
stopcount+=1
|
||||||
|
|
||||||
|
for tr in threads:
|
||||||
|
tr.join()
|
||||||
|
clusterComCheck.checkDnodes(dnodeNumbers)
|
||||||
|
clusterComCheck.checkDbRows(dbNumbers)
|
||||||
|
# clusterComCheck.checkDb(dbNumbers,1,paraDict["dbName"])
|
||||||
|
|
||||||
|
# tdSql.execute("use %s" %(paraDict["dbName"]))
|
||||||
|
tdSql.query("show %s.stables"%(paraDict["dbName"]))
|
||||||
|
tdSql.checkRows(paraDict["stbNumbers"])
|
||||||
|
# for i in range(paraDict['stbNumbers']):
|
||||||
|
# stableName= '%s.%s_%d'%(paraDict["dbName"],paraDict['stbName'],i)
|
||||||
|
# tdSql.query("select count(*) from %s"%stableName)
|
||||||
|
# tdSql.checkData(0,0,rowsPerStb)
|
||||||
|
clusterComCheck.check_vgroups_status(vgroup_numbers=paraDict["vgroups"],db_replica=replica3,db_name=paraDict["dbName"],count_number=240)
|
||||||
|
def run(self):
|
||||||
|
# print(self.master_dnode.cfgDict)
|
||||||
|
self.fiveDnodeThreeMnode(dnodeNumbers=6,mnodeNums=3,restartNumbers=4,stopRole='dnode')
|
||||||
|
|
||||||
|
def stop(self):
|
||||||
|
tdSql.close()
|
||||||
|
tdLog.success(f"{__file__} successfully executed")
|
||||||
|
|
||||||
|
tdCases.addLinux(__file__, TDTestCase())
|
||||||
|
tdCases.addWindows(__file__, TDTestCase())
|
|
@ -0,0 +1,196 @@
import taos
import sys
import time
import os

from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import TDDnodes
from util.dnodes import TDDnode
from util.cluster import *
sys.path.append("./6-cluster")
from clusterCommonCreate import *
from clusterCommonCheck import clusterComCheck

import socket
import subprocess
from multiprocessing import Process
import threading
import inspect
import ctypes

class TDTestCase:

    def init(self, conn, logSql, replicaVar=1):
        tdLog.debug(f"start to execute {__file__}")
        self.TDDnodes = None
        tdSql.init(conn.cursor())
        self.host = socket.gethostname()

    def getBuildPath(self):
        selfPath = os.path.dirname(os.path.realpath(__file__))

        if ("community" in selfPath):
            projPath = selfPath[:selfPath.find("community")]
        else:
            projPath = selfPath[:selfPath.find("tests")]

        for root, dirs, files in os.walk(projPath):
            if ("taosd" in files):
                rootRealPath = os.path.dirname(os.path.realpath(root))
                if ("packaging" not in rootRealPath):
                    buildPath = root[:len(root) - len("/build/bin")]
                    break
        return buildPath

    def _async_raise(self, tid, exctype):
        """raises the exception, performs cleanup if needed"""
        if not inspect.isclass(exctype):
            exctype = type(exctype)
        res = ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, ctypes.py_object(exctype))
        if res == 0:
            raise ValueError("invalid thread id")
        elif res != 1:
            # if it returns a number greater than one, you're in trouble,
            # and you should call it again with exc=NULL to revert the effect
            ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)
            raise SystemError("PyThreadState_SetAsyncExc failed")

    def stopThread(self,thread):
        self._async_raise(thread.ident, SystemExit)

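    # Editorial sketch (hypothetical, not in the original commit):
    # PyThreadState_SetAsyncExc only delivers the exception the next time the
    # target thread executes Python bytecode, so a thread blocked in a C call
    # may linger. A wrapper that tolerates already-finished threads and waits
    # briefly for the victim to unwind could look like this:
    def stopThreadSafely(self, thread, timeout=5):
        if not thread.is_alive():
            return                                  # nothing to stop
        self._async_raise(thread.ident, SystemExit)
        thread.join(timeout)                        # give it a chance to exit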
    def insertData(self,countstart,countstop):
        # first add data : db\stable\childtable\general table

        for couti in range(countstart,countstop):
            tdLog.debug("drop database if exists db%d" %couti)
            tdSql.execute("drop database if exists db%d" %couti)
            print("create database if not exists db%d replica 1 duration 300" %couti)
            tdSql.execute("create database if not exists db%d replica 1 duration 300" %couti)
            tdSql.execute("use db%d" %couti)
            tdSql.execute(
                '''create table stb1
                (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
                tags (t1 int)
                '''
            )
            tdSql.execute(
                '''
                create table t1
                (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
                '''
            )
            for i in range(4):
                tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')

    def fiveDnodeThreeMnode(self,dnodeNumbers,mnodeNums,restartNumbers,stopRole):
        tdLog.printNoPrefix("======== test case 1: ")
        paraDict = {'dbName':     'db0_0',
                    'dropFlag':   1,
                    'event':      '',
                    'vgroups':    4,
                    'replica':    3,
                    'stbName':    'stb',
                    'stbNumbers': 2,
                    'colPrefix':  'c',
                    'tagPrefix':  't',
                    'colSchema':  [{'type': 'INT', 'count':1}, {'type': 'binary', 'len':20, 'count':1}],
                    'tagSchema':  [{'type': 'INT', 'count':1}, {'type': 'binary', 'len':20, 'count':1}],
                    'ctbPrefix':  'ctb',
                    'ctbNum':     1,
                    'startTs':    1640966400000, # 2022-01-01 00:00:00.000
                    "rowsPerTbl": 1,
                    "batchNum":   5000
                    }

        dnodeNumbers=int(dnodeNumbers)
        mnodeNums=int(mnodeNums)
        vnodeNumbers = int(dnodeNumbers-mnodeNums)
        replica1 = 1
        replica3 = 3
        allctbNumbers=(paraDict['stbNumbers']*paraDict["ctbNum"])
        rowsPerStb=paraDict["ctbNum"]*paraDict["rowsPerTbl"]
        rowsall=rowsPerStb*paraDict['stbNumbers']
        dbNumbers = 1

        tdLog.info("first check dnode and mnode")
        tdSql.query("select * from information_schema.ins_dnodes;")
        tdSql.checkData(0,1,'%s:6030'%self.host)
        tdSql.checkData(4,1,'%s:6430'%self.host)
        clusterComCheck.checkDnodes(dnodeNumbers)

        # check mnode status
        tdLog.info("check mnode status")
        clusterComCheck.checkMnodeStatus(mnodeNums)

        # add some error operations and confirm the status of the dnode again
        tdLog.info("Confirm the status of the dnode again")
        tdSql.error("create mnode on dnode 2")
        tdSql.query("select * from information_schema.ins_dnodes;")
        print(tdSql.queryResult)
        clusterComCheck.checkDnodes(dnodeNumbers)

        # create database and stable
        clusterComCreate.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], paraDict["vgroups"],paraDict['replica'])
        tdLog.info("Take turns stopping Mnodes ")

        tdDnodes=cluster.dnodes
        stopcount =0
        threads=[]

        # create stable:stb_0
        stableName= paraDict['stbName']
        newTdSql=tdCom.newTdSql()
        clusterComCreate.create_stables(newTdSql, paraDict["dbName"],stableName,paraDict['stbNumbers'])
        # create child table:ctb_0
        for i in range(paraDict['stbNumbers']):
            stableName= '%s_%d'%(paraDict['stbName'],i)
            newTdSql=tdCom.newTdSql()
            clusterComCreate.create_ctable(newTdSql, paraDict["dbName"],stableName,stableName, paraDict['ctbNum'])
        # insert data
        for i in range(paraDict['stbNumbers']):
            stableName= '%s_%d'%(paraDict['stbName'],i)
            newTdSql=tdCom.newTdSql()
            threads.append(threading.Thread(target=clusterComCreate.insert_data, args=(newTdSql, paraDict["dbName"],stableName,paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"],paraDict["startTs"])))
        for tr in threads:
            tr.start()
        TdSqlEx=tdCom.newTdSql()
        tdLog.info(f"alter database db0_0 replica {replica1}")
        TdSqlEx.execute(f'alter database db0_0 replica {replica1}')
        for tr in threads:
            tr.join()
        clusterComCheck.checkDnodes(dnodeNumbers)
        clusterComCheck.checkDbRows(dbNumbers)
        # clusterComCheck.checkDb(dbNumbers,1,paraDict["dbName"])

        # tdSql.execute("use %s" %(paraDict["dbName"]))
        tdSql.query("show %s.stables"%(paraDict["dbName"]))
        tdSql.checkRows(paraDict["stbNumbers"])
        for i in range(paraDict['stbNumbers']):
            stableName= '%s.%s_%d'%(paraDict["dbName"],paraDict['stbName'],i)
            tdSql.query("select count(*) from %s"%stableName)
            tdSql.checkData(0,0,rowsPerStb)

        clusterComCheck.check_vgroups_status(vgroup_numbers=paraDict["vgroups"],db_replica=replica1,db_name=paraDict["dbName"],count_number=20)
        sleep(5)
        tdLog.info(f"show transactions;alter database db0_0 replica {replica3};")
        TdSqlEx.execute(f'show transactions;')
        TdSqlEx.execute(f'alter database db0_0 replica {replica3};')
        clusterComCheck.check_vgroups_status(vgroup_numbers=paraDict["vgroups"],db_replica=replica3,db_name=paraDict["dbName"],count_number=120)

    def run(self):
        # print(self.master_dnode.cfgDict)
        self.fiveDnodeThreeMnode(dnodeNumbers=6,mnodeNums=3,restartNumbers=4,stopRole='dnode')

    def stop(self):
        tdSql.close()
        tdLog.success(f"{__file__} successfully executed")

tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
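The test above alters the replica count of db0_0 while writer threads are still running, then relies on check_vgroups_status to poll until every vgroup reports the new replica number. A minimal standalone sketch of the same waiting idea, assuming the TDengine Python connector (taospy) and the `replica` column of information_schema.ins_databases; the helper name and polling interval are illustrative, not taken from the test framework:

    import time
    import taos  # TDengine Python connector (taospy)

    def wait_for_replica(conn, db_name, expected, timeout=120):
        # Poll until the database reports the expected replica count.
        deadline = time.time() + timeout
        while time.time() < deadline:
            result = conn.query(
                "select `replica` from information_schema.ins_databases "
                "where name='%s'" % db_name)
            rows = result.fetch_all()
            if rows and rows[0][0] == expected:
                return True
            time.sleep(1)
        return False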
@@ -0,0 +1,191 @@
import taos
import sys
import time
import os

from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import TDDnodes
from util.dnodes import TDDnode
from util.cluster import *
sys.path.append("./6-cluster")
from clusterCommonCreate import *
from clusterCommonCheck import clusterComCheck

import socket
import subprocess
from multiprocessing import Process
import threading
import inspect
import ctypes

class TDTestCase:

    def init(self, conn, logSql, replicaVar=1):
        tdLog.debug(f"start to execute {__file__}")
        self.TDDnodes = None
        tdSql.init(conn.cursor())
        self.host = socket.gethostname()

    def getBuildPath(self):
        selfPath = os.path.dirname(os.path.realpath(__file__))

        if ("community" in selfPath):
            projPath = selfPath[:selfPath.find("community")]
        else:
            projPath = selfPath[:selfPath.find("tests")]

        for root, dirs, files in os.walk(projPath):
            if ("taosd" in files):
                rootRealPath = os.path.dirname(os.path.realpath(root))
                if ("packaging" not in rootRealPath):
                    buildPath = root[:len(root) - len("/build/bin")]
                    break
        return buildPath

    def _async_raise(self, tid, exctype):
        """raises the exception, performs cleanup if needed"""
        if not inspect.isclass(exctype):
            exctype = type(exctype)
        res = ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, ctypes.py_object(exctype))
        if res == 0:
            raise ValueError("invalid thread id")
        elif res != 1:
            # if it returns a number greater than one, you're in trouble,
            # and you should call it again with exc=NULL to revert the effect
            ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)
            raise SystemError("PyThreadState_SetAsyncExc failed")

    def stopThread(self,thread):
        self._async_raise(thread.ident, SystemExit)

    def insertData(self,dbname,tableCount,rowsPerCount):
        # tableCount : number of child tables to create
        # rowsPerCount : rows per table
        # first add data : db\stable\childtable\general table
        # taosBenchmark: -t is the number of tables, -n is the rows per table
        os.system(f"taosBenchmark -d {dbname} -t {tableCount} -n {rowsPerCount} -z 1 -k 10000 -y ")

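    # Editorial sketch (hypothetical, not in the original commit): os.system()
    # above silently ignores the exit status of taosBenchmark. A hedged variant
    # using subprocess.run would fail the test as soon as the load step fails:
    def insertDataChecked(self, dbname, tableCount, rowsPerCount):
        cmd = ["taosBenchmark", "-d", dbname, "-t", str(tableCount),
               "-n", str(rowsPerCount), "-y"]
        subprocess.run(cmd, check=True)   # raises CalledProcessError on failure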
    def fiveDnodeThreeMnode(self,dnodeNumbers,mnodeNums,restartNumbers,stopRole):
        tdLog.printNoPrefix("======== test case 1: ")
        paraDict = {'dbName':     'db0_0',
                    'dropFlag':   1,
                    'event':      '',
                    'vgroups':    4,
                    'replica':    1,
                    'stbName':    'stb',
                    'stbNumbers': 2,
                    'colPrefix':  'c',
                    'tagPrefix':  't',
                    'colSchema':  [{'type': 'INT', 'count':1}, {'type': 'binary', 'len':20, 'count':1}],
                    'tagSchema':  [{'type': 'INT', 'count':1}, {'type': 'binary', 'len':20, 'count':1}],
                    'ctbPrefix':  'ctb',
                    'ctbNum':     10000,
                    'startTs':    1640966400000, # 2022-01-01 00:00:00.000
                    "rowsPerTbl": 10000,
                    "batchNum":   5000
                    }

        dnodeNumbers=int(dnodeNumbers)
        mnodeNums=int(mnodeNums)
        vnodeNumbers = int(dnodeNumbers-mnodeNums)
        allctbNumbers=(paraDict['stbNumbers']*paraDict["ctbNum"])
        rowsPerStb=paraDict["ctbNum"]*paraDict["rowsPerTbl"]
        rowsall=rowsPerStb*paraDict['stbNumbers']
        dbNumbers = 1

        tdLog.info("first check dnode and mnode")
        tdSql.query("select * from information_schema.ins_dnodes;")
        tdSql.checkData(0,1,'%s:6030'%self.host)
        tdSql.checkData(4,1,'%s:6430'%self.host)
        clusterComCheck.checkDnodes(dnodeNumbers)

        # check mnode status
        tdLog.info("check mnode status")
        clusterComCheck.checkMnodeStatus(mnodeNums)

        # add some error operations and confirm the status of the dnode again
        tdLog.info("Confirm the status of the dnode again")
        tdSql.error("create mnode on dnode 2")
        tdSql.query("select * from information_schema.ins_dnodes;")
        print(tdSql.queryResult)
        clusterComCheck.checkDnodes(dnodeNumbers)

        # create database and stable
        tdLog.info("Take turns stopping Mnodes ")

        tdDnodes=cluster.dnodes
        stopcount =0
        threads=[]

        # create stable:stb_0
        threads.append(threading.Thread(target=self.insertData, args=(paraDict["dbName"],paraDict["ctbNum"],paraDict["rowsPerTbl"])))
        for tr in threads:
            tr.start()
        TdSqlEx=tdCom.newTdSql()
        tdLog.info("alter database db0_0 replica 3")
        TdSqlEx.execute('alter database db0_0 replica 3')
        while stopcount < restartNumbers:
            tdLog.info(" restart loop: %d"%stopcount )
            if stopRole == "mnode":
                for i in range(mnodeNums):
                    tdDnodes[i].stoptaosd()
                    # sleep(10)
                    tdDnodes[i].starttaosd()
                    # sleep(10)
            elif stopRole == "vnode":
                for i in range(vnodeNumbers):
                    tdDnodes[i+mnodeNums].stoptaosd()
                    # sleep(10)
                    tdDnodes[i+mnodeNums].starttaosd()
                    # sleep(10)
            elif stopRole == "dnode":
                for i in range(dnodeNumbers):
                    tdDnodes[i].stoptaosd()
                    # tdLog.info('select cast(c2 as nchar(10)) from db0_0.stb_1;')
                    # TdSqlEx.execute('select cast(c2 as nchar(10)) from db0_0.stb_1;')
                    # tdLog.info('select avg(c1) from db0_0.stb_0 interval(10s);')
                    # TdSqlEx.execute('select avg(c1) from db0_0.stb_0 interval(10s);')
                    # sleep(10)
                    tdDnodes[i].starttaosd()
                    # sleep(10)

            # dnodeNumbers doesn't include the schema database
            if clusterComCheck.checkDnodes(dnodeNumbers):
                tdLog.info("all dnodes are ready")
            else:
                print("some dnodes failed to restart")
                self.stopThread(threads)
                tdLog.exit("one or more of dnodes failed to start ")
                # self.check3mnode()
            stopcount+=1

        for tr in threads:
            tr.join()
        clusterComCheck.checkDnodes(dnodeNumbers)
        clusterComCheck.checkDbRows(dbNumbers)
        # clusterComCheck.checkDb(dbNumbers,1,paraDict["dbName"])

        # tdSql.execute("use %s" %(paraDict["dbName"]))
        tdSql.query("show %s.stables"%(paraDict["dbName"]))
        tdSql.checkRows(paraDict["stbNumbers"])
        for i in range(paraDict['stbNumbers']):
            stableName= '%s.%s_%d'%(paraDict["dbName"],paraDict['stbName'],i)
            tdSql.query("select count(*) from %s"%stableName)
            tdSql.checkData(0,0,rowsPerStb)
        clusterComCheck.check_vgroups_status(vgroup_numbers=paraDict["vgroups"],db_replica=3,db_name=paraDict["dbName"],count_number=240)

    def run(self):
        # print(self.master_dnode.cfgDict)
        self.fiveDnodeThreeMnode(dnodeNumbers=6,mnodeNums=3,restartNumbers=4,stopRole='dnode')

    def stop(self):
        tdSql.close()
        tdLog.success(f"{__file__} successfully executed")

tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
@@ -336,7 +336,7 @@ class TDTestCase:
         for i in range(expectRows):
             totalConsumeRows += resultList[i]

-        if totalConsumeRows >= expectrowcnt or totalConsumeRows <= 0:
+        if totalConsumeRows > expectrowcnt or totalConsumeRows <= 0:
             tdLog.info("act consume rows: %d, expect consume rows between %d and 0"%(totalConsumeRows, expectrowcnt))
             tdLog.exit("tmq consume rows error!")
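Changing `>=` to `>` accepts the boundary case where the consumer drains exactly expectrowcnt rows; only over-consumption or an empty result still fails. Restated as a standalone predicate (an editorial sketch, not code from the commit):

    def consume_count_ok(total_consumed, expected):
        # valid when 0 < total_consumed <= expected: an exact drain now passes
        return 0 < total_consumed <= expected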
@@ -226,12 +226,11 @@ class TDTestCase:
         self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)

         tdLog.info("start consume processor")
-        pollDelay = 5
+        pollDelay = 10
         showMsg = 1
         showRow = 1
         self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow)

-        time.sleep(5)
         self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"])
         self.insert_data(tdSql,\
                          parameterDict["dbName"],\
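With the fixed time.sleep(5) removed, child-table creation starts as soon as the consumer process is launched. Where a test genuinely has to wait for a condition, polling with a timeout is more robust than a fixed delay; a hedged generic helper (name and defaults are illustrative, not from the commit):

    import time

    def wait_until(predicate, timeout=10, interval=0.5):
        # Poll predicate() until it returns True or the timeout expires.
        deadline = time.time() + timeout
        while time.time() < deadline:
            if predicate():
                return True
            time.sleep(interval)
        return False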
@@ -307,7 +306,7 @@ class TDTestCase:
         self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)

         tdLog.info("start consume processor")
-        pollDelay = 5
+        pollDelay = 10
         showMsg = 1
         showRow = 1
         self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow)