Merge branch '3.0' of github.com:taosdata/TDengine into 3.0
This commit is contained in:
commit
2733287087
|
@ -75,13 +75,18 @@ typedef uint16_t tmsg_t;
|
|||
#define TSDB_IE_TYPE_DNODE_EXT 6
|
||||
#define TSDB_IE_TYPE_DNODE_STATE 7
|
||||
|
||||
enum { CONN_TYPE__QUERY = 1, CONN_TYPE__TMQ, CONN_TYPE__UDFD, CONN_TYPE__MAX };
|
||||
enum {
|
||||
CONN_TYPE__QUERY = 1,
|
||||
CONN_TYPE__TMQ,
|
||||
CONN_TYPE__UDFD,
|
||||
CONN_TYPE__MAX,
|
||||
};
|
||||
|
||||
enum {
|
||||
HEARTBEAT_KEY_USER_AUTHINFO = 1,
|
||||
HEARTBEAT_KEY_DBINFO,
|
||||
HEARTBEAT_KEY_STBINFO,
|
||||
HEARTBEAT_KEY_MQ_TMP,
|
||||
HEARTBEAT_KEY_TMQ,
|
||||
};
|
||||
|
||||
typedef enum _mgmt_table {
|
||||
|
@ -2146,6 +2151,15 @@ typedef struct {
|
|||
char cgroup[TSDB_CGROUP_LEN];
|
||||
} SMqAskEpReq;
|
||||
|
||||
typedef struct {
|
||||
int64_t consumerId;
|
||||
int32_t epoch;
|
||||
} SMqHbReq;
|
||||
|
||||
typedef struct {
|
||||
int8_t reserved;
|
||||
} SMqHbRsp;
|
||||
|
||||
typedef struct {
|
||||
int32_t key;
|
||||
int32_t valueLen;
|
||||
|
@ -2335,29 +2349,30 @@ static FORCE_INLINE int32_t tDecodeSClientHbKey(SDecoder* pDecoder, SClientHbKey
|
|||
return 0;
|
||||
}
|
||||
|
||||
typedef struct SMqHbVgInfo {
|
||||
typedef struct {
|
||||
int32_t vgId;
|
||||
} SMqHbVgInfo;
|
||||
// TODO stas
|
||||
} SMqReportVgInfo;
|
||||
|
||||
static FORCE_INLINE int32_t taosEncodeSMqVgInfo(void** buf, const SMqHbVgInfo* pVgInfo) {
|
||||
static FORCE_INLINE int32_t taosEncodeSMqVgInfo(void** buf, const SMqReportVgInfo* pVgInfo) {
|
||||
int32_t tlen = 0;
|
||||
tlen += taosEncodeFixedI32(buf, pVgInfo->vgId);
|
||||
return tlen;
|
||||
}
|
||||
|
||||
static FORCE_INLINE void* taosDecodeSMqVgInfo(void* buf, SMqHbVgInfo* pVgInfo) {
|
||||
static FORCE_INLINE void* taosDecodeSMqVgInfo(void* buf, SMqReportVgInfo* pVgInfo) {
|
||||
buf = taosDecodeFixedI32(buf, &pVgInfo->vgId);
|
||||
return buf;
|
||||
}
|
||||
|
||||
typedef struct SMqHbTopicInfo {
|
||||
typedef struct {
|
||||
int32_t epoch;
|
||||
int64_t topicUid;
|
||||
char name[TSDB_TOPIC_FNAME_LEN];
|
||||
SArray* pVgInfo; // SArray<SMqHbVgInfo>
|
||||
} SMqHbTopicInfo;
|
||||
} SMqTopicInfo;
|
||||
|
||||
static FORCE_INLINE int32_t taosEncodeSMqHbTopicInfoMsg(void** buf, const SMqHbTopicInfo* pTopicInfo) {
|
||||
static FORCE_INLINE int32_t taosEncodeSMqTopicInfoMsg(void** buf, const SMqTopicInfo* pTopicInfo) {
|
||||
int32_t tlen = 0;
|
||||
tlen += taosEncodeFixedI32(buf, pTopicInfo->epoch);
|
||||
tlen += taosEncodeFixedI64(buf, pTopicInfo->topicUid);
|
||||
|
@ -2365,35 +2380,35 @@ static FORCE_INLINE int32_t taosEncodeSMqHbTopicInfoMsg(void** buf, const SMqHbT
|
|||
int32_t sz = taosArrayGetSize(pTopicInfo->pVgInfo);
|
||||
tlen += taosEncodeFixedI32(buf, sz);
|
||||
for (int32_t i = 0; i < sz; i++) {
|
||||
SMqHbVgInfo* pVgInfo = (SMqHbVgInfo*)taosArrayGet(pTopicInfo->pVgInfo, i);
|
||||
SMqReportVgInfo* pVgInfo = (SMqReportVgInfo*)taosArrayGet(pTopicInfo->pVgInfo, i);
|
||||
tlen += taosEncodeSMqVgInfo(buf, pVgInfo);
|
||||
}
|
||||
return tlen;
|
||||
}
|
||||
|
||||
static FORCE_INLINE void* taosDecodeSMqHbTopicInfoMsg(void* buf, SMqHbTopicInfo* pTopicInfo) {
|
||||
static FORCE_INLINE void* taosDecodeSMqTopicInfoMsg(void* buf, SMqTopicInfo* pTopicInfo) {
|
||||
buf = taosDecodeFixedI32(buf, &pTopicInfo->epoch);
|
||||
buf = taosDecodeFixedI64(buf, &pTopicInfo->topicUid);
|
||||
buf = taosDecodeStringTo(buf, pTopicInfo->name);
|
||||
int32_t sz;
|
||||
buf = taosDecodeFixedI32(buf, &sz);
|
||||
pTopicInfo->pVgInfo = taosArrayInit(sz, sizeof(SMqHbVgInfo));
|
||||
pTopicInfo->pVgInfo = taosArrayInit(sz, sizeof(SMqReportVgInfo));
|
||||
for (int32_t i = 0; i < sz; i++) {
|
||||
SMqHbVgInfo vgInfo;
|
||||
SMqReportVgInfo vgInfo;
|
||||
buf = taosDecodeSMqVgInfo(buf, &vgInfo);
|
||||
taosArrayPush(pTopicInfo->pVgInfo, &vgInfo);
|
||||
}
|
||||
return buf;
|
||||
}
|
||||
|
||||
typedef struct SMqHbMsg {
|
||||
typedef struct {
|
||||
int32_t status; // ask hb endpoint
|
||||
int32_t epoch;
|
||||
int64_t consumerId;
|
||||
SArray* pTopics; // SArray<SMqHbTopicInfo>
|
||||
} SMqHbMsg;
|
||||
} SMqReportReq;
|
||||
|
||||
static FORCE_INLINE int32_t taosEncodeSMqMsg(void** buf, const SMqHbMsg* pMsg) {
|
||||
static FORCE_INLINE int32_t taosEncodeSMqReportMsg(void** buf, const SMqReportReq* pMsg) {
|
||||
int32_t tlen = 0;
|
||||
tlen += taosEncodeFixedI32(buf, pMsg->status);
|
||||
tlen += taosEncodeFixedI32(buf, pMsg->epoch);
|
||||
|
@ -2401,22 +2416,22 @@ static FORCE_INLINE int32_t taosEncodeSMqMsg(void** buf, const SMqHbMsg* pMsg) {
|
|||
int32_t sz = taosArrayGetSize(pMsg->pTopics);
|
||||
tlen += taosEncodeFixedI32(buf, sz);
|
||||
for (int32_t i = 0; i < sz; i++) {
|
||||
SMqHbTopicInfo* topicInfo = (SMqHbTopicInfo*)taosArrayGet(pMsg->pTopics, i);
|
||||
tlen += taosEncodeSMqHbTopicInfoMsg(buf, topicInfo);
|
||||
SMqTopicInfo* topicInfo = (SMqTopicInfo*)taosArrayGet(pMsg->pTopics, i);
|
||||
tlen += taosEncodeSMqTopicInfoMsg(buf, topicInfo);
|
||||
}
|
||||
return tlen;
|
||||
}
|
||||
|
||||
static FORCE_INLINE void* taosDecodeSMqMsg(void* buf, SMqHbMsg* pMsg) {
|
||||
static FORCE_INLINE void* taosDecodeSMqReportMsg(void* buf, SMqReportReq* pMsg) {
|
||||
buf = taosDecodeFixedI32(buf, &pMsg->status);
|
||||
buf = taosDecodeFixedI32(buf, &pMsg->epoch);
|
||||
buf = taosDecodeFixedI64(buf, &pMsg->consumerId);
|
||||
int32_t sz;
|
||||
buf = taosDecodeFixedI32(buf, &sz);
|
||||
pMsg->pTopics = taosArrayInit(sz, sizeof(SMqHbTopicInfo));
|
||||
pMsg->pTopics = taosArrayInit(sz, sizeof(SMqTopicInfo));
|
||||
for (int32_t i = 0; i < sz; i++) {
|
||||
SMqHbTopicInfo topicInfo;
|
||||
buf = taosDecodeSMqHbTopicInfoMsg(buf, &topicInfo);
|
||||
SMqTopicInfo topicInfo;
|
||||
buf = taosDecodeSMqTopicInfoMsg(buf, &topicInfo);
|
||||
taosArrayPush(pMsg->pTopics, &topicInfo);
|
||||
}
|
||||
return buf;
|
||||
|
@ -2921,89 +2936,6 @@ typedef struct {
|
|||
int32_t tEncodeSMqDataRsp(SEncoder* pEncoder, const SMqDataRsp* pRsp);
|
||||
int32_t tDecodeSMqDataRsp(SDecoder* pDecoder, SMqDataRsp* pRsp);
|
||||
|
||||
#if 0
|
||||
typedef struct {
|
||||
SMqRspHead head;
|
||||
int64_t reqOffset;
|
||||
int64_t rspOffset;
|
||||
int32_t skipLogNum;
|
||||
int32_t blockNum;
|
||||
int8_t withTbName;
|
||||
int8_t withSchema;
|
||||
SArray* blockDataLen; // SArray<int32_t>
|
||||
SArray* blockData; // SArray<SRetrieveTableRsp*>
|
||||
SArray* blockTbName; // SArray<char*>
|
||||
SArray* blockSchema; // SArray<SSchemaWrapper>
|
||||
} SMqDataBlkRsp;
|
||||
|
||||
static FORCE_INLINE int32_t tEncodeSMqDataBlkRsp(void** buf, const SMqDataBlkRsp* pRsp) {
|
||||
int32_t tlen = 0;
|
||||
tlen += taosEncodeFixedI64(buf, pRsp->reqOffset);
|
||||
tlen += taosEncodeFixedI64(buf, pRsp->rspOffset);
|
||||
tlen += taosEncodeFixedI32(buf, pRsp->skipLogNum);
|
||||
tlen += taosEncodeFixedI32(buf, pRsp->blockNum);
|
||||
if (pRsp->blockNum != 0) {
|
||||
tlen += taosEncodeFixedI8(buf, pRsp->withTbName);
|
||||
tlen += taosEncodeFixedI8(buf, pRsp->withSchema);
|
||||
|
||||
for (int32_t i = 0; i < pRsp->blockNum; i++) {
|
||||
int32_t bLen = *(int32_t*)taosArrayGet(pRsp->blockDataLen, i);
|
||||
void* data = taosArrayGetP(pRsp->blockData, i);
|
||||
tlen += taosEncodeFixedI32(buf, bLen);
|
||||
tlen += taosEncodeBinary(buf, data, bLen);
|
||||
if (pRsp->withSchema) {
|
||||
SSchemaWrapper* pSW = (SSchemaWrapper*)taosArrayGetP(pRsp->blockSchema, i);
|
||||
tlen += taosEncodeSSchemaWrapper(buf, pSW);
|
||||
}
|
||||
if (pRsp->withTbName) {
|
||||
char* tbName = (char*)taosArrayGetP(pRsp->blockTbName, i);
|
||||
tlen += taosEncodeString(buf, tbName);
|
||||
}
|
||||
}
|
||||
}
|
||||
return tlen;
|
||||
}
|
||||
|
||||
static FORCE_INLINE void* tDecodeSMqDataBlkRsp(const void* buf, SMqDataBlkRsp* pRsp) {
|
||||
buf = taosDecodeFixedI64(buf, &pRsp->reqOffset);
|
||||
buf = taosDecodeFixedI64(buf, &pRsp->rspOffset);
|
||||
buf = taosDecodeFixedI32(buf, &pRsp->skipLogNum);
|
||||
buf = taosDecodeFixedI32(buf, &pRsp->blockNum);
|
||||
if (pRsp->blockNum != 0) {
|
||||
pRsp->blockData = taosArrayInit(pRsp->blockNum, sizeof(void*));
|
||||
pRsp->blockDataLen = taosArrayInit(pRsp->blockNum, sizeof(int32_t));
|
||||
buf = taosDecodeFixedI8(buf, &pRsp->withTbName);
|
||||
buf = taosDecodeFixedI8(buf, &pRsp->withSchema);
|
||||
if (pRsp->withTbName) {
|
||||
pRsp->blockTbName = taosArrayInit(pRsp->blockNum, sizeof(void*));
|
||||
}
|
||||
if (pRsp->withSchema) {
|
||||
pRsp->blockSchema = taosArrayInit(pRsp->blockNum, sizeof(void*));
|
||||
}
|
||||
|
||||
for (int32_t i = 0; i < pRsp->blockNum; i++) {
|
||||
int32_t bLen = 0;
|
||||
void* data = NULL;
|
||||
buf = taosDecodeFixedI32(buf, &bLen);
|
||||
buf = taosDecodeBinary(buf, &data, bLen);
|
||||
taosArrayPush(pRsp->blockDataLen, &bLen);
|
||||
taosArrayPush(pRsp->blockData, &data);
|
||||
if (pRsp->withSchema) {
|
||||
SSchemaWrapper* pSW = (SSchemaWrapper*)taosMemoryMalloc(sizeof(SSchemaWrapper));
|
||||
buf = taosDecodeSSchemaWrapper(buf, pSW);
|
||||
taosArrayPush(pRsp->blockSchema, &pSW);
|
||||
}
|
||||
if (pRsp->withTbName) {
|
||||
char* name = NULL;
|
||||
buf = taosDecodeString(buf, &name);
|
||||
taosArrayPush(pRsp->blockTbName, &name);
|
||||
}
|
||||
}
|
||||
}
|
||||
return (void*)buf;
|
||||
}
|
||||
#endif
|
||||
|
||||
typedef struct {
|
||||
SMqRspHead head;
|
||||
char cgroup[TSDB_CGROUP_LEN];
|
||||
|
|
|
@ -144,6 +144,7 @@ enum {
|
|||
TD_DEF_MSG_TYPE(TDMT_MND_MQ_ASK_EP, "ask-ep", SMqAskEpReq, SMqAskEpRsp)
|
||||
TD_DEF_MSG_TYPE(TDMT_MND_MQ_CONSUMER_LOST, "consumer-lost", SMqConsumerLostMsg, NULL)
|
||||
TD_DEF_MSG_TYPE(TDMT_MND_MQ_CONSUMER_RECOVER, "consumer-recover", SMqConsumerRecoverMsg, NULL)
|
||||
TD_DEF_MSG_TYPE(TDMT_MND_MQ_HB, "consumer-hb", NULL, NULL)
|
||||
TD_DEF_MSG_TYPE(TDMT_MND_MQ_DO_REBALANCE, "do-rebalance", SMqDoRebalanceMsg, NULL)
|
||||
TD_DEF_MSG_TYPE(TDMT_MND_MQ_DROP_CGROUP, "drop-cgroup", SMqDropCGroupReq, SMqDropCGroupRsp)
|
||||
TD_DEF_MSG_TYPE(TDMT_MND_MQ_COMMIT_OFFSET, "mnode-commit-offset", SMqCMCommitOffsetReq, SMqCMCommitOffsetRsp)
|
||||
|
|
|
@ -113,6 +113,7 @@ int32_t twaScalarFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *
|
|||
int32_t mavgScalarFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);
|
||||
int32_t hllScalarFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);
|
||||
int32_t csumScalarFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);
|
||||
int32_t diffScalarFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
|
|
|
@ -135,7 +135,7 @@ void rpcSendRequestWithCtx(void *thandle, const SEpSet *pEpSet, SRpcMsg *pMsg
|
|||
int32_t rpcGetConnInfo(void *thandle, SRpcConnInfo *pInfo);
|
||||
void rpcSendRecv(void *shandle, SEpSet *pEpSet, SRpcMsg *pReq, SRpcMsg *pRsp);
|
||||
void rpcSetDefaultAddr(void *thandle, const char *ip, const char *fqdn);
|
||||
int64_t rpcAllocHandle();
|
||||
void* rpcAllocHandle();
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
|
|
|
@ -332,7 +332,6 @@ int32_t* taosGetErrno();
|
|||
#define TSDB_CODE_VND_INVALID_TABLE_ACTION TAOS_DEF_ERROR_CODE(0, 0x0519)
|
||||
#define TSDB_CODE_VND_COL_ALREADY_EXISTS TAOS_DEF_ERROR_CODE(0, 0x051a)
|
||||
#define TSDB_CODE_VND_TABLE_COL_NOT_EXISTS TAOS_DEF_ERROR_CODE(0, 0x051b)
|
||||
#define TSDB_CODE_VND_READ_END TAOS_DEF_ERROR_CODE(0, 0x051c)
|
||||
|
||||
// tsdb
|
||||
#define TSDB_CODE_TDB_INVALID_TABLE_ID TAOS_DEF_ERROR_CODE(0, 0x0600)
|
||||
|
|
|
@ -171,6 +171,7 @@ static int32_t hbQueryHbRspHandle(SAppHbMgr *pAppHbMgr, SClientHbRsp *pRsp) {
|
|||
pTscObj->pAppInfo->totalDnodes = pRsp->query->totalDnodes;
|
||||
pTscObj->pAppInfo->onlineDnodes = pRsp->query->onlineDnodes;
|
||||
pTscObj->connId = pRsp->query->connId;
|
||||
tscTrace("conn %p hb rsp, dnodes %d/%d", pTscObj->connId, pTscObj->pAppInfo->onlineDnodes, pTscObj->pAppInfo->totalDnodes);
|
||||
|
||||
if (pRsp->query->killRid) {
|
||||
tscDebug("request rid %" PRIx64 " need to be killed now", pRsp->query->killRid);
|
||||
|
@ -294,6 +295,7 @@ static int32_t hbAsyncCallBack(void *param, SDataBuf *pMsg, int32_t code) {
|
|||
|
||||
if (code != 0) {
|
||||
(*pInst)->onlineDnodes = ((*pInst)->totalDnodes ? 0 : -1);
|
||||
tscDebug("hb rsp error %s, update server status %d/%d", tstrerror(code), (*pInst)->onlineDnodes, (*pInst)->totalDnodes);
|
||||
}
|
||||
|
||||
if (rspNum) {
|
||||
|
@ -571,7 +573,8 @@ int32_t hbQueryHbReqHandle(SClientHbKey *connKey, void *param, SClientHbReq *req
|
|||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
void hbMgrInitMqHbHandle() {
|
||||
static FORCE_INLINE void hbMgrInitHandle() {
|
||||
// init all handle
|
||||
clientHbMgr.reqHandle[CONN_TYPE__QUERY] = hbQueryHbReqHandle;
|
||||
clientHbMgr.reqHandle[CONN_TYPE__TMQ] = hbMqHbReqHandle;
|
||||
|
||||
|
@ -579,11 +582,6 @@ void hbMgrInitMqHbHandle() {
|
|||
clientHbMgr.rspHandle[CONN_TYPE__TMQ] = hbMqHbRspHandle;
|
||||
}
|
||||
|
||||
static FORCE_INLINE void hbMgrInitHandle() {
|
||||
// init all handle
|
||||
hbMgrInitMqHbHandle();
|
||||
}
|
||||
|
||||
SClientHbBatchReq *hbGatherAllInfo(SAppHbMgr *pAppHbMgr) {
|
||||
SClientHbBatchReq *pBatchReq = taosMemoryCalloc(1, sizeof(SClientHbBatchReq));
|
||||
if (pBatchReq == NULL) {
|
||||
|
|
|
@ -1276,7 +1276,12 @@ int32_t doProcessMsgFromServer(void* param) {
|
|||
assert(pMsg->info.ahandle != NULL);
|
||||
STscObj* pTscObj = NULL;
|
||||
|
||||
tscDebug("processMsgFromServer message: %s, code: %s", TMSG_INFO(pMsg->msgType), tstrerror(pMsg->code));
|
||||
STraceId* trace = &pMsg->info.traceId;
|
||||
char tbuf[40] = {0};
|
||||
TRACE_TO_STR(trace, tbuf);
|
||||
|
||||
tscDebug("processMsgFromServer handle %p, message: %s, code: %s, gtid: %s", pMsg->info.handle, TMSG_INFO(pMsg->msgType), tstrerror(pMsg->code),
|
||||
tbuf);
|
||||
|
||||
if (pSendInfo->requestObjRefId != 0) {
|
||||
SRequestObj* pRequest = (SRequestObj*)taosAcquireRef(clientReqRefPool, pSendInfo->requestObjRefId);
|
||||
|
|
File diff suppressed because it is too large
Load Diff
|
@ -1752,7 +1752,7 @@ char* dumpBlockData(SSDataBlock* pDataBlock, const char* flag, char** pDataBuf)
|
|||
int32_t colNum = taosArrayGetSize(pDataBlock->pDataBlock);
|
||||
int32_t rows = pDataBlock->info.rows;
|
||||
int32_t len = 0;
|
||||
len += snprintf(dumpBuf + len, size - len, "%s |block type %d |child id %d|group id:%" PRIu64 "| uid:%ld|======\n", "dumpBlockData",
|
||||
len += snprintf(dumpBuf + len, size - len, "===stream===%s |block type %d |child id %d|group id:%" PRIu64 "| uid:%ld|\n", flag,
|
||||
(int32_t)pDataBlock->info.type, pDataBlock->info.childId, pDataBlock->info.groupId,
|
||||
pDataBlock->info.uid);
|
||||
if (len >= size - 1) return dumpBuf;
|
||||
|
|
|
@ -196,6 +196,7 @@ SArray *mmGetMsgHandles() {
|
|||
if (dmSetMgmtHandle(pArray, TDMT_MND_DROP_TOPIC, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
|
||||
if (dmSetMgmtHandle(pArray, TDMT_MND_SUBSCRIBE, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
|
||||
if (dmSetMgmtHandle(pArray, TDMT_MND_MQ_ASK_EP, mmPutMsgToReadQueue, 0) == NULL) goto _OVER;
|
||||
if (dmSetMgmtHandle(pArray, TDMT_MND_MQ_HB, mmPutMsgToReadQueue, 0) == NULL) goto _OVER;
|
||||
if (dmSetMgmtHandle(pArray, TDMT_MND_MQ_DROP_CGROUP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
|
||||
if (dmSetMgmtHandle(pArray, TDMT_MND_MQ_DROP_CGROUP_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
|
||||
if (dmSetMgmtHandle(pArray, TDMT_MND_MQ_COMMIT_OFFSET, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
|
||||
|
|
|
@ -81,6 +81,7 @@ int32_t qmPutRpcMsgToQueue(SQnodeMgmt *pMgmt, EQueueType qtype, SRpcMsg *pRpc) {
|
|||
taosWriteQitem(pMgmt->queryWorker.queue, pMsg);
|
||||
return 0;
|
||||
case READ_QUEUE:
|
||||
case FETCH_QUEUE:
|
||||
dTrace("msg:%p, is created and will put into qnode-fetch queue", pMsg);
|
||||
taosWriteQitem(pMgmt->fetchWorker.queue, pMsg);
|
||||
return 0;
|
||||
|
|
|
@ -48,6 +48,7 @@ static void mndCancelGetNextConsumer(SMnode *pMnode, void *pIter);
|
|||
|
||||
static int32_t mndProcessSubscribeReq(SRpcMsg *pMsg);
|
||||
static int32_t mndProcessAskEpReq(SRpcMsg *pMsg);
|
||||
static int32_t mndProcessMqHbReq(SRpcMsg *pMsg);
|
||||
static int32_t mndProcessMqTimerMsg(SRpcMsg *pMsg);
|
||||
static int32_t mndProcessConsumerLostMsg(SRpcMsg *pMsg);
|
||||
static int32_t mndProcessConsumerRecoverMsg(SRpcMsg *pMsg);
|
||||
|
@ -62,6 +63,7 @@ int32_t mndInitConsumer(SMnode *pMnode) {
|
|||
.deleteFp = (SdbDeleteFp)mndConsumerActionDelete};
|
||||
|
||||
mndSetMsgHandle(pMnode, TDMT_MND_SUBSCRIBE, mndProcessSubscribeReq);
|
||||
mndSetMsgHandle(pMnode, TDMT_MND_MQ_HB, mndProcessMqHbReq);
|
||||
mndSetMsgHandle(pMnode, TDMT_MND_MQ_ASK_EP, mndProcessAskEpReq);
|
||||
mndSetMsgHandle(pMnode, TDMT_MND_MQ_TIMER, mndProcessMqTimerMsg);
|
||||
mndSetMsgHandle(pMnode, TDMT_MND_MQ_CONSUMER_LOST, mndProcessConsumerLostMsg);
|
||||
|
@ -255,24 +257,15 @@ static int32_t mndProcessMqTimerMsg(SRpcMsg *pMsg) {
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int32_t mndProcessAskEpReq(SRpcMsg *pMsg) {
|
||||
SMnode *pMnode = pMsg->info.node;
|
||||
SMqAskEpReq *pReq = (SMqAskEpReq *)pMsg->pCont;
|
||||
SMqAskEpRsp rsp = {0};
|
||||
int64_t consumerId = be64toh(pReq->consumerId);
|
||||
int32_t epoch = ntohl(pReq->epoch);
|
||||
static int32_t mndProcessMqHbReq(SRpcMsg *pMsg) {
|
||||
SMnode *pMnode = pMsg->info.node;
|
||||
SMqHbReq *pReq = (SMqHbReq *)pMsg->pCont;
|
||||
int64_t consumerId = be64toh(pReq->consumerId);
|
||||
|
||||
SMqConsumerObj *pConsumer = mndAcquireConsumer(pMsg->info.node, consumerId);
|
||||
if (pConsumer == NULL) {
|
||||
terrno = TSDB_CODE_MND_CONSUMER_NOT_EXIST;
|
||||
return -1;
|
||||
}
|
||||
SMqConsumerObj *pConsumer = mndAcquireConsumer(pMnode, consumerId);
|
||||
|
||||
ASSERT(strcmp(pReq->cgroup, pConsumer->cgroup) == 0);
|
||||
/*int32_t hbStatus = atomic_load_32(&pConsumer->hbStatus);*/
|
||||
atomic_store_32(&pConsumer->hbStatus, 0);
|
||||
|
||||
// 1. check consumer status
|
||||
int32_t status = atomic_load_32(&pConsumer->status);
|
||||
|
||||
if (status == MQ_CONSUMER_STATUS__LOST_REBD) {
|
||||
|
@ -286,6 +279,46 @@ static int32_t mndProcessAskEpReq(SRpcMsg *pMsg) {
|
|||
tmsgPutToQueue(&pMnode->msgCb, WRITE_QUEUE, pRpcMsg);
|
||||
}
|
||||
|
||||
mndReleaseConsumer(pMnode, pConsumer);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int32_t mndProcessAskEpReq(SRpcMsg *pMsg) {
|
||||
SMnode *pMnode = pMsg->info.node;
|
||||
SMqAskEpReq *pReq = (SMqAskEpReq *)pMsg->pCont;
|
||||
SMqAskEpRsp rsp = {0};
|
||||
int64_t consumerId = be64toh(pReq->consumerId);
|
||||
int32_t epoch = ntohl(pReq->epoch);
|
||||
|
||||
SMqConsumerObj *pConsumer = mndAcquireConsumer(pMnode, consumerId);
|
||||
if (pConsumer == NULL) {
|
||||
terrno = TSDB_CODE_MND_CONSUMER_NOT_EXIST;
|
||||
return -1;
|
||||
}
|
||||
|
||||
ASSERT(strcmp(pReq->cgroup, pConsumer->cgroup) == 0);
|
||||
|
||||
#if 1
|
||||
atomic_store_32(&pConsumer->hbStatus, 0);
|
||||
#endif
|
||||
|
||||
// 1. check consumer status
|
||||
int32_t status = atomic_load_32(&pConsumer->status);
|
||||
|
||||
#if 1
|
||||
if (status == MQ_CONSUMER_STATUS__LOST_REBD) {
|
||||
SMqConsumerRecoverMsg *pRecoverMsg = rpcMallocCont(sizeof(SMqConsumerRecoverMsg));
|
||||
|
||||
pRecoverMsg->consumerId = consumerId;
|
||||
SRpcMsg *pRpcMsg = taosMemoryCalloc(1, sizeof(SRpcMsg));
|
||||
pRpcMsg->msgType = TDMT_MND_MQ_CONSUMER_RECOVER;
|
||||
pRpcMsg->pCont = pRecoverMsg;
|
||||
pRpcMsg->contLen = sizeof(SMqConsumerRecoverMsg);
|
||||
tmsgPutToQueue(&pMnode->msgCb, WRITE_QUEUE, pRpcMsg);
|
||||
}
|
||||
#endif
|
||||
|
||||
if (status != MQ_CONSUMER_STATUS__READY) {
|
||||
terrno = TSDB_CODE_MND_CONSUMER_NOT_READY;
|
||||
return -1;
|
||||
|
|
|
@ -388,67 +388,7 @@ static void mndCancelGetNextApp(SMnode *pMnode, void *pIter) {
|
|||
}
|
||||
|
||||
static SClientHbRsp *mndMqHbBuildRsp(SMnode *pMnode, SClientHbReq *pReq) {
|
||||
#if 0
|
||||
SClientHbRsp* pRsp = taosMemoryMalloc(sizeof(SClientHbRsp));
|
||||
if (pRsp == NULL) {
|
||||
terrno = TSDB_CODE_OUT_OF_MEMORY;
|
||||
return NULL;
|
||||
}
|
||||
pRsp->connKey = pReq->connKey;
|
||||
SMqHbBatchRsp batchRsp;
|
||||
batchRsp.batchRsps = taosArrayInit(0, sizeof(SMqHbRsp));
|
||||
if (batchRsp.batchRsps == NULL) {
|
||||
terrno = TSDB_CODE_OUT_OF_MEMORY;
|
||||
return NULL;
|
||||
}
|
||||
SClientHbKey connKey = pReq->connKey;
|
||||
SHashObj* pObj = pReq->info;
|
||||
SKv* pKv = taosHashGet(pObj, "mq-tmp", strlen("mq-tmp") + 1);
|
||||
if (pKv == NULL) {
|
||||
taosMemoryFree(pRsp);
|
||||
return NULL;
|
||||
}
|
||||
SMqHbMsg mqHb;
|
||||
taosDecodeSMqMsg(pKv->value, &mqHb);
|
||||
/*int64_t clientUid = htonl(pKv->value);*/
|
||||
/*if (mqHb.epoch )*/
|
||||
int sz = taosArrayGetSize(mqHb.pTopics);
|
||||
SMqConsumerObj* pConsumer = mndAcquireConsumer(pMnode, mqHb.consumerId);
|
||||
for (int i = 0; i < sz; i++) {
|
||||
SMqHbOneTopicBatchRsp innerBatchRsp;
|
||||
innerBatchRsp.rsps = taosArrayInit(sz, sizeof(SMqHbRsp));
|
||||
if (innerBatchRsp.rsps == NULL) {
|
||||
//TODO
|
||||
terrno = TSDB_CODE_OUT_OF_MEMORY;
|
||||
return NULL;
|
||||
}
|
||||
SMqHbTopicInfo* topicInfo = taosArrayGet(mqHb.pTopics, i);
|
||||
SMqConsumerTopic* pConsumerTopic = taosHashGet(pConsumer->topicHash, topicInfo->name, strlen(topicInfo->name)+1);
|
||||
if (pConsumerTopic->epoch != topicInfo->epoch) {
|
||||
//add new vgids into rsp
|
||||
int vgSz = taosArrayGetSize(topicInfo->pVgInfo);
|
||||
for (int j = 0; j < vgSz; j++) {
|
||||
SMqHbRsp innerRsp;
|
||||
SMqHbVgInfo* pVgInfo = taosArrayGet(topicInfo->pVgInfo, i);
|
||||
SVgObj* pVgObj = mndAcquireVgroup(pMnode, pVgInfo->vgId);
|
||||
innerRsp.epSet = mndGetVgroupEpset(pMnode, pVgObj);
|
||||
taosArrayPush(innerBatchRsp.rsps, &innerRsp);
|
||||
}
|
||||
}
|
||||
taosArrayPush(batchRsp.batchRsps, &innerBatchRsp);
|
||||
}
|
||||
int32_t tlen = taosEncodeSMqHbBatchRsp(NULL, &batchRsp);
|
||||
void* buf = taosMemoryMalloc(tlen);
|
||||
if (buf == NULL) {
|
||||
//TODO
|
||||
return NULL;
|
||||
}
|
||||
void* abuf = buf;
|
||||
taosEncodeSMqHbBatchRsp(&abuf, &batchRsp);
|
||||
pRsp->body = buf;
|
||||
pRsp->bodyLen = tlen;
|
||||
return pRsp;
|
||||
#endif
|
||||
//
|
||||
return NULL;
|
||||
}
|
||||
|
||||
|
|
|
@ -709,7 +709,7 @@ static int32_t mndProcessCreateSmaReq(SRpcMsg *pReq) {
|
|||
|
||||
_OVER:
|
||||
if (code != 0 && code != TSDB_CODE_ACTION_IN_PROGRESS) {
|
||||
mError("sma:%s, failed to create since %s", createReq.name, terrstr(terrno));
|
||||
mError("sma:%s, failed to create since %s", createReq.name, terrstr());
|
||||
}
|
||||
|
||||
mndReleaseStb(pMnode, pStb);
|
||||
|
|
|
@ -639,6 +639,7 @@ static int32_t mndRetrieveVgroups(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *p
|
|||
if (pShow->pIter == NULL) break;
|
||||
|
||||
if (pDb != NULL && pVgroup->dbUid != pDb->uid) {
|
||||
sdbRelease(pSdb, pVgroup);
|
||||
continue;
|
||||
}
|
||||
|
||||
|
|
|
@ -138,7 +138,7 @@ void *tsdbGetIdx(SMeta *pMeta);
|
|||
void *tsdbGetIvtIdx(SMeta *pMeta);
|
||||
|
||||
int32_t tsdbLastRowReaderOpen(void *pVnode, int32_t type, SArray *pTableIdList, int32_t numOfCols, void **pReader);
|
||||
int32_t tsdbRetrieveLastRow(void *pReader, SSDataBlock *pResBlock, const int32_t *slotIds, SArray* pTableUids);
|
||||
int32_t tsdbRetrieveLastRow(void *pReader, SSDataBlock *pResBlock, const int32_t *slotIds, SArray *pTableUids);
|
||||
int32_t tsdbLastrowReaderClose(void *pReader);
|
||||
int32_t tsdbGetTableSchema(SVnode *pVnode, int64_t uid, STSchema **pSchema, int64_t *suid);
|
||||
|
||||
|
|
|
@ -32,6 +32,8 @@ extern "C" {
|
|||
#define smaTrace(...) do { if (smaDebugFlag & DEBUG_TRACE) { taosPrintLog("SMA ", DEBUG_TRACE, tsdbDebugFlag, __VA_ARGS__); }} while(0)
|
||||
// clang-format on
|
||||
|
||||
#define RSMA_TASK_INFO_HASH_SLOT 8
|
||||
|
||||
typedef struct SSmaEnv SSmaEnv;
|
||||
typedef struct SSmaStat SSmaStat;
|
||||
typedef struct STSmaStat STSmaStat;
|
||||
|
@ -41,7 +43,7 @@ typedef struct SRSmaInfo SRSmaInfo;
|
|||
typedef struct SRSmaInfoItem SRSmaInfoItem;
|
||||
|
||||
struct SSmaEnv {
|
||||
TdThreadRwlock lock;
|
||||
SRWLatch lock;
|
||||
int8_t type;
|
||||
SSmaStat *pStat;
|
||||
};
|
||||
|
@ -52,7 +54,7 @@ typedef struct {
|
|||
void *tmrHandle; // shared by all fetch tasks
|
||||
} SSmaMgmt;
|
||||
|
||||
#define SMA_ENV_LOCK(env) ((env)->lock)
|
||||
#define SMA_ENV_LOCK(env) (&(env)->lock)
|
||||
#define SMA_ENV_TYPE(env) ((env)->type)
|
||||
#define SMA_ENV_STAT(env) ((env)->pStat)
|
||||
|
||||
|
@ -64,10 +66,14 @@ struct STSmaStat {
|
|||
|
||||
struct SRSmaStat {
|
||||
SSma *pSma;
|
||||
int64_t submitVer;
|
||||
int64_t refId; // shared by fetch tasks
|
||||
int8_t triggerStat; // shared by fetch tasks
|
||||
SHashObj *rsmaInfoHash; // key: stbUid, value: SRSmaInfo;
|
||||
int64_t commitAppliedVer; // vnode applied version for async commit
|
||||
int64_t commitSubmitVer; // rsma submit version for async commit
|
||||
int64_t submitVer; // latest submit version
|
||||
int64_t refId; // shared by fetch tasks
|
||||
int8_t triggerStat; // shared by fetch tasks
|
||||
int8_t commitStat; // 0 not in committing, 1 in committing
|
||||
SHashObj *rsmaInfoHash; // key: stbUid, value: SRSmaInfo;
|
||||
SHashObj *iRsmaInfoHash; // key: stbUid, value: SRSmaInfo; immutable rsmaInfoHash
|
||||
};
|
||||
|
||||
struct SSmaStat {
|
||||
|
@ -78,12 +84,29 @@ struct SSmaStat {
|
|||
T_REF_DECLARE()
|
||||
};
|
||||
|
||||
#define SMA_TSMA_STAT(s) (&(s)->tsmaStat)
|
||||
#define SMA_RSMA_STAT(s) (&(s)->rsmaStat)
|
||||
#define RSMA_INFO_HASH(r) ((r)->rsmaInfoHash)
|
||||
#define RSMA_TRIGGER_STAT(r) (&(r)->triggerStat)
|
||||
#define RSMA_REF_ID(r) ((r)->refId)
|
||||
#define RSMA_SUBMIT_VER(r) ((r)->submitVer)
|
||||
#define SMA_TSMA_STAT(s) (&(s)->tsmaStat)
|
||||
#define SMA_RSMA_STAT(s) (&(s)->rsmaStat)
|
||||
#define RSMA_INFO_HASH(r) ((r)->rsmaInfoHash)
|
||||
#define RSMA_IMU_INFO_HASH(r) ((r)->iRsmaInfoHash)
|
||||
#define RSMA_TRIGGER_STAT(r) (&(r)->triggerStat)
|
||||
#define RSMA_COMMIT_STAT(r) (&(r)->commitStat)
|
||||
#define RSMA_REF_ID(r) ((r)->refId)
|
||||
#define RSMA_SUBMIT_VER(r) ((r)->submitVer)
|
||||
|
||||
struct SRSmaInfoItem {
|
||||
void *taskInfo; // qTaskInfo_t
|
||||
int64_t refId;
|
||||
tmr_h tmrId;
|
||||
int32_t maxDelay;
|
||||
int8_t level;
|
||||
int8_t triggerStat;
|
||||
};
|
||||
|
||||
struct SRSmaInfo {
|
||||
STSchema *pTSchema;
|
||||
int64_t suid;
|
||||
SRSmaInfoItem items[TSDB_RETENTION_L2];
|
||||
};
|
||||
|
||||
enum {
|
||||
TASK_TRIGGER_STAT_INIT = 0,
|
||||
|
@ -94,6 +117,14 @@ enum {
|
|||
TASK_TRIGGER_STAT_DROPPED = 5,
|
||||
};
|
||||
|
||||
enum {
|
||||
RSMA_ROLE_CREATE = 0,
|
||||
RSMA_ROLE_DROP = 1,
|
||||
RSMA_ROLE_FETCH = 2,
|
||||
RSMA_ROLE_SUBMIT = 3,
|
||||
RSMA_ROLE_ITERATE = 4,
|
||||
};
|
||||
|
||||
void tdDestroySmaEnv(SSmaEnv *pSmaEnv);
|
||||
void *tdFreeSmaEnv(SSmaEnv *pSmaEnv);
|
||||
|
||||
|
@ -112,33 +143,6 @@ int32_t tdCheckAndInitSmaEnv(SSma *pSma, int8_t smaType);
|
|||
int32_t tdLockSma(SSma *pSma);
|
||||
int32_t tdUnLockSma(SSma *pSma);
|
||||
|
||||
static FORCE_INLINE int32_t tdRLockSmaEnv(SSmaEnv *pEnv) {
|
||||
int code = taosThreadRwlockRdlock(&(pEnv->lock));
|
||||
if (code != 0) {
|
||||
terrno = TAOS_SYSTEM_ERROR(code);
|
||||
return -1;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static FORCE_INLINE int32_t tdWLockSmaEnv(SSmaEnv *pEnv) {
|
||||
int code = taosThreadRwlockWrlock(&(pEnv->lock));
|
||||
if (code != 0) {
|
||||
terrno = TAOS_SYSTEM_ERROR(code);
|
||||
return -1;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static FORCE_INLINE int32_t tdUnLockSmaEnv(SSmaEnv *pEnv) {
|
||||
int code = taosThreadRwlockUnlock(&(pEnv->lock));
|
||||
if (code != 0) {
|
||||
terrno = TAOS_SYSTEM_ERROR(code);
|
||||
return -1;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static FORCE_INLINE int8_t tdSmaStat(STSmaStat *pTStat) {
|
||||
if (pTStat) {
|
||||
return atomic_load_8(&pTStat->state);
|
||||
|
@ -184,10 +188,12 @@ static FORCE_INLINE void tdSmaStatSetDropped(STSmaStat *pTStat) {
|
|||
}
|
||||
}
|
||||
|
||||
int32_t tdCloneRSmaInfo(SSma *pSma, SRSmaInfo *pDest, SRSmaInfo *pSrc);
|
||||
void tdFreeQTaskInfo(qTaskInfo_t *taskHandle, int32_t vgId, int32_t level);
|
||||
static int32_t tdDestroySmaState(SSmaStat *pSmaStat, int8_t smaType);
|
||||
void *tdFreeSmaState(SSmaStat *pSmaStat, int8_t smaType);
|
||||
void *tdFreeRSmaInfo(SSma *pSma, SRSmaInfo *pInfo);
|
||||
int32_t tdRSmaPersistExecImpl(SRSmaStat *pRSmaStat);
|
||||
void *tdFreeRSmaInfo(SSma *pSma, SRSmaInfo *pInfo, bool isDeepFree);
|
||||
int32_t tdRSmaPersistExecImpl(SRSmaStat *pRSmaStat, SHashObj *pInfoHash);
|
||||
|
||||
int32_t tdProcessRSmaCreateImpl(SSma *pSma, SRSmaParam *param, int64_t suid, const char *tbName);
|
||||
int32_t tdProcessRSmaRestoreImpl(SSma *pSma);
|
||||
|
|
|
@ -24,12 +24,12 @@ extern "C" {
|
|||
|
||||
// tsdbDebug ================
|
||||
// clang-format off
|
||||
#define tsdbFatal(...) do { if (tsdbDebugFlag & DEBUG_FATAL) { taosPrintLog("TSDB FATAL ", DEBUG_FATAL, 255, __VA_ARGS__); }} while(0)
|
||||
#define tsdbError(...) do { if (tsdbDebugFlag & DEBUG_ERROR) { taosPrintLog("TSDB ERROR ", DEBUG_ERROR, 255, __VA_ARGS__); }} while(0)
|
||||
#define tsdbWarn(...) do { if (tsdbDebugFlag & DEBUG_WARN) { taosPrintLog("TSDB WARN ", DEBUG_WARN, 255, __VA_ARGS__); }} while(0)
|
||||
#define tsdbInfo(...) do { if (tsdbDebugFlag & DEBUG_INFO) { taosPrintLog("TSDB ", DEBUG_INFO, 255, __VA_ARGS__); }} while(0)
|
||||
#define tsdbDebug(...) do { if (tsdbDebugFlag & DEBUG_DEBUG) { taosPrintLog("TSDB ", DEBUG_DEBUG, tsdbDebugFlag, __VA_ARGS__); }} while(0)
|
||||
#define tsdbTrace(...) do { if (tsdbDebugFlag & DEBUG_TRACE) { taosPrintLog("TSDB ", DEBUG_TRACE, tsdbDebugFlag, __VA_ARGS__); }} while(0)
|
||||
#define tsdbFatal(...) do { if (tsdbDebugFlag & DEBUG_FATAL) { taosPrintLog("TSD FATAL ", DEBUG_FATAL, 255, __VA_ARGS__); }} while(0)
|
||||
#define tsdbError(...) do { if (tsdbDebugFlag & DEBUG_ERROR) { taosPrintLog("TSD ERROR ", DEBUG_ERROR, 255, __VA_ARGS__); }} while(0)
|
||||
#define tsdbWarn(...) do { if (tsdbDebugFlag & DEBUG_WARN) { taosPrintLog("TSD WARN ", DEBUG_WARN, 255, __VA_ARGS__); }} while(0)
|
||||
#define tsdbInfo(...) do { if (tsdbDebugFlag & DEBUG_INFO) { taosPrintLog("TSD ", DEBUG_INFO, 255, __VA_ARGS__); }} while(0)
|
||||
#define tsdbDebug(...) do { if (tsdbDebugFlag & DEBUG_DEBUG) { taosPrintLog("TSD ", DEBUG_DEBUG, tsdbDebugFlag, __VA_ARGS__); }} while(0)
|
||||
#define tsdbTrace(...) do { if (tsdbDebugFlag & DEBUG_TRACE) { taosPrintLog("TSD ", DEBUG_TRACE, tsdbDebugFlag, __VA_ARGS__); }} while(0)
|
||||
// clang-format on
|
||||
|
||||
typedef struct TSDBROW TSDBROW;
|
||||
|
@ -115,7 +115,6 @@ int32_t tGetBlock(uint8_t *p, void *ph);
|
|||
int32_t tBlockCmprFn(const void *p1, const void *p2);
|
||||
bool tBlockHasSma(SBlock *pBlock);
|
||||
// SBlockIdx
|
||||
void tBlockIdxReset(SBlockIdx *pBlockIdx);
|
||||
int32_t tPutBlockIdx(uint8_t *p, void *ph);
|
||||
int32_t tGetBlockIdx(uint8_t *p, void *ph);
|
||||
int32_t tCmprBlockIdx(void const *lhs, void const *rhs);
|
||||
|
@ -126,6 +125,8 @@ void tColDataClear(void *ph);
|
|||
int32_t tColDataAppendValue(SColData *pColData, SColVal *pColVal);
|
||||
int32_t tColDataGetValue(SColData *pColData, int32_t iRow, SColVal *pColVal);
|
||||
int32_t tColDataCopy(SColData *pColDataSrc, SColData *pColDataDest);
|
||||
int32_t tPutColData(uint8_t *p, SColData *pColData);
|
||||
int32_t tGetColData(uint8_t *p, SColData *pColData);
|
||||
// SBlockData
|
||||
#define tBlockDataFirstRow(PBLOCKDATA) tsdbRowFromBlockData(PBLOCKDATA, 0)
|
||||
#define tBlockDataLastRow(PBLOCKDATA) tsdbRowFromBlockData(PBLOCKDATA, (PBLOCKDATA)->nRow - 1)
|
||||
|
@ -134,14 +135,17 @@ int32_t tColDataCopy(SColData *pColDataSrc, SColData *pColDataDest);
|
|||
int32_t tBlockDataInit(SBlockData *pBlockData);
|
||||
void tBlockDataReset(SBlockData *pBlockData);
|
||||
int32_t tBlockDataSetSchema(SBlockData *pBlockData, STSchema *pTSchema);
|
||||
int32_t tBlockDataCorrectSchema(SBlockData *pBlockData, SBlockData *pBlockDataFrom);
|
||||
void tBlockDataClearData(SBlockData *pBlockData);
|
||||
void tBlockDataClear(SBlockData *pBlockData);
|
||||
void tBlockDataClear(SBlockData *pBlockData, int8_t deepClear);
|
||||
int32_t tBlockDataAddColData(SBlockData *pBlockData, int32_t iColData, SColData **ppColData);
|
||||
int32_t tBlockDataAppendRow(SBlockData *pBlockData, TSDBROW *pRow, STSchema *pTSchema);
|
||||
int32_t tBlockDataMerge(SBlockData *pBlockData1, SBlockData *pBlockData2, SBlockData *pBlockData);
|
||||
int32_t tBlockDataCopy(SBlockData *pBlockDataSrc, SBlockData *pBlockDataDest);
|
||||
SColData *tBlockDataGetColDataByIdx(SBlockData *pBlockData, int32_t idx);
|
||||
void tBlockDataGetColData(SBlockData *pBlockData, int16_t cid, SColData **ppColData);
|
||||
int32_t tPutBlockData(uint8_t *p, SBlockData *pBlockData);
|
||||
int32_t tGetBlockData(uint8_t *p, SBlockData *pBlockData);
|
||||
// SDelIdx
|
||||
int32_t tPutDelIdx(uint8_t *p, void *ph);
|
||||
int32_t tGetDelIdx(uint8_t *p, void *ph);
|
||||
|
@ -202,7 +206,7 @@ int32_t tsdbFSStateUpsertDelFile(STsdbFSState *pState, SDelFile *pDelFile);
|
|||
int32_t tsdbFSStateUpsertDFileSet(STsdbFSState *pState, SDFileSet *pSet);
|
||||
void tsdbFSStateDeleteDFileSet(STsdbFSState *pState, int32_t fid);
|
||||
SDelFile *tsdbFSStateGetDelFile(STsdbFSState *pState);
|
||||
SDFileSet *tsdbFSStateGetDFileSet(STsdbFSState *pState, int32_t fid);
|
||||
SDFileSet *tsdbFSStateGetDFileSet(STsdbFSState *pState, int32_t fid, int32_t flag);
|
||||
// tsdbReaderWriter.c ==============================================================================================
|
||||
// SDataFWriter
|
||||
int32_t tsdbDataFWriterOpen(SDataFWriter **ppWriter, STsdb *pTsdb, SDFileSet *pSet);
|
||||
|
@ -357,10 +361,6 @@ struct TSDBROW {
|
|||
struct SBlockIdx {
|
||||
int64_t suid;
|
||||
int64_t uid;
|
||||
TSKEY minKey;
|
||||
TSKEY maxKey;
|
||||
int64_t minVersion;
|
||||
int64_t maxVersion;
|
||||
int64_t offset;
|
||||
int64_t size;
|
||||
};
|
||||
|
|
|
@ -164,9 +164,12 @@ void smaCleanUp();
|
|||
int32_t smaOpen(SVnode* pVnode);
|
||||
int32_t smaClose(SSma* pSma);
|
||||
int32_t smaBegin(SSma* pSma);
|
||||
int32_t smaPreCommit(SSma* pSma);
|
||||
int32_t smaCommit(SSma* pSma);
|
||||
int32_t smaPostCommit(SSma* pSma);
|
||||
int32_t smaSyncPreCommit(SSma* pSma);
|
||||
int32_t smaSyncCommit(SSma* pSma);
|
||||
int32_t smaSyncPostCommit(SSma* pSma);
|
||||
int32_t smaAsyncPreCommit(SSma* pSma);
|
||||
int32_t smaAsyncCommit(SSma* pSma);
|
||||
int32_t smaAsyncPostCommit(SSma* pSma);
|
||||
|
||||
int32_t tdProcessTSmaCreate(SSma* pSma, int64_t version, const char* msg);
|
||||
int32_t tdProcessTSmaInsert(SSma* pSma, int64_t indexUid, const char* msg);
|
||||
|
@ -309,6 +312,7 @@ void smaHandleRes(void* pVnode, int64_t smaId, const SArray* data);
|
|||
|
||||
struct SSnapDataHdr {
|
||||
int8_t type;
|
||||
int64_t index;
|
||||
int64_t size;
|
||||
uint8_t data[];
|
||||
};
|
||||
|
|
|
@ -26,60 +26,72 @@ struct SMetaSnapReader {
|
|||
int32_t metaSnapReaderOpen(SMeta* pMeta, int64_t sver, int64_t ever, SMetaSnapReader** ppReader) {
|
||||
int32_t code = 0;
|
||||
int32_t c = 0;
|
||||
SMetaSnapReader* pMetaSnapReader = NULL;
|
||||
SMetaSnapReader* pReader = NULL;
|
||||
|
||||
// alloc
|
||||
pMetaSnapReader = (SMetaSnapReader*)taosMemoryCalloc(1, sizeof(*pMetaSnapReader));
|
||||
if (pMetaSnapReader == NULL) {
|
||||
pReader = (SMetaSnapReader*)taosMemoryCalloc(1, sizeof(*pReader));
|
||||
if (pReader == NULL) {
|
||||
code = TSDB_CODE_OUT_OF_MEMORY;
|
||||
goto _err;
|
||||
}
|
||||
pMetaSnapReader->pMeta = pMeta;
|
||||
pMetaSnapReader->sver = sver;
|
||||
pMetaSnapReader->ever = ever;
|
||||
pReader->pMeta = pMeta;
|
||||
pReader->sver = sver;
|
||||
pReader->ever = ever;
|
||||
|
||||
// impl
|
||||
code = tdbTbcOpen(pMeta->pTbDb, &pMetaSnapReader->pTbc, NULL);
|
||||
code = tdbTbcOpen(pMeta->pTbDb, &pReader->pTbc, NULL);
|
||||
if (code) {
|
||||
taosMemoryFree(pReader);
|
||||
goto _err;
|
||||
}
|
||||
|
||||
code = tdbTbcMoveTo(pMetaSnapReader->pTbc, &(STbDbKey){.version = sver, .uid = INT64_MIN}, sizeof(STbDbKey), &c);
|
||||
code = tdbTbcMoveTo(pReader->pTbc, &(STbDbKey){.version = sver, .uid = INT64_MIN}, sizeof(STbDbKey), &c);
|
||||
if (code) {
|
||||
taosMemoryFree(pReader);
|
||||
goto _err;
|
||||
}
|
||||
|
||||
*ppReader = pMetaSnapReader;
|
||||
metaInfo("vgId:%d vnode snapshot meta reader opened", TD_VID(pMeta->pVnode));
|
||||
|
||||
*ppReader = pReader;
|
||||
return code;
|
||||
|
||||
_err:
|
||||
metaError("vgId:%d meta snap reader open failed since %s", TD_VID(pMeta->pVnode), tstrerror(code));
|
||||
metaError("vgId:%d vnode snapshot meta reader open failed since %s", TD_VID(pMeta->pVnode), tstrerror(code));
|
||||
*ppReader = NULL;
|
||||
return code;
|
||||
}
|
||||
|
||||
int32_t metaSnapReaderClose(SMetaSnapReader** ppReader) {
|
||||
int32_t code = 0;
|
||||
|
||||
tdbTbcClose((*ppReader)->pTbc);
|
||||
taosMemoryFree(*ppReader);
|
||||
*ppReader = NULL;
|
||||
return 0;
|
||||
|
||||
return code;
|
||||
}
|
||||
|
||||
int32_t metaSnapRead(SMetaSnapReader* pReader, uint8_t** ppData) {
|
||||
int32_t code = 0;
|
||||
const void* pKey = NULL;
|
||||
const void* pData = NULL;
|
||||
int32_t nKey = 0;
|
||||
int32_t nData = 0;
|
||||
int32_t code = 0;
|
||||
STbDbKey key;
|
||||
|
||||
*ppData = NULL;
|
||||
for (;;) {
|
||||
code = tdbTbcGet(pReader->pTbc, &pKey, &nKey, &pData, &nData);
|
||||
if (code || ((STbDbKey*)pData)->version > pReader->ever) {
|
||||
code = TSDB_CODE_VND_READ_END;
|
||||
if (tdbTbcGet(pReader->pTbc, &pKey, &nKey, &pData, &nData)) {
|
||||
goto _exit;
|
||||
}
|
||||
|
||||
if (((STbDbKey*)pData)->version < pReader->sver) {
|
||||
key = ((STbDbKey*)pKey)[0];
|
||||
if (key.version > pReader->ever) {
|
||||
goto _exit;
|
||||
}
|
||||
|
||||
if (key.version < pReader->sver) {
|
||||
tdbTbcMoveToNext(pReader->pTbc);
|
||||
continue;
|
||||
}
|
||||
|
@ -88,17 +100,28 @@ int32_t metaSnapRead(SMetaSnapReader* pReader, uint8_t** ppData) {
|
|||
break;
|
||||
}
|
||||
|
||||
// copy the data
|
||||
if (tRealloc(ppData, sizeof(SSnapDataHdr) + nData) < 0) {
|
||||
ASSERT(pData && nData);
|
||||
|
||||
*ppData = taosMemoryMalloc(sizeof(SSnapDataHdr) + nData);
|
||||
if (*ppData == NULL) {
|
||||
code = TSDB_CODE_OUT_OF_MEMORY;
|
||||
return code;
|
||||
goto _err;
|
||||
}
|
||||
((SSnapDataHdr*)(*ppData))->type = 0; // TODO: use macro
|
||||
((SSnapDataHdr*)(*ppData))->size = nData;
|
||||
memcpy(((SSnapDataHdr*)(*ppData))->data, pData, nData);
|
||||
|
||||
SSnapDataHdr* pHdr = (SSnapDataHdr*)(*ppData);
|
||||
pHdr->type = 0; // TODO: use macro
|
||||
pHdr->size = nData;
|
||||
memcpy(pHdr->data, pData, nData);
|
||||
|
||||
metaInfo("vgId:%d vnode snapshot meta read data, version:%" PRId64 " uid:%" PRId64 " nData:%d",
|
||||
TD_VID(pReader->pMeta->pVnode), key.version, key.uid, nData);
|
||||
|
||||
_exit:
|
||||
return code;
|
||||
|
||||
_err:
|
||||
metaError("vgId:%d vnode snapshot meta read data failed since %s", TD_VID(pReader->pMeta->pVnode), tstrerror(code));
|
||||
return code;
|
||||
}
|
||||
|
||||
// SMetaSnapWriter ========================================
|
||||
|
@ -108,18 +131,6 @@ struct SMetaSnapWriter {
|
|||
int64_t ever;
|
||||
};
|
||||
|
||||
static int32_t metaSnapRollback(SMetaSnapWriter* pWriter) {
|
||||
int32_t code = 0;
|
||||
// TODO
|
||||
return code;
|
||||
}
|
||||
|
||||
static int32_t metaSnapCommit(SMetaSnapWriter* pWriter) {
|
||||
int32_t code = 0;
|
||||
// TODO
|
||||
return code;
|
||||
}
|
||||
|
||||
int32_t metaSnapWriterOpen(SMeta* pMeta, int64_t sver, int64_t ever, SMetaSnapWriter** ppWriter) {
|
||||
int32_t code = 0;
|
||||
SMetaSnapWriter* pWriter;
|
||||
|
@ -148,10 +159,9 @@ int32_t metaSnapWriterClose(SMetaSnapWriter** ppWriter, int8_t rollback) {
|
|||
SMetaSnapWriter* pWriter = *ppWriter;
|
||||
|
||||
if (rollback) {
|
||||
code = metaSnapRollback(pWriter);
|
||||
if (code) goto _err;
|
||||
ASSERT(0);
|
||||
} else {
|
||||
code = metaSnapCommit(pWriter);
|
||||
code = metaCommit(pWriter->pMeta);
|
||||
if (code) goto _err;
|
||||
}
|
||||
taosMemoryFree(pWriter);
|
||||
|
@ -170,15 +180,16 @@ int32_t metaSnapWrite(SMetaSnapWriter* pWriter, uint8_t* pData, uint32_t nData)
|
|||
SMetaEntry metaEntry = {0};
|
||||
SDecoder* pDecoder = &(SDecoder){0};
|
||||
|
||||
tDecoderInit(pDecoder, pData, nData);
|
||||
tDecoderInit(pDecoder, pData + sizeof(SSnapDataHdr), nData - sizeof(SSnapDataHdr));
|
||||
metaDecodeEntry(pDecoder, &metaEntry);
|
||||
|
||||
code = metaHandleEntry(pMeta, &metaEntry);
|
||||
if (code) goto _err;
|
||||
|
||||
tDecoderClear(pDecoder);
|
||||
return code;
|
||||
|
||||
_err:
|
||||
metaError("vgId:%d meta snapshot write failed since %s", TD_VID(pMeta->pVnode), tstrerror(code));
|
||||
metaError("vgId:%d vnode snapshot meta write failed since %s", TD_VID(pMeta->pVnode), tstrerror(code));
|
||||
return code;
|
||||
}
|
|
@ -15,9 +15,13 @@
|
|||
|
||||
#include "sma.h"
|
||||
|
||||
static int32_t tdProcessRSmaPreCommitImpl(SSma *pSma);
|
||||
static int32_t tdProcessRSmaCommitImpl(SSma *pSma);
|
||||
static int32_t tdProcessRSmaPostCommitImpl(SSma *pSma);
|
||||
static int32_t tdProcessRSmaSyncPreCommitImpl(SSma *pSma);
|
||||
static int32_t tdProcessRSmaSyncCommitImpl(SSma *pSma);
|
||||
static int32_t tdProcessRSmaSyncPostCommitImpl(SSma *pSma);
|
||||
static int32_t tdProcessRSmaAsyncPreCommitImpl(SSma *pSma);
|
||||
static int32_t tdProcessRSmaAsyncCommitImpl(SSma *pSma);
|
||||
static int32_t tdProcessRSmaAsyncPostCommitImpl(SSma *pSma);
|
||||
static int32_t tdCleanupQTaskInfoFiles(SSma *pSma, SRSmaStat *pRSmaStat);
|
||||
|
||||
/**
|
||||
* @brief Only applicable to Rollup SMA
|
||||
|
@ -25,7 +29,7 @@ static int32_t tdProcessRSmaPostCommitImpl(SSma *pSma);
|
|||
* @param pSma
|
||||
* @return int32_t
|
||||
*/
|
||||
int32_t smaPreCommit(SSma *pSma) { return tdProcessRSmaPreCommitImpl(pSma); }
|
||||
int32_t smaSyncPreCommit(SSma *pSma) { return tdProcessRSmaSyncPreCommitImpl(pSma); }
|
||||
|
||||
/**
|
||||
* @brief Only applicable to Rollup SMA
|
||||
|
@ -33,7 +37,7 @@ int32_t smaPreCommit(SSma *pSma) { return tdProcessRSmaPreCommitImpl(pSma); }
|
|||
* @param pSma
|
||||
* @return int32_t
|
||||
*/
|
||||
int32_t smaCommit(SSma *pSma) { return tdProcessRSmaCommitImpl(pSma); }
|
||||
int32_t smaSyncCommit(SSma *pSma) { return tdProcessRSmaSyncCommitImpl(pSma); }
|
||||
|
||||
/**
|
||||
* @brief Only applicable to Rollup SMA
|
||||
|
@ -41,7 +45,31 @@ int32_t smaCommit(SSma *pSma) { return tdProcessRSmaCommitImpl(pSma); }
|
|||
* @param pSma
|
||||
* @return int32_t
|
||||
*/
|
||||
int32_t smaPostCommit(SSma *pSma) { return tdProcessRSmaPostCommitImpl(pSma); }
|
||||
int32_t smaSyncPostCommit(SSma *pSma) { return tdProcessRSmaSyncPostCommitImpl(pSma); }
|
||||
|
||||
/**
|
||||
* @brief Only applicable to Rollup SMA
|
||||
*
|
||||
* @param pSma
|
||||
* @return int32_t
|
||||
*/
|
||||
int32_t smaAsyncPreCommit(SSma *pSma) { return tdProcessRSmaAsyncPreCommitImpl(pSma); }
|
||||
|
||||
/**
|
||||
* @brief Only applicable to Rollup SMA
|
||||
*
|
||||
* @param pSma
|
||||
* @return int32_t
|
||||
*/
|
||||
int32_t smaAsyncCommit(SSma *pSma) { return tdProcessRSmaAsyncCommitImpl(pSma); }
|
||||
|
||||
/**
|
||||
* @brief Only applicable to Rollup SMA
|
||||
*
|
||||
* @param pSma
|
||||
* @return int32_t
|
||||
*/
|
||||
int32_t smaAsyncPostCommit(SSma *pSma) { return tdProcessRSmaAsyncPostCommitImpl(pSma); }
|
||||
|
||||
/**
|
||||
* @brief set rsma trigger stat active
|
||||
|
@ -62,18 +90,17 @@ int32_t smaBegin(SSma *pSma) {
|
|||
atomic_val_compare_exchange_8(RSMA_TRIGGER_STAT(pRSmaStat), TASK_TRIGGER_STAT_PAUSED, TASK_TRIGGER_STAT_ACTIVE);
|
||||
switch (rsmaTriggerStat) {
|
||||
case TASK_TRIGGER_STAT_PAUSED: {
|
||||
smaDebug("vgId:%d rsma trigger stat from paused to active", SMA_VID(pSma));
|
||||
smaDebug("vgId:%d, rsma trigger stat from paused to active", SMA_VID(pSma));
|
||||
break;
|
||||
}
|
||||
case TASK_TRIGGER_STAT_INIT: {
|
||||
atomic_store_8(RSMA_TRIGGER_STAT(pRSmaStat), TASK_TRIGGER_STAT_ACTIVE);
|
||||
smaDebug("vgId:%d rsma trigger stat from init to active", SMA_VID(pSma));
|
||||
smaDebug("vgId:%d, rsma trigger stat from init to active", SMA_VID(pSma));
|
||||
break;
|
||||
}
|
||||
default: {
|
||||
atomic_store_8(RSMA_TRIGGER_STAT(pRSmaStat), TASK_TRIGGER_STAT_ACTIVE);
|
||||
smaWarn("vgId:%d rsma trigger stat %" PRIi8 " is unexpected", SMA_VID(pSma), rsmaTriggerStat);
|
||||
ASSERT(0);
|
||||
smaError("vgId:%d, rsma trigger stat %" PRIi8 " is unexpected", SMA_VID(pSma), rsmaTriggerStat);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
@ -81,7 +108,7 @@ int32_t smaBegin(SSma *pSma) {
|
|||
}
|
||||
|
||||
/**
|
||||
* @brief pre-commit for rollup sma.
|
||||
* @brief pre-commit for rollup sma(sync commit).
|
||||
* 1) set trigger stat of rsma timer TASK_TRIGGER_STAT_PAUSED.
|
||||
* 2) wait all triggered fetch tasks finished
|
||||
* 3) perform persist task for qTaskInfo
|
||||
|
@ -89,7 +116,7 @@ int32_t smaBegin(SSma *pSma) {
|
|||
* @param pSma
|
||||
* @return int32_t
|
||||
*/
|
||||
static int32_t tdProcessRSmaPreCommitImpl(SSma *pSma) {
|
||||
static int32_t tdProcessRSmaSyncPreCommitImpl(SSma *pSma) {
|
||||
SSmaEnv *pSmaEnv = SMA_RSMA_ENV(pSma);
|
||||
if (!pSmaEnv) {
|
||||
return TSDB_CODE_SUCCESS;
|
||||
|
@ -98,8 +125,7 @@ static int32_t tdProcessRSmaPreCommitImpl(SSma *pSma) {
|
|||
SSmaStat *pStat = SMA_ENV_STAT(pSmaEnv);
|
||||
SRSmaStat *pRSmaStat = SMA_RSMA_STAT(pStat);
|
||||
|
||||
|
||||
// step 1: set persistence task paused
|
||||
// step 1: set rsma stat paused
|
||||
atomic_store_8(RSMA_TRIGGER_STAT(pRSmaStat), TASK_TRIGGER_STAT_PAUSED);
|
||||
|
||||
// step 2: wait all triggered fetch tasks finished
|
||||
|
@ -119,7 +145,9 @@ static int32_t tdProcessRSmaPreCommitImpl(SSma *pSma) {
|
|||
}
|
||||
|
||||
// step 3: perform persist task for qTaskInfo
|
||||
tdRSmaPersistExecImpl(pRSmaStat);
|
||||
pRSmaStat->commitAppliedVer = pSma->pVnode->state.applied;
|
||||
pRSmaStat->commitSubmitVer = pRSmaStat->submitVer;
|
||||
tdRSmaPersistExecImpl(pRSmaStat, RSMA_INFO_HASH(pRSmaStat));
|
||||
|
||||
smaDebug("vgId:%d, rsma pre commit success", SMA_VID(pSma));
|
||||
|
||||
|
@ -132,7 +160,7 @@ static int32_t tdProcessRSmaPreCommitImpl(SSma *pSma) {
|
|||
* @param pSma
|
||||
* @return int32_t
|
||||
*/
|
||||
static int32_t tdProcessRSmaCommitImpl(SSma *pSma) {
|
||||
static int32_t tdProcessRSmaSyncCommitImpl(SSma *pSma) {
|
||||
SSmaEnv *pSmaEnv = SMA_RSMA_ENV(pSma);
|
||||
if (!pSmaEnv) {
|
||||
return TSDB_CODE_SUCCESS;
|
||||
|
@ -140,21 +168,9 @@ static int32_t tdProcessRSmaCommitImpl(SSma *pSma) {
|
|||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief post-commit for rollup sma
|
||||
* 1) clean up the outdated qtaskinfo files
|
||||
*
|
||||
* @param pSma
|
||||
* @return int32_t
|
||||
*/
|
||||
static int32_t tdProcessRSmaPostCommitImpl(SSma *pSma) {
|
||||
SVnode *pVnode = pSma->pVnode;
|
||||
|
||||
if (!VND_IS_RSMA(pVnode)) {
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
int64_t committed = pVnode->state.committed;
|
||||
static int32_t tdCleanupQTaskInfoFiles(SSma *pSma, SRSmaStat *pRSmaStat) {
|
||||
SVnode *pVnode = pSma->pVnode;
|
||||
int64_t committed = pRSmaStat->commitAppliedVer;
|
||||
TdDirPtr pDir = NULL;
|
||||
TdDirEntryPtr pDirEntry = NULL;
|
||||
char dir[TSDB_FILENAME_LEN];
|
||||
|
@ -222,5 +238,159 @@ static int32_t tdProcessRSmaPostCommitImpl(SSma *pSma) {
|
|||
|
||||
taosCloseDir(&pDir);
|
||||
regfree(®ex);
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief post-commit for rollup sma
|
||||
* 1) clean up the outdated qtaskinfo files
|
||||
*
|
||||
* @param pSma
|
||||
* @return int32_t
|
||||
*/
|
||||
static int32_t tdProcessRSmaSyncPostCommitImpl(SSma *pSma) {
|
||||
SVnode *pVnode = pSma->pVnode;
|
||||
if (!VND_IS_RSMA(pVnode)) {
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
SSmaEnv *pSmaEnv = SMA_RSMA_ENV(pSma);
|
||||
SRSmaStat *pRSmaStat = SMA_RSMA_STAT(SMA_ENV_STAT(pSmaEnv));
|
||||
|
||||
// cleanup outdated qtaskinfo files
|
||||
tdCleanupQTaskInfoFiles(pSma, pRSmaStat);
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Rsma async commit implementation
|
||||
* 1) set rsma stat TASK_TRIGGER_STAT_PAUSED
|
||||
* 2) Wait all running fetch task finish to fetch and put submitMsg into level 2/3 wQueue(blocking level 1 write)
|
||||
* 3)
|
||||
*
|
||||
* @param pSma
|
||||
* @return int32_t
|
||||
*/
|
||||
static int32_t tdProcessRSmaAsyncPreCommitImpl(SSma *pSma) {
|
||||
SSmaEnv *pSmaEnv = SMA_RSMA_ENV(pSma);
|
||||
if (!pSmaEnv) {
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
SSmaStat *pStat = SMA_ENV_STAT(pSmaEnv);
|
||||
SRSmaStat *pRSmaStat = SMA_RSMA_STAT(pStat);
|
||||
|
||||
// step 1: set rsma stat
|
||||
atomic_store_8(RSMA_TRIGGER_STAT(pRSmaStat), TASK_TRIGGER_STAT_PAUSED);
|
||||
atomic_store_8(RSMA_COMMIT_STAT(pRSmaStat), 1);
|
||||
|
||||
// step 2: wait all triggered fetch tasks finished
|
||||
int32_t nLoops = 0;
|
||||
while (1) {
|
||||
if (T_REF_VAL_GET(pStat) == 0) {
|
||||
smaDebug("vgId:%d, rsma fetch tasks all finished", SMA_VID(pSma));
|
||||
break;
|
||||
} else {
|
||||
smaDebug("vgId:%d, rsma fetch tasks not all finished yet", SMA_VID(pSma));
|
||||
}
|
||||
++nLoops;
|
||||
if (nLoops > 1000) {
|
||||
sched_yield();
|
||||
nLoops = 0;
|
||||
}
|
||||
}
|
||||
|
||||
// step 3: swap rsmaInfoHash and iRsmaInfoHash
|
||||
ASSERT(!RSMA_IMU_INFO_HASH(pRSmaStat));
|
||||
ASSERT(RSMA_INFO_HASH(pRSmaStat));
|
||||
|
||||
RSMA_IMU_INFO_HASH(pRSmaStat) = RSMA_INFO_HASH(pRSmaStat);
|
||||
RSMA_INFO_HASH(pRSmaStat) =
|
||||
taosHashInit(RSMA_TASK_INFO_HASH_SLOT, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, HASH_ENTRY_LOCK);
|
||||
|
||||
if (!RSMA_INFO_HASH(pRSmaStat)) {
|
||||
smaError("vgId:%d, rsma async commit failed since %s", SMA_VID(pSma), terrstr());
|
||||
return TSDB_CODE_FAILED;
|
||||
}
|
||||
|
||||
// step 4: others
|
||||
pRSmaStat->commitAppliedVer = pSma->pVnode->state.applied;
|
||||
pRSmaStat->commitSubmitVer = pRSmaStat->submitVer;
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief commit for rollup sma
|
||||
*
|
||||
* @param pSma
|
||||
* @return int32_t
|
||||
*/
|
||||
static int32_t tdProcessRSmaAsyncCommitImpl(SSma *pSma) {
|
||||
SSmaEnv *pSmaEnv = SMA_RSMA_ENV(pSma);
|
||||
if (!pSmaEnv) {
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
SSmaStat *pStat = SMA_ENV_STAT(pSmaEnv);
|
||||
SRSmaStat *pRSmaStat = SMA_RSMA_STAT(pStat);
|
||||
|
||||
// perform persist task for qTaskInfo
|
||||
tdRSmaPersistExecImpl(pRSmaStat, RSMA_IMU_INFO_HASH(pRSmaStat));
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Migrate rsmaInfo from iRsmaInfo to rsmaInfo if rsmaInfoHash not empty.
|
||||
*
|
||||
* @param pSma
|
||||
* @return int32_t
|
||||
*/
|
||||
static int32_t tdProcessRSmaAsyncPostCommitImpl(SSma *pSma) {
|
||||
SSmaEnv *pSmaEnv = SMA_RSMA_ENV(pSma);
|
||||
if (!pSmaEnv) {
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
SSmaStat *pStat = SMA_ENV_STAT(pSmaEnv);
|
||||
SRSmaStat *pRSmaStat = SMA_RSMA_STAT(pStat);
|
||||
|
||||
// step 1: merge rsmaInfoHash and iRsmaInfoHash
|
||||
taosWLockLatch(SMA_ENV_LOCK(pSmaEnv));
|
||||
|
||||
if (taosHashGetSize(RSMA_INFO_HASH(pRSmaStat)) <= 0) {
|
||||
// TODO: optimization - just switch the hash pointer if rsmaInfoHash is empty
|
||||
}
|
||||
|
||||
void *pIter = taosHashIterate(RSMA_IMU_INFO_HASH(pRSmaStat), NULL);
|
||||
while (pIter) {
|
||||
tb_uid_t *pSuid = (tb_uid_t *)taosHashGetKey(pIter, NULL);
|
||||
|
||||
if (!taosHashGet(RSMA_INFO_HASH(pRSmaStat), pSuid, sizeof(tb_uid_t))) {
|
||||
taosHashPut(RSMA_INFO_HASH(pRSmaStat), pSuid, sizeof(tb_uid_t), pIter, sizeof(pIter));
|
||||
smaDebug("vgId:%d, rsma async post commit, migrated from iRsmaInfoHash for table:%" PRIi64, SMA_VID(pSma),
|
||||
*pSuid);
|
||||
} else {
|
||||
// free the resources
|
||||
SRSmaInfo *pRSmaInfo = *(SRSmaInfo **)pIter;
|
||||
tdFreeRSmaInfo(pSma, pRSmaInfo, false);
|
||||
smaDebug("vgId:%d, rsma async post commit, free rsma info since already COW for table:%" PRIi64, SMA_VID(pSma),
|
||||
*pSuid);
|
||||
}
|
||||
|
||||
pIter = taosHashIterate(RSMA_IMU_INFO_HASH(pRSmaStat), pIter);
|
||||
}
|
||||
|
||||
taosHashCleanup(RSMA_IMU_INFO_HASH(pRSmaStat));
|
||||
RSMA_IMU_INFO_HASH(pRSmaStat) = NULL;
|
||||
|
||||
taosWUnLockLatch(SMA_ENV_LOCK(pSmaEnv));
|
||||
|
||||
// step 2: cleanup outdated qtaskinfo files
|
||||
tdCleanupQTaskInfoFiles(pSma, pRSmaStat);
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
|
|
@ -17,7 +17,6 @@
|
|||
|
||||
typedef struct SSmaStat SSmaStat;
|
||||
|
||||
#define RSMA_TASK_INFO_HASH_SLOT 8
|
||||
#define SMA_MGMT_REF_NUM 10240
|
||||
|
||||
extern SSmaMgmt smaMgmt;
|
||||
|
@ -109,12 +108,7 @@ static SSmaEnv *tdNewSmaEnv(const SSma *pSma, int8_t smaType, const char *path)
|
|||
|
||||
SMA_ENV_TYPE(pEnv) = smaType;
|
||||
|
||||
int code = taosThreadRwlockInit(&(pEnv->lock), NULL);
|
||||
if (code) {
|
||||
terrno = TAOS_SYSTEM_ERROR(code);
|
||||
taosMemoryFree(pEnv);
|
||||
return NULL;
|
||||
}
|
||||
taosInitRWLatch(&(pEnv->lock));
|
||||
|
||||
if (tdInitSmaStat(&SMA_ENV_STAT(pEnv), smaType, pSma) != TSDB_CODE_SUCCESS) {
|
||||
tdFreeSmaEnv(pEnv);
|
||||
|
@ -148,7 +142,6 @@ static int32_t tdInitSmaEnv(SSma *pSma, int8_t smaType, const char *path, SSmaEn
|
|||
void tdDestroySmaEnv(SSmaEnv *pSmaEnv) {
|
||||
if (pSmaEnv) {
|
||||
pSmaEnv->pStat = tdFreeSmaState(pSmaEnv->pStat, SMA_ENV_TYPE(pSmaEnv));
|
||||
taosThreadRwlockDestroy(&(pSmaEnv->lock));
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -260,7 +253,7 @@ static void tdDestroyRSmaStat(void *pRSmaStat) {
|
|||
void *infoHash = taosHashIterate(RSMA_INFO_HASH(pStat), NULL);
|
||||
while (infoHash) {
|
||||
SRSmaInfo *pSmaInfo = *(SRSmaInfo **)infoHash;
|
||||
tdFreeRSmaInfo(pSma, pSmaInfo);
|
||||
tdFreeRSmaInfo(pSma, pSmaInfo, true);
|
||||
infoHash = taosHashIterate(RSMA_INFO_HASH(pStat), infoHash);
|
||||
}
|
||||
}
|
||||
|
@ -311,7 +304,6 @@ int32_t tdDestroySmaState(SSmaStat *pSmaStat, int8_t smaType) {
|
|||
if (taosRemoveRef(smaMgmt.rsetId, RSMA_REF_ID(pRSmaStat)) < 0) {
|
||||
smaError("vgId:%d, remove refId:%" PRIi64 " from rsmaRef:%" PRIi32 " failed since %s", SMA_VID(pRSmaStat->pSma),
|
||||
RSMA_REF_ID(pRSmaStat), smaMgmt.rsetId, terrstr());
|
||||
ASSERT(0);
|
||||
} else {
|
||||
smaDebug("vgId:%d, remove refId:%" PRIi64 " from rsmaRef:%" PRIi32 " succeed", SMA_VID(pRSmaStat->pSma),
|
||||
RSMA_REF_ID(pRSmaStat), smaMgmt.rsetId);
|
||||
|
|
|
@ -48,20 +48,7 @@ static int32_t tdRSmaRestoreQTaskInfoInit(SSma *pSma, int64_t *nTables);
|
|||
static int32_t tdRSmaRestoreQTaskInfoReload(SSma *pSma, int64_t *committed);
|
||||
static int32_t tdRSmaRestoreTSDataReload(SSma *pSma, int64_t committed);
|
||||
|
||||
struct SRSmaInfoItem {
|
||||
void *taskInfo; // qTaskInfo_t
|
||||
int64_t refId;
|
||||
tmr_h tmrId;
|
||||
int32_t maxDelay;
|
||||
int8_t level;
|
||||
int8_t triggerStat;
|
||||
};
|
||||
|
||||
struct SRSmaInfo {
|
||||
STSchema *pTSchema;
|
||||
int64_t suid;
|
||||
SRSmaInfoItem items[TSDB_RETENTION_L2];
|
||||
};
|
||||
|
||||
static SRSmaInfo *tdGetRSmaInfoByItem(SRSmaInfoItem *pItem) {
|
||||
// adapt accordingly if definition of SRSmaInfo update
|
||||
|
@ -102,8 +89,9 @@ static FORCE_INLINE int32_t tdRSmaQTaskInfoContLen(int32_t lenWithHead) {
|
|||
|
||||
static FORCE_INLINE void tdRSmaQTaskInfoIterDestroy(SRSmaQTaskInfoIter *pIter) { taosMemoryFreeClear(pIter->pBuf); }
|
||||
|
||||
static FORCE_INLINE void tdFreeTaskHandle(qTaskInfo_t *taskHandle, int32_t vgId, int32_t level) {
|
||||
void tdFreeQTaskInfo(qTaskInfo_t *taskHandle, int32_t vgId, int32_t level) {
|
||||
// Note: free/kill may in RC
|
||||
if (!taskHandle) return;
|
||||
qTaskInfo_t otaskHandle = atomic_load_ptr(taskHandle);
|
||||
if (otaskHandle && atomic_val_compare_exchange_ptr(taskHandle, otaskHandle, NULL)) {
|
||||
smaDebug("vgId:%d, free qTaskInfo_t %p of level %d", vgId, otaskHandle, level);
|
||||
|
@ -111,25 +99,36 @@ static FORCE_INLINE void tdFreeTaskHandle(qTaskInfo_t *taskHandle, int32_t vgId,
|
|||
} else {
|
||||
smaDebug("vgId:%d, not free qTaskInfo_t %p of level %d", vgId, otaskHandle, level);
|
||||
}
|
||||
// TODO: clear files related to qTaskInfo?
|
||||
}
|
||||
|
||||
void *tdFreeRSmaInfo(SSma *pSma, SRSmaInfo *pInfo) {
/**
 * @brief general function to free rsmaInfo
 *
 * @param pSma
 * @param pInfo
 * @param isDeepFree Only stop tmrId and free pTSchema for deep free
 * @return void*
 */
void *tdFreeRSmaInfo(SSma *pSma, SRSmaInfo *pInfo, bool isDeepFree) {
  if (pInfo) {
    for (int32_t i = 0; i < TSDB_RETENTION_L2; ++i) {
      SRSmaInfoItem *pItem = &pInfo->items[i];
      if (pItem->taskInfo) {
        if (pItem->tmrId) {
        if (isDeepFree && pItem->tmrId) {
          smaDebug("vgId:%d, table %" PRIi64 " stop fetch timer %p level %d", SMA_VID(pSma), pInfo->suid, pItem->tmrId,
                   i + 1);
          taosTmrStopA(&pItem->tmrId);
        }
        tdFreeTaskHandle(&pItem->taskInfo, SMA_VID(pSma), i + 1);
        tdFreeQTaskInfo(&pItem->taskInfo, SMA_VID(pSma), i + 1);
      } else {
        smaDebug("vgId:%d, table %" PRIi64 " no need to destroy rsma info level %d since empty taskInfo", SMA_VID(pSma),
                 pInfo->suid, i + 1);
      }
    }
    taosMemoryFree(pInfo->pTSchema);
    if (isDeepFree) {
      taosMemoryFree(pInfo->pTSchema);
    }
    taosMemoryFree(pInfo);
  }
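The isDeepFree flag above splits teardown into two paths: only the deep free stops the fetch timer and releases the shared pTSchema, while the shallow free (used for a cloned copy) releases just the per-item task handles and the container. A minimal sketch of that split, with stand-in types rather than the actual TDengine structures:

// Illustrative sketch, not part of the commit above.
#include <stdbool.h>
#include <stdlib.h>

typedef struct {
  void *taskHandle;     // always owned by this copy
  void *fetchTimer;     // shared resource, released only on a deep free
} DemoItem;

typedef struct {
  void    *schema;      // shared with another copy of the info object
  DemoItem item;
} DemoInfo;

// Deep free releases the shared members too; shallow free leaves anything
// that another copy may still reference untouched.
static void demoFreeInfo(DemoInfo *p, bool isDeepFree) {
  if (p == NULL) return;
  if (isDeepFree) {
    free(p->item.fetchTimer);
    free(p->schema);
  }
  free(p->item.taskHandle);   // per-copy resources are always released
  free(p);
}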
@ -151,7 +150,7 @@ static int32_t tdUpdateTbUidListImpl(SSma *pSma, tb_uid_t *suid, SArray *tbUids)
|
|||
|
||||
if (!suid || !tbUids) {
|
||||
terrno = TSDB_CODE_INVALID_PTR;
|
||||
smaError("vgId:%d, failed to get rsma info for uid:%" PRIi64 " since %s", SMA_VID(pSma), *suid, terrstr(terrno));
|
||||
smaError("vgId:%d, failed to get rsma info for uid:%" PRIi64 " since %s", SMA_VID(pSma), *suid, terrstr());
|
||||
return TSDB_CODE_FAILED;
|
||||
}
|
||||
|
||||
|
@ -165,7 +164,7 @@ static int32_t tdUpdateTbUidListImpl(SSma *pSma, tb_uid_t *suid, SArray *tbUids)
|
|||
|
||||
if (pRSmaInfo->items[0].taskInfo) {
|
||||
if ((qUpdateQualifiedTableId(pRSmaInfo->items[0].taskInfo, tbUids, true) < 0)) {
|
||||
smaError("vgId:%d, update tbUidList failed for uid:%" PRIi64 " since %s", SMA_VID(pSma), *suid, terrstr(terrno));
|
||||
smaError("vgId:%d, update tbUidList failed for uid:%" PRIi64 " since %s", SMA_VID(pSma), *suid, terrstr());
|
||||
return TSDB_CODE_FAILED;
|
||||
} else {
|
||||
smaDebug("vgId:%d, update tbUidList succeed for qTaskInfo:%p with suid:%" PRIi64 ", uid:%" PRIi64, SMA_VID(pSma),
|
||||
|
@ -175,7 +174,7 @@ static int32_t tdUpdateTbUidListImpl(SSma *pSma, tb_uid_t *suid, SArray *tbUids)
|
|||
|
||||
if (pRSmaInfo->items[1].taskInfo) {
|
||||
if ((qUpdateQualifiedTableId(pRSmaInfo->items[1].taskInfo, tbUids, true) < 0)) {
|
||||
smaError("vgId:%d, update tbUidList failed for uid:%" PRIi64 " since %s", SMA_VID(pSma), *suid, terrstr(terrno));
|
||||
smaError("vgId:%d, update tbUidList failed for uid:%" PRIi64 " since %s", SMA_VID(pSma), *suid, terrstr());
|
||||
return TSDB_CODE_FAILED;
|
||||
} else {
|
||||
smaDebug("vgId:%d, update tbUidList succeed for qTaskInfo:%p with suid:%" PRIi64 ", uid:%" PRIi64, SMA_VID(pSma),
|
||||
|
@ -257,22 +256,22 @@ int32_t tdFetchTbUidList(SSma *pSma, STbUidStore **ppStore, tb_uid_t suid, tb_ui
|
|||
|
||||
static int32_t tdSetRSmaInfoItemParams(SSma *pSma, SRSmaParam *param, SRSmaStat *pStat, SRSmaInfo *pRSmaInfo,
|
||||
int8_t idx) {
|
||||
SRetention *pRetention = SMA_RETENTION(pSma);
|
||||
STsdbCfg *pTsdbCfg = SMA_TSDB_CFG(pSma);
|
||||
if ((param->qmsgLen > 0) && param->qmsg[idx]) {
|
||||
SRetention *pRetention = SMA_RETENTION(pSma);
|
||||
STsdbCfg *pTsdbCfg = SMA_TSDB_CFG(pSma);
|
||||
SVnode *pVnode = pSma->pVnode;
|
||||
SReadHandle handle = {
|
||||
.meta = pVnode->pMeta,
|
||||
.vnode = pVnode,
|
||||
.initTqReader = 1,
|
||||
};
|
||||
|
||||
SReadHandle handle = {
|
||||
.meta = pSma->pVnode->pMeta,
|
||||
.vnode = pSma->pVnode,
|
||||
.initTqReader = 1,
|
||||
};
|
||||
|
||||
if (param->qmsg[idx]) {
|
||||
SRSmaInfoItem *pItem = &(pRSmaInfo->items[idx]);
|
||||
pItem->refId = RSMA_REF_ID(pStat);
|
||||
pItem->taskInfo = qCreateStreamExecTaskInfo(param->qmsg[idx], &handle);
|
||||
if (!pItem->taskInfo) {
|
||||
terrno = TSDB_CODE_RSMA_QTASKINFO_CREATE;
|
||||
goto _err;
|
||||
return TSDB_CODE_FAILED;
|
||||
}
|
||||
pItem->triggerStat = TASK_TRIGGER_STAT_INACTIVE;
|
||||
if (param->maxdelay[idx] < TSDB_MIN_ROLLUP_MAX_DELAY) {
|
||||
|
@ -286,13 +285,11 @@ static int32_t tdSetRSmaInfoItemParams(SSma *pSma, SRSmaParam *param, SRSmaStat
|
|||
pItem->maxDelay = TSDB_MAX_ROLLUP_MAX_DELAY;
|
||||
}
|
||||
pItem->level = idx == 0 ? TSDB_RETENTION_L1 : TSDB_RETENTION_L2;
|
||||
smaInfo("vgId:%d table:%" PRIi64 " level:%" PRIi8 " maxdelay:%" PRIi64 " watermark:%" PRIi64
|
||||
smaInfo("vgId:%d, table:%" PRIi64 " level:%" PRIi8 " maxdelay:%" PRIi64 " watermark:%" PRIi64
|
||||
", finally maxdelay:%" PRIi32,
|
||||
SMA_VID(pSma), pRSmaInfo->suid, idx + 1, param->maxdelay[idx], param->watermark[idx], pItem->maxDelay);
|
||||
TD_VID(pVnode), pRSmaInfo->suid, idx + 1, param->maxdelay[idx], param->watermark[idx], pItem->maxDelay);
|
||||
}
|
||||
return TSDB_CODE_SUCCESS;
|
||||
_err:
|
||||
return TSDB_CODE_FAILED;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -357,7 +354,7 @@ int32_t tdProcessRSmaCreateImpl(SSma *pSma, SRSmaParam *param, int64_t suid, con
|
|||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
_err:
|
||||
tdFreeRSmaInfo(pSma, pRSmaInfo);
|
||||
tdFreeRSmaInfo(pSma, pRSmaInfo, true);
|
||||
return TSDB_CODE_FAILED;
|
||||
}
|
||||
|
||||
|
@ -562,7 +559,9 @@ static int32_t tdRSmaFetchAndSubmitResult(SRSmaInfoItem *pItem, STSchema *pTSche
|
|||
SSDataBlock *output = NULL;
|
||||
uint64_t ts;
|
||||
if (qExecTask(pItem->taskInfo, &output, &ts) < 0) {
|
||||
ASSERT(false);
|
||||
smaError("vgId:%d, qExecTask for rsma table %" PRIi64 "l evel %" PRIi8 " failed since %s", SMA_VID(pSma), suid,
|
||||
pItem->level, terrstr());
|
||||
goto _err;
|
||||
}
|
||||
if (!output) {
|
||||
break;
|
||||
|
@ -572,7 +571,7 @@ static int32_t tdRSmaFetchAndSubmitResult(SRSmaInfoItem *pItem, STSchema *pTSche
|
|||
pResult = taosArrayInit(1, sizeof(SSDataBlock));
|
||||
if (!pResult) {
|
||||
terrno = TSDB_CODE_OUT_OF_MEMORY;
|
||||
return TSDB_CODE_FAILED;
|
||||
goto _err;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -649,9 +648,18 @@ static int32_t tdExecuteRSmaImpl(SSma *pSma, const void *pMsg, int32_t inputType
|
|||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
/**
 * @brief During async commit, the SRSmaInfo object would be COW from iRSmaInfoHash and write lock should be applied.
 *
 * @param pSma
 * @param suid
 * @return SRSmaInfo*
 */
static SRSmaInfo *tdGetRSmaInfoBySuid(SSma *pSma, int64_t suid) {
  SSmaEnv   *pEnv = SMA_RSMA_ENV(pSma);
  SRSmaStat *pStat = NULL;
  SRSmaInfo *pRSmaInfo = NULL;

  if (!pEnv) {
    // only applicable when rsma env exists
    return NULL;

@ -662,11 +670,37 @@ static SRSmaInfo *tdGetRSmaInfoBySuid(SSma *pSma, int64_t suid) {
    return NULL;
  }

  SRSmaInfo *pRSmaInfo = taosHashGet(RSMA_INFO_HASH(pStat), &suid, sizeof(tb_uid_t));
  if (!pRSmaInfo || !(pRSmaInfo = *(SRSmaInfo **)pRSmaInfo)) {
  pRSmaInfo = taosHashGet(RSMA_INFO_HASH(pStat), &suid, sizeof(tb_uid_t));
  if (pRSmaInfo && (pRSmaInfo = *(SRSmaInfo **)pRSmaInfo)) {
    return pRSmaInfo;
  }

  if (RSMA_COMMIT_STAT(pStat) == 0) {
    return NULL;
  }
  return pRSmaInfo;

  // clone the SRSmaInfo from iRsmaInfoHash to rsmaInfoHash if in committing stat
  SRSmaInfo *pCowRSmaInfo = NULL;
  // lock
  taosWLockLatch(SMA_ENV_LOCK(pEnv));
  void *iRSmaInfo = taosHashGet(RSMA_IMU_INFO_HASH(pStat), &suid, sizeof(tb_uid_t));
  if (iRSmaInfo) {
    SRSmaInfo *pIRSmaInfo = *(SRSmaInfo **)iRSmaInfo;
    if (pIRSmaInfo) {
      if (tdCloneRSmaInfo(pSma, pCowRSmaInfo, pIRSmaInfo) < 0) {
        taosWUnLockLatch(SMA_ENV_LOCK(pEnv));
        smaError("vgId:%d, clone rsma info failed for suid:%" PRIu64 " since %s", SMA_VID(pSma), suid, terrstr());
        return NULL;
      }
      if (taosHashPut(RSMA_INFO_HASH(pStat), &suid, sizeof(tb_uid_t), &pCowRSmaInfo, sizeof(pCowRSmaInfo)) < 0) {
        taosWUnLockLatch(SMA_ENV_LOCK(pEnv));
        return NULL;
      }
    }
  }
  // unlock
  taosWUnLockLatch(SMA_ENV_LOCK(pEnv));
  return pCowRSmaInfo;
}
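The lookup above is a copy-on-write scheme for the async commit: readers first try the live hash; when that misses while a commit is in flight, the entry is cloned from the immutable (frozen) hash under the env write lock and published back into the live hash. A much-reduced sketch of the same idea for a single entry, using stand-in types and a plain pthread rwlock in place of the latch used here:

// Illustrative sketch, not part of the commit above.
#include <pthread.h>
#include <stdlib.h>
#include <string.h>

typedef struct { int payload; } DemoInfo;    // stand-in for the per-table info

typedef struct {
  DemoInfo        *live;        // entry visible to the write path
  DemoInfo        *immutable;   // frozen copy taken when the commit started
  int              committing;  // non-zero while an async commit is running
  pthread_rwlock_t lock;        // plays the role of the env latch
} DemoEnv;

// Return the live entry; if it is missing while a commit is in flight, clone
// it from the frozen copy under the write lock and publish the clone so that
// later lookups take the fast path.
static DemoInfo *demoGetOrClone(DemoEnv *env) {
  if (env->live || !env->committing) return env->live;    // fast path

  pthread_rwlock_wrlock(&env->lock);
  if (env->live == NULL && env->immutable != NULL) {
    DemoInfo *clone = malloc(sizeof(*clone));
    if (clone) {
      memcpy(clone, env->immutable, sizeof(*clone));
      env->live = clone;                                   // publish the clone
    }
  }
  pthread_rwlock_unlock(&env->lock);
  return env->live;
}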
|
||||
|
||||
static int32_t tdExecuteRSma(SSma *pSma, const void *pMsg, int32_t inputType, tb_uid_t suid) {
|
||||
|
@ -891,10 +925,17 @@ int32_t tdProcessRSmaRestoreImpl(SSma *pSma) {
|
|||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
_err:
|
||||
smaError("vgId:%d failed to restore rsma task since %s", SMA_VID(pSma), terrstr());
|
||||
smaError("vgId:%d, failed to restore rsma task since %s", SMA_VID(pSma), terrstr());
|
||||
return TSDB_CODE_FAILED;
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Restore from SRSmaQTaskInfoItem
|
||||
*
|
||||
* @param pSma
|
||||
* @param pItem
|
||||
* @return int32_t
|
||||
*/
|
||||
static int32_t tdRSmaQTaskInfoItemRestore(SSma *pSma, const SRSmaQTaskInfoItem *pItem) {
|
||||
SRSmaInfo *pRSmaInfo = NULL;
|
||||
void *qTaskInfo = NULL;
|
||||
|
@ -920,7 +961,7 @@ static int32_t tdRSmaQTaskInfoItemRestore(SSma *pSma, const SRSmaQTaskInfoItem *
|
|||
|
||||
if (qDeserializeTaskStatus(qTaskInfo, pItem->qTaskInfo, pItem->len) < 0) {
|
||||
smaError("vgId:%d, restore rsma task failed for table:%" PRIi64 " level %d since %s", SMA_VID(pSma), pItem->suid,
|
||||
pItem->type, terrstr(terrno));
|
||||
pItem->type, terrstr());
|
||||
return TSDB_CODE_FAILED;
|
||||
}
|
||||
smaDebug("vgId:%d, restore rsma task success for table:%" PRIi64 " level %d", SMA_VID(pSma), pItem->suid,
|
||||
|
@ -1067,26 +1108,27 @@ static int32_t tdRSmaQTaskInfoRestore(SSma *pSma, SRSmaQTaskInfoIter *pIter) {
|
|||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
int32_t tdRSmaPersistExecImpl(SRSmaStat *pRSmaStat) {
|
||||
int32_t tdRSmaPersistExecImpl(SRSmaStat *pRSmaStat, SHashObj *pInfoHash) {
|
||||
SSma *pSma = pRSmaStat->pSma;
|
||||
SVnode *pVnode = pSma->pVnode;
|
||||
int32_t vid = SMA_VID(pSma);
|
||||
int64_t toffset = 0;
|
||||
bool isFileCreated = false;
|
||||
|
||||
if (taosHashGetSize(RSMA_INFO_HASH(pRSmaStat)) <= 0) {
|
||||
if (taosHashGetSize(pInfoHash) <= 0) {
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
void *infoHash = taosHashIterate(RSMA_INFO_HASH(pRSmaStat), NULL);
|
||||
void *infoHash = taosHashIterate(pInfoHash, NULL);
|
||||
if (!infoHash) {
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
STFile tFile = {0};
|
||||
if (RSMA_SUBMIT_VER(pRSmaStat) > 0) {
|
||||
#if 0
|
||||
if (pRSmaStat->commitAppliedVer > 0) {
|
||||
char qTaskInfoFName[TSDB_FILENAME_LEN];
|
||||
tdRSmaQTaskInfoGetFName(vid, pSma->pVnode->state.applied, qTaskInfoFName);
|
||||
tdRSmaQTaskInfoGetFName(vid, pRSmaStat->commitAppliedVer, qTaskInfoFName);
|
||||
if (tdInitTFile(&tFile, tfsGetPrimaryPath(pVnode->pTfs), qTaskInfoFName) < 0) {
|
||||
smaError("vgId:%d, rsma persit, init %s failed since %s", vid, qTaskInfoFName, terrstr());
|
||||
goto _err;
|
||||
|
@ -1099,6 +1141,7 @@ int32_t tdRSmaPersistExecImpl(SRSmaStat *pRSmaStat) {
|
|||
|
||||
isFileCreated = true;
|
||||
}
|
||||
#endif
|
||||
|
||||
while (infoHash) {
|
||||
SRSmaInfo *pRSmaInfo = *(SRSmaInfo **)infoHash;
|
||||
|
@ -1114,7 +1157,7 @@ int32_t tdRSmaPersistExecImpl(SRSmaStat *pRSmaStat) {
|
|||
int8_t type = (int8_t)(i + 1);
|
||||
if (qSerializeTaskStatus(taskInfo, &pOutput, &len) < 0) {
|
||||
smaError("vgId:%d, rsma, table %" PRIi64 " level %d serialize qTaskInfo failed since %s", vid, pRSmaInfo->suid,
|
||||
i + 1, terrstr(terrno));
|
||||
i + 1, terrstr());
|
||||
goto _err;
|
||||
}
|
||||
if (!pOutput || len <= 0) {
|
||||
|
@ -1130,7 +1173,7 @@ int32_t tdRSmaPersistExecImpl(SRSmaStat *pRSmaStat) {
|
|||
|
||||
if (!isFileCreated) {
|
||||
char qTaskInfoFName[TSDB_FILENAME_LEN];
|
||||
tdRSmaQTaskInfoGetFName(vid, pSma->pVnode->state.applied, qTaskInfoFName);
|
||||
tdRSmaQTaskInfoGetFName(vid, pRSmaStat->commitAppliedVer, qTaskInfoFName);
|
||||
if (tdInitTFile(&tFile, tfsGetPrimaryPath(pVnode->pTfs), qTaskInfoFName) < 0) {
|
||||
smaError("vgId:%d, rsma persit, init %s failed since %s", vid, qTaskInfoFName, terrstr());
|
||||
goto _err;
|
||||
|
@ -1163,11 +1206,11 @@ int32_t tdRSmaPersistExecImpl(SRSmaStat *pRSmaStat) {
|
|||
taosMemoryFree(pOutput);
|
||||
}
|
||||
|
||||
infoHash = taosHashIterate(RSMA_INFO_HASH(pRSmaStat), infoHash);
|
||||
infoHash = taosHashIterate(pInfoHash, infoHash);
|
||||
}
|
||||
|
||||
if (isFileCreated) {
|
||||
tFile.info.qTaskInfo.submitVer = atomic_load_64(&pRSmaStat->submitVer);
|
||||
tFile.info.qTaskInfo.submitVer = atomic_load_64(&pRSmaStat->commitSubmitVer);
|
||||
if (tdUpdateTFileHeader(&tFile) < 0) {
|
||||
smaError("vgId:%d, rsma, failed to update tfile %s header since %s", vid, TD_TFILE_FULL_NAME(&tFile),
|
||||
tstrerror(terrno));
|
||||
|
@ -1217,6 +1260,10 @@ static void tdRSmaFetchTrigger(void *param, void *tmrId) {
|
|||
smaDebug("vgId:%d, not fetch rsma level %" PRIi8 " data since stat is %" PRIi8 ", rsetId rsetId:%" PRIi64
|
||||
" refId:%d",
|
||||
SMA_VID(pSma), pItem->level, rsmaTriggerStat, smaMgmt.rsetId, pItem->refId);
|
||||
if (rsmaTriggerStat == TASK_TRIGGER_STAT_PAUSED) {
|
||||
taosTmrReset(tdRSmaFetchTrigger, pItem->maxDelay > 5000 ? 5000 : pItem->maxDelay, pItem, smaMgmt.tmrHandle,
|
||||
&pItem->tmrId);
|
||||
}
|
||||
return;
|
||||
}
|
||||
default:
|
||||
|
|
|
@ -313,4 +313,99 @@ int32_t tdReleaseSmaRef(int32_t rsetId, int64_t refId, const char *tags, int32_t
|
|||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
static int32_t tdCloneQTaskInfo(SSma *pSma, qTaskInfo_t dstTaskInfo, qTaskInfo_t srcTaskInfo, SRSmaParam *param,
|
||||
tb_uid_t suid, int8_t idx) {
|
||||
SVnode *pVnode = pSma->pVnode;
|
||||
char *pOutput = NULL;
|
||||
int32_t len = 0;
|
||||
|
||||
if (qSerializeTaskStatus(srcTaskInfo, &pOutput, &len) < 0) {
|
||||
smaError("vgId:%d, rsma clone, table %" PRIi64 " serialize qTaskInfo failed since %s", TD_VID(pVnode), suid,
|
||||
terrstr());
|
||||
goto _err;
|
||||
}
|
||||
|
||||
SReadHandle handle = {
|
||||
.meta = pVnode->pMeta,
|
||||
.vnode = pVnode,
|
||||
.initTqReader = 1,
|
||||
};
|
||||
ASSERT(!dstTaskInfo);
|
||||
dstTaskInfo = qCreateStreamExecTaskInfo(param->qmsg[idx], &handle);
|
||||
if (!dstTaskInfo) {
|
||||
terrno = TSDB_CODE_RSMA_QTASKINFO_CREATE;
|
||||
goto _err;
|
||||
}
|
||||
|
||||
if (qDeserializeTaskStatus(dstTaskInfo, pOutput, len) < 0) {
|
||||
smaError("vgId:%d, rsma clone, restore rsma task for table:%" PRIi64 " failed since %s", TD_VID(pVnode), suid,
|
||||
terrstr());
|
||||
goto _err;
|
||||
}
|
||||
|
||||
smaError("vgId:%d, rsma clone, restore rsma task for table:%" PRIi64 " succeed", TD_VID(pVnode), suid);
|
||||
|
||||
taosMemoryFreeClear(pOutput);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
_err:
|
||||
taosMemoryFreeClear(pOutput);
|
||||
tdFreeQTaskInfo(dstTaskInfo, TD_VID(pVnode), idx + 1);
|
||||
return TSDB_CODE_FAILED;
|
||||
}
|
||||
|
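tdCloneQTaskInfo clones executor state by round-tripping it through its serialized form: dump the source task's status, create a fresh task from the original definition, then load the saved status into the new task. Below is a self-contained sketch of that serialize/recreate/deserialize pattern; the "task" is a trivial struct and the serialization is a plain memcpy purely for illustration, so the real qSerializeTaskStatus/qDeserializeTaskStatus calls are not modeled.

// Illustrative sketch, not part of the commit above.
#include <stdlib.h>
#include <string.h>

typedef struct {
  long processedRows;   // stand-in for the executor state being cloned
  int  level;
} DemoTask;

// Dump the task state into a freshly allocated buffer.
static int demoSerialize(const DemoTask *t, void **buf, size_t *len) {
  *len = sizeof(*t);
  *buf = malloc(*len);
  if (*buf == NULL) return -1;
  memcpy(*buf, t, *len);
  return 0;
}

// Load previously dumped state into an existing task instance.
static int demoDeserialize(DemoTask *t, const void *buf, size_t len) {
  if (len != sizeof(*t)) return -1;
  memcpy(t, buf, len);
  return 0;
}

// Clone = serialize the source, build a fresh instance, deserialize into it.
static int demoCloneTask(const DemoTask *src, DemoTask **pDst) {
  void  *buf = NULL;
  size_t len = 0;
  *pDst = NULL;
  if (demoSerialize(src, &buf, &len) < 0) return -1;

  DemoTask *dst = calloc(1, sizeof(*dst));
  if (dst == NULL || demoDeserialize(dst, buf, len) < 0) {
    free(dst);
    free(buf);
    return -1;
  }
  free(buf);
  *pDst = dst;    // hand the clone back through the out-parameter
  return 0;
}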
||||
/**
|
||||
* @brief pTSchema is shared
|
||||
*
|
||||
* @param pSma
|
||||
* @param pDest
|
||||
* @param pSrc
|
||||
* @return int32_t
|
||||
*/
|
||||
int32_t tdCloneRSmaInfo(SSma *pSma, SRSmaInfo *pDest, SRSmaInfo *pSrc) {
|
||||
SVnode *pVnode = pSma->pVnode;
|
||||
SRSmaParam *param = NULL;
|
||||
if (!pSrc) {
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
if (!pDest) {
|
||||
pDest = taosMemoryCalloc(1, sizeof(SRSmaInfo));
|
||||
if (!pDest) {
|
||||
terrno = TSDB_CODE_OUT_OF_MEMORY;
|
||||
return TSDB_CODE_FAILED;
|
||||
}
|
||||
}
|
||||
|
||||
memcpy(pDest, pSrc, sizeof(SRSmaInfo));
|
||||
|
||||
SMetaReader mr = {0};
|
||||
metaReaderInit(&mr, SMA_META(pSma), 0);
|
||||
smaDebug("vgId:%d, rsma clone, suid is %" PRIi64, TD_VID(pVnode), pSrc->suid);
|
||||
if (metaGetTableEntryByUid(&mr, pSrc->suid) < 0) {
|
||||
smaError("vgId:%d, rsma clone, failed to get table meta for %" PRIi64 " since %s", TD_VID(pVnode), pSrc->suid,
|
||||
terrstr());
|
||||
goto _err;
|
||||
}
|
||||
ASSERT(mr.me.type == TSDB_SUPER_TABLE);
|
||||
ASSERT(mr.me.uid == pSrc->suid);
|
||||
if (TABLE_IS_ROLLUP(mr.me.flags)) {
|
||||
param = &mr.me.stbEntry.rsmaParam;
|
||||
for (int i = 0; i < TSDB_RETENTION_L2; ++i) {
|
||||
SRSmaInfoItem *pItem = &pSrc->items[i];
|
||||
if (pItem->taskInfo) {
|
||||
tdCloneQTaskInfo(pSma, pDest->items[i].taskInfo, pItem->taskInfo, param, pSrc->suid, i);
|
||||
}
|
||||
}
|
||||
smaDebug("vgId:%d, rsma clone env success for %" PRIi64, TD_VID(pVnode), pSrc->suid);
|
||||
}
|
||||
|
||||
metaReaderClear(&mr);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
_err:
|
||||
metaReaderClear(&mr);
|
||||
tdFreeRSmaInfo(pSma, pDest, false);
|
||||
return TSDB_CODE_FAILED;
|
||||
}
|
||||
|
||||
// ...
|
|
@ -476,9 +476,9 @@ static int32_t getNextRowFromFS(void *iter, TSDBROW **ppRow) {
|
|||
if (--state->iFileSet >= 0) {
|
||||
pFileSet = (SDFileSet *)taosArrayGet(state->aDFileSet, state->iFileSet);
|
||||
} else {
|
||||
// tBlockDataClear(&state->blockData);
|
||||
// tBlockDataClear(&state->blockData, 1);
|
||||
if (state->pBlockData) {
|
||||
tBlockDataClear(state->pBlockData);
|
||||
tBlockDataClear(state->pBlockData, 1);
|
||||
state->pBlockData = NULL;
|
||||
}
|
||||
|
||||
|
@ -500,9 +500,7 @@ static int32_t getNextRowFromFS(void *iter, TSDBROW **ppRow) {
|
|||
if (code) goto _err;
|
||||
|
||||
/* if (state->pBlockIdx) { */
|
||||
/* tBlockIdxReset(state->blockIdx); */
|
||||
/* } */
|
||||
/* tBlockIdxReset(state->blockIdx); */
|
||||
/* code = tMapDataSearch(&state->blockIdxMap, state->pBlockIdxExp, tGetBlockIdx, tCmprBlockIdx,
|
||||
* &state->blockIdx);
|
||||
*/
|
||||
|
@ -582,8 +580,8 @@ _err:
|
|||
state->aBlockIdx = NULL;
|
||||
}
|
||||
if (state->pBlockData) {
|
||||
// tBlockDataClear(&state->blockData);
|
||||
tBlockDataClear(state->pBlockData);
|
||||
// tBlockDataClear(&state->blockData, 1);
|
||||
tBlockDataClear(state->pBlockData, 1);
|
||||
state->pBlockData = NULL;
|
||||
}
|
||||
|
||||
|
@ -609,8 +607,8 @@ int32_t clearNextRowFromFS(void *iter) {
|
|||
state->aBlockIdx = NULL;
|
||||
}
|
||||
if (state->pBlockData) {
|
||||
// tBlockDataClear(&state->blockData);
|
||||
tBlockDataClear(state->pBlockData);
|
||||
// tBlockDataClear(&state->blockData, 1);
|
||||
tBlockDataClear(state->pBlockData, 1);
|
||||
state->pBlockData = NULL;
|
||||
}
|
||||
|
||||
|
|
|
@ -263,7 +263,7 @@ static int32_t tsdbCommitFileDataStart(SCommitter *pCommitter) {
|
|||
taosArrayClear(pCommitter->aBlockIdx);
|
||||
tMapDataReset(&pCommitter->oBlockMap);
|
||||
tBlockDataReset(&pCommitter->oBlockData);
|
||||
pRSet = tsdbFSStateGetDFileSet(pTsdb->fs->nState, pCommitter->commitFid);
|
||||
pRSet = tsdbFSStateGetDFileSet(pTsdb->fs->nState, pCommitter->commitFid, TD_EQ);
|
||||
if (pRSet) {
|
||||
code = tsdbDataFReaderOpen(&pCommitter->pReader, pTsdb, pRSet);
|
||||
if (code) goto _err;
|
||||
|
@ -284,16 +284,7 @@ static int32_t tsdbCommitFileDataStart(SCommitter *pCommitter) {
|
|||
.fLast = {.commitID = pCommitter->commitID, .size = 0},
|
||||
.fSma = pRSet->fSma};
|
||||
} else {
|
||||
STfs *pTfs = pTsdb->pVnode->pTfs;
|
||||
SDiskID did = {.level = 0, .id = 0};
|
||||
|
||||
// TODO: alloc a new disk
|
||||
// tfsAllocDisk(pTfs, 0, &did);
|
||||
|
||||
// create the directory
|
||||
tfsMkdirRecurAt(pTfs, pTsdb->path, did);
|
||||
|
||||
wSet = (SDFileSet){.diskId = did,
|
||||
wSet = (SDFileSet){.diskId = (SDiskID){.level = 0, .id = 0},
|
||||
.fid = pCommitter->commitFid,
|
||||
.fHead = {.commitID = pCommitter->commitID, .offset = 0, .size = 0},
|
||||
.fData = {.commitID = pCommitter->commitID, .size = 0},
|
||||
|
@ -1001,10 +992,10 @@ _exit:
|
|||
static void tsdbCommitDataEnd(SCommitter *pCommitter) {
|
||||
taosArrayDestroy(pCommitter->aBlockIdx);
|
||||
tMapDataClear(&pCommitter->oBlockMap);
|
||||
tBlockDataClear(&pCommitter->oBlockData);
|
||||
tBlockDataClear(&pCommitter->oBlockData, 1);
|
||||
taosArrayDestroy(pCommitter->aBlockIdxN);
|
||||
tMapDataClear(&pCommitter->nBlockMap);
|
||||
tBlockDataClear(&pCommitter->nBlockData);
|
||||
tBlockDataClear(&pCommitter->nBlockData, 1);
|
||||
tTSchemaDestroy(pCommitter->skmTable.pTSchema);
|
||||
tTSchemaDestroy(pCommitter->skmRow.pTSchema);
|
||||
}
|
||||
|
|
|
@ -698,6 +698,6 @@ void tsdbFSStateDeleteDFileSet(STsdbFSState *pState, int32_t fid) {
|
|||
|
||||
SDelFile *tsdbFSStateGetDelFile(STsdbFSState *pState) { return pState->pDelFile; }
|
||||
|
||||
SDFileSet *tsdbFSStateGetDFileSet(STsdbFSState *pState, int32_t fid) {
|
||||
return (SDFileSet *)taosArraySearch(pState->aDFileSet, &(SDFileSet){.fid = fid}, tDFileSetCmprFn, TD_EQ);
|
||||
SDFileSet *tsdbFSStateGetDFileSet(STsdbFSState *pState, int32_t fid, int32_t flag) {
|
||||
return (SDFileSet *)taosArraySearch(pState->aDFileSet, &(SDFileSet){.fid = fid}, tDFileSetCmprFn, flag);
|
||||
}
|
||||
|
|
|
@ -63,10 +63,10 @@ typedef struct SBlockLoadSuppInfo {
|
|||
} SBlockLoadSuppInfo;
|
||||
|
||||
typedef struct SFilesetIter {
|
||||
int32_t numOfFiles; // number of total files
|
||||
int32_t index; // current accessed index in the list
|
||||
SArray* pFileList; // data file list
|
||||
int32_t order;
|
||||
int32_t numOfFiles; // number of total files
|
||||
int32_t index; // current accessed index in the list
|
||||
SArray* pFileList; // data file list
|
||||
int32_t order;
|
||||
} SFilesetIter;
|
||||
|
||||
typedef struct SFileDataBlockInfo {
|
||||
|
@ -830,8 +830,8 @@ static int32_t doLoadFileBlockData(STsdbReader* pReader, SDataBlockIter* pBlockI
|
|||
SBlockLoadSuppInfo* pSupInfo = &pReader->suppInfo;
|
||||
SFileBlockDumpInfo* pDumpInfo = &pReader->status.fBlockDumpInfo;
|
||||
|
||||
int32_t code = tsdbReadColData(pReader->pFileReader, &pBlockScanInfo->blockIdx, pBlock, pSupInfo->colIds, numOfCols,
|
||||
pBlockData, NULL, NULL);
|
||||
int32_t code = tsdbReadColData(pReader->pFileReader, &pBlockScanInfo->blockIdx, pBlock, pSupInfo->colIds, numOfCols,
|
||||
pBlockData, NULL, NULL);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
goto _error;
|
||||
}
|
||||
|
@ -1991,7 +1991,7 @@ static TSDBKEY getCurrentKeyInBuf(SDataBlockIter* pBlockIter, STsdbReader* pRead
|
|||
|
||||
static int32_t moveToNextFile(STsdbReader* pReader, int32_t* numOfBlocks) {
|
||||
SReaderStatus* pStatus = &pReader->status;
|
||||
SArray* pIndexList = taosArrayInit(4, sizeof(SBlockIdx));
|
||||
SArray* pIndexList = taosArrayInit(4, sizeof(SBlockIdx));
|
||||
|
||||
while (1) {
|
||||
bool hasNext = filesetIteratorNext(&pStatus->fileIter, pReader);
|
||||
|
@ -3006,14 +3006,14 @@ SArray* tsdbRetrieveDataBlock(STsdbReader* pReader, SArray* pIdList) {
|
|||
|
||||
code = doLoadFileBlockData(pReader, &pStatus->blockIter, pBlockScanInfo, &pStatus->fileBlockData);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
tBlockDataClear(&pStatus->fileBlockData);
|
||||
tBlockDataClear(&pStatus->fileBlockData, 1);
|
||||
|
||||
terrno = code;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
copyBlockDataToSDataBlock(pReader, pBlockScanInfo);
|
||||
tBlockDataClear(&pStatus->fileBlockData);
|
||||
tBlockDataClear(&pStatus->fileBlockData, 1);
|
||||
return pReader->pResBlock->pDataBlock;
|
||||
}
|
||||
|
||||
|
@ -3132,8 +3132,8 @@ int32_t tsdbGetFileBlocksDistInfo(STsdbReader* pReader, STableBlockDistInfo* pTa
|
|||
hasNext = (pBlockIter->numOfBlocks > 0);
|
||||
}
|
||||
|
||||
// tsdbDebug("%p %d blocks found in file for %d table(s), fid:%d, %s", pReader, numOfBlocks, numOfTables,
|
||||
// pReader->pFileGroup->fid, pReader->idStr);
|
||||
// tsdbDebug("%p %d blocks found in file for %d table(s), fid:%d, %s", pReader, numOfBlocks, numOfTables,
|
||||
// pReader->pFileGroup->fid, pReader->idStr);
|
||||
}
|
||||
|
||||
return code;
|
||||
|
@ -3204,4 +3204,3 @@ int32_t tsdbGetTableSchema(SVnode* pVnode, int64_t uid, STSchema** pSchema, int6
|
|||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
|
|
|
@ -979,21 +979,21 @@ int32_t tsdbReadColData(SDataFReader *pReader, SBlockIdx *pBlockIdx, SBlock *pBl
|
|||
|
||||
code = tBlockDataCopy(pBlockData, pBlockData2);
|
||||
if (code) {
|
||||
tBlockDataClear(pBlockData1);
|
||||
tBlockDataClear(pBlockData2);
|
||||
tBlockDataClear(pBlockData1, 1);
|
||||
tBlockDataClear(pBlockData2, 1);
|
||||
goto _err;
|
||||
}
|
||||
|
||||
code = tBlockDataMerge(pBlockData1, pBlockData2, pBlockData);
|
||||
if (code) {
|
||||
tBlockDataClear(pBlockData1);
|
||||
tBlockDataClear(pBlockData2);
|
||||
tBlockDataClear(pBlockData1, 1);
|
||||
tBlockDataClear(pBlockData2, 1);
|
||||
goto _err;
|
||||
}
|
||||
}
|
||||
|
||||
tBlockDataClear(pBlockData1);
|
||||
tBlockDataClear(pBlockData2);
|
||||
tBlockDataClear(pBlockData1, 1);
|
||||
tBlockDataClear(pBlockData2, 1);
|
||||
}
|
||||
|
||||
tFree(pBuf1);
|
||||
|
@ -1115,29 +1115,29 @@ int32_t tsdbReadBlockData(SDataFReader *pReader, SBlockIdx *pBlockIdx, SBlock *p
|
|||
for (iSubBlock = 1; iSubBlock < pBlock->nSubBlock; iSubBlock++) {
|
||||
code = tsdbReadSubBlockData(pReader, pBlockIdx, pBlock, iSubBlock, pBlockData1, ppBuf1, ppBuf2);
|
||||
if (code) {
|
||||
tBlockDataClear(pBlockData1);
|
||||
tBlockDataClear(pBlockData2);
|
||||
tBlockDataClear(pBlockData1, 1);
|
||||
tBlockDataClear(pBlockData2, 1);
|
||||
goto _err;
|
||||
}
|
||||
|
||||
code = tBlockDataCopy(pBlockData, pBlockData2);
|
||||
if (code) {
|
||||
tBlockDataClear(pBlockData1);
|
||||
tBlockDataClear(pBlockData2);
|
||||
tBlockDataClear(pBlockData1, 1);
|
||||
tBlockDataClear(pBlockData2, 1);
|
||||
goto _err;
|
||||
}
|
||||
|
||||
// merge two block data
|
||||
code = tBlockDataMerge(pBlockData1, pBlockData2, pBlockData);
|
||||
if (code) {
|
||||
tBlockDataClear(pBlockData1);
|
||||
tBlockDataClear(pBlockData2);
|
||||
tBlockDataClear(pBlockData1, 1);
|
||||
tBlockDataClear(pBlockData2, 1);
|
||||
goto _err;
|
||||
}
|
||||
}
|
||||
|
||||
tBlockDataClear(pBlockData1);
|
||||
tBlockDataClear(pBlockData2);
|
||||
tBlockDataClear(pBlockData1, 1);
|
||||
tBlockDataClear(pBlockData2, 1);
|
||||
}
|
||||
|
||||
ASSERT(pBlock->nRow == pBlockData->nRow);
|
||||
|
|
File diff suppressed because it is too large
|
@ -36,15 +36,15 @@ int32_t tMapDataPutItem(SMapData *pMapData, void *pItem, int32_t (*tPutItemFn)(u
|
|||
|
||||
// alloc
|
||||
code = tRealloc((uint8_t **)&pMapData->aOffset, sizeof(int32_t) * pMapData->nItem);
|
||||
if (code) goto _err;
|
||||
if (code) goto _exit;
|
||||
code = tRealloc(&pMapData->pData, pMapData->nData);
|
||||
if (code) goto _err;
|
||||
if (code) goto _exit;
|
||||
|
||||
// put
|
||||
pMapData->aOffset[nItem] = offset;
|
||||
tPutItemFn(pMapData->pData + offset, pItem);
|
||||
|
||||
_err:
|
||||
_exit:
|
||||
return code;
|
||||
}
|
||||
|
||||
|
@ -189,25 +189,12 @@ static FORCE_INLINE int32_t tGetTSDBKEY(uint8_t *p, TSDBKEY *pKey) {
|
|||
}
|
||||
|
||||
// SBlockIdx ======================================================
|
||||
void tBlockIdxReset(SBlockIdx *pBlockIdx) {
|
||||
pBlockIdx->minKey = TSKEY_MAX;
|
||||
pBlockIdx->maxKey = TSKEY_MIN;
|
||||
pBlockIdx->minVersion = VERSION_MAX;
|
||||
pBlockIdx->maxVersion = VERSION_MIN;
|
||||
pBlockIdx->offset = -1;
|
||||
pBlockIdx->size = -1;
|
||||
}
|
||||
|
||||
int32_t tPutBlockIdx(uint8_t *p, void *ph) {
|
||||
int32_t n = 0;
|
||||
SBlockIdx *pBlockIdx = (SBlockIdx *)ph;
|
||||
|
||||
n += tPutI64(p ? p + n : p, pBlockIdx->suid);
|
||||
n += tPutI64(p ? p + n : p, pBlockIdx->uid);
|
||||
n += tPutI64(p ? p + n : p, pBlockIdx->minKey);
|
||||
n += tPutI64(p ? p + n : p, pBlockIdx->maxKey);
|
||||
n += tPutI64v(p ? p + n : p, pBlockIdx->minVersion);
|
||||
n += tPutI64v(p ? p + n : p, pBlockIdx->maxVersion);
|
||||
n += tPutI64v(p ? p + n : p, pBlockIdx->offset);
|
||||
n += tPutI64v(p ? p + n : p, pBlockIdx->size);
|
||||
|
||||
|
@ -220,10 +207,6 @@ int32_t tGetBlockIdx(uint8_t *p, void *ph) {
|
|||
|
||||
n += tGetI64(p + n, &pBlockIdx->suid);
|
||||
n += tGetI64(p + n, &pBlockIdx->uid);
|
||||
n += tGetI64(p + n, &pBlockIdx->minKey);
|
||||
n += tGetI64(p + n, &pBlockIdx->maxKey);
|
||||
n += tGetI64v(p + n, &pBlockIdx->minVersion);
|
||||
n += tGetI64v(p + n, &pBlockIdx->maxVersion);
|
||||
n += tGetI64v(p + n, &pBlockIdx->offset);
|
||||
n += tGetI64v(p + n, &pBlockIdx->size);
|
||||
|
||||
|
@ -921,6 +904,76 @@ _exit:
|
|||
return code;
|
||||
}
|
||||
|
||||
int32_t tPutColData(uint8_t *p, SColData *pColData) {
|
||||
int32_t n = 0;
|
||||
|
||||
n += tPutI16v(p ? p + n : p, pColData->cid);
|
||||
n += tPutI8(p ? p + n : p, pColData->type);
|
||||
n += tPutI8(p ? p + n : p, pColData->smaOn);
|
||||
n += tPutI32v(p ? p + n : p, pColData->nVal);
|
||||
n += tPutU8(p ? p + n : p, pColData->flag);
|
||||
|
||||
if (pColData->flag == HAS_NONE || pColData->flag == HAS_NULL) goto _exit;
|
||||
if (pColData->flag != HAS_VALUE) {
|
||||
// bitmap
|
||||
|
||||
int32_t size = BIT2_SIZE(pColData->nVal);
|
||||
if (p) {
|
||||
memcpy(p + n, pColData->pBitMap, size);
|
||||
}
|
||||
n += size;
|
||||
}
|
||||
if (IS_VAR_DATA_TYPE(pColData->type)) {
|
||||
// offset
|
||||
|
||||
int32_t size = sizeof(int32_t) * pColData->nVal;
|
||||
if (p) {
|
||||
memcpy(p + n, pColData->aOffset, size);
|
||||
}
|
||||
n += size;
|
||||
}
|
||||
n += tPutI32v(p ? p + n : p, pColData->nData);
|
||||
if (p) {
|
||||
memcpy(p + n, pColData->pData, pColData->nData);
|
||||
}
|
||||
n += pColData->nData;
|
||||
|
||||
_exit:
|
||||
return n;
|
||||
}
|
||||
|
||||
int32_t tGetColData(uint8_t *p, SColData *pColData) {
|
||||
int32_t n = 0;
|
||||
|
||||
n += tGetI16v(p + n, &pColData->cid);
|
||||
n += tGetI8(p + n, &pColData->type);
|
||||
n += tGetI8(p + n, &pColData->smaOn);
|
||||
n += tGetI32v(p + n, &pColData->nVal);
|
||||
n += tGetU8(p + n, &pColData->flag);
|
||||
|
||||
if (pColData->flag == HAS_NONE || pColData->flag == HAS_NULL) goto _exit;
|
||||
if (pColData->flag != HAS_VALUE) {
|
||||
// bitmap
|
||||
|
||||
int32_t size = BIT2_SIZE(pColData->nVal);
|
||||
pColData->pBitMap = p + n;
|
||||
n += size;
|
||||
}
|
||||
if (IS_VAR_DATA_TYPE(pColData->type)) {
|
||||
// offset
|
||||
|
||||
int32_t size = sizeof(int32_t) * pColData->nVal;
|
||||
pColData->aOffset = (int32_t *)(p + n);
|
||||
n += size;
|
||||
}
|
||||
n += tGetI32v(p + n, &pColData->nData);
|
||||
pColData->pData = p + n;
|
||||
n += pColData->nData;
|
||||
|
||||
_exit:
|
||||
return n;
|
||||
}
|
||||
|
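tPutColData and tGetColData follow the codebase's two-pass encoding convention: each tPutXxx helper returns the number of bytes it would write and touches memory only when the output pointer is non-NULL, so one call with NULL sizes the buffer and a second call fills it. A minimal sketch of that convention with stand-in types (not the real SColData layout):

// Illustrative sketch, not part of the commit above.
#include <stdint.h>
#include <string.h>

typedef struct {
  int16_t  cid;     // stand-in column id
  int32_t  nData;   // payload length in bytes
  uint8_t *pData;   // payload
} DemoCol;

// Helpers in the same style as tPutI16/tPutI32: return the encoded size and
// write only when the output pointer is non-NULL.
static int32_t demoPutI16(uint8_t *p, int16_t v) { if (p) memcpy(p, &v, sizeof(v)); return (int32_t)sizeof(v); }
static int32_t demoPutI32(uint8_t *p, int32_t v) { if (p) memcpy(p, &v, sizeof(v)); return (int32_t)sizeof(v); }

static int32_t demoPutCol(uint8_t *p, const DemoCol *c) {
  int32_t n = 0;
  n += demoPutI16(p ? p + n : NULL, c->cid);
  n += demoPutI32(p ? p + n : NULL, c->nData);
  if (p) memcpy(p + n, c->pData, c->nData);   // payload copied only on the write pass
  n += c->nData;
  return n;   // size of the encoding, whether or not anything was written
}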
||||
static FORCE_INLINE int32_t tColDataCmprFn(const void *p1, const void *p2) {
|
||||
SColData *pColData1 = (SColData *)p1;
|
||||
SColData *pColData2 = (SColData *)p2;
|
||||
|
@ -962,11 +1015,11 @@ void tBlockDataReset(SBlockData *pBlockData) {
  taosArrayClear(pBlockData->aIdx);
}

void tBlockDataClear(SBlockData *pBlockData) {
void tBlockDataClear(SBlockData *pBlockData, int8_t deepClear) {
  tFree((uint8_t *)pBlockData->aVersion);
  tFree((uint8_t *)pBlockData->aTSKEY);
  taosArrayDestroy(pBlockData->aIdx);
  taosArrayDestroyEx(pBlockData->aColData, tColDataClear);
  taosArrayDestroyEx(pBlockData->aColData, deepClear ? tColDataClear : NULL);
}
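The new deepClear flag distinguishes a deep clear, which also destroys each column's buffers, from a shallow clear, which drops only the container because the column buffers are borrowed (for example when tGetColData pointed them into a decoded input buffer). A small sketch of the same distinction with stand-in types:

// Illustrative sketch, not part of the commit above.
#include <stdlib.h>

typedef struct {
  void **cols;    // per-column buffers; owned or borrowed depending on use
  int    nCols;
} DemoBlock;

// Deep clear frees the per-column buffers as well; shallow clear drops only
// the container because the buffers belong to someone else.
static void demoBlockClear(DemoBlock *b, int deepClear) {
  if (deepClear) {
    for (int i = 0; i < b->nCols; i++) free(b->cols[i]);
  }
  free(b->cols);
  b->cols = NULL;
  b->nCols = 0;
}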
|
||||
|
||||
int32_t tBlockDataSetSchema(SBlockData *pBlockData, STSchema *pTSchema) {
|
||||
|
@ -1079,6 +1132,46 @@ _err:
|
|||
return code;
|
||||
}
|
||||
|
||||
int32_t tBlockDataCorrectSchema(SBlockData *pBlockData, SBlockData *pBlockDataFrom) {
|
||||
int32_t code = 0;
|
||||
|
||||
int32_t iColData = 0;
|
||||
for (int32_t iColDataFrom = 0; iColDataFrom < taosArrayGetSize(pBlockDataFrom->aIdx); iColDataFrom++) {
|
||||
SColData *pColDataFrom = tBlockDataGetColDataByIdx(pBlockDataFrom, iColDataFrom);
|
||||
|
||||
while (true) {
|
||||
SColData *pColData;
|
||||
if (iColData < taosArrayGetSize(pBlockData->aIdx)) {
|
||||
pColData = tBlockDataGetColDataByIdx(pBlockData, iColData);
|
||||
} else {
|
||||
pColData = NULL;
|
||||
}
|
||||
|
||||
if (pColData == NULL || pColData->cid > pColDataFrom->cid) {
|
||||
code = tBlockDataAddColData(pBlockData, iColData, &pColData);
|
||||
if (code) goto _exit;
|
||||
|
||||
tColDataInit(pColData, pColDataFrom->cid, pColDataFrom->type, pColDataFrom->smaOn);
|
||||
for (int32_t iRow = 0; iRow < pBlockData->nRow; iRow++) {
|
||||
code = tColDataAppendValue(pColData, &COL_VAL_NONE(pColData->cid, pColData->type));
|
||||
if (code) goto _exit;
|
||||
}
|
||||
|
||||
iColData++;
|
||||
break;
|
||||
} else if (pColData->cid == pColDataFrom->cid) {
|
||||
iColData++;
|
||||
break;
|
||||
} else {
|
||||
iColData++;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
_exit:
|
||||
return code;
|
||||
}
|
||||
|
||||
int32_t tBlockDataMerge(SBlockData *pBlockData1, SBlockData *pBlockData2, SBlockData *pBlockData) {
|
||||
int32_t code = 0;
|
||||
|
||||
|
@ -1239,6 +1332,52 @@ void tBlockDataGetColData(SBlockData *pBlockData, int16_t cid, SColData **ppColD
|
|||
*ppColData = NULL;
|
||||
}
|
||||
|
||||
int32_t tPutBlockData(uint8_t *p, SBlockData *pBlockData) {
|
||||
int32_t n = 0;
|
||||
|
||||
n += tPutI32v(p ? p + n : p, pBlockData->nRow);
|
||||
if (p) {
|
||||
memcpy(p + n, pBlockData->aVersion, sizeof(int64_t) * pBlockData->nRow);
|
||||
}
|
||||
n = n + sizeof(int64_t) * pBlockData->nRow;
|
||||
if (p) {
|
||||
memcpy(p + n, pBlockData->aTSKEY, sizeof(TSKEY) * pBlockData->nRow);
|
||||
}
|
||||
n = n + sizeof(TSKEY) * pBlockData->nRow;
|
||||
|
||||
int32_t nCol = taosArrayGetSize(pBlockData->aIdx);
|
||||
n += tPutI32v(p ? p + n : p, nCol);
|
||||
for (int32_t iCol = 0; iCol < nCol; iCol++) {
|
||||
SColData *pColData = tBlockDataGetColDataByIdx(pBlockData, iCol);
|
||||
n += tPutColData(p ? p + n : p, pColData);
|
||||
}
|
||||
|
||||
return n;
|
||||
}
|
||||
|
||||
int32_t tGetBlockData(uint8_t *p, SBlockData *pBlockData) {
|
||||
int32_t n = 0;
|
||||
|
||||
tBlockDataReset(pBlockData);
|
||||
|
||||
n += tGetI32v(p + n, &pBlockData->nRow);
|
||||
pBlockData->aVersion = (int64_t *)(p + n);
|
||||
n = n + sizeof(int64_t) * pBlockData->nRow;
|
||||
pBlockData->aTSKEY = (TSKEY *)(p + n);
|
||||
n = n + sizeof(TSKEY) * pBlockData->nRow;
|
||||
|
||||
int32_t nCol;
|
||||
n += tGetI32v(p + n, &nCol);
|
||||
for (int32_t iCol = 0; iCol < nCol; iCol++) {
|
||||
SColData *pColData;
|
||||
|
||||
if (tBlockDataAddColData(pBlockData, iCol, &pColData)) return -1;
|
||||
n += tGetColData(p + n, pColData);
|
||||
}
|
||||
|
||||
return n;
|
||||
}
|
||||
|
||||
// ALGORITHM ==============================
|
||||
void tsdbCalcColDataSMA(SColData *pColData, SColumnDataAgg *pColAgg) {
|
||||
SColVal colVal;
|
||||
|
|
|
@ -233,7 +233,8 @@ int vnodeCommit(SVnode *pVnode) {
|
|||
walBeginSnapshot(pVnode->pWal, pVnode->state.applied);
|
||||
|
||||
// preCommit
|
||||
smaPreCommit(pVnode->pSma);
|
||||
// smaSyncPreCommit(pVnode->pSma);
|
||||
smaAsyncPreCommit(pVnode->pSma);
|
||||
|
||||
// commit each sub-system
|
||||
if (metaCommit(pVnode->pMeta) < 0) {
|
||||
|
@ -242,6 +243,8 @@ int vnodeCommit(SVnode *pVnode) {
|
|||
}
|
||||
|
||||
if (VND_IS_RSMA(pVnode)) {
|
||||
smaAsyncCommit(pVnode->pSma);
|
||||
|
||||
if (tsdbCommit(VND_RSMA0(pVnode)) < 0) {
|
||||
ASSERT(0);
|
||||
return -1;
|
||||
|
@ -276,7 +279,8 @@ int vnodeCommit(SVnode *pVnode) {
|
|||
pVnode->state.committed = info.state.committed;
|
||||
|
||||
// postCommit
|
||||
smaPostCommit(pVnode->pSma);
|
||||
// smaSyncPostCommit(pVnode->pSma);
|
||||
smaAsyncPostCommit(pVnode->pSma);
|
||||
|
||||
// apply the commit (TODO)
|
||||
walEndSnapshot(pVnode->pWal);
|
||||
|
|
|
@ -20,13 +20,13 @@ struct SVSnapReader {
|
|||
SVnode *pVnode;
|
||||
int64_t sver;
|
||||
int64_t ever;
|
||||
int64_t index;
|
||||
// meta
|
||||
int8_t metaDone;
|
||||
SMetaSnapReader *pMetaReader;
|
||||
// tsdb
|
||||
int8_t tsdbDone;
|
||||
STsdbSnapReader *pTsdbReader;
|
||||
uint8_t *pData;
|
||||
};
|
||||
|
||||
int32_t vnodeSnapReaderOpen(SVnode *pVnode, int64_t sver, int64_t ever, SVSnapReader **ppReader) {
|
||||
|
@ -42,12 +42,7 @@ int32_t vnodeSnapReaderOpen(SVnode *pVnode, int64_t sver, int64_t ever, SVSnapRe
|
|||
pReader->sver = sver;
|
||||
pReader->ever = ever;
|
||||
|
||||
code = metaSnapReaderOpen(pVnode->pMeta, sver, ever, &pReader->pMetaReader);
|
||||
if (code) goto _err;
|
||||
|
||||
code = tsdbSnapReaderOpen(pVnode->pTsdb, sver, ever, &pReader->pTsdbReader);
|
||||
if (code) goto _err;
|
||||
|
||||
vInfo("vgId:%d vnode snapshot reader opened, sver:%" PRId64 " ever:%" PRId64, TD_VID(pVnode), sver, ever);
|
||||
*ppReader = pReader;
|
||||
return code;
|
||||
|
||||
|
@ -60,54 +55,85 @@ _err:
|
|||
int32_t vnodeSnapReaderClose(SVSnapReader *pReader) {
|
||||
int32_t code = 0;
|
||||
|
||||
tFree(pReader->pData);
|
||||
if (pReader->pTsdbReader) tsdbSnapReaderClose(&pReader->pTsdbReader);
|
||||
if (pReader->pMetaReader) metaSnapReaderClose(&pReader->pMetaReader);
|
||||
taosMemoryFree(pReader);
|
||||
if (pReader->pTsdbReader) {
|
||||
tsdbSnapReaderClose(&pReader->pTsdbReader);
|
||||
}
|
||||
|
||||
if (pReader->pMetaReader) {
|
||||
metaSnapReaderClose(&pReader->pMetaReader);
|
||||
}
|
||||
|
||||
vInfo("vgId:%d vnode snapshot reader closed", TD_VID(pReader->pVnode));
|
||||
taosMemoryFree(pReader);
|
||||
return code;
|
||||
}
|
||||
|
||||
int32_t vnodeSnapRead(SVSnapReader *pReader, uint8_t **ppData, uint32_t *nData) {
|
||||
int32_t code = 0;
|
||||
|
||||
// META ==============
|
||||
if (!pReader->metaDone) {
|
||||
code = metaSnapRead(pReader->pMetaReader, &pReader->pData);
|
||||
// open reader if not
|
||||
if (pReader->pMetaReader == NULL) {
|
||||
code = metaSnapReaderOpen(pReader->pVnode->pMeta, pReader->sver, pReader->ever, &pReader->pMetaReader);
|
||||
if (code) goto _err;
|
||||
}
|
||||
|
||||
code = metaSnapRead(pReader->pMetaReader, ppData);
|
||||
if (code) {
|
||||
if (code == TSDB_CODE_VND_READ_END) {
|
||||
goto _err;
|
||||
} else {
|
||||
if (*ppData) {
|
||||
goto _exit;
|
||||
} else {
|
||||
pReader->metaDone = 1;
|
||||
} else {
|
||||
goto _err;
|
||||
code = metaSnapReaderClose(&pReader->pMetaReader);
|
||||
if (code) goto _err;
|
||||
}
|
||||
} else {
|
||||
*ppData = pReader->pData;
|
||||
*nData = sizeof(SSnapDataHdr) + ((SSnapDataHdr *)pReader->pData)->size;
|
||||
goto _exit;
|
||||
}
|
||||
}
|
||||
|
||||
// TSDB ==============
|
||||
if (!pReader->tsdbDone) {
|
||||
code = tsdbSnapRead(pReader->pTsdbReader, &pReader->pData);
|
||||
// open if not
|
||||
if (pReader->pTsdbReader == NULL) {
|
||||
code = tsdbSnapReaderOpen(pReader->pVnode->pTsdb, pReader->sver, pReader->ever, &pReader->pTsdbReader);
|
||||
if (code) goto _err;
|
||||
}
|
||||
|
||||
code = tsdbSnapRead(pReader->pTsdbReader, ppData);
|
||||
if (code) {
|
||||
if (code == TSDB_CODE_VND_READ_END) {
|
||||
pReader->tsdbDone = 1;
|
||||
} else {
|
||||
goto _err;
|
||||
}
|
||||
goto _err;
|
||||
} else {
|
||||
*ppData = pReader->pData;
|
||||
*nData = sizeof(SSnapDataHdr) + ((SSnapDataHdr *)pReader->pData)->size;
|
||||
goto _exit;
|
||||
if (*ppData) {
|
||||
goto _exit;
|
||||
} else {
|
||||
pReader->tsdbDone = 1;
|
||||
code = tsdbSnapReaderClose(&pReader->pTsdbReader);
|
||||
if (code) goto _err;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
code = TSDB_CODE_VND_READ_END;
|
||||
*ppData = NULL;
|
||||
*nData = 0;
|
||||
|
||||
_exit:
|
||||
if (*ppData) {
|
||||
SSnapDataHdr *pHdr = (SSnapDataHdr *)(*ppData);
|
||||
|
||||
pReader->index++;
|
||||
*nData = sizeof(SSnapDataHdr) + pHdr->size;
|
||||
pHdr->index = pReader->index;
|
||||
vInfo("vgId:%d vnode snapshot read data,index:%" PRId64 " type:%d nData:%d ", TD_VID(pReader->pVnode),
|
||||
pReader->index, pHdr->type, *nData);
|
||||
} else {
|
||||
vInfo("vgId:%d vnode snapshot read data end, index:%" PRId64, TD_VID(pReader->pVnode), pReader->index);
|
||||
}
|
||||
return code;
|
||||
|
||||
_err:
|
||||
vError("vgId:% snapshot read failed since %s", TD_VID(pReader->pVnode), tstrerror(code));
|
||||
vError("vgId:% vnode snapshot read failed since %s", TD_VID(pReader->pVnode), tstrerror(code));
|
||||
return code;
|
||||
}
|
||||
|
||||
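The reworked vnodeSnapRead drives the snapshot as a sequence of stages (meta, then tsdb): each sub-reader is opened lazily on first use, drained one chunk per call, closed once it returns no data, and only after every stage is exhausted does the function report the end-of-read code. A self-contained sketch of that staged loop, with dummy stand-in readers instead of the meta/tsdb readers and a local DEMO_READ_END in place of TSDB_CODE_VND_READ_END:

// Illustrative sketch, not part of the commit above.
#include <stdlib.h>

enum { DEMO_READ_END = 1 };

typedef struct { int sent; } DemoStageReader;   // stand-in per-stage reader

typedef struct {
  int              stage;       // 0 = "meta", 1 = "tsdb", 2 = done
  DemoStageReader *subReader;   // lazily opened reader for the current stage
} DemoSnapReader;

static int demoStageOpen(DemoStageReader **pp) {
  *pp = calloc(1, sizeof(**pp));
  return *pp ? 0 : -1;
}

// Each stage yields two dummy chunks, then NULL to signal "stage exhausted".
static int demoStageRead(DemoStageReader *r, const char **ppData) {
  static const char *chunks[] = {"chunk-0", "chunk-1"};
  *ppData = (r->sent < 2) ? chunks[r->sent++] : NULL;
  return 0;
}

static void demoStageClose(DemoStageReader **pp) { free(*pp); *pp = NULL; }

// Open the current stage on demand, return its next chunk, and when the stage
// is exhausted close it and fall through to the next one; report DEMO_READ_END
// once every stage has been drained.
static int demoSnapRead(DemoSnapReader *r, const char **ppData) {
  *ppData = NULL;
  while (r->stage < 2) {
    if (r->subReader == NULL && demoStageOpen(&r->subReader) != 0) return -1;
    if (demoStageRead(r->subReader, ppData) != 0) return -1;
    if (*ppData != NULL) return 0;      // hand one chunk to the caller
    demoStageClose(&r->subReader);      // stage exhausted, move on
    r->stage++;
  }
  return DEMO_READ_END;
}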
|
@ -116,24 +142,13 @@ struct SVSnapWriter {
|
|||
SVnode *pVnode;
|
||||
int64_t sver;
|
||||
int64_t ever;
|
||||
int64_t index;
|
||||
// meta
|
||||
SMetaSnapWriter *pMetaSnapWriter;
|
||||
// tsdb
|
||||
STsdbSnapWriter *pTsdbSnapWriter;
|
||||
};
|
||||
|
||||
static int32_t vnodeSnapRollback(SVSnapWriter *pWriter) {
|
||||
int32_t code = 0;
|
||||
// TODO
|
||||
return code;
|
||||
}
|
||||
|
||||
static int32_t vnodeSnapCommit(SVSnapWriter *pWriter) {
|
||||
int32_t code = 0;
|
||||
// TODO
|
||||
return code;
|
||||
}
|
||||
|
||||
int32_t vnodeSnapWriterOpen(SVnode *pVnode, int64_t sver, int64_t ever, SVSnapWriter **ppWriter) {
|
||||
int32_t code = 0;
|
||||
SVSnapWriter *pWriter = NULL;
|
||||
|
@ -148,62 +163,78 @@ int32_t vnodeSnapWriterOpen(SVnode *pVnode, int64_t sver, int64_t ever, SVSnapWr
|
|||
pWriter->sver = sver;
|
||||
pWriter->ever = ever;
|
||||
|
||||
vInfo("vgId:%d vnode snapshot writer opened", TD_VID(pVnode));
|
||||
*ppWriter = pWriter;
|
||||
return code;
|
||||
|
||||
_err:
|
||||
vError("vgId:%d vnode snapshot writer open failed since %s", TD_VID(pVnode), tstrerror(code));
|
||||
*ppWriter = NULL;
|
||||
return code;
|
||||
}
|
||||
|
||||
int32_t vnodeSnapWriterClose(SVSnapWriter *pWriter, int8_t rollback) {
|
||||
int32_t code = 0;
|
||||
|
||||
if (rollback) {
|
||||
code = vnodeSnapRollback(pWriter);
|
||||
if (code) goto _err;
|
||||
} else {
|
||||
code = vnodeSnapCommit(pWriter);
|
||||
if (pWriter->pMetaSnapWriter) {
|
||||
code = metaSnapWriterClose(&pWriter->pMetaSnapWriter, rollback);
|
||||
if (code) goto _err;
|
||||
}
|
||||
|
||||
if (pWriter->pTsdbSnapWriter) {
|
||||
code = tsdbSnapWriterClose(&pWriter->pTsdbSnapWriter, rollback);
|
||||
if (code) goto _err;
|
||||
}
|
||||
|
||||
_exit:
|
||||
vInfo("vgId:%d vnode snapshot writer closed, rollback:%d", TD_VID(pWriter->pVnode), rollback);
|
||||
taosMemoryFree(pWriter);
|
||||
return code;
|
||||
|
||||
_err:
|
||||
vError("vgId:%d vnode snapshow writer close failed since %s", TD_VID(pWriter->pVnode), tstrerror(code));
|
||||
vError("vgId:%d vnode snapshot writer close failed since %s", TD_VID(pWriter->pVnode), tstrerror(code));
|
||||
return code;
|
||||
}
|
||||
|
||||
int32_t vnodeSnapWrite(SVSnapWriter *pWriter, uint8_t *pData, uint32_t nData) {
|
||||
int32_t code = 0;
|
||||
SSnapDataHdr *pSnapDataHdr = (SSnapDataHdr *)pData;
|
||||
SSnapDataHdr *pHdr = (SSnapDataHdr *)pData;
|
||||
SVnode *pVnode = pWriter->pVnode;
|
||||
|
||||
ASSERT(pSnapDataHdr->size + sizeof(SSnapDataHdr) == nData);
|
||||
ASSERT(pHdr->size + sizeof(SSnapDataHdr) == nData);
|
||||
ASSERT(pHdr->index == pWriter->index + 1);
|
||||
pWriter->index = pHdr->index;
|
||||
|
||||
if (pSnapDataHdr->type == 0) {
|
||||
vInfo("vgId:%d vnode snapshot write data, index:%" PRId64 " type:%d nData:%d", TD_VID(pVnode), pHdr->index,
|
||||
pHdr->type, nData);
|
||||
|
||||
if (pHdr->type == 0) {
|
||||
// meta
|
||||
|
||||
if (pWriter->pMetaSnapWriter == NULL) {
|
||||
code = metaSnapWriterOpen(pVnode->pMeta, pWriter->sver, pWriter->ever, &pWriter->pMetaSnapWriter);
|
||||
if (code) goto _err;
|
||||
}
|
||||
|
||||
code = metaSnapWrite(pWriter->pMetaSnapWriter, pData + sizeof(SSnapDataHdr), nData - sizeof(SSnapDataHdr));
|
||||
code = metaSnapWrite(pWriter->pMetaSnapWriter, pData, nData);
|
||||
if (code) goto _err;
|
||||
} else {
|
||||
// tsdb
|
||||
|
||||
if (pWriter->pTsdbSnapWriter == NULL) {
|
||||
code = tsdbSnapWriterOpen(pVnode->pTsdb, pWriter->sver, pWriter->ever, &pWriter->pTsdbSnapWriter);
|
||||
if (code) goto _err;
|
||||
}
|
||||
|
||||
code = tsdbSnapWrite(pWriter->pTsdbSnapWriter, pData + sizeof(SSnapDataHdr), nData - sizeof(SSnapDataHdr));
|
||||
code = tsdbSnapWrite(pWriter->pTsdbSnapWriter, pData, nData);
|
||||
if (code) goto _err;
|
||||
}
|
||||
|
||||
_exit:
|
||||
return code;
|
||||
|
||||
_err:
|
||||
vError("vgId:%d vnode snapshot write failed since %s", TD_VID(pVnode), tstrerror(code));
|
||||
vError("vgId:%d vnode snapshot write failed since %s, index:%" PRId64 " type:%d nData:%d", TD_VID(pVnode),
|
||||
tstrerror(code), pHdr->index, pHdr->type, nData);
|
||||
return code;
|
||||
}
|
|
@ -689,6 +689,7 @@ _exit:
|
|||
tEncoderInit(&encoder, pRsp->pCont, pRsp->contLen);
|
||||
tEncodeSVDropTbBatchRsp(&encoder, &rsp);
|
||||
tEncoderClear(&encoder);
|
||||
taosArrayDestroy(rsp.pArray);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -900,7 +901,7 @@ static int32_t vnodeProcessCreateTSmaReq(SVnode *pVnode, int64_t version, void *
|
|||
_err:
|
||||
tDecoderClear(&coder);
|
||||
vError("vgId:%d, failed to create tsma %s:%" PRIi64 " version %" PRIi64 "for table %" PRIi64 " since %s",
|
||||
TD_VID(pVnode), req.indexName, req.indexUid, version, req.tableUid, terrstr(terrno));
|
||||
TD_VID(pVnode), req.indexName, req.indexUid, version, req.tableUid, terrstr());
|
||||
return -1;
|
||||
}
|
||||
|
||||
|
|
|
@ -163,7 +163,7 @@ int32_t ctgInitGetQnodeTask(SCtgJob *pJob, int32_t taskIdx, void* param) {
|
|||
|
||||
taosArrayPush(pJob->pTasks, &task);
|
||||
|
||||
qDebug("QID:0x%" PRIx64 " the %d task type %s initialized", pJob->queryId, taskIdx, ctgTaskTypeStr(task.type));
|
||||
qDebug("QID:0x%" PRIx64 " the %dth task type %s initialized", pJob->queryId, taskIdx, ctgTaskTypeStr(task.type));
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
@ -178,7 +178,7 @@ int32_t ctgInitGetDnodeTask(SCtgJob *pJob, int32_t taskIdx, void* param) {
|
|||
|
||||
taosArrayPush(pJob->pTasks, &task);
|
||||
|
||||
qDebug("QID:0x%" PRIx64 " the %d task type %s initialized", pJob->queryId, taskIdx, ctgTaskTypeStr(task.type));
|
||||
qDebug("QID:0x%" PRIx64 " the %dth task type %s initialized", pJob->queryId, taskIdx, ctgTaskTypeStr(task.type));
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
@ -264,7 +264,7 @@ int32_t ctgInitGetSvrVerTask(SCtgJob *pJob, int32_t taskIdx, void* param) {
|
|||
|
||||
taosArrayPush(pJob->pTasks, &task);
|
||||
|
||||
qDebug("QID:0x%" PRIx64 " [%dth] task type %s initialized", pJob->queryId, taskIdx, ctgTaskTypeStr(task.type));
|
||||
qDebug("QID:0x%" PRIx64 " the %dth task type %s initialized", pJob->queryId, taskIdx, ctgTaskTypeStr(task.type));
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
|
|
@ -1980,6 +1980,7 @@ int32_t loadRemoteDataCallback(void* param, SDataBuf* pMsg, int32_t code) {
|
|||
qDebug("%s fetch rsp received, index:%d, rows:%d", pSourceDataInfo->taskId, index, pRsp->numOfRows);
|
||||
} else {
|
||||
pSourceDataInfo->code = code;
|
||||
qDebug("%s fetch rsp received, index:%d, error:%d", pSourceDataInfo->taskId, index, tstrerror(code));
|
||||
}
|
||||
|
||||
pSourceDataInfo->status = EX_SOURCE_DATA_READY;
|
||||
|
|
|
@ -1353,13 +1353,13 @@ static int32_t closeIntervalWindow(SHashObj* pHashMap, STimeWindowAggSupp* pSup,
|
|||
if (chIds && pPullDataMap) {
|
||||
SArray* chAy = *(SArray**)chIds;
|
||||
int32_t size = taosArrayGetSize(chAy);
|
||||
qDebug("window %" PRId64 " wait child size:%d", win.skey, size);
|
||||
qDebug("===stream===window %" PRId64 " wait child size:%d", win.skey, size);
|
||||
for (int32_t i = 0; i < size; i++) {
|
||||
qDebug("window %" PRId64 " wait child id:%d", win.skey, *(int32_t*)taosArrayGet(chAy, i));
|
||||
qDebug("===stream===window %" PRId64 " wait child id:%d", win.skey, *(int32_t*)taosArrayGet(chAy, i));
|
||||
}
|
||||
continue;
|
||||
} else if (pPullDataMap) {
|
||||
qDebug("close window %" PRId64, win.skey);
|
||||
qDebug("===stream===close window %" PRId64, win.skey);
|
||||
}
|
||||
SResultRowPosition* pPos = (SResultRowPosition*)pIte;
|
||||
if (pSup->calTrigger == STREAM_TRIGGER_WINDOW_CLOSE) {
|
||||
|
@ -2482,7 +2482,9 @@ static void doHashInterval(SOperatorInfo* pOperatorInfo, SSDataBlock* pSDataBloc
|
|||
SPullWindowInfo pull = {.window = nextWin, .groupId = tableGroupId};
|
||||
// add pull data request
|
||||
taosArrayPush(pInfo->pPullWins, &pull);
|
||||
addPullWindow(pInfo->pPullDataMap, &winRes, taosArrayGetSize(pInfo->pChildren));
|
||||
int32_t size = taosArrayGetSize(pInfo->pChildren);
|
||||
addPullWindow(pInfo->pPullDataMap, &winRes, size);
|
||||
qDebug("===stream===prepare retrive %" PRId64 ", size:%d", winRes.ts, size);
|
||||
} else {
|
||||
int32_t index = -1;
|
||||
SArray* chArray = NULL;
|
||||
|
@ -2492,14 +2494,14 @@ static void doHashInterval(SOperatorInfo* pOperatorInfo, SSDataBlock* pSDataBloc
|
|||
chId = getChildIndex(pSDataBlock);
|
||||
index = taosArraySearchIdx(chArray, &chId, compareInt32Val, TD_EQ);
|
||||
}
|
||||
if (index != -1 && pSDataBlock->info.type == STREAM_PULL_DATA) {
|
||||
qDebug("======delete child id %d", chId);
|
||||
taosArrayRemove(chArray, index);
|
||||
if (taosArrayGetSize(chArray) == 0) {
|
||||
// pull data is over
|
||||
taosHashRemove(pInfo->pPullDataMap, &winRes, sizeof(SWinRes));
|
||||
}
|
||||
}
|
||||
// if (index != -1 && pSDataBlock->info.type == STREAM_PULL_DATA) {
|
||||
// qDebug("===stream===delete child id %d", chId);
|
||||
// taosArrayRemove(chArray, index);
|
||||
// if (taosArrayGetSize(chArray) == 0) {
|
||||
// // pull data is over
|
||||
// taosHashRemove(pInfo->pPullDataMap, &winRes, sizeof(SWinRes));
|
||||
// }
|
||||
// }
|
||||
if (index == -1 || pSDataBlock->info.type == STREAM_PULL_DATA) {
|
||||
ignore = false;
|
||||
}
|
||||
|
@ -2623,6 +2625,7 @@ void processPullOver(SSDataBlock* pBlock, SHashObj* pMap) {
|
|||
SArray* chArray = *(SArray**)chIds;
|
||||
int32_t index = taosArraySearchIdx(chArray, &chId, compareInt32Val, TD_EQ);
|
||||
if (index != -1) {
|
||||
qDebug("===stream===window %" PRId64 " delete child id %d", winRes.ts, chId);
|
||||
taosArrayRemove(chArray, index);
|
||||
if (taosArrayGetSize(chArray) == 0) {
|
||||
// pull data is over
|
||||
|
@ -2641,7 +2644,7 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) {
|
|||
|
||||
SExprSupp* pSup = &pOperator->exprSupp;
|
||||
|
||||
qDebug("interval status %d %s", pOperator->status, IS_FINAL_OP(pInfo) ? "interval Final" : "interval Semi");
|
||||
qDebug("interval status %d %s", pOperator->status, IS_FINAL_OP(pInfo) ? "interval Final" : "interval Semi");
|
||||
|
||||
if (pOperator->status == OP_EXEC_DONE) {
|
||||
return NULL;
|
||||
|
@ -2657,18 +2660,18 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) {
|
|||
}
|
||||
return NULL;
|
||||
}
|
||||
printDataBlock(pInfo->binfo.pRes, IS_FINAL_OP(pInfo) ? "interval Final" : "interval Semi");
|
||||
printDataBlock(pInfo->binfo.pRes, IS_FINAL_OP(pInfo) ? "interval Final" : "interval Semi");
|
||||
return pInfo->binfo.pRes;
|
||||
} else {
|
||||
doBuildResultDatablock(pOperator, &pInfo->binfo, &pInfo->groupResInfo, pInfo->aggSup.pResultBuf);
|
||||
if (pInfo->binfo.pRes->info.rows != 0) {
|
||||
printDataBlock(pInfo->binfo.pRes, IS_FINAL_OP(pInfo) ? "interval Final" : "interval Semi");
|
||||
printDataBlock(pInfo->binfo.pRes, IS_FINAL_OP(pInfo) ? "interval Final" : "interval Semi");
|
||||
return pInfo->binfo.pRes;
|
||||
}
|
||||
if (pInfo->pUpdateRes->info.rows != 0 && pInfo->returnUpdate) {
|
||||
pInfo->returnUpdate = false;
|
||||
ASSERT(!IS_FINAL_OP(pInfo));
|
||||
printDataBlock(pInfo->pUpdateRes, IS_FINAL_OP(pInfo) ? "interval Final" : "interval Semi");
|
||||
printDataBlock(pInfo->pUpdateRes, IS_FINAL_OP(pInfo) ? "interval Final" : "interval Semi");
|
||||
// process the rest of the data
|
||||
return pInfo->pUpdateRes;
|
||||
}
|
||||
|
@ -2676,13 +2679,13 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) {
|
|||
if (pInfo->pPullDataRes->info.rows != 0) {
|
||||
// process the rest of the data
|
||||
ASSERT(IS_FINAL_OP(pInfo));
|
||||
printDataBlock(pInfo->pPullDataRes, IS_FINAL_OP(pInfo) ? "interval Final" : "interval Semi");
|
||||
printDataBlock(pInfo->pPullDataRes, IS_FINAL_OP(pInfo) ? "interval Final" : "interval Semi");
|
||||
return pInfo->pPullDataRes;
|
||||
}
|
||||
doBuildDeleteResult(pInfo->pDelWins, &pInfo->delIndex, pInfo->pDelRes);
|
||||
if (pInfo->pDelRes->info.rows != 0) {
|
||||
// process the rest of the data
|
||||
printDataBlock(pInfo->pDelRes, IS_FINAL_OP(pInfo) ? "interval Final" : "interval Semi");
|
||||
printDataBlock(pInfo->pDelRes, IS_FINAL_OP(pInfo) ? "interval Final" : "interval Semi");
|
||||
return pInfo->pDelRes;
|
||||
}
|
||||
}
|
||||
|
@ -2693,10 +2696,10 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) {
|
|||
clearSpecialDataBlock(pInfo->pUpdateRes);
|
||||
removeDeleteResults(pUpdated, pInfo->pDelWins);
|
||||
pOperator->status = OP_RES_TO_RETURN;
|
||||
qDebug("%s return data", IS_FINAL_OP(pInfo) ? "interval Final" : "interval Semi");
|
||||
qDebug("%s return data", IS_FINAL_OP(pInfo) ? "interval Final" : "interval Semi");
|
||||
break;
|
||||
}
|
||||
printDataBlock(pBlock, IS_FINAL_OP(pInfo) ? "interval Final recv" : "interval Semi recv");
|
||||
printDataBlock(pBlock, IS_FINAL_OP(pInfo) ? "interval Final recv" : "interval Semi recv");
|
||||
maxTs = TMAX(maxTs, pBlock->info.window.ekey);
|
||||
|
||||
if (pBlock->info.type == STREAM_NORMAL || pBlock->info.type == STREAM_PULL_DATA ||
|
||||
|
@ -2771,6 +2774,7 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) {
|
|||
SStreamFinalIntervalOperatorInfo* pTmpInfo = pChildOp->info;
|
||||
pTmpInfo->twAggSup.calTrigger = STREAM_TRIGGER_AT_ONCE;
|
||||
taosArrayPush(pInfo->pChildren, &pChildOp);
|
||||
qDebug("===stream===add child, id:%d", chIndex);
|
||||
}
|
||||
SOperatorInfo* pChildOp = taosArrayGetP(pInfo->pChildren, chIndex);
|
||||
SStreamFinalIntervalOperatorInfo* pChInfo = pChildOp->info;
|
||||
|
@ -2795,14 +2799,14 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) {
|
|||
blockDataEnsureCapacity(pInfo->binfo.pRes, pOperator->resultInfo.capacity);
|
||||
doBuildResultDatablock(pOperator, &pInfo->binfo, &pInfo->groupResInfo, pInfo->aggSup.pResultBuf);
|
||||
if (pInfo->binfo.pRes->info.rows != 0) {
|
||||
printDataBlock(pInfo->binfo.pRes, IS_FINAL_OP(pInfo) ? "interval Final" : "interval Semi");
|
||||
printDataBlock(pInfo->binfo.pRes, IS_FINAL_OP(pInfo) ? "interval Final" : "interval Semi");
|
||||
return pInfo->binfo.pRes;
|
||||
}
|
||||
|
||||
if (pInfo->pUpdateRes->info.rows != 0 && pInfo->returnUpdate) {
|
||||
pInfo->returnUpdate = false;
|
||||
ASSERT(!IS_FINAL_OP(pInfo));
|
||||
printDataBlock(pInfo->pUpdateRes, IS_FINAL_OP(pInfo) ? "interval Final" : "interval Semi");
|
||||
printDataBlock(pInfo->pUpdateRes, IS_FINAL_OP(pInfo) ? "interval Final" : "interval Semi");
|
||||
// process the rest of the data
|
||||
return pInfo->pUpdateRes;
|
||||
}
|
||||
|
@ -2811,14 +2815,14 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) {
|
|||
if (pInfo->pPullDataRes->info.rows != 0) {
|
||||
// process the rest of the data
|
||||
ASSERT(IS_FINAL_OP(pInfo));
|
||||
printDataBlock(pInfo->pPullDataRes, IS_FINAL_OP(pInfo) ? "interval Final" : "interval Semi");
|
||||
printDataBlock(pInfo->pPullDataRes, IS_FINAL_OP(pInfo) ? "interval Final" : "interval Semi");
|
||||
return pInfo->pPullDataRes;
|
||||
}
|
||||
|
||||
doBuildDeleteResult(pInfo->pDelWins, &pInfo->delIndex, pInfo->pDelRes);
|
||||
if (pInfo->pDelRes->info.rows != 0) {
|
||||
// process the rest of the data
|
||||
printDataBlock(pInfo->pDelRes, IS_FINAL_OP(pInfo) ? "interval Final" : "interval Semi");
|
||||
printDataBlock(pInfo->pDelRes, IS_FINAL_OP(pInfo) ? "interval Final" : "interval Semi");
|
||||
return pInfo->pDelRes;
|
||||
}
|
||||
// ASSERT(false);
|
||||
|
|
|
@ -2408,6 +2408,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
|
|||
.getEnvFunc = getDiffFuncEnv,
|
||||
.initFunc = diffFunctionSetup,
|
||||
.processFunc = diffFunction,
|
||||
.sprocessFunc = diffScalarFunction,
|
||||
.finalizeFunc = functionFinalize
|
||||
},
|
||||
{
|
||||
|
|
|
@ -75,7 +75,9 @@ typedef struct SQWDebug {
bool lockEnable;
bool statusEnable;
bool dumpEnable;
bool tmp;
bool sleepSimulate;
bool deadSimulate;
bool redirectSimulate;
} SQWDebug;

extern SQWDebug gQWDebug;

@ -130,12 +132,11 @@ typedef struct SQWTaskCtx {
int8_t taskType;
int8_t explain;
int8_t needFetch;
int32_t queryType;
int32_t msgType;
int32_t fetchType;
int32_t execId;

bool queryRsped;
bool queryFetched;
bool queryEnd;
bool queryContinue;
bool queryInQueue;

@ -228,6 +229,7 @@ typedef struct SQWorkerMgmt {
#define QW_SET_EVENT_PROCESSED(ctx, event) atomic_store_8(&(ctx)->events[event], QW_EVENT_PROCESSED)

#define QW_GET_PHASE(ctx) atomic_load_8(&(ctx)->phase)
#define QW_SET_PHASE(ctx, _value) do { if ((_value) != QW_PHASE_PRE_FETCH && (_value) != QW_PHASE_POST_FETCH) { atomic_store_8(&(ctx)->phase, _value); } } while (0)

#define QW_SET_RSP_CODE(ctx, code) atomic_store_32(&(ctx)->rspCode, code)
#define QW_UPDATE_RSP_CODE(ctx, code) atomic_val_compare_exchange_32(&(ctx)->rspCode, 0, code)

@ -362,7 +364,7 @@ int32_t qwAcquireTaskCtx(QW_FPARAMS_DEF, SQWTaskCtx **ctx);
int32_t qwGetTaskCtx(QW_FPARAMS_DEF, SQWTaskCtx **ctx);
int32_t qwAddAcquireTaskCtx(QW_FPARAMS_DEF, SQWTaskCtx **ctx);
void qwReleaseTaskCtx(SQWorker *mgmt, void *ctx);
int32_t qwKillTaskHandle(QW_FPARAMS_DEF, SQWTaskCtx *ctx);
int32_t qwKillTaskHandle(SQWTaskCtx *ctx);
int32_t qwUpdateTaskStatus(QW_FPARAMS_DEF, int8_t status);
int32_t qwDropTask(QW_FPARAMS_DEF);
void qwSaveTbVersionInfo(qTaskInfo_t pTaskInfo, SQWTaskCtx *ctx);

@ -372,13 +374,15 @@ int32_t qwUpdateTimeInQueue(SQWorker *mgmt, int64_t ts, EQueueType type);
int64_t qwGetTimeInQueue(SQWorker *mgmt, EQueueType type);
void qwClearExpiredSch(SQWorker *mgmt, SArray* pExpiredSch);
int32_t qwAcquireScheduler(SQWorker *mgmt, uint64_t sId, int32_t rwType, SQWSchStatus **sch);
void qwFreeTaskCtx(QW_FPARAMS_DEF, SQWTaskCtx *ctx);
void qwFreeTaskCtx(SQWTaskCtx *ctx);

void qwDbgDumpMgmtInfo(SQWorker *mgmt);
int32_t qwDbgValidateStatus(QW_FPARAMS_DEF, int8_t oriStatus, int8_t newStatus, bool *ignore);
int32_t qwDbgBuildAndSendRedirectRsp(int32_t rspType, SRpcHandleInfo *pConn, int32_t code, SEpSet *pEpSet);
int32_t qwAddTaskCtx(QW_FPARAMS_DEF);
int32_t qwDbgResponseRedirect(SQWMsg *qwMsg, SQWTaskCtx *ctx);
void qwDbgSimulateRedirect(SQWMsg *qwMsg, SQWTaskCtx *ctx, bool *rsped);
void qwDbgSimulateSleep(void);
void qwDbgSimulateDead(QW_FPARAMS_DEF, SQWTaskCtx *ctx, bool *rsped);


#ifdef __cplusplus

@ -40,11 +40,13 @@ void qwBuildFetchRsp(void *msg, SOutputData *input, int32_t len, bool qComple
int32_t qwBuildAndSendCQueryMsg(QW_FPARAMS_DEF, SRpcHandleInfo *pConn);
int32_t qwBuildAndSendQueryRsp(int32_t rspType, SRpcHandleInfo *pConn, int32_t code, SQWTaskCtx *ctx);
int32_t qwBuildAndSendExplainRsp(SRpcHandleInfo *pConn, SExplainExecInfo *execInfo, int32_t num);
int32_t qwBuildAndSendErrorRsp(int32_t rspType, SRpcHandleInfo *pConn, int32_t code);
void qwFreeFetchRsp(void *msg);
int32_t qwMallocFetchRsp(int32_t length, SRetrieveTableRsp **rsp);
int32_t qwBuildAndSendHbRsp(SRpcHandleInfo *pConn, SSchedulerHbRsp *rsp, int32_t code);
int32_t qwRegisterQueryBrokenLinkArg(QW_FPARAMS_DEF, SRpcHandleInfo *pConn);
int32_t qwRegisterHbBrokenLinkArg(SQWorker *mgmt, uint64_t sId, SRpcHandleInfo *pConn);
int32_t qwBuildAndSendDropMsg(QW_FPARAMS_DEF, SRpcHandleInfo *pConn);

#ifdef __cplusplus
}

@ -9,7 +9,7 @@
#include "tmsg.h"
#include "tname.h"

SQWDebug gQWDebug = {.statusEnable = true, .dumpEnable = false, .tmp = false};
SQWDebug gQWDebug = {.statusEnable = true, .dumpEnable = false, .redirectSimulate = false, .deadSimulate = false, .sleepSimulate = false};

int32_t qwDbgValidateStatus(QW_FPARAMS_DEF, int8_t oriStatus, int8_t newStatus, bool *ignore) {
if (!gQWDebug.statusEnable) {

@ -147,8 +147,17 @@ int32_t qwDbgBuildAndSendRedirectRsp(int32_t rspType, SRpcHandleInfo *pConn, int
return TSDB_CODE_SUCCESS;
}

int32_t qwDbgResponseRedirect(SQWMsg *qwMsg, SQWTaskCtx *ctx) {
if (gQWDebug.tmp) {
void qwDbgSimulateRedirect(SQWMsg *qwMsg, SQWTaskCtx *ctx, bool *rsped) {
static int32_t ignoreTime = 0;
if (*rsped) {
return;
}

if (gQWDebug.redirectSimulate) {
if (++ignoreTime <= 10) {
return;
}

if (TDMT_SCH_QUERY == qwMsg->msgType && (0 == taosRand() % 3)) {
SEpSet epSet = {0};
epSet.inUse = 1;

@ -162,42 +171,94 @@ int32_t qwDbgResponseRedirect(SQWMsg *qwMsg, SQWTaskCtx *ctx) {

ctx->phase = QW_PHASE_POST_QUERY;
qwDbgBuildAndSendRedirectRsp(qwMsg->msgType + 1, &qwMsg->connInfo, TSDB_CODE_RPC_REDIRECT, &epSet);
return TSDB_CODE_SUCCESS;
*rsped = true;
return;
}

if (TDMT_SCH_MERGE_QUERY == qwMsg->msgType && (0 == taosRand() % 3)) {
ctx->phase = QW_PHASE_POST_QUERY;
QW_SET_PHASE(ctx, QW_PHASE_POST_QUERY);
qwDbgBuildAndSendRedirectRsp(qwMsg->msgType + 1, &qwMsg->connInfo, TSDB_CODE_RPC_REDIRECT, NULL);
return TSDB_CODE_SUCCESS;
*rsped = true;
return;
}

if ((TDMT_SCH_FETCH == qwMsg->msgType) && (0 == taosRand() % 9)) {
qwDbgBuildAndSendRedirectRsp(qwMsg->msgType + 1, &qwMsg->connInfo, TSDB_CODE_RPC_REDIRECT, NULL);
*rsped = true;
return;
}
}

return TSDB_CODE_SUCCESS;
}

void qwDbgSimulateSleep(void) {
if (!gQWDebug.sleepSimulate) {
return;
}

static int32_t ignoreTime = 0;
if (++ignoreTime > 10) {
taosSsleep(taosRand() % 20);
}
}

void qwDbgSimulateDead(QW_FPARAMS_DEF, SQWTaskCtx *ctx, bool *rsped) {
if (!gQWDebug.deadSimulate) {
return;
}

if (*rsped) {
return;
}

static int32_t ignoreTime = 0;

if (++ignoreTime > 10 && 0 == taosRand() % 9) {
SRpcHandleInfo *pConn = ((ctx->msgType == TDMT_SCH_FETCH || ctx->msgType == TDMT_SCH_MERGE_FETCH) ? &ctx->dataConnInfo : &ctx->ctrlConnInfo);
qwBuildAndSendErrorRsp(ctx->msgType + 1, pConn, TSDB_CODE_RPC_BROKEN_LINK);

qwBuildAndSendDropMsg(QW_FPARAMS(), pConn);
*rsped = true;

return;
}
}


int32_t qwDbgEnableDebug(char *option) {
if (0 == strcasecmp(option, "lock")) {
gQWDebug.lockEnable = true;
qDebug("qw lock debug enabled");
qError("qw lock debug enabled");
return TSDB_CODE_SUCCESS;
}

if (0 == strcasecmp(option, "status")) {
gQWDebug.statusEnable = true;
qDebug("qw status debug enabled");
qError("qw status debug enabled");
return TSDB_CODE_SUCCESS;
}

if (0 == strcasecmp(option, "dump")) {
gQWDebug.dumpEnable = true;
qDebug("qw dump debug enabled");
qError("qw dump debug enabled");
return TSDB_CODE_SUCCESS;
}

if (0 == strcasecmp(option, "tmp")) {
gQWDebug.tmp = true;
qDebug("qw tmp debug enabled");
if (0 == strcasecmp(option, "sleep")) {
gQWDebug.sleepSimulate = true;
qError("qw sleep debug enabled");
return TSDB_CODE_SUCCESS;
}

if (0 == strcasecmp(option, "dead")) {
gQWDebug.sleepSimulate = true;
qError("qw dead debug enabled");
return TSDB_CODE_SUCCESS;
}

if (0 == strcasecmp(option, "redirect")) {
gQWDebug.redirectSimulate = true;
qError("qw redirect debug enabled");
return TSDB_CODE_SUCCESS;
}

@ -43,6 +43,20 @@ void qwFreeFetchRsp(void *msg) {
}
}

int32_t qwBuildAndSendErrorRsp(int32_t rspType, SRpcHandleInfo *pConn, int32_t code) {
SRpcMsg rpcRsp = {
.msgType = rspType,
.pCont = NULL,
.contLen = 0,
.code = code,
.info = *pConn,
};

tmsgSendRsp(&rpcRsp);

return TSDB_CODE_SUCCESS;
}

int32_t qwBuildAndSendQueryRsp(int32_t rspType, SRpcHandleInfo *pConn, int32_t code, SQWTaskCtx *ctx) {
STbVerInfo* tbInfo = ctx ? &ctx->tbInfo : NULL;
int64_t affectedRows = ctx ? ctx->affectedRows : 0;

@ -184,7 +198,6 @@ int32_t qwBuildAndSendDropMsg(QW_FPARAMS_DEF, SRpcHandleInfo *pConn) {
int32_t code = tmsgPutToQueue(&mgmt->msgCb, FETCH_QUEUE, &pNewMsg);
if (TSDB_CODE_SUCCESS != code) {
QW_SCH_TASK_ELOG("put drop task msg to queue failed, vgId:%d, code:%s", mgmt->nodeId, tstrerror(code));
rpcFreeCont(req);
QW_ERR_RET(code);
}

@ -374,8 +387,7 @@ int32_t qWorkerProcessQueryMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int
qwMsg.msgInfo.needFetch = msg->needFetch;

char * sql = strndup(msg->msg, msg->sqlLen);
QW_SCH_TASK_DLOG("processQuery start, node:%p, type:%s, handle:%p, sql:%s", node, TMSG_INFO(pMsg->msgType), pMsg->info.handle, sql);

QW_SCH_TASK_DLOG("processQuery start, node:%p, type:%s, handle:%p, SQL:%s", node, TMSG_INFO(pMsg->msgType), pMsg->info.handle, sql);
QW_ERR_RET(qwProcessQuery(QW_FPARAMS(), &qwMsg, sql));
QW_SCH_TASK_DLOG("processQuery end, node:%p", node);

@ -270,7 +270,7 @@ int32_t qwAddAcquireTaskCtx(QW_FPARAMS_DEF, SQWTaskCtx **ctx) { return qwAddTask

void qwReleaseTaskCtx(SQWorker *mgmt, void *ctx) { taosHashRelease(mgmt->ctxHash, ctx); }

void qwFreeTaskHandle(QW_FPARAMS_DEF, qTaskInfo_t *taskHandle) {
void qwFreeTaskHandle(qTaskInfo_t *taskHandle) {
// Note: free/kill may in RC
qTaskInfo_t otaskHandle = atomic_load_ptr(taskHandle);
if (otaskHandle && atomic_val_compare_exchange_ptr(taskHandle, otaskHandle, NULL)) {

@ -278,7 +278,7 @@ void qwFreeTaskHandle(QW_FPARAMS_DEF, qTaskInfo_t *taskHandle) {
}
}

int32_t qwKillTaskHandle(QW_FPARAMS_DEF, SQWTaskCtx *ctx) {
int32_t qwKillTaskHandle(SQWTaskCtx *ctx) {
int32_t code = 0;
// Note: free/kill may in RC
qTaskInfo_t taskHandle = atomic_load_ptr(&ctx->taskHandle);

@ -290,7 +290,7 @@ int32_t qwKillTaskHandle(QW_FPARAMS_DEF, SQWTaskCtx *ctx) {
QW_RET(code);
}

void qwFreeTaskCtx(QW_FPARAMS_DEF, SQWTaskCtx *ctx) {
void qwFreeTaskCtx(SQWTaskCtx *ctx) {
if (ctx->ctrlConnInfo.handle) {
tmsgReleaseHandle(&ctx->ctrlConnInfo, TAOS_CONN_SERVER);
}

@ -300,7 +300,7 @@ void qwFreeTaskCtx(QW_FPARAMS_DEF, SQWTaskCtx *ctx) {

// NO need to release dataConnInfo

qwFreeTaskHandle(QW_FPARAMS(), &ctx->taskHandle);
qwFreeTaskHandle(&ctx->taskHandle);

if (ctx->sinkHandle) {
dsDestroyDataSinker(ctx->sinkHandle);

@ -336,7 +336,7 @@ int32_t qwDropTaskCtx(QW_FPARAMS_DEF) {
QW_ERR_RET(TSDB_CODE_QRY_TASK_CTX_NOT_EXIST);
}

qwFreeTaskCtx(QW_FPARAMS(), &octx);
qwFreeTaskCtx(&octx);

QW_TASK_DLOG_E("task ctx dropped");

@ -463,13 +463,21 @@ void qwDestroyImpl(void *pMgmt) {
mgmt->hbTimer = NULL;
taosTmrCleanUp(mgmt->timer);

// TODO STOP ALL QUERY

// TODO FREE ALL
uint64_t qId, tId;
int32_t eId;
void *pIter = taosHashIterate(mgmt->ctxHash, NULL);
while (pIter) {
SQWTaskCtx *ctx = (SQWTaskCtx *)pIter;
void *key = taosHashGetKey(pIter, NULL);
QW_GET_QTID(key, qId, tId, eId);

qwFreeTaskCtx(ctx);
QW_TASK_DLOG_E("task ctx freed");
pIter = taosHashIterate(mgmt->ctxHash, pIter);
}
taosHashCleanup(mgmt->ctxHash);

void *pIter = taosHashIterate(mgmt->schHash, NULL);
pIter = taosHashIterate(mgmt->schHash, NULL);
while (pIter) {
SQWSchStatus *sch = (SQWSchStatus *)pIter;
qwDestroySchStatus(sch);

@ -83,6 +83,7 @@ int32_t qwExecTask(QW_FPARAMS_DEF, SQWTaskCtx *ctx, bool *queryEnd) {
|
|||
|
||||
// if *taskHandle is NULL, it's killed right now
|
||||
if (taskHandle) {
|
||||
qwDbgSimulateSleep();
|
||||
code = qExecTask(taskHandle, &pRes, &useconds);
|
||||
if (code) {
|
||||
if (code != TSDB_CODE_OPS_NOT_SUPPORT) {
|
||||
|
@ -293,11 +294,7 @@ int32_t qwHandlePrePhaseEvents(QW_FPARAMS_DEF, int8_t phase, SQWPhaseInput *inpu
|
|||
|
||||
QW_LOCK(QW_WRITE, &ctx->lock);
|
||||
|
||||
if (QW_PHASE_PRE_FETCH == phase) {
|
||||
atomic_store_8((int8_t *)&ctx->queryFetched, true);
|
||||
} else {
|
||||
atomic_store_8(&ctx->phase, phase);
|
||||
}
|
||||
QW_SET_PHASE(ctx, phase);
|
||||
|
||||
if (atomic_load_8((int8_t *)&ctx->queryEnd)) {
|
||||
QW_TASK_ELOG_E("query already end");
|
||||
|
@ -370,6 +367,7 @@ int32_t qwHandlePrePhaseEvents(QW_FPARAMS_DEF, int8_t phase, SQWPhaseInput *inpu
|
|||
}
|
||||
|
||||
_return:
|
||||
|
||||
if (ctx) {
|
||||
QW_UPDATE_RSP_CODE(ctx, code);
|
||||
|
||||
|
@ -390,7 +388,6 @@ int32_t qwHandlePostPhaseEvents(QW_FPARAMS_DEF, int8_t phase, SQWPhaseInput *inp
|
|||
int32_t code = 0;
|
||||
SQWTaskCtx *ctx = NULL;
|
||||
SRpcHandleInfo connInfo = {0};
|
||||
SRpcHandleInfo *rspConnection = NULL;
|
||||
|
||||
QW_TASK_DLOG("start to handle event at phase %s", qwPhaseStr(phase));
|
||||
|
||||
|
@ -403,13 +400,6 @@ int32_t qwHandlePostPhaseEvents(QW_FPARAMS_DEF, int8_t phase, SQWPhaseInput *inp
|
|||
QW_ERR_JRET(TSDB_CODE_QRY_TASK_DROPPED);
|
||||
}
|
||||
|
||||
if (QW_PHASE_POST_QUERY == phase) {
|
||||
connInfo = ctx->ctrlConnInfo;
|
||||
rspConnection = &connInfo;
|
||||
|
||||
ctx->queryRsped = true;
|
||||
}
|
||||
|
||||
if (QW_EVENT_RECEIVED(ctx, QW_EVENT_DROP)) {
|
||||
if (QW_PHASE_POST_FETCH == phase) {
|
||||
QW_TASK_WLOG("drop received at wrong phase %s", qwPhaseStr(phase));
|
||||
|
@ -437,17 +427,23 @@ _return:
|
|||
qwUpdateTaskStatus(QW_FPARAMS(), JOB_TASK_STATUS_PART_SUCC);
|
||||
}
|
||||
|
||||
if (rspConnection) {
|
||||
qwBuildAndSendQueryRsp(input->msgType + 1, rspConnection, code, ctx);
|
||||
QW_TASK_DLOG("query msg rsped, handle:%p, code:%x - %s", rspConnection->handle, code, tstrerror(code));
|
||||
if (QW_PHASE_POST_QUERY == phase && ctx) {
|
||||
ctx->queryRsped = true;
|
||||
|
||||
bool rsped = false;
|
||||
SQWMsg qwMsg = {.msgType = ctx->msgType, .connInfo = ctx->ctrlConnInfo};
|
||||
qwDbgSimulateRedirect(&qwMsg, ctx, &rsped);
|
||||
qwDbgSimulateDead(QW_FPARAMS(), ctx, &rsped);
|
||||
if (!rsped) {
|
||||
qwBuildAndSendQueryRsp(input->msgType + 1, &ctx->ctrlConnInfo, code, ctx);
|
||||
QW_TASK_DLOG("query msg rsped, handle:%p, code:%x - %s", ctx->ctrlConnInfo.handle, code, tstrerror(code));
|
||||
}
|
||||
}
|
||||
|
||||
if (ctx) {
|
||||
QW_UPDATE_RSP_CODE(ctx, code);
|
||||
|
||||
if (QW_PHASE_POST_FETCH != phase) {
|
||||
atomic_store_8(&ctx->phase, phase);
|
||||
}
|
||||
QW_SET_PHASE(ctx, phase);
|
||||
|
||||
QW_UNLOCK(QW_WRITE, &ctx->lock);
|
||||
qwReleaseTaskCtx(mgmt, ctx);
|
||||
|
@ -488,8 +484,6 @@ int32_t qwPreprocessQuery(QW_FPARAMS_DEF, SQWMsg *qwMsg) {
|
|||
|
||||
QW_ERR_JRET(qwAddTaskStatus(QW_FPARAMS(), JOB_TASK_STATUS_INIT));
|
||||
|
||||
qwDbgResponseRedirect(qwMsg, ctx);
|
||||
|
||||
_return:
|
||||
|
||||
if (ctx) {
|
||||
|
@ -517,7 +511,7 @@ int32_t qwProcessQuery(QW_FPARAMS_DEF, SQWMsg *qwMsg, const char* sql) {
|
|||
ctx->taskType = qwMsg->msgInfo.taskType;
|
||||
ctx->explain = qwMsg->msgInfo.explain;
|
||||
ctx->needFetch = qwMsg->msgInfo.needFetch;
|
||||
ctx->queryType = qwMsg->msgType;
|
||||
ctx->msgType = qwMsg->msgType;
|
||||
|
||||
//QW_TASK_DLOGL("subplan json string, len:%d, %s", qwMsg->msgLen, qwMsg->msg);
|
||||
|
||||
|
@ -636,8 +630,8 @@ int32_t qwProcessCQuery(QW_FPARAMS_DEF, SQWMsg *qwMsg) {
|
|||
|
||||
QW_LOCK(QW_WRITE, &ctx->lock);
|
||||
if (queryEnd || code || 0 == atomic_load_8((int8_t *)&ctx->queryContinue)) {
|
||||
// Note: if necessary, fetch need to put cquery to queue again
|
||||
atomic_store_8(&ctx->phase, 0);
|
||||
// Note: query is not running anymore
|
||||
QW_SET_PHASE(ctx, 0);
|
||||
QW_UNLOCK(QW_WRITE, &ctx->lock);
|
||||
break;
|
||||
}
|
||||
|
@ -662,14 +656,13 @@ int32_t qwProcessFetch(QW_FPARAMS_DEF, SQWMsg *qwMsg) {
|
|||
|
||||
QW_ERR_JRET(qwGetTaskCtx(QW_FPARAMS(), &ctx));
|
||||
|
||||
ctx->queryType = qwMsg->msgType;
|
||||
ctx->msgType = qwMsg->msgType;
|
||||
ctx->dataConnInfo = qwMsg->connInfo;
|
||||
|
||||
SOutputData sOutput = {0};
|
||||
QW_ERR_JRET(qwGetQueryResFromSink(QW_FPARAMS(), ctx, &dataLen, &rsp, &sOutput));
|
||||
|
||||
if (NULL == rsp) {
|
||||
ctx->dataConnInfo = qwMsg->connInfo;
|
||||
|
||||
QW_SET_EVENT_RECEIVED(ctx, QW_EVENT_FETCH);
|
||||
} else {
|
||||
bool qComplete = (DS_BUF_EMPTY == sOutput.bufStatus && sOutput.queryEnd);
|
||||
|
@ -714,9 +707,16 @@ _return:
|
|||
}
|
||||
|
||||
if (code || rsp) {
|
||||
qwBuildAndSendFetchRsp(qwMsg->msgType + 1, &qwMsg->connInfo, rsp, dataLen, code);
|
||||
QW_TASK_DLOG("%s send, handle:%p, code:%x - %s, dataLen:%d", TMSG_INFO(qwMsg->msgType + 1), qwMsg->connInfo.handle, code, tstrerror(code),
|
||||
dataLen);
|
||||
bool rsped = false;
|
||||
if (ctx) {
|
||||
qwDbgSimulateRedirect(qwMsg, ctx, &rsped);
|
||||
qwDbgSimulateDead(QW_FPARAMS(), ctx, &rsped);
|
||||
}
|
||||
if (!rsped) {
|
||||
qwBuildAndSendFetchRsp(qwMsg->msgType + 1, &qwMsg->connInfo, rsp, dataLen, code);
|
||||
QW_TASK_DLOG("%s send, handle:%p, code:%x - %s, dataLen:%d", TMSG_INFO(qwMsg->msgType + 1), qwMsg->connInfo.handle, code, tstrerror(code),
|
||||
dataLen);
|
||||
}
|
||||
}
|
||||
|
||||
QW_RET(TSDB_CODE_SUCCESS);
|
||||
|
@ -724,7 +724,7 @@ _return:
|
|||
|
||||
int32_t qwProcessDrop(QW_FPARAMS_DEF, SQWMsg *qwMsg) {
|
||||
int32_t code = 0;
|
||||
bool rsped = false;
|
||||
bool dropped = false;
|
||||
SQWTaskCtx *ctx = NULL;
|
||||
bool locked = false;
|
||||
|
||||
|
@ -740,18 +740,14 @@ int32_t qwProcessDrop(QW_FPARAMS_DEF, SQWMsg *qwMsg) {
|
|||
}
|
||||
|
||||
if (QW_QUERY_RUNNING(ctx)) {
|
||||
QW_ERR_JRET(qwKillTaskHandle(QW_FPARAMS(), ctx));
|
||||
QW_ERR_JRET(qwKillTaskHandle(ctx));
|
||||
qwUpdateTaskStatus(QW_FPARAMS(), JOB_TASK_STATUS_DROP);
|
||||
} else if (ctx->phase > 0) {
|
||||
QW_ERR_JRET(qwDropTask(QW_FPARAMS()));
|
||||
rsped = true;
|
||||
} else {
|
||||
// task not started
|
||||
QW_ERR_JRET(qwDropTask(QW_FPARAMS()));
|
||||
dropped = true;
|
||||
}
|
||||
|
||||
if (!rsped) {
|
||||
ctx->ctrlConnInfo = qwMsg->connInfo;
|
||||
|
||||
if (!dropped) {
|
||||
QW_SET_EVENT_RECEIVED(ctx, QW_EVENT_DROP);
|
||||
}
|
||||
|
||||
|
@ -954,7 +950,7 @@ int32_t qwProcessDelete(QW_FPARAMS_DEF, SQWMsg *qwMsg, SDeleteRes *pRes) {
|
|||
|
||||
_return:
|
||||
|
||||
qwFreeTaskCtx(QW_FPARAMS(), &ctx);
|
||||
qwFreeTaskCtx(&ctx);
|
||||
|
||||
QW_RET(TSDB_CODE_SUCCESS);
|
||||
}
|
||||
|
|
|
@ -2408,6 +2408,10 @@ int32_t irateScalarFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam
return nonCalcScalarFunction(pInput, inputNum, pOutput);
}

int32_t diffScalarFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput) {
return nonCalcScalarFunction(pInput, inputNum, pOutput);
}

int32_t twaScalarFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput) {
return avgScalarFunction(pInput, inputNum, pOutput);
}

@ -55,13 +55,11 @@ typedef enum {
|
|||
#define SCHEDULE_DEFAULT_MAX_TASK_NUM 1000
|
||||
#define SCHEDULE_DEFAULT_MAX_NODE_TABLE_NUM 200 // unit is TSDB_TABLE_NUM_UNIT
|
||||
#define SCHEDULE_DEFAULT_POLICY SCH_LOAD_SEQ
|
||||
#define SCHEDULE_DEFAULT_MAX_NODE_NUM 20
|
||||
|
||||
#define SCH_DEFAULT_TASK_TIMEOUT_USEC 10000000
|
||||
#define SCH_MAX_TASK_TIMEOUT_USEC 60000000
|
||||
#define SCH_MAX_CANDIDATE_EP_NUM TSDB_MAX_REPLICA
|
||||
|
||||
|
||||
|
||||
#define SCH_DEFAULT_MAX_RETRY_NUM 6
|
||||
|
||||
typedef struct SSchDebug {
|
||||
bool lockEnable;
|
||||
|
@ -211,6 +209,7 @@ typedef struct SSchTask {
|
|||
int32_t maxExecTimes; // task max exec times
|
||||
int32_t maxRetryTimes; // task max retry times
|
||||
int32_t retryTimes; // task retry times
|
||||
bool waitRetry; // wait for retry
|
||||
int32_t execId; // task current execute index
|
||||
SSchLevel *level; // level
|
||||
SRWLatch planLock; // task update plan lock
|
||||
|
@ -274,7 +273,8 @@ typedef struct SSchJob {
|
|||
int32_t errCode;
|
||||
SRWLatch resLock;
|
||||
SExecResult execRes;
|
||||
void *resData; //TODO free it or not
|
||||
void *fetchRes; //TODO free it or not
|
||||
bool fetched;
|
||||
int32_t resNumOfRows;
|
||||
SSchResInfo userRes;
|
||||
const char *sql;
|
||||
|
@ -326,7 +326,7 @@ extern SSchedulerMgmt schMgmt;
|
|||
#define SCH_IS_EXPLAIN_JOB(_job) (EXPLAIN_MODE_ANALYZE == (_job)->attr.explainMode)
|
||||
#define SCH_NETWORK_ERR(_code) ((_code) == TSDB_CODE_RPC_BROKEN_LINK || (_code) == TSDB_CODE_RPC_NETWORK_UNAVAIL)
|
||||
#define SCH_MERGE_TASK_NETWORK_ERR(_task, _code, _len) (SCH_NETWORK_ERR(_code) && (((_len) > 0) || (!SCH_IS_DATA_BIND_TASK(_task))))
|
||||
#define SCH_REDIRECT_MSGTYPE(_msgType) ((_msgType) == TDMT_SCH_QUERY || (_msgType) == TDMT_SCH_MERGE_QUERY || (_msgType) == TDMT_SCH_FETCH || (_msgType) == TDMT_SCH_MERGE_FETCH)
|
||||
#define SCH_REDIRECT_MSGTYPE(_msgType) ((_msgType) == TDMT_SCH_LINK_BROKEN || (_msgType) == TDMT_SCH_QUERY || (_msgType) == TDMT_SCH_MERGE_QUERY || (_msgType) == TDMT_SCH_FETCH || (_msgType) == TDMT_SCH_MERGE_FETCH)
|
||||
#define SCH_TASK_NEED_REDIRECT(_task, _msgType, _code, _rspLen) (SCH_REDIRECT_MSGTYPE(_msgType) && (NEED_SCHEDULER_REDIRECT_ERROR(_code) || SCH_MERGE_TASK_NETWORK_ERR((_task), (_code), (_rspLen))))
|
||||
#define SCH_NEED_RETRY(_msgType, _code) ((SCH_NETWORK_ERR(_code) && SCH_REDIRECT_MSGTYPE(_msgType)) || (_code) == TSDB_CODE_SCH_TIMEOUT_ERROR)
|
||||
|
||||
|
@ -368,6 +368,8 @@ extern SSchedulerMgmt schMgmt;
|
|||
qError("QID:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, pJob->queryId, SCH_TASK_ID(pTask), SCH_TASK_EID(pTask),__VA_ARGS__)
|
||||
#define SCH_TASK_DLOG(param, ...) \
|
||||
qDebug("QID:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, pJob->queryId, SCH_TASK_ID(pTask), SCH_TASK_EID(pTask),__VA_ARGS__)
|
||||
#define SCH_TASK_TLOG(param, ...) \
|
||||
qTrace("QID:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, pJob->queryId, SCH_TASK_ID(pTask), SCH_TASK_EID(pTask),__VA_ARGS__)
|
||||
#define SCH_TASK_DLOGL(param, ...) \
|
||||
qDebugL("QID:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, pJob->queryId, SCH_TASK_ID(pTask), SCH_TASK_EID(pTask),__VA_ARGS__)
|
||||
#define SCH_TASK_WLOG(param, ...) \
|
||||
|
@ -441,7 +443,7 @@ void schFreeRpcCtx(SRpcCtx *pCtx);
|
|||
int32_t schGetCallbackFp(int32_t msgType, __async_send_cb_fn_t *fp);
|
||||
bool schJobNeedToStop(SSchJob *pJob, int8_t *pStatus);
|
||||
int32_t schProcessOnTaskSuccess(SSchJob *pJob, SSchTask *pTask);
|
||||
int32_t schSaveJobQueryRes(SSchJob *pJob, SQueryTableRsp *rsp);
|
||||
int32_t schSaveJobExecRes(SSchJob *pJob, SQueryTableRsp *rsp);
|
||||
int32_t schProcessOnExplainDone(SSchJob *pJob, SSchTask *pTask, SRetrieveTableRsp *pRsp);
|
||||
void schProcessOnDataFetched(SSchJob *job);
|
||||
int32_t schGetTaskInJob(SSchJob *pJob, uint64_t taskId, SSchTask **pTask);
|
||||
|
@ -492,7 +494,7 @@ int32_t schSwitchTaskCandidateAddr(SSchJob *pJob, SSchTask *pTask);
|
|||
void schDirectPostJobRes(SSchedulerReq* pReq, int32_t errCode);
|
||||
int32_t schHandleJobFailure(SSchJob *pJob, int32_t errCode);
|
||||
int32_t schHandleJobDrop(SSchJob *pJob, int32_t errCode);
|
||||
bool schChkCurrentOp(SSchJob *pJob, int32_t op, bool sync);
|
||||
bool schChkCurrentOp(SSchJob *pJob, int32_t op, int8_t sync);
|
||||
|
||||
extern SSchDebug gSCHDebug;
|
||||
|
||||
|
|
|
@ -110,7 +110,7 @@ int32_t schUpdateJobStatus(SSchJob *pJob, int8_t newStatus) {
|
|||
break;
|
||||
case JOB_TASK_STATUS_PART_SUCC:
|
||||
if (newStatus != JOB_TASK_STATUS_FAIL && newStatus != JOB_TASK_STATUS_SUCC &&
|
||||
newStatus != JOB_TASK_STATUS_DROP) {
|
||||
newStatus != JOB_TASK_STATUS_DROP && newStatus != JOB_TASK_STATUS_EXEC) {
|
||||
SCH_ERR_JRET(TSDB_CODE_QRY_APP_ERROR);
|
||||
}
|
||||
|
||||
|
@ -389,13 +389,18 @@ int32_t schDumpJobExecRes(SSchJob* pJob, SExecResult* pRes) {
|
|||
|
||||
int32_t schDumpJobFetchRes(SSchJob* pJob, void** pData) {
|
||||
int32_t code = 0;
|
||||
if (pJob->resData && ((SRetrieveTableRsp *)pJob->resData)->completed) {
|
||||
SCH_ERR_RET(schSwitchJobStatus(pJob, JOB_TASK_STATUS_SUCC, NULL));
|
||||
|
||||
SCH_LOCK(SCH_WRITE, &pJob->resLock);
|
||||
|
||||
pJob->fetched = true;
|
||||
|
||||
if (pJob->fetchRes && ((SRetrieveTableRsp *)pJob->fetchRes)->completed) {
|
||||
SCH_ERR_JRET(schSwitchJobStatus(pJob, JOB_TASK_STATUS_SUCC, NULL));
|
||||
}
|
||||
|
||||
while (true) {
|
||||
*pData = atomic_load_ptr(&pJob->resData);
|
||||
if (*pData != atomic_val_compare_exchange_ptr(&pJob->resData, *pData, NULL)) {
|
||||
*pData = atomic_load_ptr(&pJob->fetchRes);
|
||||
if (*pData != atomic_val_compare_exchange_ptr(&pJob->fetchRes, *pData, NULL)) {
|
||||
continue;
|
||||
}
|
||||
|
||||
|
@ -414,7 +419,11 @@ int32_t schDumpJobFetchRes(SSchJob* pJob, void** pData) {
|
|||
|
||||
SCH_JOB_DLOG("fetch done, totalRows:%d", pJob->resNumOfRows);
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
_return:
|
||||
|
||||
SCH_UNLOCK(SCH_WRITE, &pJob->resLock);
|
||||
|
||||
return code;
|
||||
}
|
||||
|
||||
int32_t schNotifyUserExecRes(SSchJob* pJob) {
|
||||
|
@ -512,8 +521,12 @@ int32_t schHandleJobDrop(SSchJob *pJob, int32_t errCode) {
|
|||
}
|
||||
|
||||
|
||||
int32_t schProcessOnJobPartialSuccess(SSchJob *pJob) {
|
||||
schPostJobRes(pJob, SCH_OP_EXEC);
|
||||
int32_t schProcessOnJobPartialSuccess(SSchJob *pJob) {
|
||||
if (schChkCurrentOp(pJob, SCH_OP_FETCH, -1)) {
|
||||
SCH_ERR_RET(schLaunchFetchTask(pJob));
|
||||
} else {
|
||||
schPostJobRes(pJob, 0);
|
||||
}
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
@ -526,7 +539,7 @@ int32_t schProcessOnExplainDone(SSchJob *pJob, SSchTask *pTask, SRetrieveTableRs
|
|||
SCH_TASK_DLOG("got explain rsp, rows:%d, complete:%d", htonl(pRsp->numOfRows), pRsp->completed);
|
||||
|
||||
atomic_store_32(&pJob->resNumOfRows, htonl(pRsp->numOfRows));
|
||||
atomic_store_ptr(&pJob->resData, pRsp);
|
||||
atomic_store_ptr(&pJob->fetchRes, pRsp);
|
||||
|
||||
SCH_SET_TASK_STATUS(pTask, JOB_TASK_STATUS_SUCC);
|
||||
|
||||
|
@ -561,7 +574,7 @@ int32_t schLaunchJobLowerLevel(SSchJob *pJob, SSchTask *pTask) {
|
|||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
int32_t schSaveJobQueryRes(SSchJob *pJob, SQueryTableRsp *rsp) {
|
||||
int32_t schSaveJobExecRes(SSchJob *pJob, SQueryTableRsp *rsp) {
|
||||
if (rsp->tbFName[0]) {
|
||||
SCH_LOCK(SCH_WRITE, &pJob->resLock);
|
||||
|
||||
|
@ -600,7 +613,7 @@ int32_t schGetTaskInJob(SSchJob *pJob, uint64_t taskId, SSchTask **pTask) {
|
|||
|
||||
int32_t schLaunchJob(SSchJob *pJob) {
|
||||
if (EXPLAIN_MODE_STATIC == pJob->attr.explainMode) {
|
||||
SCH_ERR_RET(qExecStaticExplain(pJob->pDag, (SRetrieveTableRsp **)&pJob->resData));
|
||||
SCH_ERR_RET(qExecStaticExplain(pJob->pDag, (SRetrieveTableRsp **)&pJob->fetchRes));
|
||||
SCH_ERR_RET(schSwitchJobStatus(pJob, JOB_TASK_STATUS_PART_SUCC, NULL));
|
||||
} else {
|
||||
SSchLevel *level = taosArrayGet(pJob->levels, pJob->levelIdx);
|
||||
|
@ -661,7 +674,7 @@ void schFreeJobImpl(void *job) {
|
|||
qDestroyQueryPlan(pJob->pDag);
|
||||
|
||||
taosMemoryFreeClear(pJob->userRes.execRes);
|
||||
taosMemoryFreeClear(pJob->resData);
|
||||
taosMemoryFreeClear(pJob->fetchRes);
|
||||
taosMemoryFree(pJob);
|
||||
|
||||
int32_t jobNum = atomic_sub_fetch_32(&schMgmt.jobNum, 1);
|
||||
|
@ -795,9 +808,14 @@ void schDirectPostJobRes(SSchedulerReq* pReq, int32_t errCode) {
|
|||
}
|
||||
}
|
||||
|
||||
bool schChkCurrentOp(SSchJob *pJob, int32_t op, bool sync) {
|
||||
bool schChkCurrentOp(SSchJob *pJob, int32_t op, int8_t sync) {
|
||||
bool r = false;
|
||||
SCH_LOCK(SCH_READ, &pJob->opStatus.lock);
|
||||
bool r = (pJob->opStatus.op == op) && (pJob->opStatus.syncReq == sync);
|
||||
if (sync >= 0) {
|
||||
r = (pJob->opStatus.op == op) && (pJob->opStatus.syncReq == sync);
|
||||
} else {
|
||||
r = (pJob->opStatus.op == op);
|
||||
}
|
||||
SCH_UNLOCK(SCH_READ, &pJob->opStatus.lock);
|
||||
|
||||
return r;
|
||||
|
|
|
@ -256,7 +256,7 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t execId, SDa
|
|||
|
||||
SCH_ERR_JRET(rsp->code);
|
||||
|
||||
SCH_ERR_JRET(schSaveJobQueryRes(pJob, rsp));
|
||||
SCH_ERR_JRET(schSaveJobExecRes(pJob, rsp));
|
||||
|
||||
atomic_add_fetch_32(&pJob->resNumOfRows, rsp->affectedRows);
|
||||
|
||||
|
@ -277,8 +277,8 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t execId, SDa
|
|||
SCH_ERR_JRET(TSDB_CODE_QRY_INVALID_INPUT);
|
||||
}
|
||||
|
||||
if (pJob->resData) {
|
||||
SCH_TASK_ELOG("explain result is already generated, res:%p", pJob->resData);
|
||||
if (pJob->fetchRes) {
|
||||
SCH_TASK_ELOG("explain result is already generated, res:%p", pJob->fetchRes);
|
||||
SCH_ERR_JRET(TSDB_CODE_SCH_STATUS_ERROR);
|
||||
}
|
||||
|
||||
|
@ -325,13 +325,13 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t execId, SDa
|
|||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
if (pJob->resData) {
|
||||
SCH_TASK_ELOG("got fetch rsp while res already exists, res:%p", pJob->resData);
|
||||
if (pJob->fetchRes) {
|
||||
SCH_TASK_ELOG("got fetch rsp while res already exists, res:%p", pJob->fetchRes);
|
||||
taosMemoryFreeClear(rsp);
|
||||
SCH_ERR_JRET(TSDB_CODE_SCH_STATUS_ERROR);
|
||||
}
|
||||
|
||||
atomic_store_ptr(&pJob->resData, rsp);
|
||||
atomic_store_ptr(&pJob->fetchRes, rsp);
|
||||
atomic_add_fetch_32(&pJob->resNumOfRows, htonl(rsp->numOfRows));
|
||||
|
||||
if (rsp->completed) {
|
||||
|
@ -1010,6 +1010,7 @@ int32_t schBuildAndSendMsg(SSchJob *pJob, SSchTask *pTask, SQueryNodeAddr *addr,
|
|||
memcpy(pMsg->msg + len, pTask->msg, pTask->msgLen);
|
||||
|
||||
persistHandle = true;
|
||||
SCH_SET_TASK_HANDLE(pTask, rpcAllocHandle());
|
||||
break;
|
||||
}
|
||||
case TDMT_SCH_FETCH:
|
||||
|
|
|
@ -47,10 +47,10 @@ void schFreeTask(SSchJob *pJob, SSchTask *pTask) {
|
|||
|
||||
void schInitTaskRetryTimes(SSchJob *pJob, SSchTask *pTask, SSchLevel *pLevel) {
|
||||
if (SCH_IS_DATA_BIND_TASK(pTask) || (!SCH_IS_QUERY_JOB(pJob)) || (SCH_ALL != schMgmt.cfg.schPolicy)) {
|
||||
pTask->maxRetryTimes = SCH_MAX_CANDIDATE_EP_NUM;
|
||||
pTask->maxRetryTimes = SCH_DEFAULT_MAX_RETRY_NUM;
|
||||
} else {
|
||||
int32_t nodeNum = taosArrayGetSize(pJob->nodeList);
|
||||
pTask->maxRetryTimes = TMAX(nodeNum, SCH_MAX_CANDIDATE_EP_NUM);
|
||||
pTask->maxRetryTimes = TMAX(nodeNum, SCH_DEFAULT_MAX_RETRY_NUM);
|
||||
}
|
||||
|
||||
pTask->maxExecTimes = pTask->maxRetryTimes * (pLevel->level + 1);
|
||||
|
@ -64,11 +64,11 @@ int32_t schInitTask(SSchJob *pJob, SSchTask *pTask, SSubplan *pPlan, SSchLevel *
|
|||
pTask->execId = -1;
|
||||
pTask->timeoutUsec = SCH_DEFAULT_TASK_TIMEOUT_USEC;
|
||||
pTask->taskId = schGenTaskId();
|
||||
pTask->execNodes =
|
||||
taosHashInit(SCH_MAX_CANDIDATE_EP_NUM, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_NO_LOCK);
|
||||
|
||||
schInitTaskRetryTimes(pJob, pTask, pLevel);
|
||||
|
||||
pTask->execNodes =
|
||||
taosHashInit(pTask->maxExecTimes, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_NO_LOCK);
|
||||
pTask->profile.execTime = taosArrayInit(pTask->maxExecTimes, sizeof(int64_t));
|
||||
if (NULL == pTask->execNodes || NULL == pTask->profile.execTime) {
|
||||
SCH_ERR_JRET(TSDB_CODE_QRY_OUT_OF_MEMORY);
|
||||
|
@ -125,8 +125,8 @@ int32_t schDropTaskExecNode(SSchJob *pJob, SSchTask *pTask, void *handle, int32_
|
|||
SCH_TASK_DLOG("execId %d removed from execNodeList", execId);
|
||||
}
|
||||
|
||||
if (execId != pTask->execId) { // ignore it
|
||||
SCH_TASK_DLOG("execId %d is not current execId %d", execId, pTask->execId);
|
||||
if ((execId != pTask->execId) || pTask->waitRetry) { // ignore it
|
||||
SCH_TASK_DLOG("execId %d is already not current execId %d, waitRetry %d", execId, pTask->execId, pTask->waitRetry);
|
||||
SCH_ERR_RET(TSDB_CODE_SCH_IGNORE_ERROR);
|
||||
}
|
||||
|
||||
|
@ -138,7 +138,17 @@ int32_t schUpdateTaskExecNode(SSchJob *pJob, SSchTask *pTask, void *handle, int3
|
|||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
if ((execId != pTask->execId) || pTask->waitRetry) { // ignore it
|
||||
SCH_TASK_DLOG("handle not updated since execId %d is already not current execId %d, waitRetry %d", execId, pTask->execId, pTask->waitRetry);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
SSchNodeInfo *nodeInfo = taosHashGet(pTask->execNodes, &execId, sizeof(execId));
|
||||
if (NULL == nodeInfo) { // ignore it
|
||||
SCH_TASK_DLOG("handle not updated since execId %d already not exist, current execId %d, waitRetry %d", execId, pTask->execId, pTask->waitRetry);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
nodeInfo->handle = handle;
|
||||
|
||||
SCH_TASK_DLOG("handle updated to %p for execId %d", handle, execId);
|
||||
|
@ -335,6 +345,7 @@ int32_t schDoTaskRedirect(SSchJob *pJob, SSchTask *pTask, SDataBuf *pData, int32
|
|||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
pTask->waitRetry = true;
|
||||
schDropTaskOnExecNode(pJob, pTask);
|
||||
taosHashClear(pTask->execNodes);
|
||||
SCH_ERR_JRET(schRemoveTaskFromExecList(pJob, pTask));
|
||||
|
@ -394,6 +405,18 @@ _return:
|
|||
int32_t schHandleRedirect(SSchJob *pJob, SSchTask *pTask, SDataBuf *pData, int32_t rspCode) {
|
||||
int32_t code = 0;
|
||||
|
||||
if (JOB_TASK_STATUS_PART_SUCC == pJob->status) {
|
||||
SCH_LOCK(SCH_WRITE, &pJob->resLock);
|
||||
if (pJob->fetched) {
|
||||
SCH_UNLOCK(SCH_WRITE, &pJob->resLock);
|
||||
SCH_TASK_ELOG("already fetched while got error %s", tstrerror(rspCode));
|
||||
SCH_ERR_RET(rspCode);
|
||||
}
|
||||
SCH_UNLOCK(SCH_WRITE, &pJob->resLock);
|
||||
|
||||
schUpdateJobStatus(pJob, JOB_TASK_STATUS_EXEC);
|
||||
}
|
||||
|
||||
if (SCH_IS_DATA_BIND_TASK(pTask)) {
|
||||
if (NULL == pData->pEpSet) {
|
||||
SCH_TASK_ELOG("no epset updated while got error %s", tstrerror(rspCode));
|
||||
|
@ -591,7 +614,7 @@ int32_t schSetAddrsFromNodeList(SSchJob *pJob, SSchTask *pTask) {
|
|||
if (pJob->nodeList) {
|
||||
nodeNum = taosArrayGetSize(pJob->nodeList);
|
||||
|
||||
for (int32_t i = 0; i < nodeNum && addNum < SCH_MAX_CANDIDATE_EP_NUM; ++i) {
|
||||
for (int32_t i = 0; i < nodeNum; ++i) {
|
||||
SQueryNodeLoad *nload = taosArrayGet(pJob->nodeList, i);
|
||||
SQueryNodeAddr *naddr = &nload->addr;
|
||||
|
||||
|
@ -600,8 +623,8 @@ int32_t schSetAddrsFromNodeList(SSchJob *pJob, SSchTask *pTask) {
|
|||
SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY);
|
||||
}
|
||||
|
||||
SCH_TASK_DLOG("set %dth candidate addr, id %d, fqdn:%s, port:%d", i, naddr->nodeId, SCH_GET_CUR_EP(naddr)->fqdn,
|
||||
SCH_GET_CUR_EP(naddr)->port);
|
||||
SCH_TASK_TLOG("set %dth candidate addr, id %d, inUse:%d/%d, fqdn:%s, port:%d", i, naddr->nodeId, naddr->epSet.inUse, naddr->epSet.numOfEps,
|
||||
SCH_GET_CUR_EP(naddr)->fqdn, SCH_GET_CUR_EP(naddr)->port);
|
||||
|
||||
++addNum;
|
||||
}
|
||||
|
@ -621,9 +644,9 @@ int32_t schSetTaskCandidateAddrs(SSchJob *pJob, SSchTask *pTask) {
|
|||
}
|
||||
|
||||
pTask->candidateIdx = 0;
|
||||
pTask->candidateAddrs = taosArrayInit(SCH_MAX_CANDIDATE_EP_NUM, sizeof(SQueryNodeAddr));
|
||||
pTask->candidateAddrs = taosArrayInit(SCHEDULE_DEFAULT_MAX_NODE_NUM, sizeof(SQueryNodeAddr));
|
||||
if (NULL == pTask->candidateAddrs) {
|
||||
SCH_TASK_ELOG("taosArrayInit %d condidate addrs failed", SCH_MAX_CANDIDATE_EP_NUM);
|
||||
SCH_TASK_ELOG("taosArrayInit %d condidate addrs failed", SCHEDULE_DEFAULT_MAX_NODE_NUM);
|
||||
SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY);
|
||||
}
|
||||
|
||||
|
@ -790,6 +813,7 @@ int32_t schLaunchTaskImpl(SSchJob *pJob, SSchTask *pTask) {
|
|||
atomic_add_fetch_32(&pTask->level->taskLaunchedNum, 1);
|
||||
pTask->execId++;
|
||||
pTask->retryTimes++;
|
||||
pTask->waitRetry = false;
|
||||
|
||||
SCH_TASK_DLOG("start to launch task, execId %d, retry %d", pTask->execId, pTask->retryTimes);
|
||||
|
||||
|
@ -885,9 +909,9 @@ void schDropTaskInHashList(SSchJob *pJob, SHashObj *list) {
|
|||
int32_t schLaunchFetchTask(SSchJob *pJob) {
|
||||
int32_t code = 0;
|
||||
|
||||
void *resData = atomic_load_ptr(&pJob->resData);
|
||||
if (resData) {
|
||||
SCH_JOB_DLOG("res already fetched, res:%p", resData);
|
||||
void *fetchRes = atomic_load_ptr(&pJob->fetchRes);
|
||||
if (fetchRes) {
|
||||
SCH_JOB_DLOG("res already fetched, res:%p", fetchRes);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
|
|
|
@ -223,6 +223,7 @@ void syncNodeVoteForSelf(SSyncNode* pSyncNode);
|
|||
|
||||
// snapshot --------------
|
||||
bool syncNodeHasSnapshot(SSyncNode* pSyncNode);
|
||||
void syncNodeMaybeUpdateCommitBySnapshot(SSyncNode* pSyncNode);
|
||||
|
||||
SyncIndex syncNodeGetLastIndex(SSyncNode* pSyncNode);
|
||||
SyncTerm syncNodeGetLastTerm(SSyncNode* pSyncNode);
|
||||
|
|
|
@ -711,6 +711,9 @@ int32_t syncNodeOnAppendEntriesSnapshot2Cb(SSyncNode* ths, SyncAppendEntriesBatc
|
|||
syncNodeEventLog(ths, logBuf);
|
||||
} while (0);
|
||||
|
||||
// maybe update commit index by snapshot
|
||||
syncNodeMaybeUpdateCommitBySnapshot(ths);
|
||||
|
||||
// prepare response msg
|
||||
SyncAppendEntriesReply* pReply = syncAppendEntriesReplyBuild(ths->vgId);
|
||||
pReply->srcId = ths->myRaftId;
|
||||
|
@ -718,7 +721,7 @@ int32_t syncNodeOnAppendEntriesSnapshot2Cb(SSyncNode* ths, SyncAppendEntriesBatc
|
|||
pReply->term = ths->pRaftStore->currentTerm;
|
||||
pReply->privateTerm = ths->pNewNodeReceiver->privateTerm;
|
||||
pReply->success = false;
|
||||
pReply->matchIndex = SYNC_INDEX_INVALID;
|
||||
pReply->matchIndex = ths->commitIndex;
|
||||
|
||||
// msg event log
|
||||
do {
|
||||
|
|
|
@ -147,6 +147,15 @@ static void syncNodeStartSnapshotOnce(SSyncNode* ths, SyncIndex beginIndex, Sync
|
|||
int32_t syncNodeOnAppendEntriesReplySnapshot2Cb(SSyncNode* ths, SyncAppendEntriesReply* pMsg) {
|
||||
int32_t ret = 0;
|
||||
|
||||
// print log
|
||||
do {
|
||||
char logBuf[256];
|
||||
snprintf(logBuf, sizeof(logBuf), "recv sync-append-entries-reply, term:%lu, match:%ld, success:%d", pMsg->term,
|
||||
pMsg->matchIndex, pMsg->success);
|
||||
syncNodeEventLog(ths, logBuf);
|
||||
|
||||
} while (0);
|
||||
|
||||
// if already drop replica, do not process
|
||||
if (!syncNodeInRaftGroup(ths, &(pMsg->srcId)) && !ths->pRaftCfg->isStandBy) {
|
||||
syncNodeEventLog(ths, "recv sync-append-entries-reply, maybe replica already dropped");
|
||||
|
@ -238,7 +247,14 @@ int32_t syncNodeOnAppendEntriesReplySnapshot2Cb(SSyncNode* ths, SyncAppendEntrie
|
|||
SSnapshot oldSnapshot;
|
||||
ths->pFsm->FpGetSnapshotInfo(ths->pFsm, &oldSnapshot);
|
||||
SyncTerm newSnapshotTerm = oldSnapshot.lastApplyTerm;
|
||||
syncNodeStartSnapshotOnce(ths, SYNC_INDEX_BEGIN, nextIndex, newSnapshotTerm, pMsg);
|
||||
|
||||
SyncIndex endIndex;
|
||||
if (ths->pLogStore->syncLogExist(ths->pLogStore, nextIndex + 1)) {
|
||||
endIndex = nextIndex;
|
||||
} else {
|
||||
endIndex = oldSnapshot.lastApplyIndex;
|
||||
}
|
||||
syncNodeStartSnapshotOnce(ths, pMsg->matchIndex + 1, endIndex, newSnapshotTerm, pMsg);
|
||||
|
||||
// get sender
|
||||
SSyncSnapshotSender* pSender = syncNodeGetSnapshotSender(ths, &(pMsg->srcId));
|
||||
|
@ -256,6 +272,11 @@ int32_t syncNodeOnAppendEntriesReplySnapshot2Cb(SSyncNode* ths, SyncAppendEntrie
|
|||
}
|
||||
syncIndexMgrSetIndex(ths->pNextIndex, &(pMsg->srcId), nextIndex);
|
||||
|
||||
SyncIndex oldMatchIndex = syncIndexMgrGetIndex(ths->pMatchIndex, &(pMsg->srcId));
|
||||
if (pMsg->matchIndex > oldMatchIndex) {
|
||||
syncIndexMgrSetIndex(ths->pMatchIndex, &(pMsg->srcId), pMsg->matchIndex);
|
||||
}
|
||||
|
||||
// event log, update next-index
|
||||
do {
|
||||
char host[64];
|
||||
|
|
|
@ -1083,6 +1083,17 @@ SSyncNode* syncNodeOpen(const SSyncInfo* pOldSyncInfo) {
|
|||
return pSyncNode;
|
||||
}
|
||||
|
||||
void syncNodeMaybeUpdateCommitBySnapshot(SSyncNode* pSyncNode) {
|
||||
if (pSyncNode->pFsm != NULL && pSyncNode->pFsm->FpGetSnapshotInfo != NULL) {
|
||||
SSnapshot snapshot;
|
||||
int32_t code = pSyncNode->pFsm->FpGetSnapshotInfo(pSyncNode->pFsm, &snapshot);
|
||||
ASSERT(code == 0);
|
||||
if (snapshot.lastApplyIndex > pSyncNode->commitIndex) {
|
||||
pSyncNode->commitIndex = snapshot.lastApplyIndex;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void syncNodeStart(SSyncNode* pSyncNode) {
|
||||
// start raft
|
||||
if (pSyncNode->replicaNum == 1) {
|
||||
|
|
|
@ -170,7 +170,7 @@ void rpcSetDefaultAddr(void* thandle, const char* ip, const char* fqdn) {
transSetDefaultAddr(thandle, ip, fqdn);
}

int64_t rpcAllocHandle() { return transAllocHandle(); }
void* rpcAllocHandle() { return (void*)transAllocHandle(); }

int32_t rpcInit() {
transInit();

@ -1,4 +1,5 @@
/*
 * Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
 *
 * This program is free software: you can use, redistribute, and/or modify
 * it under the terms of the GNU Affero General Public License, version 3

@ -809,7 +810,7 @@ SCliConn* cliGetConn(SCliMsg* pMsg, SCliThrd* pThrd, bool* ignore) {
conn = exh->handle;
if (conn == NULL) {
conn = getConnFromPool(pThrd->pool, EPSET_GET_INUSE_IP(&pCtx->epSet), EPSET_GET_INUSE_PORT(&pCtx->epSet));
*ignore = (conn && 0 == specifyConnRef(conn, true, refId)) ? false : true;
if (conn != NULL) specifyConnRef(conn, true, refId);
}
transReleaseExHandle(transGetRefMgt(), refId);
}

@ -849,14 +850,20 @@ void cliHandleReq(SCliMsg* pMsg, SCliThrd* pThrd) {
bool ignore = false;
SCliConn* conn = cliGetConn(pMsg, pThrd, &ignore);
if (ignore == true) {
tError("ignore msg");
return;
}

if (conn != NULL) {
transCtxMerge(&conn->ctx, &pCtx->appCtx);
transQueuePush(&conn->cliMsgs, pMsg);
cliSend(conn);
} else {
conn = cliCreateConn(pThrd);

int64_t refId = (int64_t)pMsg->msg.info.handle;
if (refId != 0) specifyConnRef(conn, true, refId);

transCtxMerge(&conn->ctx, &pCtx->appCtx);
transQueuePush(&conn->cliMsgs, pMsg);

@ -1206,7 +1213,13 @@ SCliThrd* transGetWorkThrd(STrans* trans, int64_t handle, bool* validHandle) {
if (idx < 0) return NULL;
return ((SCliObj*)trans->tcphandle)->pThreadObj[idx];
}
return transGetWorkThrdFromHandle(handle, validHandle);
SCliThrd* pThrd = transGetWorkThrdFromHandle(handle, validHandle);
if (*validHandle == true && pThrd == NULL) {
int idx = cliRBChoseIdx(trans);
if (idx < 0) return NULL;
pThrd = ((SCliObj*)trans->tcphandle)->pThreadObj[idx];
}
return pThrd;
}
void transReleaseCliHandle(void* handle) {
int idx = -1;

@ -336,8 +336,6 @@ TAOS_DEFINE_ERROR(TSDB_CODE_VND_TABLE_NOT_EXIST, "Table does not exists
TAOS_DEFINE_ERROR(TSDB_CODE_VND_INVALID_TABLE_ACTION, "Invalid table action")
TAOS_DEFINE_ERROR(TSDB_CODE_VND_COL_ALREADY_EXISTS, "Table column already exists")
TAOS_DEFINE_ERROR(TSDB_CODE_VND_TABLE_COL_NOT_EXISTS, "Table column not exists")
TAOS_DEFINE_ERROR(TSDB_CODE_VND_READ_END, "Read end")


// tsdb
TAOS_DEFINE_ERROR(TSDB_CODE_TDB_INVALID_TABLE_ID, "Invalid table ID")

@ -79,22 +79,39 @@ class TDSql:
self.queryResult = None
tdLog.info("sql:%s, expect error occured" % (sql))

def query(self, sql, row_tag=None):
def query(self, sql, row_tag=None,queyTimes=10):
self.sql = sql
try:
self.cursor.execute(sql)
self.queryResult = self.cursor.fetchall()
self.queryRows = len(self.queryResult)
self.queryCols = len(self.cursor.description)
except Exception as e:
caller = inspect.getframeinfo(inspect.stack()[1][0])
args = (caller.filename, caller.lineno, sql, repr(e))
tdLog.notice("%s(%d) failed: sql:%s, %s" % args)
traceback.print_exc()
raise Exception(repr(e))
if row_tag:
return self.queryResult
return self.queryRows
i=1
while i <= queyTimes:
try:
self.cursor.execute(sql)
self.queryResult = self.cursor.fetchall()
self.queryRows = len(self.queryResult)
self.queryCols = len(self.cursor.description)
if row_tag:
return self.queryResult
return self.queryRows
except Exception as e:
i+=1
tdLog.notice("Try to query again, query times: %d "%i)
pass
else:
try:
tdLog.notice("Try the last query ")
self.cursor.execute(sql)
self.queryResult = self.cursor.fetchall()
self.queryRows = len(self.queryResult)
self.queryCols = len(self.cursor.description)
if row_tag:
return self.queryResult
return self.queryRows
except Exception as e:
caller = inspect.getframeinfo(inspect.stack()[1][0])
args = (caller.filename, caller.lineno, sql, repr(e))
tdLog.notice("%s(%d) failed: sql:%s, %s" % args)
traceback.print_exc()
raise Exception(repr(e))


def is_err_sql(self, sql):
err_flag = True

@ -266,16 +283,27 @@ class TDSql:
time.sleep(1)
continue

def execute(self, sql):
def execute(self, sql,queyTimes=10):
self.sql = sql
try:
self.affectedRows = self.cursor.execute(sql)
except Exception as e:
caller = inspect.getframeinfo(inspect.stack()[1][0])
args = (caller.filename, caller.lineno, sql, repr(e))
tdLog.notice("%s(%d) failed: sql:%s, %s" % args)
raise Exception(repr(e))
return self.affectedRows
i=1
while i <= queyTimes:
try:
self.affectedRows = self.cursor.execute(sql)
return self.affectedRows
except Exception as e:
i+=1
tdLog.notice("Try to execute sql again, query times: %d "%i)
pass
else:
try:
tdLog.notice("Try the last execute sql ")
self.affectedRows = self.cursor.execute(sql)
return self.affectedRows
except Exception as e:
caller = inspect.getframeinfo(inspect.stack()[1][0])
args = (caller.filename, caller.lineno, sql, repr(e))
tdLog.notice("%s(%d) failed: sql:%s, %s" % args)
raise Exception(repr(e))

def checkAffectedRows(self, expectAffectedRows):
if self.affectedRows != expectAffectedRows:

@ -309,6 +309,7 @@
./test.sh -f tsim/valgrind/checkError1.sim
./test.sh -f tsim/valgrind/checkError2.sim
./test.sh -f tsim/valgrind/checkError3.sim
# jira ./test.sh -f tsim/valgrind/checkError4.sim

# --- vnode
# unsupport ./test.sh -f tsim/vnode/replica3_basic.sim

@ -91,21 +91,21 @@ if $rows != $vgroups then
|
|||
return -1
|
||||
endi
|
||||
|
||||
if $data[0][4] == LEADER then
|
||||
if $data[0][6] == FOLLOWER then
|
||||
if $data[0][8] == FOLLOWER then
|
||||
if $data[0][4] == leader then
|
||||
if $data[0][6] == follower then
|
||||
if $data[0][8] == follower then
|
||||
print ---- vgroup $data[0][0] leader locate on dnode $data[0][3]
|
||||
endi
|
||||
endi
|
||||
elif $data[0][6] == LEADER then
|
||||
if $data[0][4] == FOLLOWER then
|
||||
if $data[0][8] == FOLLOWER then
|
||||
elif $data[0][6] == leader then
|
||||
if $data[0][4] == follower then
|
||||
if $data[0][8] == follower then
|
||||
print ---- vgroup $data[0][0] leader locate on dnode $data[0][5]
|
||||
endi
|
||||
endi
|
||||
elif $data[0][8] == LEADER then
|
||||
if $data[0][4] == FOLLOWER then
|
||||
if $data[0][6] == FOLLOWER then
|
||||
elif $data[0][8] == leader then
|
||||
if $data[0][4] == follower then
|
||||
if $data[0][6] == follower then
|
||||
print ---- vgroup $data[0][0] leader locate on dnode $data[0][7]
|
||||
endi
|
||||
endi
|
||||
|
@ -113,21 +113,21 @@ else
|
|||
goto check_vg_ready
|
||||
endi
|
||||
|
||||
if $data[1][4] == LEADER then
|
||||
if $data[1][6] == FOLLOWER then
|
||||
if $data[1][8] == FOLLOWER then
|
||||
if $data[1][4] == leader then
|
||||
if $data[1][6] == follower then
|
||||
if $data[1][8] == follower then
|
||||
print ---- vgroup $data[1][0] leader locate on dnode $data[1][3]
|
||||
endi
|
||||
endi
|
||||
elif $data[1][6] == LEADER then
|
||||
if $data[1][4] == FOLLOWER then
|
||||
if $data[1][8] == FOLLOWER then
|
||||
elif $data[1][6] == leader then
|
||||
if $data[1][4] == follower then
|
||||
if $data[1][8] == follower then
|
||||
print ---- vgroup $data[1][0] leader locate on dnode $data[1][5]
|
||||
endi
|
||||
endi
|
||||
elif $data[1][8] == LEADER then
|
||||
if $data[1][4] == FOLLOWER then
|
||||
if $data[1][6] == FOLLOWER then
|
||||
elif $data[1][8] == leader then
|
||||
if $data[1][4] == follower then
|
||||
if $data[1][6] == follower then
|
||||
print ---- vgroup $data[1][0] leader locate on dnode $data[1][7]
|
||||
endi
|
||||
endi
|
||||
|
@ -135,21 +135,21 @@ else
|
|||
goto check_vg_ready
|
||||
endi
|
||||
|
||||
if $data[2][4] == LEADER then
|
||||
if $data[2][6] == FOLLOWER then
|
||||
if $data[2][8] == FOLLOWER then
|
||||
if $data[2][4] == leader then
|
||||
if $data[2][6] == follower then
|
||||
if $data[2][8] == follower then
|
||||
print ---- vgroup $data[2][0] leader locate on dnode $data[2][3]
|
||||
endi
|
||||
endi
|
||||
elif $data[2][6] == LEADER then
|
||||
if $data[2][4] == FOLLOWER then
|
||||
if $data[2][8] == FOLLOWER then
|
||||
elif $data[2][6] == leader then
|
||||
if $data[2][4] == follower then
|
||||
if $data[2][8] == follower then
|
||||
print ---- vgroup $data[2][0] leader locate on dnode $data[2][5]
|
||||
endi
|
||||
endi
|
||||
elif $data[2][8] == LEADER then
|
||||
if $data[2][4] == FOLLOWER then
|
||||
if $data[2][6] == FOLLOWER then
|
||||
elif $data[2][8] == leader then
|
||||
if $data[2][4] == follower then
|
||||
if $data[2][6] == follower then
|
||||
print ---- vgroup $data[2][0] leader locate on dnode $data[2][7]
|
||||
endi
|
||||
endi
|
||||
|
@ -157,21 +157,21 @@ else
|
|||
goto check_vg_ready
|
||||
endi
|
||||
|
||||
if $data[3][4] == LEADER then
|
||||
if $data[3][6] == FOLLOWER then
|
||||
if $data[3][8] == FOLLOWER then
|
||||
if $data[3][4] == leader then
|
||||
if $data[3][6] == follower then
|
||||
if $data[3][8] == follower then
|
||||
print ---- vgroup $data[3][0] leader locate on dnode $data[3][3]
|
||||
endi
|
||||
endi
|
||||
elif $data[3][6] == LEADER then
|
||||
if $data[3][4] == FOLLOWER then
|
||||
if $data[3][8] == FOLLOWER then
|
||||
elif $data[3][6] == leader then
|
||||
if $data[3][4] == follower then
|
||||
if $data[3][8] == follower then
|
||||
print ---- vgroup $data[3][0] leader locate on dnode $data[3][5]
|
||||
endi
|
||||
endi
|
||||
elif $data[3][8] == LEADER then
|
||||
if $data[3][4] == FOLLOWER then
|
||||
if $data[3][6] == FOLLOWER then
|
||||
elif $data[3][8] == leader then
|
||||
if $data[3][4] == follower then
|
||||
if $data[3][6] == follower then
|
||||
print ---- vgroup $data[3][0] leader locate on dnode $data[3][7]
|
||||
endi
|
||||
endi
|
||||
|
@ -179,21 +179,21 @@ else
|
|||
goto check_vg_ready
|
||||
endi
|
||||
|
||||
if $data[4][4] == LEADER then
|
||||
if $data[4][6] == FOLLOWER then
|
||||
if $data[4][8] == FOLLOWER then
|
||||
if $data[4][4] == leader then
|
||||
if $data[4][6] == follower then
|
||||
if $data[4][8] == follower then
|
||||
print ---- vgroup $data[4][0] leader locate on dnode $data[4][3]
|
||||
endi
|
||||
endi
|
||||
elif $data[4][6] == LEADER then
|
||||
if $data[4][4] == FOLLOWER then
|
||||
if $data[4][8] == FOLLOWER then
|
||||
elif $data[4][6] == leader then
|
||||
if $data[4][4] == follower then
|
||||
if $data[4][8] == follower then
|
||||
print ---- vgroup $data[4][0] leader locate on dnode $data[4][5]
|
||||
endi
|
||||
endi
|
||||
elif $data[4][8] == LEADER then
|
||||
if $data[4][4] == FOLLOWER then
|
||||
if $data[4][6] == FOLLOWER then
|
||||
elif $data[4][8] == leader then
|
||||
if $data[4][4] == follower then
|
||||
if $data[4][6] == follower then
|
||||
print ---- vgroup $data[4][0] leader locate on dnode $data[4][7]
|
||||
endi
|
||||
endi
|
||||
|
@ -286,13 +286,13 @@ if $data[0][0] != 1 then
|
|||
return -1
|
||||
endi
|
||||
|
||||
if $data[0][2] != LEADER then
|
||||
if $data[0][2] != leader then
|
||||
goto check_mnode_ready_2
|
||||
endi
|
||||
if $data[1][2] != FOLLOWER then
|
||||
if $data[1][2] != follower then
|
||||
goto check_mnode_ready_2
|
||||
endi
|
||||
if $data[2][2] != FOLLOWER then
|
||||
if $data[2][2] != follower then
|
||||
goto check_mnode_ready_2
|
||||
endi
|
||||
|
||||
|
@ -318,21 +318,21 @@ if $rows != $vgroups then
|
|||
return -1
|
||||
endi
|
||||
|
||||
if $data[0][4] == LEADER then
|
||||
if $data[0][6] == FOLLOWER then
|
||||
if $data[0][8] == FOLLOWER then
|
||||
if $data[0][4] == leader then
|
||||
if $data[0][6] == follower then
|
||||
if $data[0][8] == follower then
|
||||
print ---- vgroup $data[0][0] leader locate on dnode $data[0][3]
|
||||
endi
|
||||
endi
|
||||
elif $data[0][6] == LEADER then
|
||||
if $data[0][4] == FOLLOWER then
|
||||
if $data[0][8] == FOLLOWER then
|
||||
elif $data[0][6] == leader then
|
||||
if $data[0][4] == follower then
|
||||
if $data[0][8] == follower then
|
||||
print ---- vgroup $data[0][0] leader locate on dnode $data[0][5]
|
||||
endi
|
||||
endi
|
||||
elif $data[0][8] == LEADER then
|
||||
if $data[0][4] == FOLLOWER then
|
||||
if $data[0][6] == FOLLOWER then
|
||||
elif $data[0][8] == leader then
|
||||
if $data[0][4] == follower then
|
||||
if $data[0][6] == follower then
|
||||
print ---- vgroup $data[0][0] leader locate on dnode $data[0][7]
|
||||
endi
|
||||
endi
|
||||
|
@ -340,21 +340,21 @@ else
|
|||
goto check_vg_ready1
|
||||
endi
|
||||
|
||||
if $data[1][4] == LEADER then
|
||||
if $data[1][6] == FOLLOWER then
|
||||
if $data[1][8] == FOLLOWER then
|
||||
if $data[1][4] == leader then
|
||||
if $data[1][6] == follower then
|
||||
if $data[1][8] == follower then
|
||||
print ---- vgroup $data[1][0] leader locate on dnode $data[1][3]
|
||||
endi
|
||||
endi
|
||||
elif $data[1][6] == LEADER then
|
||||
if $data[1][4] == FOLLOWER then
|
||||
if $data[1][8] == FOLLOWER then
|
||||
elif $data[1][6] == leader then
|
||||
if $data[1][4] == follower then
|
||||
if $data[1][8] == follower then
|
||||
print ---- vgroup $data[1][0] leader locate on dnode $data[1][5]
|
||||
endi
|
||||
endi
|
||||
elif $data[1][8] == LEADER then
|
||||
if $data[1][4] == FOLLOWER then
|
||||
if $data[1][6] == FOLLOWER then
|
||||
elif $data[1][8] == leader then
|
||||
if $data[1][4] == follower then
|
||||
if $data[1][6] == follower then
|
||||
print ---- vgroup $data[1][0] leader locate on dnode $data[1][7]
|
||||
endi
|
||||
endi
|
||||
|
@ -362,21 +362,21 @@ else
|
|||
goto check_vg_ready1
|
||||
endi
|
||||
|
||||
if $data[2][4] == LEADER then
|
||||
if $data[2][6] == FOLLOWER then
|
||||
if $data[2][8] == FOLLOWER then
|
||||
if $data[2][4] == leader then
|
||||
if $data[2][6] == follower then
|
||||
if $data[2][8] == follower then
|
||||
print ---- vgroup $data[2][0] leader locate on dnode $data[2][3]
|
||||
endi
|
||||
endi
|
||||
elif $data[2][6] == LEADER then
|
||||
if $data[2][4] == FOLLOWER then
|
||||
if $data[2][8] == FOLLOWER then
|
||||
elif $data[2][6] == leader then
|
||||
if $data[2][4] == follower then
|
||||
if $data[2][8] == follower then
|
||||
print ---- vgroup $data[2][0] leader locate on dnode $data[2][5]
|
||||
endi
|
||||
endi
|
||||
elif $data[2][8] == LEADER then
|
||||
if $data[2][4] == FOLLOWER then
|
||||
if $data[2][6] == FOLLOWER then
|
||||
elif $data[2][8] == leader then
|
||||
if $data[2][4] == follower then
|
||||
if $data[2][6] == follower then
|
||||
print ---- vgroup $data[2][0] leader locate on dnode $data[2][7]
|
||||
endi
|
||||
endi
|
||||
|
@ -384,21 +384,21 @@ else
|
|||
goto check_vg_ready1
|
||||
endi
|
||||
|
||||
if $data[3][4] == LEADER then
|
||||
if $data[3][6] == FOLLOWER then
|
||||
if $data[3][8] == FOLLOWER then
|
||||
if $data[3][4] == leader then
|
||||
if $data[3][6] == follower then
|
||||
if $data[3][8] == follower then
|
||||
print ---- vgroup $data[3][0] leader locate on dnode $data[3][3]
|
||||
endi
|
||||
endi
|
||||
elif $data[3][6] == LEADER then
|
||||
if $data[3][4] == FOLLOWER then
|
||||
if $data[3][8] == FOLLOWER then
|
||||
elif $data[3][6] == leader then
|
||||
if $data[3][4] == follower then
|
||||
if $data[3][8] == follower then
|
||||
print ---- vgroup $data[3][0] leader locate on dnode $data[3][5]
|
||||
endi
|
||||
endi
|
||||
elif $data[3][8] == LEADER then
|
||||
if $data[3][4] == FOLLOWER then
|
||||
if $data[3][6] == FOLLOWER then
|
||||
elif $data[3][8] == leader then
|
||||
if $data[3][4] == follower then
|
||||
if $data[3][6] == follower then
|
||||
print ---- vgroup $data[3][0] leader locate on dnode $data[3][7]
|
||||
endi
|
||||
endi
|
||||
|
@ -406,21 +406,21 @@ else
|
|||
goto check_vg_ready1
|
||||
endi
|
||||
|
||||
if $data[4][4] == LEADER then
|
||||
if $data[4][6] == FOLLOWER then
|
||||
if $data[4][8] == FOLLOWER then
|
||||
if $data[4][4] == leader then
|
||||
if $data[4][6] == follower then
|
||||
if $data[4][8] == follower then
|
||||
print ---- vgroup $data[4][0] leader locate on dnode $data[4][3]
|
||||
endi
|
||||
endi
|
||||
elif $data[4][6] == LEADER then
|
||||
if $data[4][4] == FOLLOWER then
|
||||
if $data[4][8] == FOLLOWER then
|
||||
elif $data[4][6] == leader then
|
||||
if $data[4][4] == follower then
|
||||
if $data[4][8] == follower then
|
||||
print ---- vgroup $data[4][0] leader locate on dnode $data[4][5]
|
||||
endi
|
||||
endi
|
||||
elif $data[4][8] == LEADER then
|
||||
if $data[4][4] == FOLLOWER then
|
||||
if $data[4][6] == FOLLOWER then
|
||||
elif $data[4][8] == leader then
|
||||
if $data[4][4] == follower then
|
||||
if $data[4][6] == follower then
|
||||
print ---- vgroup $data[4][0] leader locate on dnode $data[4][7]
|
||||
endi
|
||||
endi
|
||||
|
@@ -539,27 +539,27 @@ if $data[0][0] != 1 then
|
|||
return -1
|
||||
endi
|
||||
|
||||
if $data[0][2] == LEADER then
|
||||
if $data[1][2] != FOLLOWER then
|
||||
if $data[0][2] == leader then
|
||||
if $data[1][2] != follower then
|
||||
goto check_mnode_ready_3
|
||||
endi
|
||||
if $data[2][2] != FOLLOWER then
|
||||
if $data[2][2] != follower then
|
||||
goto check_mnode_ready_3
|
||||
endi
|
||||
endi
|
||||
if $data[1][2] == LEADER then
|
||||
if $data[0][2] != FOLLOWER then
|
||||
if $data[1][2] == leader then
|
||||
if $data[0][2] != follower then
|
||||
goto check_mnode_ready_3
|
||||
endi
|
||||
if $data[2][2] != FOLLOWER then
|
||||
if $data[2][2] != follower then
|
||||
goto check_mnode_ready_3
|
||||
endi
|
||||
endi
|
||||
if $data[2][2] == LEADER then
|
||||
if $data[1][2] != FOLLOWER then
|
||||
if $data[2][2] == leader then
|
||||
if $data[1][2] != follower then
|
||||
goto check_mnode_ready_3
|
||||
endi
|
||||
if $data[0][2] != FOLLOWER then
|
||||
if $data[0][2] != follower then
|
||||
goto check_mnode_ready_3
|
||||
endi
|
||||
endi
|
||||
|
|
|
@@ -170,84 +170,3 @@ if $rows != 100 then
|
|||
return -1
|
||||
endi
|
||||
|
||||
system sh/exec.sh -n dnode1 -s stop -x SIGINT
|
||||
system sh/exec.sh -n dnode2 -s stop -x SIGINT
|
||||
system sh/exec.sh -n dnode3 -s stop -x SIGINT
|
||||
system sh/exec.sh -n dnode4 -s stop -x SIGINT
|
||||
########################################################
|
||||
|
||||
|
||||
########################################################
|
||||
print ===> start dnode1 dnode3 dnode4
|
||||
system sh/exec.sh -n dnode1 -s start
|
||||
#system sh/exec.sh -n dnode2 -s start
|
||||
system sh/exec.sh -n dnode3 -s start
|
||||
system sh/exec.sh -n dnode4 -s start
|
||||
|
||||
sleep 7000
|
||||
|
||||
print =============== query data
|
||||
sql connect
|
||||
sql use db
|
||||
sql select * from ct1
|
||||
print rows: $rows
|
||||
print $data00 $data01 $data02
|
||||
if $rows != 100 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
system sh/exec.sh -n dnode1 -s stop -x SIGINT
|
||||
#system sh/exec.sh -n dnode2 -s stop -x SIGINT
|
||||
system sh/exec.sh -n dnode3 -s stop -x SIGINT
|
||||
system sh/exec.sh -n dnode4 -s stop -x SIGINT
|
||||
########################################################
|
||||
|
||||
|
||||
########################################################
|
||||
print ===> start dnode1 dnode2 dnode4
|
||||
system sh/exec.sh -n dnode1 -s start
|
||||
system sh/exec.sh -n dnode2 -s start
|
||||
#system sh/exec.sh -n dnode3 -s start
|
||||
system sh/exec.sh -n dnode4 -s start
|
||||
|
||||
sleep 3000
|
||||
|
||||
print =============== query data
|
||||
sql select * from ct1
|
||||
print rows: $rows
|
||||
print $data00 $data01 $data02
|
||||
if $rows != 100 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
system sh/exec.sh -n dnode1 -s stop -x SIGINT
|
||||
system sh/exec.sh -n dnode2 -s stop -x SIGINT
|
||||
#system sh/exec.sh -n dnode3 -s stop -x SIGINT
|
||||
system sh/exec.sh -n dnode4 -s stop -x SIGINT
|
||||
########################################################
|
||||
|
||||
|
||||
########################################################
|
||||
print ===> start dnode1 dnode2 dnode3
|
||||
system sh/exec.sh -n dnode1 -s start
|
||||
system sh/exec.sh -n dnode2 -s start
|
||||
system sh/exec.sh -n dnode3 -s start
|
||||
#system sh/exec.sh -n dnode4 -s start
|
||||
|
||||
sleep 3000
|
||||
|
||||
print =============== query data
|
||||
sql select * from ct1
|
||||
print rows: $rows
|
||||
print $data00 $data01 $data02
|
||||
if $rows != 100 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
system sh/exec.sh -n dnode1 -s stop -x SIGINT
|
||||
system sh/exec.sh -n dnode2 -s stop -x SIGINT
|
||||
system sh/exec.sh -n dnode3 -s stop -x SIGINT
|
||||
#system sh/exec.sh -n dnode4 -s stop -x SIGINT
|
||||
########################################################
|
||||
|
||||
|
||||
|
|
|
@@ -1,28 +1,13 @@
|
|||
system sh/stop_dnodes.sh
|
||||
system sh/deploy.sh -n dnode1 -i 1
|
||||
system sh/deploy.sh -n dnode2 -i 2
|
||||
system sh/cfg.sh -n dnode1 -c debugflag -v 131
|
||||
system sh/cfg.sh -n dnode2 -c debugflag -v 131
|
||||
system sh/exec.sh -n dnode1 -s start -v
|
||||
system sh/exec.sh -n dnode2 -s start -v
|
||||
sql connect
|
||||
|
||||
print =============== step1: show dnodes
|
||||
$x = 0
|
||||
step1:
|
||||
$x = $x + 1
|
||||
sleep 1000
|
||||
if $x == 10 then
|
||||
print ---> dnode not ready!
|
||||
return -1
|
||||
endi
|
||||
sql show dnodes
|
||||
print ---> $data00 $data01 $data02 $data03 $data04 $data05
|
||||
if $rows != 1 then
|
||||
return -1
|
||||
endi
|
||||
if $data(1)[4] != ready then
|
||||
goto step1
|
||||
endi
|
||||
|
||||
print =============== step2: create alter drop show user
|
||||
print =============== step1: create alter drop show user
|
||||
sql create user u1 pass 'taosdata'
|
||||
sql show users
|
||||
sql alter user u1 sysinfo 1
|
||||
|
@@ -31,11 +16,34 @@ sql alter user u1 pass 'taosdata'
|
|||
sql drop user u1
|
||||
sql_error alter user u2 sysinfo 0
|
||||
|
||||
print =============== step3: create drop dnode
|
||||
print =============== step2: create drop dnode
|
||||
sql create dnode $hostname port 7200
|
||||
sql drop dnode 2
|
||||
sql create dnode $hostname port 7300
|
||||
sql drop dnode 3
|
||||
sql alter dnode 1 'debugflag 131'
|
||||
|
||||
print =============== step3: show dnodes
|
||||
$x = 0
|
||||
step3:
|
||||
$x = $x + 1
|
||||
sleep 1000
|
||||
if $x == 60 then
|
||||
print ====> dnode not ready!
|
||||
return -1
|
||||
endi
|
||||
sql show dnodes
|
||||
print ===> $data00 $data01 $data02 $data03 $data04 $data05
|
||||
print ===> $data10 $data11 $data12 $data13 $data14 $data15
|
||||
if $rows != 2 then
|
||||
return -1
|
||||
endi
|
||||
if $data(1)[4] != ready then
|
||||
goto step3
|
||||
endi
|
||||
if $data(2)[4] != ready then
|
||||
goto step3
|
||||
endi
|
||||
|
||||
print =============== create database, stable, table
|
||||
sql create database db vgroups 3
|
||||
sql use db
|
||||
|
@@ -45,7 +53,7 @@ sql create table tba (ts timestamp, c1 binary(10), c2 nchar(10));
|
|||
|
||||
print =============== run show xxxx
|
||||
sql show dnodes
|
||||
if $rows != 1 then
|
||||
if $rows != 2 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
|
@@ -76,7 +84,7 @@ endi
|
|||
|
||||
print =============== run select * from information_schema.xxxx
|
||||
sql select * from information_schema.`dnodes`
|
||||
if $rows != 1 then
|
||||
if $rows != 2 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
|
@@ -127,16 +135,27 @@ endi
|
|||
|
||||
print =============== stop
system sh/exec.sh -n dnode1 -s stop -x SIGINT
system sh/exec.sh -n dnode2 -s stop -x SIGINT

print =============== check
$null=

system_content sh/checkValgrind.sh -n dnode1
print cmd return result ----> [ $system_content ]
if $system_content > 1 then
if $system_content > 0 then
return -1
endi

if $system_content == $null then
return -1
endi

system_content sh/checkValgrind.sh -n dnode2
print cmd return result ----> [ $system_content ]
if $system_content > 2 then
return -1
endi

if $system_content == $null then
return -1
endi
@@ -98,7 +98,7 @@ $null=
|
|||
|
||||
system_content sh/checkValgrind.sh -n dnode1
|
||||
print cmd return result ----> [ $system_content ]
|
||||
if $system_content > 1 then
|
||||
if $system_content > 0 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
|
|
|
@@ -90,7 +90,7 @@ $null=
|
|||
|
||||
system_content sh/checkValgrind.sh -n dnode1
|
||||
print cmd return result ----> [ $system_content ]
|
||||
if $system_content > 1 then
|
||||
if $system_content > 0 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
|
|
|
@@ -0,0 +1,95 @@
|
|||
system sh/stop_dnodes.sh
|
||||
system sh/deploy.sh -n dnode1 -i 1
|
||||
system sh/cfg.sh -n dnode1 -c debugflag -v 131
|
||||
system sh/exec.sh -n dnode1 -s start -v
|
||||
sql connect
|
||||
|
||||
print =============== step1: create drop show dnodes
|
||||
$x = 0
|
||||
step1:
|
||||
$x = $x + 1
|
||||
sleep 1000
|
||||
if $x == 10 then
|
||||
print ---> dnode not ready!
|
||||
return -1
|
||||
endi
|
||||
sql show dnodes
|
||||
print ---> $data00 $data01 $data02 $data03 $data04 $data05
|
||||
if $rows != 1 then
|
||||
return -1
|
||||
endi
|
||||
if $data(1)[4] != ready then
|
||||
goto step1
|
||||
endi
|
||||
|
||||
print =============== step2: create db
|
||||
sql create database d1 vgroups 2 buffer 3
|
||||
sql create database d2 vgroups 2 buffer 3
|
||||
sql show databases;
|
||||
sql show d1.vgroups;
|
||||
|
||||
print =============== step3: create show stable
|
||||
sql create table if not exists d1.stb1 (ts timestamp, c1 int, c2 float, c3 double) tags (t1 int unsigned)
|
||||
sql create table if not exists d1.stb2 (ts timestamp, c1 int, c2 float, c3 double) tags (t1 int unsigned)
|
||||
sql create table if not exists d2.stb1 (ts timestamp, c1 int, c2 float, c3 double) tags (t1 int unsigned)
|
||||
sql show d1.stables
|
||||
sql show d2.stables
|
||||
|
||||
print =============== step4: create show table
|
||||
sql create table d1.ct1 using d1.stb1 tags(1000)
|
||||
sql create table d1.ct2 using d1.stb1 tags(2000)
|
||||
sql create table d1.ct3 using d1.stb2 tags(3000)
|
||||
sql create table d2.ct1 using d2.stb1 tags(1000)
|
||||
sql create table d2.ct2 using d2.stb1 tags(2000)
|
||||
sql show d1.tables
|
||||
sql show d2.tables
|
||||
|
||||
print =============== step5: insert data
|
||||
sql insert into d1.ct1 values(now+0s, 10, 2.0, 3.0)
|
||||
sql insert into d2.ct1 values(now+1s, 11, 2.1, 3.1)(now+2s, -12, -2.2, -3.2)(now+3s, -13, -2.3, -3.3)
|
||||
sql insert into d1.ct2 values(now+0s, 10, 2.0, 3.0)
|
||||
sql insert into d2.ct2 values(now+1s, 11, 2.1, 3.1)(now+2s, -12, -2.2, -3.2)(now+3s, -13, -2.3, -3.3)
|
||||
sql insert into d1.ct3 values('2021-01-01 00:00:00.000', 10, 2.0, 3.0)
|
||||
|
||||
print =============== step6: drop table and database
|
||||
sql drop table d1.ct1
|
||||
sql drop table d1.stb2
|
||||
sql drop database d1
|
||||
sql drop database d2
|
||||
|
||||
goto _OVER
|
||||
print =============== step7: repeat
|
||||
sql create database d1 vgroups 2 buffer 3
|
||||
sql create database d2 vgroups 2 buffer 3
|
||||
sql create table if not exists d1.stb1 (ts timestamp, c1 int, c2 float, c3 double) tags (t1 int unsigned)
|
||||
sql create table if not exists d1.stb2 (ts timestamp, c1 int, c2 float, c3 double) tags (t1 int unsigned)
|
||||
sql create table if not exists d2.stb1 (ts timestamp, c1 int, c2 float, c3 double) tags (t1 int unsigned)
|
||||
sql create table d1.ct1 using d1.stb1 tags(1000)
|
||||
sql create table d1.ct2 using d1.stb1 tags(2000)
|
||||
sql create table d1.ct3 using d1.stb2 tags(3000)
|
||||
sql create table d2.ct1 using d2.stb1 tags(1000)
|
||||
sql create table d2.ct2 using d2.stb1 tags(2000)
|
||||
sql insert into d1.ct1 values(now+0s, 10, 2.0, 3.0)
|
||||
sql insert into d2.ct1 values(now+1s, 11, 2.1, 3.1)(now+2s, -12, -2.2, -3.2)(now+3s, -13, -2.3, -3.3)
|
||||
sql insert into d1.ct2 values(now+0s, 10, 2.0, 3.0)
|
||||
sql insert into d2.ct2 values(now+1s, 11, 2.1, 3.1)(now+2s, -12, -2.2, -3.2)(now+3s, -13, -2.3, -3.3)
|
||||
sql insert into d1.ct3 values('2021-01-01 00:00:00.000', 10, 2.0, 3.0)
|
||||
sql drop table d1.ct1
|
||||
sql drop table d1.stb2
|
||||
sql drop database d1
|
||||
sql drop database d2
|
||||
|
||||
_OVER:
|
||||
system sh/exec.sh -n dnode1 -s stop -x SIGINT
|
||||
print =============== check
|
||||
$null=
|
||||
|
||||
system_content sh/checkValgrind.sh -n dnode1
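# checkValgrind.sh is assumed to print the number of errors valgrind reported for this dnode; any count above 0 fails the case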
|
||||
print cmd return result ----> [ $system_content ]
|
||||
if $system_content > 0 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
if $system_content == $null then
|
||||
return -1
|
||||
endi
|
|
@@ -0,0 +1,305 @@
|
|||
from distutils.log import error
|
||||
import taos
|
||||
import sys
|
||||
import time
|
||||
import os
|
||||
import platform
|
||||
|
||||
from util.log import *
|
||||
from util.sql import *
|
||||
from util.cases import *
|
||||
from util.dnodes import *
|
||||
import subprocess
|
||||
|
||||
class TDTestCase:
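# all module debug flags are raised to 143 for verbose logs; "udf":0 keeps the UDF feature switched off in this case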
|
||||
updatecfgDict = {'debugFlag': 143, "cDebugFlag": 143, "uDebugFlag": 143, "rpcDebugFlag": 143, "tmrDebugFlag": 143,
|
||||
"jniDebugFlag": 143, "simDebugFlag": 143, "dDebugFlag": 143, "dDebugFlag": 143, "vDebugFlag": 143, "mDebugFlag": 143, "qDebugFlag": 143,
|
||||
"wDebugFlag": 143, "sDebugFlag": 143, "tsdbDebugFlag": 143, "tqDebugFlag": 143, "fsDebugFlag": 143, "fnDebugFlag": 143 ,"udf":0}
|
||||
def init(self, conn, logSql):
|
||||
tdLog.debug(f"start to excute {__file__}")
|
||||
tdSql.init(conn.cursor(), logSql)
|
||||
|
||||
def getBuildPath(self):
|
||||
selfPath = os.path.dirname(os.path.realpath(__file__))
|
||||
|
||||
if ("community" in selfPath):
|
||||
projPath = selfPath[:selfPath.find("community")]
|
||||
else:
|
||||
projPath = selfPath[:selfPath.find("tests")]
|
||||
|
||||
for root, dirs, files in os.walk(projPath):
|
||||
if ("taosd" in files or "taosd.exe" in files):
|
||||
rootRealPath = os.path.dirname(os.path.realpath(root))
|
||||
if ("packaging" not in rootRealPath):
|
||||
buildPath = root[:len(root) - len("/build/bin")]
|
||||
break
|
||||
return buildPath
|
||||
|
||||
def prepare_udf_so(self):
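# locate the compiled libudf1/libudf2 shared objects under the project build tree (Windows dll paths are mapped to the .so names)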
|
||||
selfPath = os.path.dirname(os.path.realpath(__file__))
|
||||
|
||||
if ("community" in selfPath):
|
||||
projPath = selfPath[:selfPath.find("community")]
|
||||
else:
|
||||
projPath = selfPath[:selfPath.find("tests")]
|
||||
print(projPath)
|
||||
|
||||
if platform.system().lower() == 'windows':
|
||||
self.libudf1 = subprocess.Popen('(for /r %s %%i in ("udf1.d*") do @echo %%i)|grep lib|head -n1'%projPath , shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8")
|
||||
self.libudf2 = subprocess.Popen('(for /r %s %%i in ("udf2.d*") do @echo %%i)|grep lib|head -n1'%projPath , shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8")
|
||||
if (not tdDnodes.dnodes[0].remoteIP == ""):
|
||||
tdDnodes.dnodes[0].remote_conn.get(tdDnodes.dnodes[0].config["path"]+'/debug/build/lib/libudf1.so',projPath+"\\debug\\build\\lib\\")
|
||||
tdDnodes.dnodes[0].remote_conn.get(tdDnodes.dnodes[0].config["path"]+'/debug/build/lib/libudf2.so',projPath+"\\debug\\build\\lib\\")
|
||||
self.libudf1 = self.libudf1.replace('udf1.dll','libudf1.so')
|
||||
self.libudf2 = self.libudf2.replace('udf2.dll','libudf2.so')
|
||||
else:
|
||||
self.libudf1 = subprocess.Popen('find %s -name "libudf1.so"|grep lib|head -n1'%projPath , shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8")
|
||||
self.libudf2 = subprocess.Popen('find %s -name "libudf2.so"|grep lib|head -n1'%projPath , shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8")
|
||||
self.libudf1 = self.libudf1.replace('\r','').replace('\n','')
|
||||
self.libudf2 = self.libudf2.replace('\r','').replace('\n','')
|
||||
|
||||
|
||||
def prepare_data(self):
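# build database db with super table stb1 (child tables ct1..ct4), regular tables t1 and tb, and sub1/sub2 used for join queries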
|
||||
|
||||
tdSql.execute("drop database if exists db ")
|
||||
tdSql.execute("create database if not exists db duration 300")
|
||||
tdSql.execute("use db")
|
||||
tdSql.execute(
|
||||
'''create table stb1
|
||||
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
|
||||
tags (t1 int)
|
||||
'''
|
||||
)
|
||||
|
||||
tdSql.execute(
|
||||
'''
|
||||
create table t1
|
||||
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
|
||||
'''
|
||||
)
|
||||
for i in range(4):
|
||||
tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
|
||||
|
||||
for i in range(9):
|
||||
tdSql.execute(
|
||||
f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
|
||||
)
|
||||
tdSql.execute(
|
||||
f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
|
||||
)
|
||||
tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
|
||||
tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
|
||||
tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
|
||||
tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
|
||||
|
||||
tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
|
||||
tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
|
||||
tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
|
||||
|
||||
tdSql.execute(
|
||||
f'''insert into t1 values
|
||||
( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
|
||||
( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a )
|
||||
( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a )
|
||||
( '2021-01-01 01:01:06.000', 3, 33333, 333, 33, 3.33, 33.33, 0, "binary3", "nchar3", now()+3a )
|
||||
( '2021-05-07 01:01:10.000', 4, 44444, 444, 44, 4.44, 44.44, 1, "binary4", "nchar4", now()+4a )
|
||||
( '2021-07-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
|
||||
( '2021-09-30 01:01:16.000', 5, 55555, 555, 55, 5.55, 55.55, 0, "binary5", "nchar5", now()+5a )
|
||||
( '2022-02-01 01:01:20.000', 6, 66666, 666, 66, 6.66, 66.66, 1, "binary6", "nchar6", now()+6a )
|
||||
( '2022-10-28 01:01:26.000', 7, 00000, 000, 00, 0.00, 00.00, 1, "binary7", "nchar7", "1970-01-01 08:00:00.000" )
|
||||
( '2022-12-01 01:01:30.000', 8, -88888, -888, -88, -8.88, -88.88, 0, "binary8", "nchar8", "1969-01-01 01:00:00.000" )
|
||||
( '2022-12-31 01:01:36.000', 9, -99999999999999999, -999, -99, -9.99, -999999999999999999999.99, 1, "binary9", "nchar9", "1900-01-01 00:00:00.000" )
|
||||
( '2023-02-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
|
||||
'''
|
||||
)
|
||||
|
||||
tdSql.execute("create table tb (ts timestamp , num1 int , num2 int, num3 double , num4 binary(30))")
|
||||
tdSql.execute(
|
||||
f'''insert into tb values
|
||||
( '2020-04-21 01:01:01.000', NULL, 1, 1, "binary1" )
|
||||
( '2020-10-21 01:01:01.000', 1, 1, 1.11, "binary1" )
|
||||
( '2020-12-31 01:01:01.000', 2, 22222, 22, "binary1" )
|
||||
( '2021-01-01 01:01:06.000', 3, 33333, 33, "binary1" )
|
||||
( '2021-05-07 01:01:10.000', 4, 44444, 44, "binary1" )
|
||||
( '2021-07-21 01:01:01.000', NULL, NULL, NULL, "binary1" )
|
||||
( '2021-09-30 01:01:16.000', 5, 55555, 55, "binary1" )
|
||||
( '2022-02-01 01:01:20.000', 6, 66666, 66, "binary1" )
|
||||
( '2022-10-28 01:01:26.000', 0, 00000, 00, "binary1" )
|
||||
( '2022-12-01 01:01:30.000', 8, -88888, -88, "binary1" )
|
||||
( '2022-12-31 01:01:36.000', 9, -9999999, -99, "binary1" )
|
||||
( '2023-02-21 01:01:01.000', NULL, NULL, NULL, "binary1" )
|
||||
'''
|
||||
)
|
||||
|
||||
# udf functions with join
|
||||
ts_start = 1652517451000
|
||||
tdSql.execute("create stable st (ts timestamp , c1 int , c2 int ,c3 double ,c4 double ) tags(ind int)")
|
||||
tdSql.execute("create table sub1 using st tags(1)")
|
||||
tdSql.execute("create table sub2 using st tags(2)")
|
||||
|
||||
for i in range(10):
|
||||
ts = ts_start + i *1000
|
||||
tdSql.execute(" insert into sub1 values({} , {},{},{},{})".format(ts,i ,i*10,i*100.0,i*1000.0))
|
||||
tdSql.execute(" insert into sub2 values({} , {},{},{},{})".format(ts,i ,i*10,i*100.0,i*1000.0))
|
||||
|
||||
|
||||
def create_udf_function(self):
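# register udf1 (scalar) and udf2 (aggregate) from the shared objects, drop them, then register them again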
|
||||
|
||||
for i in range(5):
|
||||
# create scalar functions
|
||||
tdSql.execute("create function udf1 as '%s' outputtype int bufSize 8;"%self.libudf1)
|
||||
|
||||
# create aggregate functions
|
||||
|
||||
tdSql.execute("create aggregate function udf2 as '%s' outputtype double bufSize 8;"%self.libudf2)
|
||||
|
||||
functions = tdSql.getResult("show functions")
|
||||
function_nums = len(functions)
|
||||
if function_nums == 2:
|
||||
tdLog.info("create two udf functions success ")
|
||||
|
||||
# drop functions
|
||||
|
||||
tdSql.execute("drop function udf1")
|
||||
tdSql.execute("drop function udf2")
|
||||
|
||||
functions = tdSql.getResult("show functions")
|
||||
for function in functions:
|
||||
if "udf1" in function[0] or "udf2" in function[0]:
|
||||
tdLog.info("drop udf functions failed ")
|
||||
tdLog.exit("drop udf functions failed")
|
||||
|
||||
tdLog.info("drop two udf functions success ")
|
||||
|
||||
# create scalar functions
|
||||
tdSql.execute("create function udf1 as '%s' outputtype int bufSize 8;"%self.libudf1)
|
||||
|
||||
# create aggregate functions
|
||||
|
||||
tdSql.execute("create aggregate function udf2 as '%s' outputtype double bufSize 8;"%self.libudf2)
|
||||
|
||||
functions = tdSql.getResult("show functions")
|
||||
function_nums = len(functions)
|
||||
if function_nums == 2:
|
||||
tdLog.info("create two udf functions success ")
|
||||
|
||||
def basic_udf_query(self):
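# UDF support is disabled ("udf":0), so every UDF query below is expected to fail and is checked with tdSql.error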
|
||||
|
||||
# scalar functions
|
||||
|
||||
tdSql.execute("use db ")
|
||||
tdSql.error("select num1 , udf1(num1) ,num2 ,udf1(num2),num3 ,udf1(num3),num4 ,udf1(num4) from tb")
|
||||
tdSql.error("select c1 , udf1(c1) ,c2 ,udf1(c2), c3 ,udf1(c3), c4 ,udf1(c4) from stb1 order by c1")
|
||||
|
||||
# aggregate functions
|
||||
tdSql.error("select udf2(num1) ,udf2(num2), udf2(num3) from tb")
|
||||
|
||||
# Arithmetic compute
|
||||
tdSql.error("select udf2(num1)+100 ,udf2(num2)-100, udf2(num3)*100 ,udf2(num3)/100 from tb")
|
||||
|
||||
tdSql.error("select udf2(c1) ,udf2(c6) from stb1 ")
|
||||
|
||||
tdSql.error("select udf2(c1)+100 ,udf2(c6)-100 ,udf2(c1)*100 ,udf2(c6)/100 from stb1 ")
|
||||
|
||||
# # bug for crash when query sub table
|
||||
tdSql.error("select udf2(c1+100) ,udf2(c6-100) ,udf2(c1*100) ,udf2(c6/100) from ct1")
|
||||
|
||||
tdSql.error("select udf2(c1+100) ,udf2(c6-100) ,udf2(c1*100) ,udf2(c6/100) from stb1 ")
|
||||
|
||||
# regular table with aggregate functions
|
||||
|
||||
tdSql.error("select udf1(num1) , count(num1) from tb;")
|
||||
tdSql.error("select udf1(num1) , avg(num1) from tb;")
|
||||
tdSql.error("select udf1(num1) , twa(num1) from tb;")
|
||||
tdSql.error("select udf1(num1) , irate(num1) from tb;")
|
||||
tdSql.error("select udf1(num1) , sum(num1) from tb;")
|
||||
tdSql.error("select udf1(num1) , stddev(num1) from tb;")
|
||||
tdSql.error("select udf1(num1) , mode(num1) from tb;")
|
||||
tdSql.error("select udf1(num1) , HYPERLOGLOG(num1) from tb;")
|
||||
# stable
|
||||
tdSql.error("select udf1(c1) , count(c1) from stb1;")
|
||||
tdSql.error("select udf1(c1) , avg(c1) from stb1;")
|
||||
tdSql.error("select udf1(c1) , twa(c1) from stb1;")
|
||||
tdSql.error("select udf1(c1) , irate(c1) from stb1;")
|
||||
tdSql.error("select udf1(c1) , sum(c1) from stb1;")
|
||||
tdSql.error("select udf1(c1) , stddev(c1) from stb1;")
|
||||
tdSql.error("select udf1(c1) , mode(c1) from stb1;")
|
||||
tdSql.error("select udf1(c1) , HYPERLOGLOG(c1) from stb1;")
|
||||
|
||||
# regular table with select functions
|
||||
|
||||
tdSql.error("select udf1(num1) , max(num1) from tb;")
|
||||
|
||||
|
||||
tdSql.error("select udf1(num1) , min(num1) from tb;")
|
||||
tdSql.error("select udf1(num1) , first(num1) from tb;")
|
||||
tdSql.error("select udf1(num1) , last(num1) from tb;")
|
||||
tdSql.error("select udf1(num1) , top(num1,1) from tb;")
|
||||
tdSql.error("select udf1(num1) , bottom(num1,1) from tb;")
|
||||
|
||||
|
||||
# stable
|
||||
tdSql.error("select udf1(c1) , max(c1) from stb1;")
|
||||
tdSql.error("select udf1(c1) , min(c1) from stb1;")
|
||||
tdSql.error("select udf1(c1) , first(c1) from stb1;")
|
||||
tdSql.error("select udf1(c1) , last(c1) from stb1;")
|
||||
tdSql.error("select udf1(c1) , top(c1 ,1) from stb1;")
|
||||
tdSql.error("select udf1(c1) , bottom(c1,1) from stb1;")
|
||||
|
||||
|
||||
# regular table with compute functions
|
||||
|
||||
tdSql.error("select udf1(num1) , abs(num1) from tb;")
|
||||
|
||||
# stable with compute functions
|
||||
tdSql.error("select udf1(c1) , abs(c1) from stb1;")
|
||||
|
||||
# nest query
|
||||
tdSql.error("select abs(udf1(c1)) , abs(ceil(c1)) from stb1 order by ts;")
|
||||
tdSql.error("select abs(udf1(c1)) , abs(ceil(c1)) from ct1 order by ts;")
|
||||
|
||||
# bug fix for crash
|
||||
# order by udf function result
|
||||
for _ in range(50):
|
||||
tdSql.error("select udf2(c1) from stb1 group by 1-udf1(c1)")
|
||||
print(tdSql.queryResult)
|
||||
|
||||
# udf functions with filter
|
||||
|
||||
tdSql.error("select c1 ,udf1(c1) , c6 ,udf1(c6) from stb1 where c1 > 8 order by ts")
|
||||
tdSql.error("select udf1(sub1.c1), udf1(sub2.c2) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null")
|
||||
|
||||
tdSql.error("select sub1.c1 , udf1(sub1.c1), sub2.c2 ,udf1(sub2.c2) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null")
|
||||
tdSql.error("select udf2(sub1.c1), udf2(sub2.c2) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null")
|
||||
tdSql.error("select sub1.c1 , udf2(sub1.c1), sub2.c2 ,udf2(sub2.c2) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null")
|
||||
|
||||
# udf functions with group by
|
||||
tdSql.error("select udf1(c1) from ct1 group by c1")
|
||||
tdSql.error("select udf1(c1) from stb1 group by c1")
|
||||
tdSql.error("select c1,c2, udf1(c1,c2) from ct1 group by c1,c2")
|
||||
tdSql.error("select c1,c2, udf1(c1,c2) from stb1 group by c1,c2")
|
||||
tdSql.error("select udf2(c1) from ct1 group by c1")
|
||||
tdSql.error("select udf2(c1) from stb1 group by c1")
|
||||
tdSql.error("select c1,c2, udf2(c1,c6) from ct1 group by c1,c2")
|
||||
tdSql.error("select c1,c2, udf2(c1,c6) from stb1 group by c1,c2")
|
||||
tdSql.error("select udf2(c1) from stb1 group by udf1(c1)")
|
||||
tdSql.error("select udf2(c1) from stb1 group by floor(c1)")
|
||||
|
||||
# udf mix with order by
|
||||
tdSql.error("select udf2(c1) from stb1 group by floor(c1) order by udf2(c1)")
|
||||
|
||||
def run(self): # sourcery skip: extract-duplicate-method, remove-redundant-fstring
|
||||
|
||||
print(" env is ok for all ")
|
||||
self.prepare_udf_so()
|
||||
self.prepare_data()
|
||||
self.create_udf_function()
|
||||
self.basic_udf_query()
|
||||
|
||||
|
||||
def stop(self):
|
||||
tdSql.close()
|
||||
tdLog.success(f"{__file__} successfully executed")
|
||||
|
||||
tdCases.addLinux(__file__, TDTestCase())
|
||||
tdCases.addWindows(__file__, TDTestCase())
|
|
@@ -0,0 +1,667 @@
|
|||
from distutils.log import error
|
||||
import taos
|
||||
import sys
|
||||
import time
|
||||
import os
|
||||
import platform
|
||||
|
||||
from util.log import *
|
||||
from util.sql import *
|
||||
from util.cases import *
|
||||
from util.dnodes import *
|
||||
import subprocess
|
||||
|
||||
class TDTestCase:
|
||||
updatecfgDict = {'debugFlag': 143, "cDebugFlag": 143, "uDebugFlag": 143, "rpcDebugFlag": 143, "tmrDebugFlag": 143,
|
||||
"jniDebugFlag": 143, "simDebugFlag": 143, "dDebugFlag": 143, "dDebugFlag": 143, "vDebugFlag": 143, "mDebugFlag": 143, "qDebugFlag": 143,
|
||||
"wDebugFlag": 143, "sDebugFlag": 143, "tsdbDebugFlag": 143, "tqDebugFlag": 143, "fsDebugFlag": 143, "fnDebugFlag": 143 ,"udf":1}
|
||||
def init(self, conn, logSql):
|
||||
tdLog.debug(f"start to excute {__file__}")
|
||||
tdSql.init(conn.cursor(), logSql)
|
||||
|
||||
def getBuildPath(self):
|
||||
selfPath = os.path.dirname(os.path.realpath(__file__))
|
||||
|
||||
if ("community" in selfPath):
|
||||
projPath = selfPath[:selfPath.find("community")]
|
||||
else:
|
||||
projPath = selfPath[:selfPath.find("tests")]
|
||||
|
||||
for root, dirs, files in os.walk(projPath):
|
||||
if ("taosd" in files or "taosd.exe" in files):
|
||||
rootRealPath = os.path.dirname(os.path.realpath(root))
|
||||
if ("packaging" not in rootRealPath):
|
||||
buildPath = root[:len(root) - len("/build/bin")]
|
||||
break
|
||||
return buildPath
|
||||
|
||||
def prepare_udf_so(self):
|
||||
selfPath = os.path.dirname(os.path.realpath(__file__))
|
||||
|
||||
if ("community" in selfPath):
|
||||
projPath = selfPath[:selfPath.find("community")]
|
||||
else:
|
||||
projPath = selfPath[:selfPath.find("tests")]
|
||||
print(projPath)
|
||||
|
||||
if platform.system().lower() == 'windows':
|
||||
self.libudf1 = subprocess.Popen('(for /r %s %%i in ("udf1.d*") do @echo %%i)|grep lib|head -n1'%projPath , shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8")
|
||||
self.libudf2 = subprocess.Popen('(for /r %s %%i in ("udf2.d*") do @echo %%i)|grep lib|head -n1'%projPath , shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8")
|
||||
if (not tdDnodes.dnodes[0].remoteIP == ""):
|
||||
tdDnodes.dnodes[0].remote_conn.get(tdDnodes.dnodes[0].config["path"]+'/debug/build/lib/libudf1.so',projPath+"\\debug\\build\\lib\\")
|
||||
tdDnodes.dnodes[0].remote_conn.get(tdDnodes.dnodes[0].config["path"]+'/debug/build/lib/libudf2.so',projPath+"\\debug\\build\\lib\\")
|
||||
self.libudf1 = self.libudf1.replace('udf1.dll','libudf1.so')
|
||||
self.libudf2 = self.libudf2.replace('udf2.dll','libudf2.so')
|
||||
else:
|
||||
self.libudf1 = subprocess.Popen('find %s -name "libudf1.so"|grep lib|head -n1'%projPath , shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8")
|
||||
self.libudf2 = subprocess.Popen('find %s -name "libudf2.so"|grep lib|head -n1'%projPath , shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8")
|
||||
self.libudf1 = self.libudf1.replace('\r','').replace('\n','')
|
||||
self.libudf2 = self.libudf2.replace('\r','').replace('\n','')
|
||||
|
||||
|
||||
def prepare_data(self):
|
||||
|
||||
tdSql.execute("drop database if exists db ")
|
||||
tdSql.execute("create database if not exists db duration 300")
|
||||
tdSql.execute("use db")
|
||||
tdSql.execute(
|
||||
'''create table stb1
|
||||
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
|
||||
tags (t1 int)
|
||||
'''
|
||||
)
|
||||
|
||||
tdSql.execute(
|
||||
'''
|
||||
create table t1
|
||||
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
|
||||
'''
|
||||
)
|
||||
for i in range(4):
|
||||
tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
|
||||
|
||||
for i in range(9):
|
||||
tdSql.execute(
|
||||
f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
|
||||
)
|
||||
tdSql.execute(
|
||||
f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
|
||||
)
|
||||
tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
|
||||
tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
|
||||
tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
|
||||
tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
|
||||
|
||||
tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
|
||||
tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
|
||||
tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
|
||||
|
||||
tdSql.execute(
|
||||
f'''insert into t1 values
|
||||
( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
|
||||
( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a )
|
||||
( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a )
|
||||
( '2021-01-01 01:01:06.000', 3, 33333, 333, 33, 3.33, 33.33, 0, "binary3", "nchar3", now()+3a )
|
||||
( '2021-05-07 01:01:10.000', 4, 44444, 444, 44, 4.44, 44.44, 1, "binary4", "nchar4", now()+4a )
|
||||
( '2021-07-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
|
||||
( '2021-09-30 01:01:16.000', 5, 55555, 555, 55, 5.55, 55.55, 0, "binary5", "nchar5", now()+5a )
|
||||
( '2022-02-01 01:01:20.000', 6, 66666, 666, 66, 6.66, 66.66, 1, "binary6", "nchar6", now()+6a )
|
||||
( '2022-10-28 01:01:26.000', 7, 00000, 000, 00, 0.00, 00.00, 1, "binary7", "nchar7", "1970-01-01 08:00:00.000" )
|
||||
( '2022-12-01 01:01:30.000', 8, -88888, -888, -88, -8.88, -88.88, 0, "binary8", "nchar8", "1969-01-01 01:00:00.000" )
|
||||
( '2022-12-31 01:01:36.000', 9, -99999999999999999, -999, -99, -9.99, -999999999999999999999.99, 1, "binary9", "nchar9", "1900-01-01 00:00:00.000" )
|
||||
( '2023-02-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
|
||||
'''
|
||||
)
|
||||
|
||||
tdSql.execute("create table tb (ts timestamp , num1 int , num2 int, num3 double , num4 binary(30))")
|
||||
tdSql.execute(
|
||||
f'''insert into tb values
|
||||
( '2020-04-21 01:01:01.000', NULL, 1, 1, "binary1" )
|
||||
( '2020-10-21 01:01:01.000', 1, 1, 1.11, "binary1" )
|
||||
( '2020-12-31 01:01:01.000', 2, 22222, 22, "binary1" )
|
||||
( '2021-01-01 01:01:06.000', 3, 33333, 33, "binary1" )
|
||||
( '2021-05-07 01:01:10.000', 4, 44444, 44, "binary1" )
|
||||
( '2021-07-21 01:01:01.000', NULL, NULL, NULL, "binary1" )
|
||||
( '2021-09-30 01:01:16.000', 5, 55555, 55, "binary1" )
|
||||
( '2022-02-01 01:01:20.000', 6, 66666, 66, "binary1" )
|
||||
( '2022-10-28 01:01:26.000', 0, 00000, 00, "binary1" )
|
||||
( '2022-12-01 01:01:30.000', 8, -88888, -88, "binary1" )
|
||||
( '2022-12-31 01:01:36.000', 9, -9999999, -99, "binary1" )
|
||||
( '2023-02-21 01:01:01.000', NULL, NULL, NULL, "binary1" )
|
||||
'''
|
||||
)
|
||||
|
||||
# udf functions with join
|
||||
ts_start = 1652517451000
|
||||
tdSql.execute("create stable st (ts timestamp , c1 int , c2 int ,c3 double ,c4 double ) tags(ind int)")
|
||||
tdSql.execute("create table sub1 using st tags(1)")
|
||||
tdSql.execute("create table sub2 using st tags(2)")
|
||||
|
||||
for i in range(10):
|
||||
ts = ts_start + i *1000
|
||||
tdSql.execute(" insert into sub1 values({} , {},{},{},{})".format(ts,i ,i*10,i*100.0,i*1000.0))
|
||||
tdSql.execute(" insert into sub2 values({} , {},{},{},{})".format(ts,i ,i*10,i*100.0,i*1000.0))
|
||||
|
||||
|
||||
def create_udf_function(self):
|
||||
|
||||
for i in range(5):
|
||||
# create scalar functions
|
||||
tdSql.execute("create function udf1 as '%s' outputtype int bufSize 8;"%self.libudf1)
|
||||
|
||||
# create aggregate functions
|
||||
|
||||
tdSql.execute("create aggregate function udf2 as '%s' outputtype double bufSize 8;"%self.libudf2)
|
||||
|
||||
functions = tdSql.getResult("show functions")
|
||||
function_nums = len(functions)
|
||||
if function_nums == 2:
|
||||
tdLog.info("create two udf functions success ")
|
||||
|
||||
# drop functions
|
||||
|
||||
tdSql.execute("drop function udf1")
|
||||
tdSql.execute("drop function udf2")
|
||||
|
||||
functions = tdSql.getResult("show functions")
|
||||
for function in functions:
|
||||
if "udf1" in function[0] or "udf2" in function[0]:
|
||||
tdLog.info("drop udf functions failed ")
|
||||
tdLog.exit("drop udf functions failed")
|
||||
|
||||
tdLog.info("drop two udf functions success ")
|
||||
|
||||
# create scalar functions
|
||||
tdSql.execute("create function udf1 as '%s' outputtype int bufSize 8;"%self.libudf1)
|
||||
|
||||
# create aggregate functions
|
||||
|
||||
tdSql.execute("create aggregate function udf2 as '%s' outputtype double bufSize 8;"%self.libudf2)
|
||||
|
||||
functions = tdSql.getResult("show functions")
|
||||
function_nums = len(functions)
|
||||
if function_nums == 2:
|
||||
tdLog.info("create two udf functions success ")
|
||||
|
||||
def basic_udf_query(self):
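# UDF support is enabled ("udf":1), so the same queries must succeed; selected result values are verified row by row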
|
||||
|
||||
# scalar functions
|
||||
|
||||
tdSql.execute("use db ")
|
||||
tdSql.query("select num1 , udf1(num1) ,num2 ,udf1(num2),num3 ,udf1(num3),num4 ,udf1(num4) from tb")
|
||||
tdSql.checkData(0,0,None)
|
||||
tdSql.checkData(0,1,None)
|
||||
tdSql.checkData(0,2,1)
|
||||
tdSql.checkData(0,3,88)
|
||||
tdSql.checkData(0,4,1.000000000)
|
||||
tdSql.checkData(0,5,88)
|
||||
tdSql.checkData(0,6,"binary1")
|
||||
tdSql.checkData(0,7,88)
|
||||
|
||||
tdSql.checkData(3,0,3)
|
||||
tdSql.checkData(3,1,88)
|
||||
tdSql.checkData(3,2,33333)
|
||||
tdSql.checkData(3,3,88)
|
||||
tdSql.checkData(3,4,33.000000000)
|
||||
tdSql.checkData(3,5,88)
|
||||
tdSql.checkData(3,6,"binary1")
|
||||
tdSql.checkData(3,7,88)
|
||||
|
||||
tdSql.checkData(11,0,None)
|
||||
tdSql.checkData(11,1,None)
|
||||
tdSql.checkData(11,2,None)
|
||||
tdSql.checkData(11,3,None)
|
||||
tdSql.checkData(11,4,None)
|
||||
tdSql.checkData(11,5,None)
|
||||
tdSql.checkData(11,6,"binary1")
|
||||
tdSql.checkData(11,7,88)
|
||||
|
||||
tdSql.query("select c1 , udf1(c1) ,c2 ,udf1(c2), c3 ,udf1(c3), c4 ,udf1(c4) from stb1 order by c1")
|
||||
tdSql.checkData(0,0,None)
|
||||
tdSql.checkData(0,1,None)
|
||||
tdSql.checkData(0,2,None)
|
||||
tdSql.checkData(0,3,None)
|
||||
tdSql.checkData(0,4,None)
|
||||
tdSql.checkData(0,5,None)
|
||||
tdSql.checkData(0,6,None)
|
||||
tdSql.checkData(0,7,None)
|
||||
|
||||
tdSql.checkData(20,0,8)
|
||||
tdSql.checkData(20,1,88)
|
||||
tdSql.checkData(20,2,88888)
|
||||
tdSql.checkData(20,3,88)
|
||||
tdSql.checkData(20,4,888)
|
||||
tdSql.checkData(20,5,88)
|
||||
tdSql.checkData(20,6,88)
|
||||
tdSql.checkData(20,7,88)
|
||||
|
||||
|
||||
# aggregate functions
|
||||
tdSql.query("select udf2(num1) ,udf2(num2), udf2(num3) from tb")
|
||||
tdSql.checkData(0,0,15.362291496)
|
||||
tdSql.checkData(0,1,10000949.553189287)
|
||||
tdSql.checkData(0,2,168.633425216)
|
||||
|
||||
# Arithmetic compute
|
||||
tdSql.query("select udf2(num1)+100 ,udf2(num2)-100, udf2(num3)*100 ,udf2(num3)/100 from tb")
|
||||
tdSql.checkData(0,0,115.362291496)
|
||||
tdSql.checkData(0,1,10000849.553189287)
|
||||
tdSql.checkData(0,2,16863.342521576)
|
||||
tdSql.checkData(0,3,1.686334252)
|
||||
|
||||
tdSql.query("select udf2(c1) ,udf2(c6) from stb1 ")
|
||||
tdSql.checkData(0,0,25.514701644)
|
||||
tdSql.checkData(0,1,265.247614504)
|
||||
|
||||
tdSql.query("select udf2(c1)+100 ,udf2(c6)-100 ,udf2(c1)*100 ,udf2(c6)/100 from stb1 ")
|
||||
tdSql.checkData(0,0,125.514701644)
|
||||
tdSql.checkData(0,1,165.247614504)
|
||||
tdSql.checkData(0,2,2551.470164435)
|
||||
tdSql.checkData(0,3,2.652476145)
|
||||
|
||||
# # bug for crash when query sub table
|
||||
tdSql.query("select udf2(c1+100) ,udf2(c6-100) ,udf2(c1*100) ,udf2(c6/100) from ct1")
|
||||
tdSql.checkData(0,0,378.215547010)
|
||||
tdSql.checkData(0,1,353.808067460)
|
||||
tdSql.checkData(0,2,2114.237451187)
|
||||
tdSql.checkData(0,3,2.125468151)
|
||||
|
||||
tdSql.query("select udf2(c1+100) ,udf2(c6-100) ,udf2(c1*100) ,udf2(c6/100) from stb1 ")
|
||||
tdSql.checkData(0,0,490.358032462)
|
||||
tdSql.checkData(0,1,400.460106627)
|
||||
tdSql.checkData(0,2,2551.470164435)
|
||||
tdSql.checkData(0,3,2.652476145)
|
||||
|
||||
|
||||
# regular table with aggregate functions
|
||||
|
||||
tdSql.error("select udf1(num1) , count(num1) from tb;")
|
||||
tdSql.error("select udf1(num1) , avg(num1) from tb;")
|
||||
tdSql.error("select udf1(num1) , twa(num1) from tb;")
|
||||
tdSql.error("select udf1(num1) , irate(num1) from tb;")
|
||||
tdSql.error("select udf1(num1) , sum(num1) from tb;")
|
||||
tdSql.error("select udf1(num1) , stddev(num1) from tb;")
|
||||
tdSql.error("select udf1(num1) , mode(num1) from tb;")
|
||||
tdSql.error("select udf1(num1) , HYPERLOGLOG(num1) from tb;")
|
||||
# stable
|
||||
tdSql.error("select udf1(c1) , count(c1) from stb1;")
|
||||
tdSql.error("select udf1(c1) , avg(c1) from stb1;")
|
||||
tdSql.error("select udf1(c1) , twa(c1) from stb1;")
|
||||
tdSql.error("select udf1(c1) , irate(c1) from stb1;")
|
||||
tdSql.error("select udf1(c1) , sum(c1) from stb1;")
|
||||
tdSql.error("select udf1(c1) , stddev(c1) from stb1;")
|
||||
tdSql.error("select udf1(c1) , mode(c1) from stb1;")
|
||||
tdSql.error("select udf1(c1) , HYPERLOGLOG(c1) from stb1;")
|
||||
|
||||
# regular table with select functions
|
||||
|
||||
tdSql.query("select udf1(num1) , max(num1) from tb;")
|
||||
tdSql.checkRows(1)
|
||||
tdSql.query("select floor(num1) , max(num1) from tb;")
|
||||
tdSql.checkRows(1)
|
||||
tdSql.query("select udf1(num1) , min(num1) from tb;")
|
||||
tdSql.checkRows(1)
|
||||
tdSql.query("select ceil(num1) , min(num1) from tb;")
|
||||
tdSql.checkRows(1)
|
||||
tdSql.query("select udf1(num1) , first(num1) from tb;")
|
||||
|
||||
tdSql.query("select abs(num1) , first(num1) from tb;")
|
||||
|
||||
tdSql.query("select udf1(num1) , last(num1) from tb;")
|
||||
|
||||
tdSql.query("select round(num1) , last(num1) from tb;")
|
||||
|
||||
tdSql.query("select udf1(num1) , top(num1,1) from tb;")
|
||||
tdSql.checkRows(1)
|
||||
tdSql.query("select udf1(num1) , bottom(num1,1) from tb;")
|
||||
tdSql.checkRows(1)
|
||||
# tdSql.query("select udf1(num1) , last_row(num1) from tb;")
|
||||
# tdSql.checkRows(1)
|
||||
|
||||
# tdSql.query("select round(num1) , last_row(num1) from tb;")
|
||||
# tdSql.checkRows(1)
|
||||
|
||||
|
||||
# stable
|
||||
tdSql.query("select udf1(c1) , max(c1) from stb1;")
|
||||
tdSql.checkRows(1)
|
||||
tdSql.query("select abs(c1) , max(c1) from stb1;")
|
||||
tdSql.checkRows(1)
|
||||
tdSql.query("select udf1(c1) , min(c1) from stb1;")
|
||||
tdSql.checkRows(1)
|
||||
tdSql.query("select floor(c1) , min(c1) from stb1;")
|
||||
tdSql.checkRows(1)
|
||||
tdSql.query("select udf1(c1) , first(c1) from stb1;")
|
||||
|
||||
tdSql.query("select udf1(c1) , last(c1) from stb1;")
|
||||
|
||||
tdSql.query("select udf1(c1) , top(c1 ,1) from stb1;")
|
||||
tdSql.checkRows(1)
|
||||
tdSql.query("select abs(c1) , top(c1 ,1) from stb1;")
|
||||
tdSql.checkRows(1)
|
||||
tdSql.query("select udf1(c1) , bottom(c1,1) from stb1;")
|
||||
tdSql.checkRows(1)
|
||||
tdSql.query("select ceil(c1) , bottom(c1,1) from stb1;")
|
||||
tdSql.checkRows(1)
|
||||
|
||||
# tdSql.query("select udf1(c1) , last_row(c1) from stb1;")
|
||||
# tdSql.checkRows(1)
|
||||
# tdSql.query("select ceil(c1) , last_row(c1) from stb1;")
|
||||
# tdSql.checkRows(1)
|
||||
|
||||
# regular table with compute functions
|
||||
|
||||
tdSql.query("select udf1(num1) , abs(num1) from tb;")
|
||||
tdSql.checkRows(12)
|
||||
tdSql.query("select floor(num1) , abs(num1) from tb;")
|
||||
tdSql.checkRows(12)
|
||||
|
||||
# # bug need fix
|
||||
|
||||
#tdSql.query("select udf1(num1) , csum(num1) from tb;")
|
||||
#tdSql.checkRows(9)
|
||||
#tdSql.query("select ceil(num1) , csum(num1) from tb;")
|
||||
#tdSql.checkRows(9)
|
||||
#tdSql.query("select udf1(c1) , csum(c1) from stb1;")
|
||||
#tdSql.checkRows(22)
|
||||
#tdSql.query("select floor(c1) , csum(c1) from stb1;")
|
||||
#tdSql.checkRows(22)
|
||||
|
||||
# stable with compute functions
|
||||
tdSql.query("select udf1(c1) , abs(c1) from stb1;")
|
||||
tdSql.checkRows(25)
|
||||
tdSql.query("select abs(c1) , ceil(c1) from stb1;")
|
||||
tdSql.checkRows(25)
|
||||
|
||||
# nest query
|
||||
tdSql.query("select abs(udf1(c1)) , abs(ceil(c1)) from stb1 order by ts;")
|
||||
tdSql.checkRows(25)
|
||||
tdSql.checkData(0,0,None)
|
||||
tdSql.checkData(0,1,None)
|
||||
tdSql.checkData(1,0,88)
|
||||
tdSql.checkData(1,1,8)
|
||||
|
||||
tdSql.query("select abs(udf1(c1)) , abs(ceil(c1)) from ct1 order by ts;")
|
||||
tdSql.checkRows(13)
|
||||
tdSql.checkData(0,0,88)
|
||||
tdSql.checkData(0,1,8)
|
||||
tdSql.checkData(1,0,88)
|
||||
tdSql.checkData(1,1,7)
|
||||
|
||||
# bug fix for crash
|
||||
# order by udf function result
|
||||
for _ in range(50):
|
||||
tdSql.query("select udf2(c1) from stb1 group by 1-udf1(c1)")
|
||||
print(tdSql.queryResult)
|
||||
|
||||
# udf functions with filter
|
||||
|
||||
tdSql.query("select abs(udf1(c1)) , abs(ceil(c1)) from stb1 where c1 is null order by ts;")
|
||||
tdSql.checkRows(3)
|
||||
tdSql.checkData(0,0,None)
|
||||
tdSql.checkData(0,1,None)
|
||||
|
||||
tdSql.query("select c1 ,udf1(c1) , c6 ,udf1(c6) from stb1 where c1 > 8 order by ts")
|
||||
tdSql.checkRows(3)
|
||||
tdSql.checkData(0,0,9)
|
||||
tdSql.checkData(0,1,88)
|
||||
tdSql.checkData(0,2,-99.990000000)
|
||||
tdSql.checkData(0,3,88)
|
||||
|
||||
tdSql.query("select sub1.c1, sub2.c2 from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null")
|
||||
tdSql.checkData(0,0,0)
|
||||
tdSql.checkData(0,1,0)
|
||||
tdSql.checkData(1,0,1)
|
||||
tdSql.checkData(1,1,10)
|
||||
|
||||
tdSql.query("select udf1(sub1.c1), udf1(sub2.c2) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null")
|
||||
tdSql.checkData(0,0,88)
|
||||
tdSql.checkData(0,1,88)
|
||||
tdSql.checkData(1,0,88)
|
||||
tdSql.checkData(1,1,88)
|
||||
|
||||
tdSql.query("select sub1.c1 , udf1(sub1.c1), sub2.c2 ,udf1(sub2.c2) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null")
|
||||
tdSql.checkData(0,0,0)
|
||||
tdSql.checkData(0,1,88)
|
||||
tdSql.checkData(0,2,0)
|
||||
tdSql.checkData(0,3,88)
|
||||
tdSql.checkData(1,0,1)
|
||||
tdSql.checkData(1,1,88)
|
||||
tdSql.checkData(1,2,10)
|
||||
tdSql.checkData(1,3,88)
|
||||
|
||||
tdSql.query("select udf2(sub1.c1), udf2(sub2.c2) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null")
|
||||
tdSql.checkData(0,0,16.881943016)
|
||||
tdSql.checkData(0,1,168.819430161)
|
||||
tdSql.error("select sub1.c1 , udf2(sub1.c1), sub2.c2 ,udf2(sub2.c2) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null")
|
||||
|
||||
# udf functions with group by
|
||||
tdSql.query("select udf1(c1) from ct1 group by c1")
|
||||
tdSql.checkRows(10)
|
||||
tdSql.query("select udf1(c1) from stb1 group by c1")
|
||||
tdSql.checkRows(11)
|
||||
tdSql.query("select c1,c2, udf1(c1,c2) from ct1 group by c1,c2")
|
||||
tdSql.checkRows(10)
|
||||
tdSql.query("select c1,c2, udf1(c1,c2) from stb1 group by c1,c2")
|
||||
tdSql.checkRows(11)
|
||||
|
||||
tdSql.query("select udf2(c1) from ct1 group by c1")
|
||||
tdSql.checkRows(10)
|
||||
tdSql.query("select udf2(c1) from stb1 group by c1")
|
||||
tdSql.checkRows(11)
|
||||
tdSql.query("select c1,c2, udf2(c1,c6) from ct1 group by c1,c2")
|
||||
tdSql.checkRows(10)
|
||||
tdSql.query("select c1,c2, udf2(c1,c6) from stb1 group by c1,c2")
|
||||
tdSql.checkRows(11)
|
||||
tdSql.query("select udf2(c1) from stb1 group by udf1(c1)")
|
||||
tdSql.checkRows(2)
|
||||
tdSql.query("select udf2(c1) from stb1 group by floor(c1)")
|
||||
tdSql.checkRows(11)
|
||||
|
||||
# udf mix with order by
|
||||
tdSql.query("select udf2(c1) from stb1 group by floor(c1) order by udf2(c1)")
|
||||
tdSql.checkRows(11)
|
||||
|
||||
|
||||
def multi_cols_udf(self):
|
||||
tdSql.query("select num1,num2,num3,udf1(num1,num2,num3) from tb")
|
||||
tdSql.checkData(0,0,None)
|
||||
tdSql.checkData(0,1,1)
|
||||
tdSql.checkData(0,2,1.000000000)
|
||||
tdSql.checkData(0,3,None)
|
||||
tdSql.checkData(1,0,1)
|
||||
tdSql.checkData(1,1,1)
|
||||
tdSql.checkData(1,2,1.110000000)
|
||||
tdSql.checkData(1,3,88)
|
||||
|
||||
tdSql.query("select c1,c6,udf1(c1,c6) from stb1 order by ts")
|
||||
tdSql.checkData(1,0,8)
|
||||
tdSql.checkData(1,1,88.880000000)
|
||||
tdSql.checkData(1,2,88)
|
||||
|
||||
tdSql.query("select abs(udf1(c1,c6,c1,c6)) , abs(ceil(c1)) from stb1 where c1 is not null order by ts;")
|
||||
tdSql.checkRows(22)
|
||||
|
||||
tdSql.query("select udf2(sub1.c1 ,sub1.c2), udf2(sub2.c2 ,sub2.c1) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null")
|
||||
tdSql.checkData(0,0,169.661427555)
|
||||
tdSql.checkData(0,1,169.661427555)
|
||||
|
||||
def try_query_sql(self):
|
||||
udf1_sqls = [
|
||||
"select num1 , udf1(num1) ,num2 ,udf1(num2),num3 ,udf1(num3),num4 ,udf1(num4) from tb" ,
|
||||
"select c1 , udf1(c1) ,c2 ,udf1(c2), c3 ,udf1(c3), c4 ,udf1(c4) from stb1 order by c1" ,
|
||||
"select udf1(num1) , max(num1) from tb;" ,
|
||||
"select udf1(num1) , min(num1) from tb;" ,
|
||||
#"select udf1(num1) , top(num1,1) from tb;" ,
|
||||
#"select udf1(num1) , bottom(num1,1) from tb;" ,
|
||||
"select udf1(c1) , max(c1) from stb1;" ,
|
||||
"select udf1(c1) , min(c1) from stb1;" ,
|
||||
#"select udf1(c1) , top(c1 ,1) from stb1;" ,
|
||||
#"select udf1(c1) , bottom(c1,1) from stb1;" ,
|
||||
"select udf1(num1) , abs(num1) from tb;" ,
|
||||
#"select udf1(num1) , csum(num1) from tb;" ,
|
||||
#"select udf1(c1) , csum(c1) from stb1;" ,
|
||||
"select udf1(c1) , abs(c1) from stb1;" ,
|
||||
"select abs(udf1(c1)) , abs(ceil(c1)) from stb1 order by ts;" ,
|
||||
"select abs(udf1(c1)) , abs(ceil(c1)) from ct1 order by ts;" ,
|
||||
"select abs(udf1(c1)) , abs(ceil(c1)) from stb1 where c1 is null order by ts;" ,
|
||||
"select c1 ,udf1(c1) , c6 ,udf1(c6) from stb1 where c1 > 8 order by ts" ,
|
||||
"select udf1(sub1.c1), udf1(sub2.c2) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null" ,
|
||||
"select sub1.c1 , udf1(sub1.c1), sub2.c2 ,udf1(sub2.c2) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null" ,
|
||||
"select udf1(c1) from ct1 group by c1" ,
|
||||
"select udf1(c1) from stb1 group by c1" ,
|
||||
"select c1,c2, udf1(c1,c2) from ct1 group by c1,c2" ,
|
||||
"select c1,c2, udf1(c1,c2) from stb1 group by c1,c2" ,
|
||||
"select num1,num2,num3,udf1(num1,num2,num3) from tb" ,
|
||||
"select c1,c6,udf1(c1,c6) from stb1 order by ts" ,
|
||||
"select abs(udf1(c1,c6,c1,c6)) , abs(ceil(c1)) from stb1 where c1 is not null order by ts;"
|
||||
]
|
||||
udf2_sqls = ["select udf2(sub1.c1), udf2(sub2.c2) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null" ,
|
||||
"select udf2(c1) from stb1 group by 1-udf1(c1)" ,
|
||||
"select udf2(num1) ,udf2(num2), udf2(num3) from tb" ,
|
||||
"select udf2(num1)+100 ,udf2(num2)-100, udf2(num3)*100 ,udf2(num3)/100 from tb" ,
|
||||
"select udf2(c1) ,udf2(c6) from stb1 " ,
|
||||
"select udf2(c1)+100 ,udf2(c6)-100 ,udf2(c1)*100 ,udf2(c6)/100 from stb1 " ,
|
||||
"select udf2(c1+100) ,udf2(c6-100) ,udf2(c1*100) ,udf2(c6/100) from ct1" ,
|
||||
"select udf2(c1+100) ,udf2(c6-100) ,udf2(c1*100) ,udf2(c6/100) from stb1 " ,
|
||||
"select udf2(c1) from ct1 group by c1" ,
|
||||
"select udf2(c1) from stb1 group by c1" ,
|
||||
"select c1,c2, udf2(c1,c6) from ct1 group by c1,c2" ,
|
||||
"select c1,c2, udf2(c1,c6) from stb1 group by c1,c2" ,
|
||||
"select udf2(c1) from stb1 group by udf1(c1)" ,
|
||||
"select udf2(c1) from stb1 group by floor(c1)" ,
|
||||
"select udf2(c1) from stb1 group by floor(c1) order by udf2(c1)" ,
|
||||
|
||||
"select udf2(sub1.c1 ,sub1.c2), udf2(sub2.c2 ,sub2.c1) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null" ,
|
||||
"select udf2(sub1.c1 ,sub1.c2), udf2(sub2.c2 ,sub2.c1) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null" ,
|
||||
"select udf2(sub1.c1 ,sub1.c2), udf2(sub2.c2 ,sub2.c1) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null" ,
|
||||
"select udf2(sub1.c1 ,sub1.c2), udf2(sub2.c2 ,sub2.c1) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null"]
|
||||
|
||||
return udf1_sqls ,udf2_sqls
|
||||
|
||||
|
||||
|
||||
def unexpected_create(self):
|
||||
|
||||
tdLog.info(" create function with out bufsize ")
|
||||
tdSql.query("drop function udf1 ")
|
||||
tdSql.query("drop function udf2 ")
|
||||
|
||||
# create function without buffer
|
||||
tdSql.execute("create function udf1 as '%s' outputtype int"%self.libudf1)
|
||||
tdSql.execute("create aggregate function udf2 as '%s' outputtype double"%self.libudf2)
|
||||
udf1_sqls ,udf2_sqls = self.try_query_sql()
|
||||
|
||||
for scalar_sql in udf1_sqls:
|
||||
tdSql.query(scalar_sql)
|
||||
for aggregate_sql in udf2_sqls:
|
||||
tdSql.error(aggregate_sql)
|
||||
|
||||
# create function without aggregate
|
||||
|
||||
tdLog.info(" create function with out aggregate ")
|
||||
tdSql.query("drop function udf1 ")
|
||||
tdSql.query("drop function udf2 ")
|
||||
|
||||
# create the functions with the scalar/aggregate type swapped
|
||||
tdSql.execute("create aggregate function udf1 as '%s' outputtype int bufSize 8 "%self.libudf1)
|
||||
tdSql.execute("create function udf2 as '%s' outputtype double bufSize 8"%self.libudf2)
|
||||
udf1_sqls ,udf2_sqls = self.try_query_sql()
|
||||
|
||||
for scalar_sql in udf1_sqls:
|
||||
tdSql.error(scalar_sql)
|
||||
for aggregate_sql in udf2_sqls:
|
||||
tdSql.error(aggregate_sql)
|
||||
|
||||
tdSql.execute(" create function db as '%s' outputtype int bufSize 8 "%self.libudf1)
|
||||
tdSql.execute(" create aggregate function test as '%s' outputtype int bufSize 8 "%self.libudf1)
|
||||
tdSql.error(" select db(c1) from stb1 ")
|
||||
tdSql.error(" select db(c1,c6), db(c6) from stb1 ")
|
||||
tdSql.error(" select db(num1,num2), db(num1) from tb ")
|
||||
tdSql.error(" select test(c1) from stb1 ")
|
||||
tdSql.error(" select test(c1,c6), test(c6) from stb1 ")
|
||||
tdSql.error(" select test(num1,num2), test(num1) from tb ")
|
||||
|
||||
|
||||
|
||||
def loop_kill_udfd(self):
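# kill -9 the udfd process between aggregate queries and check that the same query still returns the expected values afterwards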
|
||||
|
||||
buildPath = self.getBuildPath()
|
||||
if (buildPath == ""):
|
||||
tdLog.exit("taosd not found!")
|
||||
else:
|
||||
tdLog.info("taosd found in %s" % buildPath)
|
||||
|
||||
cfgPath = buildPath + "/../sim/dnode1/cfg"
|
||||
udfdPath = buildPath +'/build/bin/udfd'
|
||||
|
||||
for i in range(3):
|
||||
|
||||
tdLog.info(" loop restart udfd %d_th" % i)
|
||||
|
||||
tdSql.query("select udf2(sub1.c1 ,sub1.c2), udf2(sub2.c2 ,sub2.c1) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null")
|
||||
tdSql.checkData(0,0,169.661427555)
|
||||
tdSql.checkData(0,1,169.661427555)
|
||||
# stop udfd cmds
|
||||
get_processID = "ps -ef | grep -w udfd | grep -v grep| grep -v defunct | awk '{print $2}'"
|
||||
processID = subprocess.check_output(get_processID, shell=True).decode("utf-8")
|
||||
stop_udfd = " kill -9 %s" % processID
|
||||
os.system(stop_udfd)
|
||||
|
||||
time.sleep(2)
|
||||
|
||||
tdSql.query("select udf2(sub1.c1 ,sub1.c2), udf2(sub2.c2 ,sub2.c1) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null")
|
||||
tdSql.checkData(0,0,169.661427555)
|
||||
tdSql.checkData(0,1,169.661427555)
|
||||
|
||||
# # start udfd cmds
|
||||
# start_udfd = "nohup " + udfdPath +'-c' +cfgPath +" > /dev/null 2>&1 &"
|
||||
# tdLog.info("start udfd : %s " % start_udfd)
|
||||
|
||||
def test_function_name(self):
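# creating a UDF whose name collides with a built-in function or reserved keyword must be rejected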
|
||||
tdLog.info(" create function name is not build_in functions ")
|
||||
tdSql.execute(" drop function udf1 ")
|
||||
tdSql.execute(" drop function udf2 ")
|
||||
tdSql.error("create function max as '%s' outputtype int bufSize 8"%self.libudf1)
|
||||
tdSql.error("create aggregate function sum as '%s' outputtype double bufSize 8"%self.libudf2)
|
||||
tdSql.error("create function max as '%s' outputtype int bufSize 8"%self.libudf1)
|
||||
tdSql.error("create aggregate function sum as '%s' outputtype double bufSize 8"%self.libudf2)
|
||||
tdSql.error("create aggregate function tbname as '%s' outputtype double bufSize 8"%self.libudf2)
|
||||
tdSql.error("create aggregate function function as '%s' outputtype double bufSize 8"%self.libudf2)
|
||||
tdSql.error("create aggregate function stable as '%s' outputtype double bufSize 8"%self.libudf2)
|
||||
tdSql.error("create aggregate function union as '%s' outputtype double bufSize 8"%self.libudf2)
|
||||
tdSql.error("create aggregate function 123 as '%s' outputtype double bufSize 8"%self.libudf2)
|
||||
tdSql.error("create aggregate function 123db as '%s' outputtype double bufSize 8"%self.libudf2)
|
||||
tdSql.error("create aggregate function mnode as '%s' outputtype double bufSize 8"%self.libudf2)
|
||||
|
||||
def restart_taosd_query_udf(self):
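# restart taosd several times and verify the registered UDFs keep returning correct results after each restart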
|
||||
|
||||
self.create_udf_function()
|
||||
|
||||
for i in range(5):
|
||||
tdLog.info(" this is %d_th restart taosd " %i)
|
||||
tdSql.execute("use db ")
|
||||
tdSql.query("select count(*) from stb1")
|
||||
tdSql.checkRows(1)
|
||||
tdSql.query("select udf2(sub1.c1 ,sub1.c2), udf2(sub2.c2 ,sub2.c1) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null")
|
||||
tdSql.checkData(0,0,169.661427555)
|
||||
tdSql.checkData(0,1,169.661427555)
|
||||
tdDnodes.stop(1)
|
||||
tdDnodes.start(1)
|
||||
time.sleep(2)
|
||||
|
||||
|
||||
def run(self): # sourcery skip: extract-duplicate-method, remove-redundant-fstring
|
||||
|
||||
print(" env is ok for all ")
|
||||
self.prepare_udf_so()
|
||||
self.prepare_data()
|
||||
self.create_udf_function()
|
||||
self.basic_udf_query()
|
||||
|
||||
def stop(self):
|
||||
tdSql.close()
|
||||
tdLog.success(f"{__file__} successfully executed")
|
||||
|
||||
tdCases.addLinux(__file__, TDTestCase())
|
||||
tdCases.addWindows(__file__, TDTestCase())
|
|
@ -0,0 +1,63 @@
import taos
import sys
import datetime
import inspect

from util.log import *
from util.sql import *
from util.cases import *
import random


class TDTestCase:
    updatecfgDict = {'debugFlag': 143, "cDebugFlag": 143, "uDebugFlag": 143, "rpcDebugFlag": 143, "tmrDebugFlag": 143,
                     "jniDebugFlag": 143, "simDebugFlag": 143, "dDebugFlag": 143, "vDebugFlag": 143, "mDebugFlag": 143, "qDebugFlag": 143,
                     "wDebugFlag": 143, "sDebugFlag": 143, "tsdbDebugFlag": 143, "tqDebugFlag": 143, "fsDebugFlag": 143, "udfDebugFlag": 143}

    def init(self, conn, logSql):
        tdLog.debug(f"start to execute {__file__}")
        tdSql.init(conn.cursor(), False)

    def case1(self):
        tdSql.execute("create database if not exists dbms precision 'ms'")
        tdSql.execute("create database if not exists dbus precision 'us'")
        tdSql.execute("create database if not exists dbns precision 'ns'")

        tdSql.execute("create table dbms.ntb (ts timestamp, c1 int, c2 bigint)")
        tdSql.execute("create table dbus.ntb (ts timestamp, c1 int, c2 bigint)")
        tdSql.execute("create table dbns.ntb (ts timestamp, c1 int, c2 bigint)")

        tdSql.execute("insert into dbms.ntb values ('2022-01-01 08:00:00.001', 1, 2)")
        tdSql.execute("insert into dbms.ntb values ('2022-01-01 08:00:00.002', 3, 4)")

        tdSql.execute("insert into dbus.ntb values ('2022-01-01 08:00:00.000001', 1, 2)")
        tdSql.execute("insert into dbus.ntb values ('2022-01-01 08:00:00.000002', 3, 4)")

        tdSql.execute("insert into dbns.ntb values ('2022-01-01 08:00:00.000000001', 1, 2)")
        tdSql.execute("insert into dbns.ntb values ('2022-01-01 08:00:00.000000002', 3, 4)")

        tdSql.query("select count(c1) from dbms.ntb interval(1a)")
        tdSql.checkRows(2)

        tdSql.query("select count(c1) from dbus.ntb interval(1u)")
        tdSql.checkRows(2)

        tdSql.query("select count(c1) from dbns.ntb interval(1b)")
        tdSql.checkRows(2)
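        # A minimal sketch (not part of the original test) of an extra assertion that
        # each 1-unit window above holds exactly one row; the column index used by
        # checkData is an assumption about the result layout.
        # for db, unit in (("dbms", "1a"), ("dbus", "1u"), ("dbns", "1b")):
        #     tdSql.query("select count(c1) from %s.ntb interval(%s)" % (db, unit))
        #     tdSql.checkRows(2)
        #     tdSql.checkData(0, 0, 1)
        #     tdSql.checkData(1, 0, 1)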
    def run(self):  # sourcery skip: extract-duplicate-method, remove-redundant-fstring
        tdSql.prepare()

        tdLog.printNoPrefix("==========start case1 run ...............")

        self.case1()

        tdLog.printNoPrefix("==========end case1 run ...............")

    def stop(self):
        tdSql.close()
        tdLog.success(f"{__file__} successfully executed")


tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
@ -392,6 +392,31 @@ class TDTestCase:
tdSql.query("select c0,c1 from stb11_1 where (c0>1000) union all select c0,c1 from stb11_1 where c0>2000;")
|
||||
assert unionallQnode==tdSql.queryResult
|
||||
|
||||
queryPolicy=1
|
||||
|
||||
tdSql.execute('alter local "queryPolicy" "%d"'%queryPolicy)
|
||||
tdSql.query("show local variables;")
|
||||
for i in range(tdSql.queryRows):
|
||||
if tdSql.queryResult[i][0] == "queryPolicy" :
|
||||
if int(tdSql.queryResult[i][1]) == int(queryPolicy):
|
||||
tdLog.success('alter queryPolicy to %d successfully'%queryPolicy)
|
||||
else :
|
||||
tdLog.debug(tdSql.queryResult)
|
||||
tdLog.exit("alter queryPolicy to %d failed"%queryPolicy)
|
||||
tdSql.execute("reset query cache")
|
||||
|
||||
tdSql.execute("use db1;")
|
||||
tdSql.query("show dnodes;")
|
||||
dnodeId=tdSql.getData(0,0)
|
||||
tdSql.query("select max(c1) from stb10;")
|
||||
assert maxQnode==tdSql.getData(0,0)
|
||||
tdSql.query("select min(c1) from stb11;")
|
||||
assert minQnode==tdSql.getData(0,0)
|
||||
tdSql.query("select c0,c1 from stb11_1 where (c0>1000) union select c0,c1 from stb11_1 where c0>2000;")
|
||||
assert unionQnode==tdSql.queryResult
|
||||
tdSql.query("select c0,c1 from stb11_1 where (c0>1000) union all select c0,c1 from stb11_1 where c0>2000;")
|
||||
assert unionallQnode==tdSql.queryResult
|
||||
|
||||
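        # A minimal sketch (not part of the original test) of how the set-and-verify
        # pattern above could be factored into a helper; the name set_query_policy is
        # illustrative only.
        # def set_query_policy(policy):
        #     tdSql.execute('alter local "queryPolicy" "%d"' % policy)
        #     tdSql.query("show local variables;")
        #     for r in range(tdSql.queryRows):
        #         if tdSql.queryResult[r][0] == "queryPolicy":
        #             if int(tdSql.queryResult[r][1]) == int(policy):
        #                 tdLog.success('alter queryPolicy to %d successfully' % policy)
        #             else:
        #                 tdLog.exit("alter queryPolicy to %d failed" % policy)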
    # test case : queryPolicy = 2
    def test_case2(self):
        self.taosBenchCreate("127.0.0.1","no","db1", "stb1", 10, 2, 1*10)
@ -40,9 +40,9 @@ class TDTestCase:
f"insert into rct{j} values ( {ts+i*10000}, {80+i}, {90+i}, {85+i}, {30+i*10}, {1.2*i}, {221+i*2}, {20+i*0.2}, {1500+i*20}, {150+i*2},{5+i} )"
|
||||
)
|
||||
tdSql.execute(
|
||||
f"insert into dct{j} values ( {ts+i*10000}, {1+i*0.1},{1400+i*15}, {1+i},{1500+i*20}, {150+i*2},{5+i} )"
|
||||
f"insert into dct{j} values ( {ts+i*10000}, {1+i*0.1},{1400+i*15}, {i},{1500+i*20}, {150+i*2},{5+i} )"
|
||||
)
|
||||
|
||||
tdSql.execute("insert into dct9 (ts,fuel_state) values('2021-07-13 14:06:33.123Z',1.2) ;")
|
||||
# def check_avg(self ,origin_query , check_query):
|
||||
# avg_result = tdSql.getResult(origin_query)
|
||||
# origin_result = tdSql.getResult(check_query)
|
||||
|
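    # A minimal sketch (not part of the original test) of how the commented-out
    # check_avg helper above could be completed; it assumes tdSql.getResult()
    # returns comparable result sets for both queries.
    # def check_avg(self, origin_query, check_query):
    #     avg_result = tdSql.getResult(origin_query)
    #     origin_result = tdSql.getResult(check_query)
    #     if avg_result == origin_result:
    #         tdLog.info("avg check passed for: %s" % origin_query)
    #     else:
    #         tdLog.exit("avg check failed for: %s" % origin_query)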
@ -60,13 +60,15 @@ class TDTestCase:

    def tsbsIotQuery(self):

        tdSql.execute("use db_tsbs")

        # test interval and partition
        tdSql.query(" SELECT avg(velocity) as mean_velocity ,name,driver,fleet FROM readings WHERE ts > 1451606400000 AND ts <= 1451606460000 partition BY name,driver,fleet; ")
        print(tdSql.queryResult)
        parRows=tdSql.queryRows
        tdSql.query(" SELECT avg(velocity) as mean_velocity ,name,driver,fleet FROM readings WHERE ts > 1451606400000 AND ts <= 1451606460000 partition BY name,driver,fleet interval(10m); ")
        # tdSql.checkRows(parRows)
        tdSql.checkRows(parRows)

        # test insert into
@ -77,18 +79,53 @@ class TDTestCase:

        # test partition interval fill
        # tdSql.query("SELECT name,floor(avg(velocity)/10)/floor(avg(velocity)/10) AS mv FROM readings WHERE name!='' AND ts > '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by name interval(10m) fill(value,0) ;")
        tdSql.query("SELECT name,floor(avg(velocity)/10)/floor(avg(velocity)/10) AS mv FROM readings WHERE name!='' AND ts > '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by name interval(10m) fill(value,0) ;")

        # # test partition interval limit
        # tdSql.query("SELECT ts,model,floor(2*(sum(nzs)/count(nzs)))/floor(2*(sum(nzs)/count(nzs))) AS broken_down FROM (SELECT ts,model, status/status AS nzs FROM diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' ) WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition BY model,ts interval(10m) limit 10;")
        # test partition interval limit (PRcore-TD-17410)
        # tdSql.query("select name,driver from (SELECT name,driver,fleet ,avg(velocity) as mean_velocity FROM readings partition BY name,driver,fleet interval (10m) limit 1);")
        # tdSql.checkRows(10)

        # test partition interval Pseudo time-column
        tdSql.query("SELECT count(ms1)/144 FROM (SELECT _wstart as ts1,model, fleet,avg(status) AS ms1 FROM diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by model, fleet interval(10m)) WHERE ts1 >= '2016-01-01T00:00:00Z' AND ts1 < '2016-01-05T00:00:01Z' AND ms1<1;")

        # 1 high-load:
        # tdSql.query("SELECT ts,name,driver,current_load,load_capacity FROM (SELECT last(ts) as ts,name,driver, current_load,load_capacity FROM diagnostics WHERE fleet = 'South' partition by name,driver) WHERE current_load>= (0.9 * load_capacity) partition by name ORDER BY name desc, ts DESC;")

        # tdSql.query("SELECT ts,name,driver,current_load,load_capacity FROM (SELECT last(ts) as ts,name,driver, current_load,load_capacity FROM diagnostics WHERE fleet = 'South' partition by name,driver) WHERE current_load>= (0.9 * load_capacity) partition by name ORDER BY name ;")

        # 2 stationary-trucks
        tdSql.query("select name,driver from (SELECT name,driver,fleet ,avg(velocity) as mean_velocity FROM readings WHERE ts > '2016-01-01T15:07:21Z' AND ts <= '2016-01-01T16:17:21Z' partition BY name,driver,fleet interval(10m) LIMIT 1)")
        tdSql.query("select name,driver from (SELECT name,driver,fleet ,avg(velocity) as mean_velocity FROM readings WHERE ts > '2016-01-01T15:07:21Z' AND ts <= '2016-01-01T16:17:21Z' partition BY name,driver,fleet interval(10m) LIMIT 1) WHERE fleet = 'West' AND mean_velocity < 1000 partition BY name")

        # 3 long-driving-sessions
        # tdSql.query("SELECT name,driver FROM(SELECT name,driver,count(*) AS ten_min FROM(SELECT _wstart as ts,name,driver,avg(velocity) as mean_velocity FROM readings where ts > '2016-01-01T00:00:34Z' AND ts <= '2016-01-01T04:00:34Z' partition BY name,driver interval(10m)) WHERE mean_velocity > 1 GROUP BY name,driver) WHERE ten_min > 22 ;")

        # 4 long-daily-sessions
        tdSql.query("SELECT name,driver FROM(SELECT name,driver,count(*) AS ten_min FROM(SELECT name,driver,avg(velocity) as mean_velocity FROM readings WHERE fleet ='West' AND ts > '2016-01-01T12:31:37Z' AND ts <= '2016-01-05T12:31:37Z' partition BY name,driver interval(10m) ) WHERE mean_velocity > 1 GROUP BY name,driver) WHERE ten_min > 60")

        # 5. avg-daily-driving-duration
        tdSql.query("select _wstart as ts,fleet,name,driver,count(mv)/6 as hours_driven from ( select _wstart as ts,fleet,name,driver,avg(velocity) as mv from readings where ts > '2016-01-01T00:00:00Z' and ts < '2016-01-05T00:00:01Z' partition by fleet,name,driver interval(10m)) where ts > '2016-01-01T00:00:00Z' and ts < '2016-01-05T00:00:01Z' partition by fleet,name,driver interval(1d) ;")

        # 6. avg-daily-driving-session
        # taosc core dumped
        tdSql.execute("create table random_measure2_1 (ts timestamp,ela float, name binary(40))")
        tdSql.query("SELECT ts,diff(mv) AS difka FROM (SELECT ts,name,floor(avg(velocity)/10)/floor(avg(velocity)/10) AS mv FROM readings WHERE name!='' AND ts > '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by name,ts interval(10m) fill(value,0)) GROUP BY name,ts;")
        tdSql.query("SELECT _wstart,name,floor(avg(velocity)/10)/floor(avg(velocity)/10) AS mv FROM readings WHERE name!='' AND ts > '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by name interval(10m) fill(value,0)")

        # 7. avg-load
        tdSql.query("SELECT fleet, model,avg(ml) AS mean_load_percentage FROM (SELECT fleet, model,current_load/load_capacity AS ml FROM diagnostics partition BY name, fleet, model) partition BY fleet, model order by fleet ;")

        # 8. daily-activity
        tdSql.query(" SELECT model,ms1 FROM (SELECT _wstart as ts1,model, fleet,avg(status) AS ms1 FROM diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by model, fleet interval(10m) fill(value,0)) WHERE ts1 >= '2016-01-01T00:00:00Z' AND ts1 < '2016-01-05T00:00:01Z' AND ms1<1;")
        tdSql.query("SELECT _wstart,model,fleet,count(ms1)/144 FROM (SELECT _wstart as ts1,model, fleet,avg(status) AS ms1 FROM diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by model, fleet interval(10m) fill(value,0)) WHERE ts1 >= '2016-01-01T00:00:00Z' AND ts1 < '2016-01-05T00:00:01Z' AND ms1<1 partition by model, fleet interval(1d) ;")

        # it's already supported:
        # last-loc
        tdSql.query("")
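        # A minimal sketch (not part of the original test) of how the numbered TSBS
        # queries above could be driven from a list so that failures are easier to
        # attribute; the helper name and the (label, sql) pairs are illustrative only.
        # def run_tsbs_queries(self, queries):
        #     for label, sql in queries:
        #         tdLog.info("running tsbs query: %s" % label)
        #         tdSql.query(sql)
        #         tdLog.info("%s returned %d rows" % (label, tdSql.queryRows))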
    # test
    def run(self):  # sourcery skip: extract-duplicate-method, remove-redundant-fstring
        tdLog.printNoPrefix("==========step1:create database and table,insert data ==============")
        self.prepareData()
@ -11,6 +11,9 @@ python3 ./test.py -f 0-others/udfTest.py
python3 ./test.py -f 0-others/udf_create.py
python3 ./test.py -f 0-others/udf_restart_taosd.py
python3 ./test.py -f 0-others/cachelast.py
python3 ./test.py -f 0-others/udf_cfg1.py
python3 ./test.py -f 0-others/udf_cfg2.py

python3 ./test.py -f 0-others/sysinfo.py
python3 ./test.py -f 0-others/user_control.py
python3 ./test.py -f 0-others/fsync.py
@ -29,6 +32,7 @@ python3 ./test.py -f 1-insert/block_wise.py
python3 ./test.py -f 1-insert/create_retentions.py
python3 ./test.py -f 1-insert/table_param_ttl.py

python3 ./test.py -f 2-query/db.py
python3 ./test.py -f 2-query/between.py
python3 ./test.py -f 2-query/distinct.py
python3 ./test.py -f 2-query/varchar.py
@ -1 +1 @@
Subproject commit bd496f76b64931c66da2f8b0f24143a98a881cde
Subproject commit b7b922268c4a06d9db77ffdfde0726f3d9900b72