Merge branch '3.0' into feature/3_liaohj

Haojun Liao 2022-07-17 11:06:49 +08:00
commit ad058e8c97
69 changed files with 3006 additions and 1012 deletions


@ -35,6 +35,7 @@ extern "C" {
#define TSDB_INS_TABLE_USER_INDEXES "user_indexes"
#define TSDB_INS_TABLE_USER_STABLES "user_stables"
#define TSDB_INS_TABLE_USER_TABLES "user_tables"
#define TSDB_INS_TABLE_USER_TAGS "user_tags"
#define TSDB_INS_TABLE_USER_TABLE_DISTRIBUTED "user_table_distributed"
#define TSDB_INS_TABLE_USER_USERS "user_users"
#define TSDB_INS_TABLE_LICENCES "grants"


@ -75,13 +75,18 @@ typedef uint16_t tmsg_t;
#define TSDB_IE_TYPE_DNODE_EXT 6
#define TSDB_IE_TYPE_DNODE_STATE 7
enum { CONN_TYPE__QUERY = 1, CONN_TYPE__TMQ, CONN_TYPE__UDFD, CONN_TYPE__MAX };
enum {
CONN_TYPE__QUERY = 1,
CONN_TYPE__TMQ,
CONN_TYPE__UDFD,
CONN_TYPE__MAX,
};
enum {
HEARTBEAT_KEY_USER_AUTHINFO = 1,
HEARTBEAT_KEY_DBINFO,
HEARTBEAT_KEY_STBINFO,
HEARTBEAT_KEY_MQ_TMP,
HEARTBEAT_KEY_TMQ,
};
typedef enum _mgmt_table {
@ -99,6 +104,7 @@ typedef enum _mgmt_table {
TSDB_MGMT_TABLE_STB,
TSDB_MGMT_TABLE_STREAMS,
TSDB_MGMT_TABLE_TABLE,
TSDB_MGMT_TABLE_TAG,
TSDB_MGMT_TABLE_USER,
TSDB_MGMT_TABLE_GRANTS,
TSDB_MGMT_TABLE_VGROUP,
@ -2146,6 +2152,15 @@ typedef struct {
char cgroup[TSDB_CGROUP_LEN];
} SMqAskEpReq;
typedef struct {
int64_t consumerId;
int32_t epoch;
} SMqHbReq;
typedef struct {
int8_t reserved;
} SMqHbRsp;
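The two structs above define the new consumer heartbeat message added in this merge (sent as TDMT_MND_MQ_HB and handled by mndProcessMqHbReq further below). A rough, hypothetical sketch of how a client could fill the request — the consumerId byte order is inferred from the mnode handler, which reads it back with be64toh; the epoch conversion and the variable names are assumptions, not the actual client code:
// illustrative sketch only, not the actual tmq client implementation
int64_t consumerId = 1;                // example value
int32_t epoch = 0;                     // example value
SMqHbReq req = {0};
req.consumerId = htobe64(consumerId);  // mndProcessMqHbReq decodes this with be64toh()
req.epoch = htonl(epoch);              // assumption: network byte order, mirroring how SMqAskEpReq's epoch is read with ntohl()
// req is then sent as the body of a TDMT_MND_MQ_HB message to the mnode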
typedef struct {
int32_t key;
int32_t valueLen;
@ -2335,29 +2350,30 @@ static FORCE_INLINE int32_t tDecodeSClientHbKey(SDecoder* pDecoder, SClientHbKey
return 0;
}
typedef struct SMqHbVgInfo {
typedef struct {
int32_t vgId;
} SMqHbVgInfo;
// TODO stas
} SMqReportVgInfo;
static FORCE_INLINE int32_t taosEncodeSMqVgInfo(void** buf, const SMqHbVgInfo* pVgInfo) {
static FORCE_INLINE int32_t taosEncodeSMqVgInfo(void** buf, const SMqReportVgInfo* pVgInfo) {
int32_t tlen = 0;
tlen += taosEncodeFixedI32(buf, pVgInfo->vgId);
return tlen;
}
static FORCE_INLINE void* taosDecodeSMqVgInfo(void* buf, SMqHbVgInfo* pVgInfo) {
static FORCE_INLINE void* taosDecodeSMqVgInfo(void* buf, SMqReportVgInfo* pVgInfo) {
buf = taosDecodeFixedI32(buf, &pVgInfo->vgId);
return buf;
}
typedef struct SMqHbTopicInfo {
typedef struct {
int32_t epoch;
int64_t topicUid;
char name[TSDB_TOPIC_FNAME_LEN];
SArray* pVgInfo; // SArray<SMqHbVgInfo>
} SMqHbTopicInfo;
} SMqTopicInfo;
static FORCE_INLINE int32_t taosEncodeSMqHbTopicInfoMsg(void** buf, const SMqHbTopicInfo* pTopicInfo) {
static FORCE_INLINE int32_t taosEncodeSMqTopicInfoMsg(void** buf, const SMqTopicInfo* pTopicInfo) {
int32_t tlen = 0;
tlen += taosEncodeFixedI32(buf, pTopicInfo->epoch);
tlen += taosEncodeFixedI64(buf, pTopicInfo->topicUid);
@ -2365,35 +2381,35 @@ static FORCE_INLINE int32_t taosEncodeSMqHbTopicInfoMsg(void** buf, const SMqHbT
int32_t sz = taosArrayGetSize(pTopicInfo->pVgInfo);
tlen += taosEncodeFixedI32(buf, sz);
for (int32_t i = 0; i < sz; i++) {
SMqHbVgInfo* pVgInfo = (SMqHbVgInfo*)taosArrayGet(pTopicInfo->pVgInfo, i);
SMqReportVgInfo* pVgInfo = (SMqReportVgInfo*)taosArrayGet(pTopicInfo->pVgInfo, i);
tlen += taosEncodeSMqVgInfo(buf, pVgInfo);
}
return tlen;
}
static FORCE_INLINE void* taosDecodeSMqHbTopicInfoMsg(void* buf, SMqHbTopicInfo* pTopicInfo) {
static FORCE_INLINE void* taosDecodeSMqTopicInfoMsg(void* buf, SMqTopicInfo* pTopicInfo) {
buf = taosDecodeFixedI32(buf, &pTopicInfo->epoch);
buf = taosDecodeFixedI64(buf, &pTopicInfo->topicUid);
buf = taosDecodeStringTo(buf, pTopicInfo->name);
int32_t sz;
buf = taosDecodeFixedI32(buf, &sz);
pTopicInfo->pVgInfo = taosArrayInit(sz, sizeof(SMqHbVgInfo));
pTopicInfo->pVgInfo = taosArrayInit(sz, sizeof(SMqReportVgInfo));
for (int32_t i = 0; i < sz; i++) {
SMqHbVgInfo vgInfo;
SMqReportVgInfo vgInfo;
buf = taosDecodeSMqVgInfo(buf, &vgInfo);
taosArrayPush(pTopicInfo->pVgInfo, &vgInfo);
}
return buf;
}
typedef struct SMqHbMsg {
typedef struct {
int32_t status; // ask hb endpoint
int32_t epoch;
int64_t consumerId;
SArray* pTopics; // SArray<SMqHbTopicInfo>
} SMqHbMsg;
} SMqReportReq;
static FORCE_INLINE int32_t taosEncodeSMqMsg(void** buf, const SMqHbMsg* pMsg) {
static FORCE_INLINE int32_t taosEncodeSMqReportMsg(void** buf, const SMqReportReq* pMsg) {
int32_t tlen = 0;
tlen += taosEncodeFixedI32(buf, pMsg->status);
tlen += taosEncodeFixedI32(buf, pMsg->epoch);
@ -2401,22 +2417,22 @@ static FORCE_INLINE int32_t taosEncodeSMqMsg(void** buf, const SMqHbMsg* pMsg) {
int32_t sz = taosArrayGetSize(pMsg->pTopics);
tlen += taosEncodeFixedI32(buf, sz);
for (int32_t i = 0; i < sz; i++) {
SMqHbTopicInfo* topicInfo = (SMqHbTopicInfo*)taosArrayGet(pMsg->pTopics, i);
tlen += taosEncodeSMqHbTopicInfoMsg(buf, topicInfo);
SMqTopicInfo* topicInfo = (SMqTopicInfo*)taosArrayGet(pMsg->pTopics, i);
tlen += taosEncodeSMqTopicInfoMsg(buf, topicInfo);
}
return tlen;
}
static FORCE_INLINE void* taosDecodeSMqMsg(void* buf, SMqHbMsg* pMsg) {
static FORCE_INLINE void* taosDecodeSMqReportMsg(void* buf, SMqReportReq* pMsg) {
buf = taosDecodeFixedI32(buf, &pMsg->status);
buf = taosDecodeFixedI32(buf, &pMsg->epoch);
buf = taosDecodeFixedI64(buf, &pMsg->consumerId);
int32_t sz;
buf = taosDecodeFixedI32(buf, &sz);
pMsg->pTopics = taosArrayInit(sz, sizeof(SMqHbTopicInfo));
pMsg->pTopics = taosArrayInit(sz, sizeof(SMqTopicInfo));
for (int32_t i = 0; i < sz; i++) {
SMqHbTopicInfo topicInfo;
buf = taosDecodeSMqHbTopicInfoMsg(buf, &topicInfo);
SMqTopicInfo topicInfo;
buf = taosDecodeSMqTopicInfoMsg(buf, &topicInfo);
taosArrayPush(pMsg->pTopics, &topicInfo);
}
return buf;
@ -2921,89 +2937,6 @@ typedef struct {
int32_t tEncodeSMqDataRsp(SEncoder* pEncoder, const SMqDataRsp* pRsp);
int32_t tDecodeSMqDataRsp(SDecoder* pDecoder, SMqDataRsp* pRsp);
#if 0
typedef struct {
SMqRspHead head;
int64_t reqOffset;
int64_t rspOffset;
int32_t skipLogNum;
int32_t blockNum;
int8_t withTbName;
int8_t withSchema;
SArray* blockDataLen; // SArray<int32_t>
SArray* blockData; // SArray<SRetrieveTableRsp*>
SArray* blockTbName; // SArray<char*>
SArray* blockSchema; // SArray<SSchemaWrapper>
} SMqDataBlkRsp;
static FORCE_INLINE int32_t tEncodeSMqDataBlkRsp(void** buf, const SMqDataBlkRsp* pRsp) {
int32_t tlen = 0;
tlen += taosEncodeFixedI64(buf, pRsp->reqOffset);
tlen += taosEncodeFixedI64(buf, pRsp->rspOffset);
tlen += taosEncodeFixedI32(buf, pRsp->skipLogNum);
tlen += taosEncodeFixedI32(buf, pRsp->blockNum);
if (pRsp->blockNum != 0) {
tlen += taosEncodeFixedI8(buf, pRsp->withTbName);
tlen += taosEncodeFixedI8(buf, pRsp->withSchema);
for (int32_t i = 0; i < pRsp->blockNum; i++) {
int32_t bLen = *(int32_t*)taosArrayGet(pRsp->blockDataLen, i);
void* data = taosArrayGetP(pRsp->blockData, i);
tlen += taosEncodeFixedI32(buf, bLen);
tlen += taosEncodeBinary(buf, data, bLen);
if (pRsp->withSchema) {
SSchemaWrapper* pSW = (SSchemaWrapper*)taosArrayGetP(pRsp->blockSchema, i);
tlen += taosEncodeSSchemaWrapper(buf, pSW);
}
if (pRsp->withTbName) {
char* tbName = (char*)taosArrayGetP(pRsp->blockTbName, i);
tlen += taosEncodeString(buf, tbName);
}
}
}
return tlen;
}
static FORCE_INLINE void* tDecodeSMqDataBlkRsp(const void* buf, SMqDataBlkRsp* pRsp) {
buf = taosDecodeFixedI64(buf, &pRsp->reqOffset);
buf = taosDecodeFixedI64(buf, &pRsp->rspOffset);
buf = taosDecodeFixedI32(buf, &pRsp->skipLogNum);
buf = taosDecodeFixedI32(buf, &pRsp->blockNum);
if (pRsp->blockNum != 0) {
pRsp->blockData = taosArrayInit(pRsp->blockNum, sizeof(void*));
pRsp->blockDataLen = taosArrayInit(pRsp->blockNum, sizeof(int32_t));
buf = taosDecodeFixedI8(buf, &pRsp->withTbName);
buf = taosDecodeFixedI8(buf, &pRsp->withSchema);
if (pRsp->withTbName) {
pRsp->blockTbName = taosArrayInit(pRsp->blockNum, sizeof(void*));
}
if (pRsp->withSchema) {
pRsp->blockSchema = taosArrayInit(pRsp->blockNum, sizeof(void*));
}
for (int32_t i = 0; i < pRsp->blockNum; i++) {
int32_t bLen = 0;
void* data = NULL;
buf = taosDecodeFixedI32(buf, &bLen);
buf = taosDecodeBinary(buf, &data, bLen);
taosArrayPush(pRsp->blockDataLen, &bLen);
taosArrayPush(pRsp->blockData, &data);
if (pRsp->withSchema) {
SSchemaWrapper* pSW = (SSchemaWrapper*)taosMemoryMalloc(sizeof(SSchemaWrapper));
buf = taosDecodeSSchemaWrapper(buf, pSW);
taosArrayPush(pRsp->blockSchema, &pSW);
}
if (pRsp->withTbName) {
char* name = NULL;
buf = taosDecodeString(buf, &name);
taosArrayPush(pRsp->blockTbName, &name);
}
}
}
return (void*)buf;
}
#endif
typedef struct {
SMqRspHead head;
char cgroup[TSDB_CGROUP_LEN];


@ -144,6 +144,7 @@ enum {
TD_DEF_MSG_TYPE(TDMT_MND_MQ_ASK_EP, "ask-ep", SMqAskEpReq, SMqAskEpRsp)
TD_DEF_MSG_TYPE(TDMT_MND_MQ_CONSUMER_LOST, "consumer-lost", SMqConsumerLostMsg, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_MQ_CONSUMER_RECOVER, "consumer-recover", SMqConsumerRecoverMsg, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_MQ_HB, "consumer-hb", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_MQ_DO_REBALANCE, "do-rebalance", SMqDoRebalanceMsg, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_MQ_DROP_CGROUP, "drop-cgroup", SMqDropCGroupReq, SMqDropCGroupRsp)
TD_DEF_MSG_TYPE(TDMT_MND_MQ_COMMIT_OFFSET, "mnode-commit-offset", SMqCMCommitOffsetReq, SMqCMCommitOffsetRsp)


@ -170,6 +170,7 @@ typedef enum ENodeType {
QUERY_NODE_SHOW_STABLES_STMT,
QUERY_NODE_SHOW_STREAMS_STMT,
QUERY_NODE_SHOW_TABLES_STMT,
QUERY_NODE_SHOW_TAGS_STMT,
QUERY_NODE_SHOW_USERS_STMT,
QUERY_NODE_SHOW_LICENCE_STMT,
QUERY_NODE_SHOW_VGROUPS_STMT,


@ -135,7 +135,7 @@ void rpcSendRequestWithCtx(void *thandle, const SEpSet *pEpSet, SRpcMsg *pMsg
int32_t rpcGetConnInfo(void *thandle, SRpcConnInfo *pInfo);
void rpcSendRecv(void *shandle, SEpSet *pEpSet, SRpcMsg *pReq, SRpcMsg *pRsp);
void rpcSetDefaultAddr(void *thandle, const char *ip, const char *fqdn);
int64_t rpcAllocHandle();
void* rpcAllocHandle();
#ifdef __cplusplus
}


@ -171,6 +171,7 @@ static int32_t hbQueryHbRspHandle(SAppHbMgr *pAppHbMgr, SClientHbRsp *pRsp) {
pTscObj->pAppInfo->totalDnodes = pRsp->query->totalDnodes;
pTscObj->pAppInfo->onlineDnodes = pRsp->query->onlineDnodes;
pTscObj->connId = pRsp->query->connId;
tscTrace("conn %p hb rsp, dnodes %d/%d", pTscObj->connId, pTscObj->pAppInfo->onlineDnodes, pTscObj->pAppInfo->totalDnodes);
if (pRsp->query->killRid) {
tscDebug("request rid %" PRIx64 " need to be killed now", pRsp->query->killRid);
@ -294,6 +295,7 @@ static int32_t hbAsyncCallBack(void *param, SDataBuf *pMsg, int32_t code) {
if (code != 0) {
(*pInst)->onlineDnodes = ((*pInst)->totalDnodes ? 0 : -1);
tscDebug("hb rsp error %s, update server status %d/%d", tstrerror(code), (*pInst)->onlineDnodes, (*pInst)->totalDnodes);
}
if (rspNum) {
@ -571,7 +573,8 @@ int32_t hbQueryHbReqHandle(SClientHbKey *connKey, void *param, SClientHbReq *req
return TSDB_CODE_SUCCESS;
}
void hbMgrInitMqHbHandle() {
static FORCE_INLINE void hbMgrInitHandle() {
// init all handle
clientHbMgr.reqHandle[CONN_TYPE__QUERY] = hbQueryHbReqHandle;
clientHbMgr.reqHandle[CONN_TYPE__TMQ] = hbMqHbReqHandle;
@ -579,11 +582,6 @@ void hbMgrInitMqHbHandle() {
clientHbMgr.rspHandle[CONN_TYPE__TMQ] = hbMqHbRspHandle;
}
static FORCE_INLINE void hbMgrInitHandle() {
// init all handle
hbMgrInitMqHbHandle();
}
SClientHbBatchReq *hbGatherAllInfo(SAppHbMgr *pAppHbMgr) {
SClientHbBatchReq *pBatchReq = taosMemoryCalloc(1, sizeof(SClientHbBatchReq));
if (pBatchReq == NULL) {


@ -1276,7 +1276,12 @@ int32_t doProcessMsgFromServer(void* param) {
assert(pMsg->info.ahandle != NULL);
STscObj* pTscObj = NULL;
tscDebug("processMsgFromServer message: %s, code: %s", TMSG_INFO(pMsg->msgType), tstrerror(pMsg->code));
STraceId* trace = &pMsg->info.traceId;
char tbuf[40] = {0};
TRACE_TO_STR(trace, tbuf);
tscDebug("processMsgFromServer handle %p, message: %s, code: %s, gtid: %s", pMsg->info.handle, TMSG_INFO(pMsg->msgType), tstrerror(pMsg->code),
tbuf);
if (pSendInfo->requestObjRefId != 0) {
SRequestObj* pRequest = (SRequestObj*)taosAcquireRef(clientReqRefPool, pSendInfo->requestObjRefId);

File diff suppressed because it is too large


@ -153,6 +153,15 @@ static const SSysDbTableSchema userTblsSchema[] = {
{.name = "type", .bytes = 21 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
};
static const SSysDbTableSchema userTagsSchema[] = {
{.name = "table_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "db_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "stable_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "tag_name", .bytes = TSDB_COL_NAME_LEN - 1 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "tag_type", .bytes = 1, .type = TSDB_DATA_TYPE_TINYINT},
{.name = "tag_value", .bytes = TSDB_MAX_TAGS_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
};
static const SSysDbTableSchema userTblDistSchema[] = {
{.name = "db_name", .bytes = 32 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "table_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
@ -255,6 +264,7 @@ static const SSysTableMeta infosMeta[] = {
{TSDB_INS_TABLE_USER_STABLES, userStbsSchema, tListLen(userStbsSchema)},
{TSDB_PERFS_TABLE_STREAMS, streamSchema, tListLen(streamSchema)},
{TSDB_INS_TABLE_USER_TABLES, userTblsSchema, tListLen(userTblsSchema)},
{TSDB_INS_TABLE_USER_TAGS, userTagsSchema, tListLen(userTagsSchema)},
// {TSDB_INS_TABLE_USER_TABLE_DISTRIBUTED, userTblDistSchema, tListLen(userTblDistSchema)},
{TSDB_INS_TABLE_USER_USERS, userUsersSchema, tListLen(userUsersSchema)},
{TSDB_INS_TABLE_LICENCES, grantsSchema, tListLen(grantsSchema)},


@ -1754,7 +1754,7 @@ char* dumpBlockData(SSDataBlock* pDataBlock, const char* flag, char** pDataBuf)
int32_t colNum = taosArrayGetSize(pDataBlock->pDataBlock);
int32_t rows = pDataBlock->info.rows;
int32_t len = 0;
len += snprintf(dumpBuf + len, size - len, "%s |block type %d |child id %d|group id:%" PRIu64 "| uid:%ld|======\n", "dumpBlockData",
len += snprintf(dumpBuf + len, size - len, "===stream===%s |block type %d |child id %d|group id:%" PRIu64 "| uid:%ld|\n", flag,
(int32_t)pDataBlock->info.type, pDataBlock->info.childId, pDataBlock->info.groupId,
pDataBlock->info.uid);
if (len >= size - 1) return dumpBuf;


@ -196,6 +196,7 @@ SArray *mmGetMsgHandles() {
if (dmSetMgmtHandle(pArray, TDMT_MND_DROP_TOPIC, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_MND_SUBSCRIBE, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_MND_MQ_ASK_EP, mmPutMsgToReadQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_MND_MQ_HB, mmPutMsgToReadQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_MND_MQ_DROP_CGROUP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_MND_MQ_DROP_CGROUP_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_MND_MQ_COMMIT_OFFSET, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;


@ -81,6 +81,7 @@ int32_t qmPutRpcMsgToQueue(SQnodeMgmt *pMgmt, EQueueType qtype, SRpcMsg *pRpc) {
taosWriteQitem(pMgmt->queryWorker.queue, pMsg);
return 0;
case READ_QUEUE:
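// fall through: qnode read-queue messages are handled by the same fetch queue below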
case FETCH_QUEUE:
dTrace("msg:%p, is created and will put into qnode-fetch queue", pMsg);
taosWriteQitem(pMgmt->fetchWorker.queue, pMsg);
return 0;


@ -48,6 +48,7 @@ static void mndCancelGetNextConsumer(SMnode *pMnode, void *pIter);
static int32_t mndProcessSubscribeReq(SRpcMsg *pMsg);
static int32_t mndProcessAskEpReq(SRpcMsg *pMsg);
static int32_t mndProcessMqHbReq(SRpcMsg *pMsg);
static int32_t mndProcessMqTimerMsg(SRpcMsg *pMsg);
static int32_t mndProcessConsumerLostMsg(SRpcMsg *pMsg);
static int32_t mndProcessConsumerRecoverMsg(SRpcMsg *pMsg);
@ -62,6 +63,7 @@ int32_t mndInitConsumer(SMnode *pMnode) {
.deleteFp = (SdbDeleteFp)mndConsumerActionDelete};
mndSetMsgHandle(pMnode, TDMT_MND_SUBSCRIBE, mndProcessSubscribeReq);
mndSetMsgHandle(pMnode, TDMT_MND_MQ_HB, mndProcessMqHbReq);
mndSetMsgHandle(pMnode, TDMT_MND_MQ_ASK_EP, mndProcessAskEpReq);
mndSetMsgHandle(pMnode, TDMT_MND_MQ_TIMER, mndProcessMqTimerMsg);
mndSetMsgHandle(pMnode, TDMT_MND_MQ_CONSUMER_LOST, mndProcessConsumerLostMsg);
@ -255,24 +257,15 @@ static int32_t mndProcessMqTimerMsg(SRpcMsg *pMsg) {
return 0;
}
static int32_t mndProcessAskEpReq(SRpcMsg *pMsg) {
SMnode *pMnode = pMsg->info.node;
SMqAskEpReq *pReq = (SMqAskEpReq *)pMsg->pCont;
SMqAskEpRsp rsp = {0};
int64_t consumerId = be64toh(pReq->consumerId);
int32_t epoch = ntohl(pReq->epoch);
static int32_t mndProcessMqHbReq(SRpcMsg *pMsg) {
SMnode *pMnode = pMsg->info.node;
SMqHbReq *pReq = (SMqHbReq *)pMsg->pCont;
int64_t consumerId = be64toh(pReq->consumerId);
SMqConsumerObj *pConsumer = mndAcquireConsumer(pMsg->info.node, consumerId);
if (pConsumer == NULL) {
terrno = TSDB_CODE_MND_CONSUMER_NOT_EXIST;
return -1;
}
SMqConsumerObj *pConsumer = mndAcquireConsumer(pMnode, consumerId);
ASSERT(strcmp(pReq->cgroup, pConsumer->cgroup) == 0);
/*int32_t hbStatus = atomic_load_32(&pConsumer->hbStatus);*/
atomic_store_32(&pConsumer->hbStatus, 0);
// 1. check consumer status
int32_t status = atomic_load_32(&pConsumer->status);
if (status == MQ_CONSUMER_STATUS__LOST_REBD) {
@ -286,6 +279,46 @@ static int32_t mndProcessAskEpReq(SRpcMsg *pMsg) {
tmsgPutToQueue(&pMnode->msgCb, WRITE_QUEUE, pRpcMsg);
}
mndReleaseConsumer(pMnode, pConsumer);
return 0;
}
static int32_t mndProcessAskEpReq(SRpcMsg *pMsg) {
SMnode *pMnode = pMsg->info.node;
SMqAskEpReq *pReq = (SMqAskEpReq *)pMsg->pCont;
SMqAskEpRsp rsp = {0};
int64_t consumerId = be64toh(pReq->consumerId);
int32_t epoch = ntohl(pReq->epoch);
SMqConsumerObj *pConsumer = mndAcquireConsumer(pMnode, consumerId);
if (pConsumer == NULL) {
terrno = TSDB_CODE_MND_CONSUMER_NOT_EXIST;
return -1;
}
ASSERT(strcmp(pReq->cgroup, pConsumer->cgroup) == 0);
#if 1
atomic_store_32(&pConsumer->hbStatus, 0);
#endif
// 1. check consumer status
int32_t status = atomic_load_32(&pConsumer->status);
#if 1
if (status == MQ_CONSUMER_STATUS__LOST_REBD) {
SMqConsumerRecoverMsg *pRecoverMsg = rpcMallocCont(sizeof(SMqConsumerRecoverMsg));
pRecoverMsg->consumerId = consumerId;
SRpcMsg *pRpcMsg = taosMemoryCalloc(1, sizeof(SRpcMsg));
pRpcMsg->msgType = TDMT_MND_MQ_CONSUMER_RECOVER;
pRpcMsg->pCont = pRecoverMsg;
pRpcMsg->contLen = sizeof(SMqConsumerRecoverMsg);
tmsgPutToQueue(&pMnode->msgCb, WRITE_QUEUE, pRpcMsg);
}
#endif
if (status != MQ_CONSUMER_STATUS__READY) {
terrno = TSDB_CODE_MND_CONSUMER_NOT_READY;
return -1;


@ -388,67 +388,7 @@ static void mndCancelGetNextApp(SMnode *pMnode, void *pIter) {
}
static SClientHbRsp *mndMqHbBuildRsp(SMnode *pMnode, SClientHbReq *pReq) {
#if 0
SClientHbRsp* pRsp = taosMemoryMalloc(sizeof(SClientHbRsp));
if (pRsp == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return NULL;
}
pRsp->connKey = pReq->connKey;
SMqHbBatchRsp batchRsp;
batchRsp.batchRsps = taosArrayInit(0, sizeof(SMqHbRsp));
if (batchRsp.batchRsps == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return NULL;
}
SClientHbKey connKey = pReq->connKey;
SHashObj* pObj = pReq->info;
SKv* pKv = taosHashGet(pObj, "mq-tmp", strlen("mq-tmp") + 1);
if (pKv == NULL) {
taosMemoryFree(pRsp);
return NULL;
}
SMqHbMsg mqHb;
taosDecodeSMqMsg(pKv->value, &mqHb);
/*int64_t clientUid = htonl(pKv->value);*/
/*if (mqHb.epoch )*/
int sz = taosArrayGetSize(mqHb.pTopics);
SMqConsumerObj* pConsumer = mndAcquireConsumer(pMnode, mqHb.consumerId);
for (int i = 0; i < sz; i++) {
SMqHbOneTopicBatchRsp innerBatchRsp;
innerBatchRsp.rsps = taosArrayInit(sz, sizeof(SMqHbRsp));
if (innerBatchRsp.rsps == NULL) {
//TODO
terrno = TSDB_CODE_OUT_OF_MEMORY;
return NULL;
}
SMqHbTopicInfo* topicInfo = taosArrayGet(mqHb.pTopics, i);
SMqConsumerTopic* pConsumerTopic = taosHashGet(pConsumer->topicHash, topicInfo->name, strlen(topicInfo->name)+1);
if (pConsumerTopic->epoch != topicInfo->epoch) {
//add new vgids into rsp
int vgSz = taosArrayGetSize(topicInfo->pVgInfo);
for (int j = 0; j < vgSz; j++) {
SMqHbRsp innerRsp;
SMqHbVgInfo* pVgInfo = taosArrayGet(topicInfo->pVgInfo, i);
SVgObj* pVgObj = mndAcquireVgroup(pMnode, pVgInfo->vgId);
innerRsp.epSet = mndGetVgroupEpset(pMnode, pVgObj);
taosArrayPush(innerBatchRsp.rsps, &innerRsp);
}
}
taosArrayPush(batchRsp.batchRsps, &innerBatchRsp);
}
int32_t tlen = taosEncodeSMqHbBatchRsp(NULL, &batchRsp);
void* buf = taosMemoryMalloc(tlen);
if (buf == NULL) {
//TODO
return NULL;
}
void* abuf = buf;
taosEncodeSMqHbBatchRsp(&abuf, &batchRsp);
pRsp->body = buf;
pRsp->bodyLen = tlen;
return pRsp;
#endif
//
return NULL;
}


@ -76,6 +76,8 @@ static int32_t convertToRetrieveType(char *name, int32_t len) {
type = TSDB_MGMT_TABLE_STB;
} else if (strncasecmp(name, TSDB_INS_TABLE_USER_TABLES, len) == 0) {
type = TSDB_MGMT_TABLE_TABLE;
} else if (strncasecmp(name, TSDB_INS_TABLE_USER_TAGS, len) == 0) {
type = TSDB_MGMT_TABLE_TAG;
} else if (strncasecmp(name, TSDB_INS_TABLE_USER_TABLE_DISTRIBUTED, len) == 0) {
// type = TSDB_MGMT_TABLE_DIST;
} else if (strncasecmp(name, TSDB_INS_TABLE_USER_USERS, len) == 0) {


@ -709,7 +709,7 @@ static int32_t mndProcessCreateSmaReq(SRpcMsg *pReq) {
_OVER:
if (code != 0 && code != TSDB_CODE_ACTION_IN_PROGRESS) {
mError("sma:%s, failed to create since %s", createReq.name, terrstr(terrno));
mError("sma:%s, failed to create since %s", createReq.name, terrstr());
}
mndReleaseStb(pMnode, pStb);


@ -1689,6 +1689,9 @@ static int32_t mndAlterStb(SMnode *pMnode, SRpcMsg *pReq, const SMAlterStbReq *p
_OVER:
taosMemoryFreeClear(stbObj.pTags);
taosMemoryFreeClear(stbObj.pColumns);
if (pAlter->commentLen > 0) {
taosMemoryFreeClear(stbObj.comment);
}
return code;
}
@ -1733,7 +1736,7 @@ _OVER:
mndReleaseStb(pMnode, pStb);
mndReleaseDb(pMnode, pDb);
taosArrayDestroy(alterReq.pFields);
tFreeSMAltertbReq(&alterReq);
return code;
}


@ -639,6 +639,7 @@ static int32_t mndRetrieveVgroups(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *p
if (pShow->pIter == NULL) break;
if (pDb != NULL && pVgroup->dbUid != pDb->uid) {
sdbRelease(pSdb, pVgroup);
continue;
}


@ -32,6 +32,8 @@ extern "C" {
#define smaTrace(...) do { if (smaDebugFlag & DEBUG_TRACE) { taosPrintLog("SMA ", DEBUG_TRACE, tsdbDebugFlag, __VA_ARGS__); }} while(0)
// clang-format on
#define RSMA_TASK_INFO_HASH_SLOT 8
typedef struct SSmaEnv SSmaEnv;
typedef struct SSmaStat SSmaStat;
typedef struct STSmaStat STSmaStat;
@ -41,7 +43,7 @@ typedef struct SRSmaInfo SRSmaInfo;
typedef struct SRSmaInfoItem SRSmaInfoItem;
struct SSmaEnv {
TdThreadRwlock lock;
SRWLatch lock;
int8_t type;
SSmaStat *pStat;
};
@ -52,7 +54,7 @@ typedef struct {
void *tmrHandle; // shared by all fetch tasks
} SSmaMgmt;
#define SMA_ENV_LOCK(env) ((env)->lock)
#define SMA_ENV_LOCK(env) (&(env)->lock)
#define SMA_ENV_TYPE(env) ((env)->type)
#define SMA_ENV_STAT(env) ((env)->pStat)
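With the env lock switched from a pthread rwlock to an SRWLatch, SMA_ENV_LOCK now yields a pointer suitable for the latch API; the pattern used elsewhere in this commit looks roughly like this (sketch only, variable name assumed):
// locking pattern for the SRWLatch-based env lock, as used in smaCommit.c below
taosWLockLatch(SMA_ENV_LOCK(pSmaEnv));    // write-lock while touching rsmaInfoHash / iRsmaInfoHash
/* ... mutate the rsma info hash tables ... */
taosWUnLockLatch(SMA_ENV_LOCK(pSmaEnv));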
@ -64,10 +66,14 @@ struct STSmaStat {
struct SRSmaStat {
SSma *pSma;
int64_t submitVer;
int64_t refId; // shared by fetch tasks
int8_t triggerStat; // shared by fetch tasks
SHashObj *rsmaInfoHash; // key: stbUid, value: SRSmaInfo;
int64_t commitAppliedVer; // vnode applied version for async commit
int64_t commitSubmitVer; // rsma submit version for async commit
int64_t submitVer; // latest submit version
int64_t refId; // shared by fetch tasks
int8_t triggerStat; // shared by fetch tasks
int8_t commitStat; // 0 not in committing, 1 in committing
SHashObj *rsmaInfoHash; // key: stbUid, value: SRSmaInfo;
SHashObj *iRsmaInfoHash; // key: stbUid, value: SRSmaInfo; immutable rsmaInfoHash
};
struct SSmaStat {
@ -78,12 +84,29 @@ struct SSmaStat {
T_REF_DECLARE()
};
#define SMA_TSMA_STAT(s) (&(s)->tsmaStat)
#define SMA_RSMA_STAT(s) (&(s)->rsmaStat)
#define RSMA_INFO_HASH(r) ((r)->rsmaInfoHash)
#define RSMA_TRIGGER_STAT(r) (&(r)->triggerStat)
#define RSMA_REF_ID(r) ((r)->refId)
#define RSMA_SUBMIT_VER(r) ((r)->submitVer)
#define SMA_TSMA_STAT(s) (&(s)->tsmaStat)
#define SMA_RSMA_STAT(s) (&(s)->rsmaStat)
#define RSMA_INFO_HASH(r) ((r)->rsmaInfoHash)
#define RSMA_IMU_INFO_HASH(r) ((r)->iRsmaInfoHash)
#define RSMA_TRIGGER_STAT(r) (&(r)->triggerStat)
#define RSMA_COMMIT_STAT(r) (&(r)->commitStat)
#define RSMA_REF_ID(r) ((r)->refId)
#define RSMA_SUBMIT_VER(r) ((r)->submitVer)
struct SRSmaInfoItem {
void *taskInfo; // qTaskInfo_t
int64_t refId;
tmr_h tmrId;
int32_t maxDelay;
int8_t level;
int8_t triggerStat;
};
struct SRSmaInfo {
STSchema *pTSchema;
int64_t suid;
SRSmaInfoItem items[TSDB_RETENTION_L2];
};
enum {
TASK_TRIGGER_STAT_INIT = 0,
@ -94,6 +117,14 @@ enum {
TASK_TRIGGER_STAT_DROPPED = 5,
};
enum {
RSMA_ROLE_CREATE = 0,
RSMA_ROLE_DROP = 1,
RSMA_ROLE_FETCH = 2,
RSMA_ROLE_SUBMIT = 3,
RSMA_ROLE_ITERATE = 4,
};
void tdDestroySmaEnv(SSmaEnv *pSmaEnv);
void *tdFreeSmaEnv(SSmaEnv *pSmaEnv);
@ -112,33 +143,6 @@ int32_t tdCheckAndInitSmaEnv(SSma *pSma, int8_t smaType);
int32_t tdLockSma(SSma *pSma);
int32_t tdUnLockSma(SSma *pSma);
static FORCE_INLINE int32_t tdRLockSmaEnv(SSmaEnv *pEnv) {
int code = taosThreadRwlockRdlock(&(pEnv->lock));
if (code != 0) {
terrno = TAOS_SYSTEM_ERROR(code);
return -1;
}
return 0;
}
static FORCE_INLINE int32_t tdWLockSmaEnv(SSmaEnv *pEnv) {
int code = taosThreadRwlockWrlock(&(pEnv->lock));
if (code != 0) {
terrno = TAOS_SYSTEM_ERROR(code);
return -1;
}
return 0;
}
static FORCE_INLINE int32_t tdUnLockSmaEnv(SSmaEnv *pEnv) {
int code = taosThreadRwlockUnlock(&(pEnv->lock));
if (code != 0) {
terrno = TAOS_SYSTEM_ERROR(code);
return -1;
}
return 0;
}
static FORCE_INLINE int8_t tdSmaStat(STSmaStat *pTStat) {
if (pTStat) {
return atomic_load_8(&pTStat->state);
@ -184,10 +188,12 @@ static FORCE_INLINE void tdSmaStatSetDropped(STSmaStat *pTStat) {
}
}
int32_t tdCloneRSmaInfo(SSma *pSma, SRSmaInfo *pDest, SRSmaInfo *pSrc);
void tdFreeQTaskInfo(qTaskInfo_t *taskHandle, int32_t vgId, int32_t level);
static int32_t tdDestroySmaState(SSmaStat *pSmaStat, int8_t smaType);
void *tdFreeSmaState(SSmaStat *pSmaStat, int8_t smaType);
void *tdFreeRSmaInfo(SSma *pSma, SRSmaInfo *pInfo);
int32_t tdRSmaPersistExecImpl(SRSmaStat *pRSmaStat);
void *tdFreeRSmaInfo(SSma *pSma, SRSmaInfo *pInfo, bool isDeepFree);
int32_t tdRSmaPersistExecImpl(SRSmaStat *pRSmaStat, SHashObj *pInfoHash);
int32_t tdProcessRSmaCreateImpl(SSma *pSma, SRSmaParam *param, int64_t suid, const char *tbName);
int32_t tdProcessRSmaRestoreImpl(SSma *pSma);


@ -164,9 +164,12 @@ void smaCleanUp();
int32_t smaOpen(SVnode* pVnode);
int32_t smaClose(SSma* pSma);
int32_t smaBegin(SSma* pSma);
int32_t smaPreCommit(SSma* pSma);
int32_t smaCommit(SSma* pSma);
int32_t smaPostCommit(SSma* pSma);
int32_t smaSyncPreCommit(SSma* pSma);
int32_t smaSyncCommit(SSma* pSma);
int32_t smaSyncPostCommit(SSma* pSma);
int32_t smaAsyncPreCommit(SSma* pSma);
int32_t smaAsyncCommit(SSma* pSma);
int32_t smaAsyncPostCommit(SSma* pSma);
int32_t tdProcessTSmaCreate(SSma* pSma, int64_t version, const char* msg);
int32_t tdProcessTSmaInsert(SSma* pSma, int64_t indexUid, const char* msg);


@ -15,9 +15,13 @@
#include "sma.h"
static int32_t tdProcessRSmaPreCommitImpl(SSma *pSma);
static int32_t tdProcessRSmaCommitImpl(SSma *pSma);
static int32_t tdProcessRSmaPostCommitImpl(SSma *pSma);
static int32_t tdProcessRSmaSyncPreCommitImpl(SSma *pSma);
static int32_t tdProcessRSmaSyncCommitImpl(SSma *pSma);
static int32_t tdProcessRSmaSyncPostCommitImpl(SSma *pSma);
static int32_t tdProcessRSmaAsyncPreCommitImpl(SSma *pSma);
static int32_t tdProcessRSmaAsyncCommitImpl(SSma *pSma);
static int32_t tdProcessRSmaAsyncPostCommitImpl(SSma *pSma);
static int32_t tdCleanupQTaskInfoFiles(SSma *pSma, SRSmaStat *pRSmaStat);
/**
* @brief Only applicable to Rollup SMA
@ -25,7 +29,7 @@ static int32_t tdProcessRSmaPostCommitImpl(SSma *pSma);
* @param pSma
* @return int32_t
*/
int32_t smaPreCommit(SSma *pSma) { return tdProcessRSmaPreCommitImpl(pSma); }
int32_t smaSyncPreCommit(SSma *pSma) { return tdProcessRSmaSyncPreCommitImpl(pSma); }
/**
* @brief Only applicable to Rollup SMA
@ -33,7 +37,7 @@ int32_t smaPreCommit(SSma *pSma) { return tdProcessRSmaPreCommitImpl(pSma); }
* @param pSma
* @return int32_t
*/
int32_t smaCommit(SSma *pSma) { return tdProcessRSmaCommitImpl(pSma); }
int32_t smaSyncCommit(SSma *pSma) { return tdProcessRSmaSyncCommitImpl(pSma); }
/**
* @brief Only applicable to Rollup SMA
@ -41,7 +45,31 @@ int32_t smaCommit(SSma *pSma) { return tdProcessRSmaCommitImpl(pSma); }
* @param pSma
* @return int32_t
*/
int32_t smaPostCommit(SSma *pSma) { return tdProcessRSmaPostCommitImpl(pSma); }
int32_t smaSyncPostCommit(SSma *pSma) { return tdProcessRSmaSyncPostCommitImpl(pSma); }
/**
* @brief Only applicable to Rollup SMA
*
* @param pSma
* @return int32_t
*/
int32_t smaAsyncPreCommit(SSma *pSma) { return tdProcessRSmaAsyncPreCommitImpl(pSma); }
/**
* @brief Only applicable to Rollup SMA
*
* @param pSma
* @return int32_t
*/
int32_t smaAsyncCommit(SSma *pSma) { return tdProcessRSmaAsyncCommitImpl(pSma); }
/**
* @brief Only applicable to Rollup SMA
*
* @param pSma
* @return int32_t
*/
int32_t smaAsyncPostCommit(SSma *pSma) { return tdProcessRSmaAsyncPostCommitImpl(pSma); }
/**
* @brief set rsma trigger stat active
@ -62,18 +90,17 @@ int32_t smaBegin(SSma *pSma) {
atomic_val_compare_exchange_8(RSMA_TRIGGER_STAT(pRSmaStat), TASK_TRIGGER_STAT_PAUSED, TASK_TRIGGER_STAT_ACTIVE);
switch (rsmaTriggerStat) {
case TASK_TRIGGER_STAT_PAUSED: {
smaDebug("vgId:%d rsma trigger stat from paused to active", SMA_VID(pSma));
smaDebug("vgId:%d, rsma trigger stat from paused to active", SMA_VID(pSma));
break;
}
case TASK_TRIGGER_STAT_INIT: {
atomic_store_8(RSMA_TRIGGER_STAT(pRSmaStat), TASK_TRIGGER_STAT_ACTIVE);
smaDebug("vgId:%d rsma trigger stat from init to active", SMA_VID(pSma));
smaDebug("vgId:%d, rsma trigger stat from init to active", SMA_VID(pSma));
break;
}
default: {
atomic_store_8(RSMA_TRIGGER_STAT(pRSmaStat), TASK_TRIGGER_STAT_ACTIVE);
smaWarn("vgId:%d rsma trigger stat %" PRIi8 " is unexpected", SMA_VID(pSma), rsmaTriggerStat);
ASSERT(0);
smaError("vgId:%d, rsma trigger stat %" PRIi8 " is unexpected", SMA_VID(pSma), rsmaTriggerStat);
break;
}
}
@ -81,7 +108,7 @@ int32_t smaBegin(SSma *pSma) {
}
/**
* @brief pre-commit for rollup sma.
* @brief pre-commit for rollup sma(sync commit).
* 1) set trigger stat of rsma timer TASK_TRIGGER_STAT_PAUSED.
* 2) wait all triggered fetch tasks finished
* 3) perform persist task for qTaskInfo
@ -89,7 +116,7 @@ int32_t smaBegin(SSma *pSma) {
* @param pSma
* @return int32_t
*/
static int32_t tdProcessRSmaPreCommitImpl(SSma *pSma) {
static int32_t tdProcessRSmaSyncPreCommitImpl(SSma *pSma) {
SSmaEnv *pSmaEnv = SMA_RSMA_ENV(pSma);
if (!pSmaEnv) {
return TSDB_CODE_SUCCESS;
@ -98,8 +125,7 @@ static int32_t tdProcessRSmaPreCommitImpl(SSma *pSma) {
SSmaStat *pStat = SMA_ENV_STAT(pSmaEnv);
SRSmaStat *pRSmaStat = SMA_RSMA_STAT(pStat);
// step 1: set persistence task paused
// step 1: set rsma stat paused
atomic_store_8(RSMA_TRIGGER_STAT(pRSmaStat), TASK_TRIGGER_STAT_PAUSED);
// step 2: wait all triggered fetch tasks finished
@ -119,7 +145,9 @@ static int32_t tdProcessRSmaPreCommitImpl(SSma *pSma) {
}
// step 3: perform persist task for qTaskInfo
tdRSmaPersistExecImpl(pRSmaStat);
pRSmaStat->commitAppliedVer = pSma->pVnode->state.applied;
pRSmaStat->commitSubmitVer = pRSmaStat->submitVer;
tdRSmaPersistExecImpl(pRSmaStat, RSMA_INFO_HASH(pRSmaStat));
smaDebug("vgId:%d, rsma pre commit success", SMA_VID(pSma));
@ -132,7 +160,7 @@ static int32_t tdProcessRSmaPreCommitImpl(SSma *pSma) {
* @param pSma
* @return int32_t
*/
static int32_t tdProcessRSmaCommitImpl(SSma *pSma) {
static int32_t tdProcessRSmaSyncCommitImpl(SSma *pSma) {
SSmaEnv *pSmaEnv = SMA_RSMA_ENV(pSma);
if (!pSmaEnv) {
return TSDB_CODE_SUCCESS;
@ -140,21 +168,9 @@ static int32_t tdProcessRSmaCommitImpl(SSma *pSma) {
return TSDB_CODE_SUCCESS;
}
/**
* @brief post-commit for rollup sma
* 1) clean up the outdated qtaskinfo files
*
* @param pSma
* @return int32_t
*/
static int32_t tdProcessRSmaPostCommitImpl(SSma *pSma) {
SVnode *pVnode = pSma->pVnode;
if (!VND_IS_RSMA(pVnode)) {
return TSDB_CODE_SUCCESS;
}
int64_t committed = pVnode->state.committed;
static int32_t tdCleanupQTaskInfoFiles(SSma *pSma, SRSmaStat *pRSmaStat) {
SVnode *pVnode = pSma->pVnode;
int64_t committed = pRSmaStat->commitAppliedVer;
TdDirPtr pDir = NULL;
TdDirEntryPtr pDirEntry = NULL;
char dir[TSDB_FILENAME_LEN];
@ -222,5 +238,159 @@ static int32_t tdProcessRSmaPostCommitImpl(SSma *pSma) {
taosCloseDir(&pDir);
regfree(&regex);
return TSDB_CODE_SUCCESS;
}
/**
* @brief post-commit for rollup sma
* 1) clean up the outdated qtaskinfo files
*
* @param pSma
* @return int32_t
*/
static int32_t tdProcessRSmaSyncPostCommitImpl(SSma *pSma) {
SVnode *pVnode = pSma->pVnode;
if (!VND_IS_RSMA(pVnode)) {
return TSDB_CODE_SUCCESS;
}
SSmaEnv *pSmaEnv = SMA_RSMA_ENV(pSma);
SRSmaStat *pRSmaStat = SMA_RSMA_STAT(SMA_ENV_STAT(pSmaEnv));
// cleanup outdated qtaskinfo files
tdCleanupQTaskInfoFiles(pSma, pRSmaStat);
return TSDB_CODE_SUCCESS;
}
/**
* @brief Rsma async commit implementation
* 1) set rsma stat TASK_TRIGGER_STAT_PAUSED
* 2) wait for all running fetch tasks to finish fetching and putting their submitMsg into the level 2/3 write queue (blocking level 1 writes)
* 3)
*
* @param pSma
* @return int32_t
*/
static int32_t tdProcessRSmaAsyncPreCommitImpl(SSma *pSma) {
SSmaEnv *pSmaEnv = SMA_RSMA_ENV(pSma);
if (!pSmaEnv) {
return TSDB_CODE_SUCCESS;
}
SSmaStat *pStat = SMA_ENV_STAT(pSmaEnv);
SRSmaStat *pRSmaStat = SMA_RSMA_STAT(pStat);
// step 1: set rsma stat
atomic_store_8(RSMA_TRIGGER_STAT(pRSmaStat), TASK_TRIGGER_STAT_PAUSED);
atomic_store_8(RSMA_COMMIT_STAT(pRSmaStat), 1);
// step 2: wait all triggered fetch tasks finished
int32_t nLoops = 0;
while (1) {
if (T_REF_VAL_GET(pStat) == 0) {
smaDebug("vgId:%d, rsma fetch tasks all finished", SMA_VID(pSma));
break;
} else {
smaDebug("vgId:%d, rsma fetch tasks not all finished yet", SMA_VID(pSma));
}
++nLoops;
if (nLoops > 1000) {
sched_yield();
nLoops = 0;
}
}
// step 3: swap rsmaInfoHash and iRsmaInfoHash
ASSERT(!RSMA_IMU_INFO_HASH(pRSmaStat));
ASSERT(RSMA_INFO_HASH(pRSmaStat));
RSMA_IMU_INFO_HASH(pRSmaStat) = RSMA_INFO_HASH(pRSmaStat);
RSMA_INFO_HASH(pRSmaStat) =
taosHashInit(RSMA_TASK_INFO_HASH_SLOT, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, HASH_ENTRY_LOCK);
if (!RSMA_INFO_HASH(pRSmaStat)) {
smaError("vgId:%d, rsma async commit failed since %s", SMA_VID(pSma), terrstr());
return TSDB_CODE_FAILED;
}
// step 4: others
pRSmaStat->commitAppliedVer = pSma->pVnode->state.applied;
pRSmaStat->commitSubmitVer = pRSmaStat->submitVer;
return TSDB_CODE_SUCCESS;
}
/**
* @brief commit for rollup sma
*
* @param pSma
* @return int32_t
*/
static int32_t tdProcessRSmaAsyncCommitImpl(SSma *pSma) {
SSmaEnv *pSmaEnv = SMA_RSMA_ENV(pSma);
if (!pSmaEnv) {
return TSDB_CODE_SUCCESS;
}
SSmaStat *pStat = SMA_ENV_STAT(pSmaEnv);
SRSmaStat *pRSmaStat = SMA_RSMA_STAT(pStat);
// perform persist task for qTaskInfo
tdRSmaPersistExecImpl(pRSmaStat, RSMA_IMU_INFO_HASH(pRSmaStat));
return TSDB_CODE_SUCCESS;
}
/**
* @brief Migrate rsmaInfo from iRsmaInfo to rsmaInfo if rsmaInfoHash not empty.
*
* @param pSma
* @return int32_t
*/
static int32_t tdProcessRSmaAsyncPostCommitImpl(SSma *pSma) {
SSmaEnv *pSmaEnv = SMA_RSMA_ENV(pSma);
if (!pSmaEnv) {
return TSDB_CODE_SUCCESS;
}
SSmaStat *pStat = SMA_ENV_STAT(pSmaEnv);
SRSmaStat *pRSmaStat = SMA_RSMA_STAT(pStat);
// step 1: merge rsmaInfoHash and iRsmaInfoHash
taosWLockLatch(SMA_ENV_LOCK(pSmaEnv));
if (taosHashGetSize(RSMA_INFO_HASH(pRSmaStat)) <= 0) {
// TODO: optimization - just switch the hash pointer if rsmaInfoHash is empty
}
void *pIter = taosHashIterate(RSMA_IMU_INFO_HASH(pRSmaStat), NULL);
while (pIter) {
tb_uid_t *pSuid = (tb_uid_t *)taosHashGetKey(pIter, NULL);
if (!taosHashGet(RSMA_INFO_HASH(pRSmaStat), pSuid, sizeof(tb_uid_t))) {
taosHashPut(RSMA_INFO_HASH(pRSmaStat), pSuid, sizeof(tb_uid_t), pIter, sizeof(pIter));
smaDebug("vgId:%d, rsma async post commit, migrated from iRsmaInfoHash for table:%" PRIi64, SMA_VID(pSma),
*pSuid);
} else {
// free the resources
SRSmaInfo *pRSmaInfo = *(SRSmaInfo **)pIter;
tdFreeRSmaInfo(pSma, pRSmaInfo, false);
smaDebug("vgId:%d, rsma async post commit, free rsma info since already COW for table:%" PRIi64, SMA_VID(pSma),
*pSuid);
}
pIter = taosHashIterate(RSMA_IMU_INFO_HASH(pRSmaStat), pIter);
}
taosHashCleanup(RSMA_IMU_INFO_HASH(pRSmaStat));
RSMA_IMU_INFO_HASH(pRSmaStat) = NULL;
taosWUnLockLatch(SMA_ENV_LOCK(pSmaEnv));
// step 2: cleanup outdated qtaskinfo files
tdCleanupQTaskInfoFiles(pSma, pRSmaStat);
return TSDB_CODE_SUCCESS;
}


@ -17,7 +17,6 @@
typedef struct SSmaStat SSmaStat;
#define RSMA_TASK_INFO_HASH_SLOT 8
#define SMA_MGMT_REF_NUM 10240
extern SSmaMgmt smaMgmt;
@ -109,12 +108,7 @@ static SSmaEnv *tdNewSmaEnv(const SSma *pSma, int8_t smaType, const char *path)
SMA_ENV_TYPE(pEnv) = smaType;
int code = taosThreadRwlockInit(&(pEnv->lock), NULL);
if (code) {
terrno = TAOS_SYSTEM_ERROR(code);
taosMemoryFree(pEnv);
return NULL;
}
taosInitRWLatch(&(pEnv->lock));
if (tdInitSmaStat(&SMA_ENV_STAT(pEnv), smaType, pSma) != TSDB_CODE_SUCCESS) {
tdFreeSmaEnv(pEnv);
@ -148,7 +142,6 @@ static int32_t tdInitSmaEnv(SSma *pSma, int8_t smaType, const char *path, SSmaEn
void tdDestroySmaEnv(SSmaEnv *pSmaEnv) {
if (pSmaEnv) {
pSmaEnv->pStat = tdFreeSmaState(pSmaEnv->pStat, SMA_ENV_TYPE(pSmaEnv));
taosThreadRwlockDestroy(&(pSmaEnv->lock));
}
}
@ -260,7 +253,7 @@ static void tdDestroyRSmaStat(void *pRSmaStat) {
void *infoHash = taosHashIterate(RSMA_INFO_HASH(pStat), NULL);
while (infoHash) {
SRSmaInfo *pSmaInfo = *(SRSmaInfo **)infoHash;
tdFreeRSmaInfo(pSma, pSmaInfo);
tdFreeRSmaInfo(pSma, pSmaInfo, true);
infoHash = taosHashIterate(RSMA_INFO_HASH(pStat), infoHash);
}
}
@ -311,7 +304,6 @@ int32_t tdDestroySmaState(SSmaStat *pSmaStat, int8_t smaType) {
if (taosRemoveRef(smaMgmt.rsetId, RSMA_REF_ID(pRSmaStat)) < 0) {
smaError("vgId:%d, remove refId:%" PRIi64 " from rsmaRef:%" PRIi32 " failed since %s", SMA_VID(pRSmaStat->pSma),
RSMA_REF_ID(pRSmaStat), smaMgmt.rsetId, terrstr());
ASSERT(0);
} else {
smaDebug("vgId:%d, remove refId:%" PRIi64 " from rsmaRef:%" PRIi32 " succeed", SMA_VID(pRSmaStat->pSma),
RSMA_REF_ID(pRSmaStat), smaMgmt.rsetId);


@ -48,20 +48,7 @@ static int32_t tdRSmaRestoreQTaskInfoInit(SSma *pSma, int64_t *nTables);
static int32_t tdRSmaRestoreQTaskInfoReload(SSma *pSma, int64_t *committed);
static int32_t tdRSmaRestoreTSDataReload(SSma *pSma, int64_t committed);
struct SRSmaInfoItem {
void *taskInfo; // qTaskInfo_t
int64_t refId;
tmr_h tmrId;
int32_t maxDelay;
int8_t level;
int8_t triggerStat;
};
struct SRSmaInfo {
STSchema *pTSchema;
int64_t suid;
SRSmaInfoItem items[TSDB_RETENTION_L2];
};
static SRSmaInfo *tdGetRSmaInfoByItem(SRSmaInfoItem *pItem) {
// adapt accordingly if definition of SRSmaInfo update
@ -102,8 +89,9 @@ static FORCE_INLINE int32_t tdRSmaQTaskInfoContLen(int32_t lenWithHead) {
static FORCE_INLINE void tdRSmaQTaskInfoIterDestroy(SRSmaQTaskInfoIter *pIter) { taosMemoryFreeClear(pIter->pBuf); }
static FORCE_INLINE void tdFreeTaskHandle(qTaskInfo_t *taskHandle, int32_t vgId, int32_t level) {
void tdFreeQTaskInfo(qTaskInfo_t *taskHandle, int32_t vgId, int32_t level) {
// Note: free/kill may in RC
if (!taskHandle) return;
qTaskInfo_t otaskHandle = atomic_load_ptr(taskHandle);
if (otaskHandle && atomic_val_compare_exchange_ptr(taskHandle, otaskHandle, NULL)) {
smaDebug("vgId:%d, free qTaskInfo_t %p of level %d", vgId, otaskHandle, level);
@ -111,25 +99,36 @@ static FORCE_INLINE void tdFreeTaskHandle(qTaskInfo_t *taskHandle, int32_t vgId,
} else {
smaDebug("vgId:%d, not free qTaskInfo_t %p of level %d", vgId, otaskHandle, level);
}
// TODO: clear files related to qTaskInfo?
}
void *tdFreeRSmaInfo(SSma *pSma, SRSmaInfo *pInfo) {
/**
* @brief general function to free rsmaInfo
*
* @param pSma
* @param pInfo
* @param isDeepFree Only stop tmrId and free pTSchema for deep free
* @return void*
*/
void *tdFreeRSmaInfo(SSma *pSma, SRSmaInfo *pInfo, bool isDeepFree) {
if (pInfo) {
for (int32_t i = 0; i < TSDB_RETENTION_L2; ++i) {
SRSmaInfoItem *pItem = &pInfo->items[i];
if (pItem->taskInfo) {
if (pItem->tmrId) {
if (isDeepFree && pItem->tmrId) {
smaDebug("vgId:%d, table %" PRIi64 " stop fetch timer %p level %d", SMA_VID(pSma), pInfo->suid, pItem->tmrId,
i + 1);
taosTmrStopA(&pItem->tmrId);
}
tdFreeTaskHandle(&pItem->taskInfo, SMA_VID(pSma), i + 1);
tdFreeQTaskInfo(&pItem->taskInfo, SMA_VID(pSma), i + 1);
} else {
smaDebug("vgId:%d, table %" PRIi64 " no need to destroy rsma info level %d since empty taskInfo", SMA_VID(pSma),
pInfo->suid, i + 1);
}
}
taosMemoryFree(pInfo->pTSchema);
if (isDeepFree) {
taosMemoryFree(pInfo->pTSchema);
}
taosMemoryFree(pInfo);
}
@ -151,7 +150,7 @@ static int32_t tdUpdateTbUidListImpl(SSma *pSma, tb_uid_t *suid, SArray *tbUids)
if (!suid || !tbUids) {
terrno = TSDB_CODE_INVALID_PTR;
smaError("vgId:%d, failed to get rsma info for uid:%" PRIi64 " since %s", SMA_VID(pSma), *suid, terrstr(terrno));
smaError("vgId:%d, failed to get rsma info for uid:%" PRIi64 " since %s", SMA_VID(pSma), *suid, terrstr());
return TSDB_CODE_FAILED;
}
@ -165,7 +164,7 @@ static int32_t tdUpdateTbUidListImpl(SSma *pSma, tb_uid_t *suid, SArray *tbUids)
if (pRSmaInfo->items[0].taskInfo) {
if ((qUpdateQualifiedTableId(pRSmaInfo->items[0].taskInfo, tbUids, true) < 0)) {
smaError("vgId:%d, update tbUidList failed for uid:%" PRIi64 " since %s", SMA_VID(pSma), *suid, terrstr(terrno));
smaError("vgId:%d, update tbUidList failed for uid:%" PRIi64 " since %s", SMA_VID(pSma), *suid, terrstr());
return TSDB_CODE_FAILED;
} else {
smaDebug("vgId:%d, update tbUidList succeed for qTaskInfo:%p with suid:%" PRIi64 ", uid:%" PRIi64, SMA_VID(pSma),
@ -175,7 +174,7 @@ static int32_t tdUpdateTbUidListImpl(SSma *pSma, tb_uid_t *suid, SArray *tbUids)
if (pRSmaInfo->items[1].taskInfo) {
if ((qUpdateQualifiedTableId(pRSmaInfo->items[1].taskInfo, tbUids, true) < 0)) {
smaError("vgId:%d, update tbUidList failed for uid:%" PRIi64 " since %s", SMA_VID(pSma), *suid, terrstr(terrno));
smaError("vgId:%d, update tbUidList failed for uid:%" PRIi64 " since %s", SMA_VID(pSma), *suid, terrstr());
return TSDB_CODE_FAILED;
} else {
smaDebug("vgId:%d, update tbUidList succeed for qTaskInfo:%p with suid:%" PRIi64 ", uid:%" PRIi64, SMA_VID(pSma),
@ -257,22 +256,22 @@ int32_t tdFetchTbUidList(SSma *pSma, STbUidStore **ppStore, tb_uid_t suid, tb_ui
static int32_t tdSetRSmaInfoItemParams(SSma *pSma, SRSmaParam *param, SRSmaStat *pStat, SRSmaInfo *pRSmaInfo,
int8_t idx) {
SRetention *pRetention = SMA_RETENTION(pSma);
STsdbCfg *pTsdbCfg = SMA_TSDB_CFG(pSma);
if ((param->qmsgLen > 0) && param->qmsg[idx]) {
SRetention *pRetention = SMA_RETENTION(pSma);
STsdbCfg *pTsdbCfg = SMA_TSDB_CFG(pSma);
SVnode *pVnode = pSma->pVnode;
SReadHandle handle = {
.meta = pVnode->pMeta,
.vnode = pVnode,
.initTqReader = 1,
};
SReadHandle handle = {
.meta = pSma->pVnode->pMeta,
.vnode = pSma->pVnode,
.initTqReader = 1,
};
if (param->qmsg[idx]) {
SRSmaInfoItem *pItem = &(pRSmaInfo->items[idx]);
pItem->refId = RSMA_REF_ID(pStat);
pItem->taskInfo = qCreateStreamExecTaskInfo(param->qmsg[idx], &handle);
if (!pItem->taskInfo) {
terrno = TSDB_CODE_RSMA_QTASKINFO_CREATE;
goto _err;
return TSDB_CODE_FAILED;
}
pItem->triggerStat = TASK_TRIGGER_STAT_INACTIVE;
if (param->maxdelay[idx] < TSDB_MIN_ROLLUP_MAX_DELAY) {
@ -286,13 +285,11 @@ static int32_t tdSetRSmaInfoItemParams(SSma *pSma, SRSmaParam *param, SRSmaStat
pItem->maxDelay = TSDB_MAX_ROLLUP_MAX_DELAY;
}
pItem->level = idx == 0 ? TSDB_RETENTION_L1 : TSDB_RETENTION_L2;
smaInfo("vgId:%d table:%" PRIi64 " level:%" PRIi8 " maxdelay:%" PRIi64 " watermark:%" PRIi64
smaInfo("vgId:%d, table:%" PRIi64 " level:%" PRIi8 " maxdelay:%" PRIi64 " watermark:%" PRIi64
", finally maxdelay:%" PRIi32,
SMA_VID(pSma), pRSmaInfo->suid, idx + 1, param->maxdelay[idx], param->watermark[idx], pItem->maxDelay);
TD_VID(pVnode), pRSmaInfo->suid, idx + 1, param->maxdelay[idx], param->watermark[idx], pItem->maxDelay);
}
return TSDB_CODE_SUCCESS;
_err:
return TSDB_CODE_FAILED;
}
/**
@ -357,7 +354,7 @@ int32_t tdProcessRSmaCreateImpl(SSma *pSma, SRSmaParam *param, int64_t suid, con
return TSDB_CODE_SUCCESS;
_err:
tdFreeRSmaInfo(pSma, pRSmaInfo);
tdFreeRSmaInfo(pSma, pRSmaInfo, true);
return TSDB_CODE_FAILED;
}
@ -562,7 +559,9 @@ static int32_t tdRSmaFetchAndSubmitResult(SRSmaInfoItem *pItem, STSchema *pTSche
SSDataBlock *output = NULL;
uint64_t ts;
if (qExecTask(pItem->taskInfo, &output, &ts) < 0) {
ASSERT(false);
smaError("vgId:%d, qExecTask for rsma table %" PRIi64 "l evel %" PRIi8 " failed since %s", SMA_VID(pSma), suid,
pItem->level, terrstr());
goto _err;
}
if (!output) {
break;
@ -572,7 +571,7 @@ static int32_t tdRSmaFetchAndSubmitResult(SRSmaInfoItem *pItem, STSchema *pTSche
pResult = taosArrayInit(1, sizeof(SSDataBlock));
if (!pResult) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return TSDB_CODE_FAILED;
goto _err;
}
}
@ -649,9 +648,18 @@ static int32_t tdExecuteRSmaImpl(SSma *pSma, const void *pMsg, int32_t inputType
return TSDB_CODE_SUCCESS;
}
/**
* @brief During async commit, the SRSmaInfo object would be COW from iRSmaInfoHash and write lock should be applied.
*
* @param pSma
* @param suid
* @return SRSmaInfo*
*/
static SRSmaInfo *tdGetRSmaInfoBySuid(SSma *pSma, int64_t suid) {
SSmaEnv *pEnv = SMA_RSMA_ENV(pSma);
SRSmaStat *pStat = NULL;
SRSmaInfo *pRSmaInfo = NULL;
if (!pEnv) {
// only applicable when rsma env exists
return NULL;
@ -662,11 +670,37 @@ static SRSmaInfo *tdGetRSmaInfoBySuid(SSma *pSma, int64_t suid) {
return NULL;
}
SRSmaInfo *pRSmaInfo = taosHashGet(RSMA_INFO_HASH(pStat), &suid, sizeof(tb_uid_t));
if (!pRSmaInfo || !(pRSmaInfo = *(SRSmaInfo **)pRSmaInfo)) {
pRSmaInfo = taosHashGet(RSMA_INFO_HASH(pStat), &suid, sizeof(tb_uid_t));
if (pRSmaInfo && (pRSmaInfo = *(SRSmaInfo **)pRSmaInfo)) {
return pRSmaInfo;
}
if (RSMA_COMMIT_STAT(pStat) == 0) {
return NULL;
}
return pRSmaInfo;
// clone the SRSmaInfo from iRsmaInfoHash to rsmaInfoHash if in committing stat
SRSmaInfo *pCowRSmaInfo = NULL;
// lock
taosWLockLatch(SMA_ENV_LOCK(pEnv));
void *iRSmaInfo = taosHashGet(RSMA_IMU_INFO_HASH(pStat), &suid, sizeof(tb_uid_t));
if (iRSmaInfo) {
SRSmaInfo *pIRSmaInfo = *(SRSmaInfo **)iRSmaInfo;
if (pIRSmaInfo) {
if (tdCloneRSmaInfo(pSma, pCowRSmaInfo, pIRSmaInfo) < 0) {
taosWUnLockLatch(SMA_ENV_LOCK(pEnv));
smaError("vgId:%d, clone rsma info failed for suid:%" PRIu64 " since %s", SMA_VID(pSma), suid, terrstr());
return NULL;
}
if (taosHashPut(RSMA_INFO_HASH(pStat), &suid, sizeof(tb_uid_t), &pCowRSmaInfo, sizeof(pCowRSmaInfo)) < 0) {
taosWUnLockLatch(SMA_ENV_LOCK(pEnv));
return NULL;
}
}
}
// unlock
taosWUnLockLatch(SMA_ENV_LOCK(pEnv));
return pCowRSmaInfo;
}
static int32_t tdExecuteRSma(SSma *pSma, const void *pMsg, int32_t inputType, tb_uid_t suid) {
@ -891,10 +925,17 @@ int32_t tdProcessRSmaRestoreImpl(SSma *pSma) {
return TSDB_CODE_SUCCESS;
_err:
smaError("vgId:%d failed to restore rsma task since %s", SMA_VID(pSma), terrstr());
smaError("vgId:%d, failed to restore rsma task since %s", SMA_VID(pSma), terrstr());
return TSDB_CODE_FAILED;
}
/**
* @brief Restore from SRSmaQTaskInfoItem
*
* @param pSma
* @param pItem
* @return int32_t
*/
static int32_t tdRSmaQTaskInfoItemRestore(SSma *pSma, const SRSmaQTaskInfoItem *pItem) {
SRSmaInfo *pRSmaInfo = NULL;
void *qTaskInfo = NULL;
@ -920,7 +961,7 @@ static int32_t tdRSmaQTaskInfoItemRestore(SSma *pSma, const SRSmaQTaskInfoItem *
if (qDeserializeTaskStatus(qTaskInfo, pItem->qTaskInfo, pItem->len) < 0) {
smaError("vgId:%d, restore rsma task failed for table:%" PRIi64 " level %d since %s", SMA_VID(pSma), pItem->suid,
pItem->type, terrstr(terrno));
pItem->type, terrstr());
return TSDB_CODE_FAILED;
}
smaDebug("vgId:%d, restore rsma task success for table:%" PRIi64 " level %d", SMA_VID(pSma), pItem->suid,
@ -1067,26 +1108,27 @@ static int32_t tdRSmaQTaskInfoRestore(SSma *pSma, SRSmaQTaskInfoIter *pIter) {
return TSDB_CODE_SUCCESS;
}
int32_t tdRSmaPersistExecImpl(SRSmaStat *pRSmaStat) {
int32_t tdRSmaPersistExecImpl(SRSmaStat *pRSmaStat, SHashObj *pInfoHash) {
SSma *pSma = pRSmaStat->pSma;
SVnode *pVnode = pSma->pVnode;
int32_t vid = SMA_VID(pSma);
int64_t toffset = 0;
bool isFileCreated = false;
if (taosHashGetSize(RSMA_INFO_HASH(pRSmaStat)) <= 0) {
if (taosHashGetSize(pInfoHash) <= 0) {
return TSDB_CODE_SUCCESS;
}
void *infoHash = taosHashIterate(RSMA_INFO_HASH(pRSmaStat), NULL);
void *infoHash = taosHashIterate(pInfoHash, NULL);
if (!infoHash) {
return TSDB_CODE_SUCCESS;
}
STFile tFile = {0};
if (RSMA_SUBMIT_VER(pRSmaStat) > 0) {
#if 0
if (pRSmaStat->commitAppliedVer > 0) {
char qTaskInfoFName[TSDB_FILENAME_LEN];
tdRSmaQTaskInfoGetFName(vid, pSma->pVnode->state.applied, qTaskInfoFName);
tdRSmaQTaskInfoGetFName(vid, pRSmaStat->commitAppliedVer, qTaskInfoFName);
if (tdInitTFile(&tFile, tfsGetPrimaryPath(pVnode->pTfs), qTaskInfoFName) < 0) {
smaError("vgId:%d, rsma persit, init %s failed since %s", vid, qTaskInfoFName, terrstr());
goto _err;
@ -1099,6 +1141,7 @@ int32_t tdRSmaPersistExecImpl(SRSmaStat *pRSmaStat) {
isFileCreated = true;
}
#endif
while (infoHash) {
SRSmaInfo *pRSmaInfo = *(SRSmaInfo **)infoHash;
@ -1114,7 +1157,7 @@ int32_t tdRSmaPersistExecImpl(SRSmaStat *pRSmaStat) {
int8_t type = (int8_t)(i + 1);
if (qSerializeTaskStatus(taskInfo, &pOutput, &len) < 0) {
smaError("vgId:%d, rsma, table %" PRIi64 " level %d serialize qTaskInfo failed since %s", vid, pRSmaInfo->suid,
i + 1, terrstr(terrno));
i + 1, terrstr());
goto _err;
}
if (!pOutput || len <= 0) {
@ -1130,7 +1173,7 @@ int32_t tdRSmaPersistExecImpl(SRSmaStat *pRSmaStat) {
if (!isFileCreated) {
char qTaskInfoFName[TSDB_FILENAME_LEN];
tdRSmaQTaskInfoGetFName(vid, pSma->pVnode->state.applied, qTaskInfoFName);
tdRSmaQTaskInfoGetFName(vid, pRSmaStat->commitAppliedVer, qTaskInfoFName);
if (tdInitTFile(&tFile, tfsGetPrimaryPath(pVnode->pTfs), qTaskInfoFName) < 0) {
smaError("vgId:%d, rsma persit, init %s failed since %s", vid, qTaskInfoFName, terrstr());
goto _err;
@ -1163,11 +1206,11 @@ int32_t tdRSmaPersistExecImpl(SRSmaStat *pRSmaStat) {
taosMemoryFree(pOutput);
}
infoHash = taosHashIterate(RSMA_INFO_HASH(pRSmaStat), infoHash);
infoHash = taosHashIterate(pInfoHash, infoHash);
}
if (isFileCreated) {
tFile.info.qTaskInfo.submitVer = atomic_load_64(&pRSmaStat->submitVer);
tFile.info.qTaskInfo.submitVer = atomic_load_64(&pRSmaStat->commitSubmitVer);
if (tdUpdateTFileHeader(&tFile) < 0) {
smaError("vgId:%d, rsma, failed to update tfile %s header since %s", vid, TD_TFILE_FULL_NAME(&tFile),
tstrerror(terrno));
@ -1217,6 +1260,10 @@ static void tdRSmaFetchTrigger(void *param, void *tmrId) {
smaDebug("vgId:%d, not fetch rsma level %" PRIi8 " data since stat is %" PRIi8 ", rsetId rsetId:%" PRIi64
" refId:%d",
SMA_VID(pSma), pItem->level, rsmaTriggerStat, smaMgmt.rsetId, pItem->refId);
if (rsmaTriggerStat == TASK_TRIGGER_STAT_PAUSED) {
taosTmrReset(tdRSmaFetchTrigger, pItem->maxDelay > 5000 ? 5000 : pItem->maxDelay, pItem, smaMgmt.tmrHandle,
&pItem->tmrId);
}
return;
}
default:


@ -313,4 +313,99 @@ int32_t tdReleaseSmaRef(int32_t rsetId, int64_t refId, const char *tags, int32_t
return TSDB_CODE_SUCCESS;
}
static int32_t tdCloneQTaskInfo(SSma *pSma, qTaskInfo_t dstTaskInfo, qTaskInfo_t srcTaskInfo, SRSmaParam *param,
tb_uid_t suid, int8_t idx) {
SVnode *pVnode = pSma->pVnode;
char *pOutput = NULL;
int32_t len = 0;
if (qSerializeTaskStatus(srcTaskInfo, &pOutput, &len) < 0) {
smaError("vgId:%d, rsma clone, table %" PRIi64 " serialize qTaskInfo failed since %s", TD_VID(pVnode), suid,
terrstr());
goto _err;
}
SReadHandle handle = {
.meta = pVnode->pMeta,
.vnode = pVnode,
.initTqReader = 1,
};
ASSERT(!dstTaskInfo);
dstTaskInfo = qCreateStreamExecTaskInfo(param->qmsg[idx], &handle);
if (!dstTaskInfo) {
terrno = TSDB_CODE_RSMA_QTASKINFO_CREATE;
goto _err;
}
if (qDeserializeTaskStatus(dstTaskInfo, pOutput, len) < 0) {
smaError("vgId:%d, rsma clone, restore rsma task for table:%" PRIi64 " failed since %s", TD_VID(pVnode), suid,
terrstr());
goto _err;
}
smaError("vgId:%d, rsma clone, restore rsma task for table:%" PRIi64 " succeed", TD_VID(pVnode), suid);
taosMemoryFreeClear(pOutput);
return TSDB_CODE_SUCCESS;
_err:
taosMemoryFreeClear(pOutput);
tdFreeQTaskInfo(dstTaskInfo, TD_VID(pVnode), idx + 1);
return TSDB_CODE_FAILED;
}
/**
* @brief pTSchema is shared
*
* @param pSma
* @param pDest
* @param pSrc
* @return int32_t
*/
int32_t tdCloneRSmaInfo(SSma *pSma, SRSmaInfo *pDest, SRSmaInfo *pSrc) {
SVnode *pVnode = pSma->pVnode;
SRSmaParam *param = NULL;
if (!pSrc) {
return TSDB_CODE_SUCCESS;
}
if (!pDest) {
pDest = taosMemoryCalloc(1, sizeof(SRSmaInfo));
if (!pDest) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return TSDB_CODE_FAILED;
}
}
memcpy(pDest, pSrc, sizeof(SRSmaInfo));
SMetaReader mr = {0};
metaReaderInit(&mr, SMA_META(pSma), 0);
smaDebug("vgId:%d, rsma clone, suid is %" PRIi64, TD_VID(pVnode), pSrc->suid);
if (metaGetTableEntryByUid(&mr, pSrc->suid) < 0) {
smaError("vgId:%d, rsma clone, failed to get table meta for %" PRIi64 " since %s", TD_VID(pVnode), pSrc->suid,
terrstr());
goto _err;
}
ASSERT(mr.me.type == TSDB_SUPER_TABLE);
ASSERT(mr.me.uid == pSrc->suid);
if (TABLE_IS_ROLLUP(mr.me.flags)) {
param = &mr.me.stbEntry.rsmaParam;
for (int i = 0; i < TSDB_RETENTION_L2; ++i) {
SRSmaInfoItem *pItem = &pSrc->items[i];
if (pItem->taskInfo) {
tdCloneQTaskInfo(pSma, pDest->items[i].taskInfo, pItem->taskInfo, param, pSrc->suid, i);
}
}
smaDebug("vgId:%d, rsma clone env success for %" PRIi64, TD_VID(pVnode), pSrc->suid);
}
metaReaderClear(&mr);
return TSDB_CODE_SUCCESS;
_err:
metaReaderClear(&mr);
tdFreeRSmaInfo(pSma, pDest, false);
return TSDB_CODE_FAILED;
}
// ...


@ -233,7 +233,8 @@ int vnodeCommit(SVnode *pVnode) {
walBeginSnapshot(pVnode->pWal, pVnode->state.applied);
// preCommit
smaPreCommit(pVnode->pSma);
// smaSyncPreCommit(pVnode->pSma);
smaAsyncPreCommit(pVnode->pSma);
// commit each sub-system
if (metaCommit(pVnode->pMeta) < 0) {
@ -242,6 +243,8 @@ int vnodeCommit(SVnode *pVnode) {
}
if (VND_IS_RSMA(pVnode)) {
smaAsyncCommit(pVnode->pSma);
if (tsdbCommit(VND_RSMA0(pVnode)) < 0) {
ASSERT(0);
return -1;
@ -276,7 +279,8 @@ int vnodeCommit(SVnode *pVnode) {
pVnode->state.committed = info.state.committed;
// postCommit
smaPostCommit(pVnode->pSma);
// smaSyncPostCommit(pVnode->pSma);
smaAsyncPostCommit(pVnode->pSma);
// apply the commit (TODO)
walEndSnapshot(pVnode->pWal);
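Read together, the hunks above replace the synchronous sma commit hooks with the asynchronous variants; a simplified sketch of the resulting call order inside vnodeCommit (error handling and unrelated sub-system commits omitted; not the literal function body):
// simplified ordering of the async rsma commit in vnodeCommit()
smaAsyncPreCommit(pVnode->pSma);   // pause triggers, wait for fetch tasks, swap rsmaInfoHash into iRsmaInfoHash
metaCommit(pVnode->pMeta);
if (VND_IS_RSMA(pVnode)) {
  smaAsyncCommit(pVnode->pSma);    // persist qTaskInfo from the immutable hash
  tsdbCommit(VND_RSMA0(pVnode));
}
/* ... commit remaining sub-systems, update pVnode->state.committed ... */
smaAsyncPostCommit(pVnode->pSma);  // migrate/free iRsmaInfoHash entries, clean outdated qtaskinfo files
walEndSnapshot(pVnode->pWal);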


@ -689,6 +689,7 @@ _exit:
tEncoderInit(&encoder, pRsp->pCont, pRsp->contLen);
tEncodeSVDropTbBatchRsp(&encoder, &rsp);
tEncoderClear(&encoder);
taosArrayDestroy(rsp.pArray);
return 0;
}
@ -900,7 +901,7 @@ static int32_t vnodeProcessCreateTSmaReq(SVnode *pVnode, int64_t version, void *
_err:
tDecoderClear(&coder);
vError("vgId:%d, failed to create tsma %s:%" PRIi64 " version %" PRIi64 "for table %" PRIi64 " since %s",
TD_VID(pVnode), req.indexName, req.indexUid, version, req.tableUid, terrstr(terrno));
TD_VID(pVnode), req.indexName, req.indexUid, version, req.tableUid, terrstr());
return -1;
}


@ -163,7 +163,7 @@ int32_t ctgInitGetQnodeTask(SCtgJob *pJob, int32_t taskIdx, void* param) {
taosArrayPush(pJob->pTasks, &task);
qDebug("QID:0x%" PRIx64 " the %d task type %s initialized", pJob->queryId, taskIdx, ctgTaskTypeStr(task.type));
qDebug("QID:0x%" PRIx64 " the %dth task type %s initialized", pJob->queryId, taskIdx, ctgTaskTypeStr(task.type));
return TSDB_CODE_SUCCESS;
}
@ -178,7 +178,7 @@ int32_t ctgInitGetDnodeTask(SCtgJob *pJob, int32_t taskIdx, void* param) {
taosArrayPush(pJob->pTasks, &task);
qDebug("QID:0x%" PRIx64 " the %d task type %s initialized", pJob->queryId, taskIdx, ctgTaskTypeStr(task.type));
qDebug("QID:0x%" PRIx64 " the %dth task type %s initialized", pJob->queryId, taskIdx, ctgTaskTypeStr(task.type));
return TSDB_CODE_SUCCESS;
}
@ -264,7 +264,7 @@ int32_t ctgInitGetSvrVerTask(SCtgJob *pJob, int32_t taskIdx, void* param) {
taosArrayPush(pJob->pTasks, &task);
qDebug("QID:0x%" PRIx64 " [%dth] task type %s initialized", pJob->queryId, taskIdx, ctgTaskTypeStr(task.type));
qDebug("QID:0x%" PRIx64 " the %dth task type %s initialized", pJob->queryId, taskIdx, ctgTaskTypeStr(task.type));
return TSDB_CODE_SUCCESS;
}


@ -1981,6 +1981,7 @@ int32_t loadRemoteDataCallback(void* param, SDataBuf* pMsg, int32_t code) {
qDebug("%s fetch rsp received, index:%d, rows:%d", pSourceDataInfo->taskId, index, pRsp->numOfRows);
} else {
pSourceDataInfo->code = code;
qDebug("%s fetch rsp received, index:%d, error:%d", pSourceDataInfo->taskId, index, tstrerror(code));
}
pSourceDataInfo->status = EX_SOURCE_DATA_READY;


@ -1143,7 +1143,7 @@ static void checkUpdateData(SStreamScanInfo* pInfo, bool invertible, SSDataBlock
STimeWindow win = getActiveTimeWindow(NULL, &dumyInfo, tsCol[rowId], &pInfo->interval, TSDB_ORDER_ASC);
// must check update info first.
bool update = updateInfoIsUpdated(pInfo->pUpdateInfo, pBlock->info.uid, tsCol[rowId]);
if ( (update || (isSignleIntervalWindow(pInfo) && isCloseWindow(&win, &pInfo->twAggSup)) ) && out) {
if ((update || (isSignleIntervalWindow(pInfo) && isCloseWindow(&win, &pInfo->twAggSup))) && out) {
taosArrayPush(pInfo->tsArray, &rowId);
}
}
@ -1596,7 +1596,8 @@ SOperatorInfo* createStreamScanOperatorInfo(SReadHandle* pHandle, STableScanPhys
pInfo->pUpdateRes = createResDataBlock(pDescNode);
pInfo->pCondition = pScanPhyNode->node.pConditions;
pInfo->scanMode = STREAM_SCAN_FROM_READERHANDLE;
pInfo->sessionSup = (SessionWindowSupporter){.pStreamAggSup = NULL, .gap = -1, .parentType = QUERY_NODE_PHYSICAL_PLAN};
pInfo->sessionSup =
(SessionWindowSupporter){.pStreamAggSup = NULL, .gap = -1, .parentType = QUERY_NODE_PHYSICAL_PLAN};
pInfo->groupId = 0;
pInfo->pPullDataRes = createPullDataBlock();
pInfo->pStreamScanOp = pOperator;
@ -1630,7 +1631,8 @@ static void destroySysScanOperator(void* param, int32_t numOfOutput) {
blockDataDestroy(pInfo->pRes);
const char* name = tNameGetTableName(&pInfo->name);
if (strncasecmp(name, TSDB_INS_TABLE_USER_TABLES, TSDB_TABLE_FNAME_LEN) == 0 || pInfo->pCur != NULL) {
if (strncasecmp(name, TSDB_INS_TABLE_USER_TABLES, TSDB_TABLE_FNAME_LEN) == 0 ||
strncasecmp(name, TSDB_INS_TABLE_USER_TAGS, TSDB_TABLE_FNAME_LEN) == 0 || pInfo->pCur != NULL) {
metaCloseTbCursor(pInfo->pCur);
pInfo->pCur = NULL;
}
@ -1777,14 +1779,14 @@ static SSDataBlock* doFilterResult(SSysTableScanInfo* pInfo) {
return pInfo->pRes->info.rows == 0 ? NULL : pInfo->pRes;
}
static SSDataBlock* buildSysTableMetaBlock() {
static SSDataBlock* buildInfoSchemaTableMetaBlock(char* tableName) {
size_t size = 0;
const SSysTableMeta* pMeta = NULL;
getInfosDbMeta(&pMeta, &size);
int32_t index = 0;
for (int32_t i = 0; i < size; ++i) {
if (strcmp(pMeta[i].name, TSDB_INS_TABLE_USER_TABLES) == 0) {
if (strcmp(pMeta[i].name, tableName) == 0) {
index = i;
break;
}
@ -1800,185 +1802,418 @@ static SSDataBlock* buildSysTableMetaBlock() {
return pBlock;
}
// TODO: handle more data types (JSON in particular) and return a detailed error when the buffer is too small
static int32_t convertTagDataToTagVarchar(int8_t tagType, char* tagVal, uint32_t tagLen, char* varData,
int32_t bufSize) {
int outputLen = -1;
switch (tagType) {
case TSDB_DATA_TYPE_TINYINT:
outputLen = snprintf(varDataVal(varData), bufSize, "%d", *((int8_t*)tagVal));
break;
case TSDB_DATA_TYPE_UTINYINT:
outputLen = snprintf(varDataVal(varData), bufSize, "%u", *((uint8_t*)tagVal));
break;
case TSDB_DATA_TYPE_SMALLINT:
outputLen = snprintf(varDataVal(varData), bufSize, "%d", *((int16_t*)tagVal));
break;
case TSDB_DATA_TYPE_USMALLINT:
outputLen = snprintf(varDataVal(varData), bufSize, "%u", *((uint16_t*)tagVal));
break;
case TSDB_DATA_TYPE_INT:
outputLen = snprintf(varDataVal(varData), bufSize, "%d", *((int32_t*)tagVal));
break;
case TSDB_DATA_TYPE_UINT:
outputLen = snprintf(varDataVal(varData), bufSize, "%u", *((uint32_t*)tagVal));
break;
case TSDB_DATA_TYPE_BIGINT:
outputLen = snprintf(varDataVal(varData), bufSize, "%" PRId64, *((int64_t*)tagVal));
break;
case TSDB_DATA_TYPE_UBIGINT:
outputLen = snprintf(varDataVal(varData), bufSize, "%" PRIu64, *((uint64_t*)tagVal));
break;
case TSDB_DATA_TYPE_FLOAT: {
float fv = 0;
fv = GET_FLOAT_VAL(tagVal);
outputLen = snprintf(varDataVal(varData), bufSize, "%f", fv);
break;
}
case TSDB_DATA_TYPE_DOUBLE: {
double dv = 0;
dv = GET_DOUBLE_VAL(tagVal);
outputLen = snprintf(varDataVal(varData), bufSize, "%lf", dv);
break;
}
case TSDB_DATA_TYPE_BINARY:
case TSDB_DATA_TYPE_NCHAR:
case TSDB_DATA_TYPE_JSON: {
memcpy(varDataVal(varData), tagVal, tagLen);
outputLen = tagLen;
break;
}
case TSDB_DATA_TYPE_TIMESTAMP:
outputLen = snprintf(varDataVal(varData), bufSize, "%" PRId64, *((int64_t*)tagVal));
break;
case TSDB_DATA_TYPE_BOOL:
outputLen = snprintf(varDataVal(varData), bufSize, "%d", *((int8_t*)tagVal));
break;
default:
return TSDB_CODE_FAILED;
}
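// reject snprintf failure, truncation of a fixed-size value (outputLen == bufSize means the terminating NUL was dropped), or overflow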
if (outputLen < 0 || outputLen == bufSize && !IS_VAR_DATA_TYPE(tagType) || outputLen > bufSize) {
return TSDB_CODE_FAILED;
}
varDataSetLen(varData, outputLen);
return TSDB_CODE_SUCCESS;
}
static SSDataBlock* sysTableScanUserTags(SOperatorInfo* pOperator) {
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
SSysTableScanInfo* pInfo = pOperator->info;
if (pOperator->status == OP_EXEC_DONE) {
return NULL;
}
if (pInfo->pCur == NULL) {
pInfo->pCur = metaOpenTbCursor(pInfo->readHandle.meta);
}
blockDataCleanup(pInfo->pRes);
int32_t numOfRows = 0;
const char* db = NULL;
int32_t vgId = 0;
vnodeGetInfo(pInfo->readHandle.vnode, &db, &vgId);
SName sn = {0};
char dbname[TSDB_DB_FNAME_LEN + VARSTR_HEADER_SIZE] = {0};
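// convert the vnode's db fname (acct.db) into a bare db name stored as a var-string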
tNameFromString(&sn, db, T_NAME_ACCT | T_NAME_DB);
tNameGetDbName(&sn, varDataVal(dbname));
varDataSetLen(dbname, strlen(varDataVal(dbname)));
SSDataBlock* p = buildInfoSchemaTableMetaBlock(TSDB_INS_TABLE_USER_TAGS);
blockDataEnsureCapacity(p, pOperator->resultInfo.capacity);
int32_t ret = 0;
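// emit one result row per (child table, tag) pair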
while ((ret = metaTbCursorNext(pInfo->pCur)) == 0) {
if (pInfo->pCur->mr.me.type != TSDB_CHILD_TABLE) {
continue;
}
char tableName[TSDB_TABLE_NAME_LEN + VARSTR_HEADER_SIZE] = {0};
STR_TO_VARSTR(tableName, pInfo->pCur->mr.me.name);
SMetaReader smr = {0};
metaReaderInit(&smr, pInfo->readHandle.meta, 0);
uint64_t suid = pInfo->pCur->mr.me.ctbEntry.suid;
int32_t code = metaGetTableEntryByUid(&smr, suid);
if (code != TSDB_CODE_SUCCESS) {
qError("failed to get super table meta, uid:0x%" PRIx64 ", code:%s, %s", suid, tstrerror(terrno),
GET_TASKID(pTaskInfo));
metaReaderClear(&smr);
metaCloseTbCursor(pInfo->pCur);
pInfo->pCur = NULL;
longjmp(pTaskInfo->env, terrno);
}
char stableName[TSDB_TABLE_NAME_LEN + VARSTR_HEADER_SIZE] = {0};
STR_TO_VARSTR(stableName, smr.me.name);
int32_t numOfTags = smr.me.stbEntry.schemaTag.nCols;
for (int32_t i = 0; i < numOfTags; ++i) {
SColumnInfoData* pColInfoData = NULL;
// table name
pColInfoData = taosArrayGet(p->pDataBlock, 0);
colDataAppend(pColInfoData, numOfRows, tableName, false);
// database name
pColInfoData = taosArrayGet(p->pDataBlock, 1);
colDataAppend(pColInfoData, numOfRows, dbname, false);
// super table name
pColInfoData = taosArrayGet(p->pDataBlock, 2);
colDataAppend(pColInfoData, numOfRows, stableName, false);
char tagName[TSDB_COL_NAME_LEN + VARSTR_HEADER_SIZE] = {0};
STR_TO_VARSTR(tagName, smr.me.stbEntry.schemaTag.pSchema[i].name);
pColInfoData = taosArrayGet(p->pDataBlock, 3);
colDataAppend(pColInfoData, numOfRows, tagName, false);
int8_t tagType = smr.me.stbEntry.schemaTag.pSchema[i].type;
pColInfoData = taosArrayGet(p->pDataBlock, 4);
colDataAppend(pColInfoData, numOfRows, (char*)&tagType, false);
STagVal tagVal = {0};
tagVal.cid = smr.me.stbEntry.schemaTag.pSchema[i].colId;
char* tagData = NULL;
uint32_t tagLen = 0;
if (tagType == TSDB_DATA_TYPE_JSON) {
// TODO: a JSON tag is laid out as type byte + var header + data; handle this layout explicitly
tagData = varDataVal(pInfo->pCur->mr.me.ctbEntry.pTags + 1);
tagLen = varDataLen(pInfo->pCur->mr.me.ctbEntry.pTags + 1);
} else {
bool exist = tTagGet((STag*)pInfo->pCur->mr.me.ctbEntry.pTags, &tagVal);
if (exist) {
if (IS_VAR_DATA_TYPE(tagType)) {
tagData = (char*)tagVal.pData;
tagLen = tagVal.nData;
} else {
tagData = (char*)&tagVal.i64;
tagLen = tDataTypes[tagType].bytes;
}
}
}
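// worst-case output size: var-type tags keep their own length plus the var header; numeric types are bounded by the longest printable double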
int32_t bufSize = IS_VAR_DATA_TYPE(tagType) ? (tagLen + VARSTR_HEADER_SIZE)
: (3 + DBL_MANT_DIG - DBL_MIN_EXP + VARSTR_HEADER_SIZE);
char* tagVarChar = NULL;
if (tagData != NULL) {
tagVarChar = taosMemoryMalloc(bufSize);
code = convertTagDataToTagVarchar(tagType, tagData, tagLen, tagVarChar, bufSize);
if (code != TSDB_CODE_SUCCESS) {
qError("failed to get super table meta, uid:0x%" PRIx64 ", code:%s, %s", suid, tstrerror(terrno),
GET_TASKID(pTaskInfo));
taosMemoryFree(tagVarChar);
metaReaderClear(&smr);
metaCloseTbCursor(pInfo->pCur);
pInfo->pCur = NULL;
longjmp(pTaskInfo->env, terrno);
}
}
pColInfoData = taosArrayGet(p->pDataBlock, 5);
colDataAppend(pColInfoData, numOfRows, tagVarChar,
(tagData == NULL) || (tagType == TSDB_DATA_TYPE_JSON && tTagIsJsonNull(tagData)));
taosMemoryFree(tagVarChar);
++numOfRows;
}
metaReaderClear(&smr);
if (numOfRows >= pOperator->resultInfo.capacity) {
break;
}
}
// TODO: the cursor is freed here as a temporary workaround; the real reason the normal free path is invalid still needs to be found
if (ret != 0) {
metaCloseTbCursor(pInfo->pCur);
pInfo->pCur = NULL;
doSetOperatorCompleted(pOperator);
}
p->info.rows = numOfRows;
pInfo->pRes->info.rows = numOfRows;
relocateColumnData(pInfo->pRes, pInfo->scanCols, p->pDataBlock, false);
doFilterResult(pInfo);
blockDataDestroy(p);
pInfo->loadInfo.totalRows += pInfo->pRes->info.rows;
return (pInfo->pRes->info.rows == 0) ? NULL : pInfo->pRes;
}
static SSDataBlock* sysTableScanUserTables(SOperatorInfo* pOperator) {
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
SSysTableScanInfo* pInfo = pOperator->info;
if (pOperator->status == OP_EXEC_DONE) {
return NULL;
}
// the retrieve is executed on the mnode, so return the tables that belong to the information schema database.
if (pInfo->readHandle.mnd != NULL) {
buildSysDbTableInfo(pInfo, pOperator->resultInfo.capacity);
doFilterResult(pInfo);
pInfo->loadInfo.totalRows += pInfo->pRes->info.rows;
doSetOperatorCompleted(pOperator);
return (pInfo->pRes->info.rows == 0) ? NULL : pInfo->pRes;
} else {
if (pInfo->pCur == NULL) {
pInfo->pCur = metaOpenTbCursor(pInfo->readHandle.meta);
}
blockDataCleanup(pInfo->pRes);
int32_t numOfRows = 0;
const char* db = NULL;
int32_t vgId = 0;
vnodeGetInfo(pInfo->readHandle.vnode, &db, &vgId);
SName sn = {0};
char dbname[TSDB_DB_FNAME_LEN + VARSTR_HEADER_SIZE] = {0};
tNameFromString(&sn, db, T_NAME_ACCT | T_NAME_DB);
tNameGetDbName(&sn, varDataVal(dbname));
varDataSetLen(dbname, strlen(varDataVal(dbname)));
SSDataBlock* p = buildInfoSchemaTableMetaBlock(TSDB_INS_TABLE_USER_TABLES);
blockDataEnsureCapacity(p, pOperator->resultInfo.capacity);
char n[TSDB_TABLE_NAME_LEN + VARSTR_HEADER_SIZE] = {0};
int32_t ret = 0;
while ((ret = metaTbCursorNext(pInfo->pCur)) == 0) {
STR_TO_VARSTR(n, pInfo->pCur->mr.me.name);
// table name
SColumnInfoData* pColInfoData = taosArrayGet(p->pDataBlock, 0);
colDataAppend(pColInfoData, numOfRows, n, false);
// database name
pColInfoData = taosArrayGet(p->pDataBlock, 1);
colDataAppend(pColInfoData, numOfRows, dbname, false);
// vgId
pColInfoData = taosArrayGet(p->pDataBlock, 6);
colDataAppend(pColInfoData, numOfRows, (char*)&vgId, false);
int32_t tableType = pInfo->pCur->mr.me.type;
if (tableType == TSDB_CHILD_TABLE) {
// create time
int64_t ts = pInfo->pCur->mr.me.ctbEntry.ctime;
pColInfoData = taosArrayGet(p->pDataBlock, 2);
colDataAppend(pColInfoData, numOfRows, (char*)&ts, false);
SMetaReader mr = {0};
metaReaderInit(&mr, pInfo->readHandle.meta, 0);
uint64_t suid = pInfo->pCur->mr.me.ctbEntry.suid;
int32_t code = metaGetTableEntryByUid(&mr, suid);
if (code != TSDB_CODE_SUCCESS) {
qError("failed to get super table meta, uid:0x%" PRIx64 ", code:%s, %s", suid, tstrerror(terrno),
GET_TASKID(pTaskInfo));
metaReaderClear(&mr);
metaCloseTbCursor(pInfo->pCur);
pInfo->pCur = NULL;
longjmp(pTaskInfo->env, terrno);
}
// number of columns
pColInfoData = taosArrayGet(p->pDataBlock, 3);
colDataAppend(pColInfoData, numOfRows, (char*)&mr.me.stbEntry.schemaRow.nCols, false);
// super table name
STR_TO_VARSTR(n, mr.me.name);
pColInfoData = taosArrayGet(p->pDataBlock, 4);
colDataAppend(pColInfoData, numOfRows, n, false);
metaReaderClear(&mr);
// table comment
pColInfoData = taosArrayGet(p->pDataBlock, 8);
if (pInfo->pCur->mr.me.ctbEntry.commentLen > 0) {
char comment[TSDB_TB_COMMENT_LEN + VARSTR_HEADER_SIZE] = {0};
STR_TO_VARSTR(comment, pInfo->pCur->mr.me.ctbEntry.comment);
colDataAppend(pColInfoData, numOfRows, comment, false);
} else if (pInfo->pCur->mr.me.ctbEntry.commentLen == 0) {
char comment[VARSTR_HEADER_SIZE + VARSTR_HEADER_SIZE] = {0};
STR_TO_VARSTR(comment, "");
colDataAppend(pColInfoData, numOfRows, comment, false);
} else {
colDataAppendNULL(pColInfoData, numOfRows);
}
// uid
pColInfoData = taosArrayGet(p->pDataBlock, 5);
colDataAppend(pColInfoData, numOfRows, (char*)&pInfo->pCur->mr.me.uid, false);
// ttl
pColInfoData = taosArrayGet(p->pDataBlock, 7);
colDataAppend(pColInfoData, numOfRows, (char*)&pInfo->pCur->mr.me.ctbEntry.ttlDays, false);
STR_TO_VARSTR(n, "CHILD_TABLE");
} else if (tableType == TSDB_NORMAL_TABLE) {
// create time
pColInfoData = taosArrayGet(p->pDataBlock, 2);
colDataAppend(pColInfoData, numOfRows, (char*)&pInfo->pCur->mr.me.ntbEntry.ctime, false);
// number of columns
pColInfoData = taosArrayGet(p->pDataBlock, 3);
colDataAppend(pColInfoData, numOfRows, (char*)&pInfo->pCur->mr.me.ntbEntry.schemaRow.nCols, false);
// super table name
pColInfoData = taosArrayGet(p->pDataBlock, 4);
colDataAppendNULL(pColInfoData, numOfRows);
// table comment
pColInfoData = taosArrayGet(p->pDataBlock, 8);
if (pInfo->pCur->mr.me.ntbEntry.commentLen > 0) {
char comment[TSDB_TB_COMMENT_LEN + VARSTR_HEADER_SIZE] = {0};
STR_TO_VARSTR(comment, pInfo->pCur->mr.me.ntbEntry.comment);
colDataAppend(pColInfoData, numOfRows, comment, false);
} else if (pInfo->pCur->mr.me.ntbEntry.commentLen == 0) {
char comment[VARSTR_HEADER_SIZE + VARSTR_HEADER_SIZE] = {0};
STR_TO_VARSTR(comment, "");
colDataAppend(pColInfoData, numOfRows, comment, false);
} else {
colDataAppendNULL(pColInfoData, numOfRows);
}
// uid
pColInfoData = taosArrayGet(p->pDataBlock, 5);
colDataAppend(pColInfoData, numOfRows, (char*)&pInfo->pCur->mr.me.uid, false);
// ttl
pColInfoData = taosArrayGet(p->pDataBlock, 7);
colDataAppend(pColInfoData, numOfRows, (char*)&pInfo->pCur->mr.me.ntbEntry.ttlDays, false);
STR_TO_VARSTR(n, "NORMAL_TABLE");
}
pColInfoData = taosArrayGet(p->pDataBlock, 9);
colDataAppend(pColInfoData, numOfRows, n, false);
if (++numOfRows >= pOperator->resultInfo.capacity) {
break;
}
}
// TODO: the cursor is freed here as a temporary workaround; the real reason the normal free path is invalid still needs to be found
if (ret != 0) {
metaCloseTbCursor(pInfo->pCur);
pInfo->pCur = NULL;
doSetOperatorCompleted(pOperator);
}
p->info.rows = numOfRows;
pInfo->pRes->info.rows = numOfRows;
relocateColumnData(pInfo->pRes, pInfo->scanCols, p->pDataBlock, false);
doFilterResult(pInfo);
blockDataDestroy(p);
pInfo->loadInfo.totalRows += pInfo->pRes->info.rows;
return (pInfo->pRes->info.rows == 0) ? NULL : pInfo->pRes;
}
}
static SSDataBlock* doSysTableScan(SOperatorInfo* pOperator) {
// build message and send to mnode to fetch the content of system tables.
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
SSysTableScanInfo* pInfo = pOperator->info;
// retrieve local table list info from vnode
const char* name = tNameGetTableName(&pInfo->name);
if (strncasecmp(name, TSDB_INS_TABLE_USER_TABLES, TSDB_TABLE_FNAME_LEN) == 0) {
if (pOperator->status == OP_EXEC_DONE) {
return NULL;
}
// the retrieve is executed on the mnode, so return the tables that belong to the information schema database.
if (pInfo->readHandle.mnd != NULL) {
buildSysDbTableInfo(pInfo, pOperator->resultInfo.capacity);
doFilterResult(pInfo);
pInfo->loadInfo.totalRows += pInfo->pRes->info.rows;
doSetOperatorCompleted(pOperator);
return (pInfo->pRes->info.rows == 0) ? NULL : pInfo->pRes;
} else {
if (pInfo->pCur == NULL) {
pInfo->pCur = metaOpenTbCursor(pInfo->readHandle.meta);
}
blockDataCleanup(pInfo->pRes);
int32_t numOfRows = 0;
const char* db = NULL;
int32_t vgId = 0;
vnodeGetInfo(pInfo->readHandle.vnode, &db, &vgId);
SName sn = {0};
char dbname[TSDB_DB_FNAME_LEN + VARSTR_HEADER_SIZE] = {0};
tNameFromString(&sn, db, T_NAME_ACCT | T_NAME_DB);
tNameGetDbName(&sn, varDataVal(dbname));
varDataSetLen(dbname, strlen(varDataVal(dbname)));
SSDataBlock* p = buildSysTableMetaBlock();
blockDataEnsureCapacity(p, pOperator->resultInfo.capacity);
char n[TSDB_TABLE_NAME_LEN + VARSTR_HEADER_SIZE] = {0};
int32_t ret = 0;
while ((ret = metaTbCursorNext(pInfo->pCur)) == 0) {
STR_TO_VARSTR(n, pInfo->pCur->mr.me.name);
// table name
SColumnInfoData* pColInfoData = taosArrayGet(p->pDataBlock, 0);
colDataAppend(pColInfoData, numOfRows, n, false);
// database name
pColInfoData = taosArrayGet(p->pDataBlock, 1);
colDataAppend(pColInfoData, numOfRows, dbname, false);
// vgId
pColInfoData = taosArrayGet(p->pDataBlock, 6);
colDataAppend(pColInfoData, numOfRows, (char*)&vgId, false);
int32_t tableType = pInfo->pCur->mr.me.type;
if (tableType == TSDB_CHILD_TABLE) {
// create time
int64_t ts = pInfo->pCur->mr.me.ctbEntry.ctime;
pColInfoData = taosArrayGet(p->pDataBlock, 2);
colDataAppend(pColInfoData, numOfRows, (char*)&ts, false);
SMetaReader mr = {0};
metaReaderInit(&mr, pInfo->readHandle.meta, 0);
uint64_t suid = pInfo->pCur->mr.me.ctbEntry.suid;
int32_t code = metaGetTableEntryByUid(&mr, suid);
if (code != TSDB_CODE_SUCCESS) {
qError("failed to get super table meta, uid:0x%" PRIx64 ", code:%s, %s", suid, tstrerror(terrno),
GET_TASKID(pTaskInfo));
metaReaderClear(&mr);
metaCloseTbCursor(pInfo->pCur);
pInfo->pCur = NULL;
longjmp(pTaskInfo->env, terrno);
}
// number of columns
pColInfoData = taosArrayGet(p->pDataBlock, 3);
colDataAppend(pColInfoData, numOfRows, (char*)&mr.me.stbEntry.schemaRow.nCols, false);
// super table name
STR_TO_VARSTR(n, mr.me.name);
pColInfoData = taosArrayGet(p->pDataBlock, 4);
colDataAppend(pColInfoData, numOfRows, n, false);
metaReaderClear(&mr);
// table comment
pColInfoData = taosArrayGet(p->pDataBlock, 8);
if (pInfo->pCur->mr.me.ctbEntry.commentLen > 0) {
char comment[TSDB_TB_COMMENT_LEN + VARSTR_HEADER_SIZE] = {0};
STR_TO_VARSTR(comment, pInfo->pCur->mr.me.ctbEntry.comment);
colDataAppend(pColInfoData, numOfRows, comment, false);
} else if (pInfo->pCur->mr.me.ctbEntry.commentLen == 0) {
char comment[VARSTR_HEADER_SIZE + VARSTR_HEADER_SIZE] = {0};
STR_TO_VARSTR(comment, "");
colDataAppend(pColInfoData, numOfRows, comment, false);
} else {
colDataAppendNULL(pColInfoData, numOfRows);
}
// uid
pColInfoData = taosArrayGet(p->pDataBlock, 5);
colDataAppend(pColInfoData, numOfRows, (char*)&pInfo->pCur->mr.me.uid, false);
// ttl
pColInfoData = taosArrayGet(p->pDataBlock, 7);
colDataAppend(pColInfoData, numOfRows, (char*)&pInfo->pCur->mr.me.ctbEntry.ttlDays, false);
STR_TO_VARSTR(n, "CHILD_TABLE");
} else if (tableType == TSDB_NORMAL_TABLE) {
// create time
pColInfoData = taosArrayGet(p->pDataBlock, 2);
colDataAppend(pColInfoData, numOfRows, (char*)&pInfo->pCur->mr.me.ntbEntry.ctime, false);
// number of columns
pColInfoData = taosArrayGet(p->pDataBlock, 3);
colDataAppend(pColInfoData, numOfRows, (char*)&pInfo->pCur->mr.me.ntbEntry.schemaRow.nCols, false);
// super table name
pColInfoData = taosArrayGet(p->pDataBlock, 4);
colDataAppendNULL(pColInfoData, numOfRows);
// table comment
pColInfoData = taosArrayGet(p->pDataBlock, 8);
if (pInfo->pCur->mr.me.ntbEntry.commentLen > 0) {
char comment[TSDB_TB_COMMENT_LEN + VARSTR_HEADER_SIZE] = {0};
STR_TO_VARSTR(comment, pInfo->pCur->mr.me.ntbEntry.comment);
colDataAppend(pColInfoData, numOfRows, comment, false);
} else if (pInfo->pCur->mr.me.ntbEntry.commentLen == 0) {
char comment[VARSTR_HEADER_SIZE + VARSTR_HEADER_SIZE] = {0};
STR_TO_VARSTR(comment, "");
colDataAppend(pColInfoData, numOfRows, comment, false);
} else {
colDataAppendNULL(pColInfoData, numOfRows);
}
// uid
pColInfoData = taosArrayGet(p->pDataBlock, 5);
colDataAppend(pColInfoData, numOfRows, (char*)&pInfo->pCur->mr.me.uid, false);
// ttl
pColInfoData = taosArrayGet(p->pDataBlock, 7);
colDataAppend(pColInfoData, numOfRows, (char*)&pInfo->pCur->mr.me.ntbEntry.ttlDays, false);
STR_TO_VARSTR(n, "NORMAL_TABLE");
}
pColInfoData = taosArrayGet(p->pDataBlock, 9);
colDataAppend(pColInfoData, numOfRows, n, false);
if (++numOfRows >= pOperator->resultInfo.capacity) {
break;
}
}
// TODO: the cursor is freed here as a temporary workaround; the real reason the normal free path is invalid still needs to be found
if (ret != 0) {
metaCloseTbCursor(pInfo->pCur);
pInfo->pCur = NULL;
doSetOperatorCompleted(pOperator);
}
p->info.rows = numOfRows;
pInfo->pRes->info.rows = numOfRows;
relocateColumnData(pInfo->pRes, pInfo->scanCols, p->pDataBlock, false);
doFilterResult(pInfo);
blockDataDestroy(p);
pInfo->loadInfo.totalRows += pInfo->pRes->info.rows;
return (pInfo->pRes->info.rows == 0) ? NULL : pInfo->pRes;
}
return sysTableScanUserTables(pOperator);
} else if (strncasecmp(name, TSDB_INS_TABLE_USER_TAGS, TSDB_TABLE_FNAME_LEN) == 0) {
return sysTableScanUserTags(pOperator);
} else { // load the meta from mnode of the given epset
if (pOperator->status == OP_EXEC_DONE) {
return NULL;
@ -2058,7 +2293,7 @@ static SSDataBlock* doSysTableScan(SOperatorInfo* pOperator) {
}
int32_t buildSysDbTableInfo(const SSysTableScanInfo* pInfo, int32_t capacity) {
SSDataBlock* p = buildSysTableMetaBlock();
SSDataBlock* p = buildInfoSchemaTableMetaBlock(TSDB_INS_TABLE_USER_TABLES);
blockDataEnsureCapacity(p, capacity);
size_t size = 0;
@ -2147,7 +2382,8 @@ SOperatorInfo* createSysTableScanOperatorInfo(void* readHandle, SSystemTableScan
tNameAssign(&pInfo->name, &pScanNode->tableName);
const char* name = tNameGetTableName(&pInfo->name);
if (strncasecmp(name, TSDB_INS_TABLE_USER_TABLES, TSDB_TABLE_FNAME_LEN) == 0) {
if (strncasecmp(name, TSDB_INS_TABLE_USER_TABLES, TSDB_TABLE_FNAME_LEN) == 0 ||
strncasecmp(name, TSDB_INS_TABLE_USER_TAGS, TSDB_TABLE_FNAME_LEN) == 0) {
pInfo->readHandle = *(SReadHandle*)readHandle;
blockDataEnsureCapacity(pInfo->pRes, pOperator->resultInfo.capacity);
} else {
@ -2941,4 +3177,3 @@ _error:
taosMemoryFree(pOperator);
return NULL;
}

View File

@ -1353,13 +1353,13 @@ static int32_t closeIntervalWindow(SHashObj* pHashMap, STimeWindowAggSupp* pSup,
if (chIds && pPullDataMap) {
SArray* chAy = *(SArray**)chIds;
int32_t size = taosArrayGetSize(chAy);
qDebug("window %" PRId64 " wait child size:%d", win.skey, size);
qDebug("===stream===window %" PRId64 " wait child size:%d", win.skey, size);
for (int32_t i = 0; i < size; i++) {
qDebug("window %" PRId64 " wait child id:%d", win.skey, *(int32_t*)taosArrayGet(chAy, i));
qDebug("===stream===window %" PRId64 " wait child id:%d", win.skey, *(int32_t*)taosArrayGet(chAy, i));
}
continue;
} else if (pPullDataMap) {
qDebug("close window %" PRId64, win.skey);
qDebug("===stream===close window %" PRId64, win.skey);
}
SResultRowPosition* pPos = (SResultRowPosition*)pIte;
if (pSup->calTrigger == STREAM_TRIGGER_WINDOW_CLOSE) {
@ -2482,7 +2482,9 @@ static void doHashInterval(SOperatorInfo* pOperatorInfo, SSDataBlock* pSDataBloc
SPullWindowInfo pull = {.window = nextWin, .groupId = tableGroupId};
// add pull data request
taosArrayPush(pInfo->pPullWins, &pull);
addPullWindow(pInfo->pPullDataMap, &winRes, taosArrayGetSize(pInfo->pChildren));
int32_t size = taosArrayGetSize(pInfo->pChildren);
addPullWindow(pInfo->pPullDataMap, &winRes, size);
qDebug("===stream===prepare retrive %" PRId64 ", size:%d", winRes.ts, size);
} else {
int32_t index = -1;
SArray* chArray = NULL;
@ -2492,14 +2494,14 @@ static void doHashInterval(SOperatorInfo* pOperatorInfo, SSDataBlock* pSDataBloc
chId = getChildIndex(pSDataBlock);
index = taosArraySearchIdx(chArray, &chId, compareInt32Val, TD_EQ);
}
if (index != -1 && pSDataBlock->info.type == STREAM_PULL_DATA) {
qDebug("======delete child id %d", chId);
taosArrayRemove(chArray, index);
if (taosArrayGetSize(chArray) == 0) {
// pull data is over
taosHashRemove(pInfo->pPullDataMap, &winRes, sizeof(SWinRes));
}
}
// if (index != -1 && pSDataBlock->info.type == STREAM_PULL_DATA) {
// qDebug("===stream===delete child id %d", chId);
// taosArrayRemove(chArray, index);
// if (taosArrayGetSize(chArray) == 0) {
// // pull data is over
// taosHashRemove(pInfo->pPullDataMap, &winRes, sizeof(SWinRes));
// }
// }
if (index == -1 || pSDataBlock->info.type == STREAM_PULL_DATA) {
ignore = false;
}
@ -2623,6 +2625,7 @@ void processPullOver(SSDataBlock* pBlock, SHashObj* pMap) {
SArray* chArray = *(SArray**)chIds;
int32_t index = taosArraySearchIdx(chArray, &chId, compareInt32Val, TD_EQ);
if (index != -1) {
qDebug("===stream===window %" PRId64 " delete child id %d", winRes.ts, chId);
taosArrayRemove(chArray, index);
if (taosArrayGetSize(chArray) == 0) {
// pull data is over
@ -2641,7 +2644,7 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) {
SExprSupp* pSup = &pOperator->exprSupp;
qDebug("interval status %d %s", pOperator->status, IS_FINAL_OP(pInfo) ? "interval Final" : "interval Semi");
qDebug("interval status %d %s", pOperator->status, IS_FINAL_OP(pInfo) ? "interval Final" : "interval Semi");
if (pOperator->status == OP_EXEC_DONE) {
return NULL;
@ -2657,18 +2660,18 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) {
}
return NULL;
}
printDataBlock(pInfo->binfo.pRes, IS_FINAL_OP(pInfo) ? "interval Final" : "interval Semi");
printDataBlock(pInfo->binfo.pRes, IS_FINAL_OP(pInfo) ? "interval Final" : "interval Semi");
return pInfo->binfo.pRes;
} else {
doBuildResultDatablock(pOperator, &pInfo->binfo, &pInfo->groupResInfo, pInfo->aggSup.pResultBuf);
if (pInfo->binfo.pRes->info.rows != 0) {
printDataBlock(pInfo->binfo.pRes, IS_FINAL_OP(pInfo) ? "interval Final" : "interval Semi");
printDataBlock(pInfo->binfo.pRes, IS_FINAL_OP(pInfo) ? "interval Final" : "interval Semi");
return pInfo->binfo.pRes;
}
if (pInfo->pUpdateRes->info.rows != 0 && pInfo->returnUpdate) {
pInfo->returnUpdate = false;
ASSERT(!IS_FINAL_OP(pInfo));
printDataBlock(pInfo->pUpdateRes, IS_FINAL_OP(pInfo) ? "interval Final" : "interval Semi");
printDataBlock(pInfo->pUpdateRes, IS_FINAL_OP(pInfo) ? "interval Final" : "interval Semi");
// process the rest of the data
return pInfo->pUpdateRes;
}
@ -2676,13 +2679,13 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) {
if (pInfo->pPullDataRes->info.rows != 0) {
// process the rest of the data
ASSERT(IS_FINAL_OP(pInfo));
printDataBlock(pInfo->pPullDataRes, IS_FINAL_OP(pInfo) ? "interval Final" : "interval Semi");
printDataBlock(pInfo->pPullDataRes, IS_FINAL_OP(pInfo) ? "interval Final" : "interval Semi");
return pInfo->pPullDataRes;
}
doBuildDeleteResult(pInfo->pDelWins, &pInfo->delIndex, pInfo->pDelRes);
if (pInfo->pDelRes->info.rows != 0) {
// process the rest of the data
printDataBlock(pInfo->pDelRes, IS_FINAL_OP(pInfo) ? "interval Final" : "interval Semi");
printDataBlock(pInfo->pDelRes, IS_FINAL_OP(pInfo) ? "interval Final" : "interval Semi");
return pInfo->pDelRes;
}
}
@ -2693,10 +2696,10 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) {
clearSpecialDataBlock(pInfo->pUpdateRes);
removeDeleteResults(pUpdated, pInfo->pDelWins);
pOperator->status = OP_RES_TO_RETURN;
qDebug("%s return data", IS_FINAL_OP(pInfo) ? "interval Final" : "interval Semi");
qDebug("%s return data", IS_FINAL_OP(pInfo) ? "interval Final" : "interval Semi");
break;
}
printDataBlock(pBlock, IS_FINAL_OP(pInfo) ? "interval Final recv" : "interval Semi recv");
printDataBlock(pBlock, IS_FINAL_OP(pInfo) ? "interval Final recv" : "interval Semi recv");
maxTs = TMAX(maxTs, pBlock->info.window.ekey);
if (pBlock->info.type == STREAM_NORMAL || pBlock->info.type == STREAM_PULL_DATA ||
@ -2771,6 +2774,7 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) {
SStreamFinalIntervalOperatorInfo* pTmpInfo = pChildOp->info;
pTmpInfo->twAggSup.calTrigger = STREAM_TRIGGER_AT_ONCE;
taosArrayPush(pInfo->pChildren, &pChildOp);
qDebug("===stream===add child, id:%d", chIndex);
}
SOperatorInfo* pChildOp = taosArrayGetP(pInfo->pChildren, chIndex);
SStreamFinalIntervalOperatorInfo* pChInfo = pChildOp->info;
@ -2795,14 +2799,14 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) {
blockDataEnsureCapacity(pInfo->binfo.pRes, pOperator->resultInfo.capacity);
doBuildResultDatablock(pOperator, &pInfo->binfo, &pInfo->groupResInfo, pInfo->aggSup.pResultBuf);
if (pInfo->binfo.pRes->info.rows != 0) {
printDataBlock(pInfo->binfo.pRes, IS_FINAL_OP(pInfo) ? "interval Final" : "interval Semi");
printDataBlock(pInfo->binfo.pRes, IS_FINAL_OP(pInfo) ? "interval Final" : "interval Semi");
return pInfo->binfo.pRes;
}
if (pInfo->pUpdateRes->info.rows != 0 && pInfo->returnUpdate) {
pInfo->returnUpdate = false;
ASSERT(!IS_FINAL_OP(pInfo));
printDataBlock(pInfo->pUpdateRes, IS_FINAL_OP(pInfo) ? "interval Final" : "interval Semi");
printDataBlock(pInfo->pUpdateRes, IS_FINAL_OP(pInfo) ? "interval Final" : "interval Semi");
// process the rest of the data
return pInfo->pUpdateRes;
}
@ -2811,14 +2815,14 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) {
if (pInfo->pPullDataRes->info.rows != 0) {
// process the rest of the data
ASSERT(IS_FINAL_OP(pInfo));
printDataBlock(pInfo->pPullDataRes, IS_FINAL_OP(pInfo) ? "interval Final" : "interval Semi");
printDataBlock(pInfo->pPullDataRes, IS_FINAL_OP(pInfo) ? "interval Final" : "interval Semi");
return pInfo->pPullDataRes;
}
doBuildDeleteResult(pInfo->pDelWins, &pInfo->delIndex, pInfo->pDelRes);
if (pInfo->pDelRes->info.rows != 0) {
// process the rest of the data
printDataBlock(pInfo->pDelRes, IS_FINAL_OP(pInfo) ? "interval Final" : "interval Semi");
printDataBlock(pInfo->pDelRes, IS_FINAL_OP(pInfo) ? "interval Final" : "interval Semi");
return pInfo->pDelRes;
}
// ASSERT(false);

View File

@ -1543,7 +1543,7 @@ typedef struct SCollectFuncsCxt {
int32_t errCode;
FFuncClassifier classifier;
SNodeList* pFuncs;
SHashObj* pAliasName;
SHashObj* pFuncsSet;
} SCollectFuncsCxt;
static EDealRes collectFuncs(SNode* pNode, void* pContext) {
@ -1551,28 +1551,40 @@ static EDealRes collectFuncs(SNode* pNode, void* pContext) {
if (QUERY_NODE_FUNCTION == nodeType(pNode) && pCxt->classifier(((SFunctionNode*)pNode)->funcId) &&
!(((SExprNode*)pNode)->orderAlias)) {
SExprNode* pExpr = (SExprNode*)pNode;
if (NULL == taosHashGet(pCxt->pAliasName, pExpr->aliasName, strlen(pExpr->aliasName))) {
if (NULL == taosHashGet(pCxt->pFuncsSet, &pExpr, POINTER_BYTES)) {
pCxt->errCode = nodesListStrictAppend(pCxt->pFuncs, nodesCloneNode(pNode));
taosHashPut(pCxt->pAliasName, pExpr->aliasName, strlen(pExpr->aliasName), &pExpr, POINTER_BYTES);
taosHashPut(pCxt->pFuncsSet, &pExpr, POINTER_BYTES, &pExpr, POINTER_BYTES);
}
return (TSDB_CODE_SUCCESS == pCxt->errCode ? DEAL_RES_IGNORE_CHILD : DEAL_RES_ERROR);
}
return DEAL_RES_CONTINUE;
}
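// function nodes are deduplicated via a hash set keyed on the node pointer: hash by alias name, then confirm with alias plus full node equality so distinct functions sharing an alias are kept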
static uint32_t funcNodeHash(const char* pKey, uint32_t len) {
SExprNode* pExpr = *(SExprNode**)pKey;
return MurmurHash3_32(pExpr->aliasName, strlen(pExpr->aliasName));
}
static int32_t funcNodeEqual(const void* pLeft, const void* pRight, size_t len) {
if (0 != strcmp((*(const SExprNode**)pLeft)->aliasName, (*(const SExprNode**)pRight)->aliasName)) {
return 1;
}
return nodesEqualNode(*(const SNode**)pLeft, *(const SNode**)pRight) ? 0 : 1;
}
int32_t nodesCollectFuncs(SSelectStmt* pSelect, ESqlClause clause, FFuncClassifier classifier, SNodeList** pFuncs) {
if (NULL == pSelect || NULL == pFuncs) {
return TSDB_CODE_FAILED;
}
SCollectFuncsCxt cxt = {
.errCode = TSDB_CODE_SUCCESS,
.classifier = classifier,
.pFuncs = (NULL == *pFuncs ? nodesMakeList() : *pFuncs),
.pAliasName = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_VARCHAR), false, false)};
if (NULL == cxt.pFuncs) {
SCollectFuncsCxt cxt = {.errCode = TSDB_CODE_SUCCESS,
.classifier = classifier,
.pFuncs = (NULL == *pFuncs ? nodesMakeList() : *pFuncs),
.pFuncsSet = taosHashInit(4, funcNodeHash, false, false)};
if (NULL == cxt.pFuncs || NULL == cxt.pFuncsSet) {
return TSDB_CODE_OUT_OF_MEMORY;
}
taosHashSetEqualFp(cxt.pFuncsSet, funcNodeEqual);
*pFuncs = NULL;
nodesWalkSelectStmt(pSelect, clause, collectFuncs, &cxt);
if (TSDB_CODE_SUCCESS == cxt.errCode) {
@ -1584,7 +1596,7 @@ int32_t nodesCollectFuncs(SSelectStmt* pSelect, ESqlClause clause, FFuncClassifi
} else {
nodesDestroyList(cxt.pFuncs);
}
taosHashCleanup(cxt.pAliasName);
taosHashCleanup(cxt.pFuncsSet);
return cxt.errCode;
}

View File

@ -371,6 +371,19 @@ static int32_t collectMetaKeyFromShowTables(SCollectMetaKeyCxt* pCxt, SShowStmt*
return code;
}
static int32_t collectMetaKeyFromShowTags(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) {
int32_t code = reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB,
TSDB_INS_TABLE_USER_TAGS, pCxt->pMetaCache);
if (TSDB_CODE_SUCCESS == code) {
if (NULL != pStmt->pDbName) {
code = reserveDbVgInfoInCache(pCxt->pParseCxt->acctId, ((SValueNode*)pStmt->pDbName)->literal, pCxt->pMetaCache);
} else {
code = reserveDbVgInfoInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, pCxt->pMetaCache);
}
}
return code;
}
static int32_t collectMetaKeyFromShowUsers(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) {
return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_USER_USERS,
pCxt->pMetaCache);
@ -537,6 +550,8 @@ static int32_t collectMetaKeyFromQuery(SCollectMetaKeyCxt* pCxt, SNode* pStmt) {
return collectMetaKeyFromShowStreams(pCxt, (SShowStmt*)pStmt);
case QUERY_NODE_SHOW_TABLES_STMT:
return collectMetaKeyFromShowTables(pCxt, (SShowStmt*)pStmt);
case QUERY_NODE_SHOW_TAGS_STMT:
return collectMetaKeyFromShowTags(pCxt, (SShowStmt*)pStmt);
case QUERY_NODE_SHOW_USERS_STMT:
return collectMetaKeyFromShowUsers(pCxt, (SShowStmt*)pStmt);
case QUERY_NODE_SHOW_LICENCE_STMT:

View File

@ -868,7 +868,8 @@ static EDealRes translateNormalValue(STranslateContext* pCxt, SValueNode* pVal,
}
case TSDB_DATA_TYPE_VARCHAR:
case TSDB_DATA_TYPE_VARBINARY: {
if (strict && (pVal->node.resType.bytes > targetDt.bytes - VARSTR_HEADER_SIZE)) {
if (strict && (!IS_VAR_DATA_TYPE(pVal->node.resType.type) ||
pVal->node.resType.bytes > targetDt.bytes - VARSTR_HEADER_SIZE)) {
return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_WRONG_VALUE_TYPE, pVal->literal);
}
pVal->datum.p = taosMemoryCalloc(1, targetDt.bytes + 1);
@ -888,6 +889,9 @@ static EDealRes translateNormalValue(STranslateContext* pCxt, SValueNode* pVal,
break;
}
case TSDB_DATA_TYPE_NCHAR: {
if (strict && !IS_VAR_DATA_TYPE(pVal->node.resType.type)) {
return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_WRONG_VALUE_TYPE, pVal->literal);
}
pVal->datum.p = taosMemoryCalloc(1, targetDt.bytes + 1);
if (NULL == pVal->datum.p) {
return generateDealNodeErrMsg(pCxt, TSDB_CODE_OUT_OF_MEMORY);
@ -1168,7 +1172,7 @@ static int32_t translateRepeatScanFunc(STranslateContext* pCxt, SFunctionNode* p
return TSDB_CODE_SUCCESS;
}
if (isSelectStmt(pCxt->pCurrStmt)) {
//select percentile() without from clause is also valid
// select percentile() without from clause is also valid
if (NULL == ((SSelectStmt*)pCxt->pCurrStmt)->pFromTable) {
return TSDB_CODE_SUCCESS;
}
@ -1681,7 +1685,8 @@ static int32_t dnodeToVgroupsInfo(SArray* pDnodes, SVgroupsInfo** pVgsInfo) {
static bool sysTableFromVnode(const char* pTable) {
return (0 == strcmp(pTable, TSDB_INS_TABLE_USER_TABLES)) ||
(0 == strcmp(pTable, TSDB_INS_TABLE_USER_TABLE_DISTRIBUTED));
(0 == strcmp(pTable, TSDB_INS_TABLE_USER_TABLE_DISTRIBUTED) ||
(0 == strcmp(pTable, TSDB_INS_TABLE_USER_TAGS)));
}
static bool sysTableFromDnode(const char* pTable) { return 0 == strcmp(pTable, TSDB_INS_TABLE_DNODE_VARIABLES); }
@ -1697,7 +1702,7 @@ static int32_t setVnodeSysTableVgroupList(STranslateContext* pCxt, SName* pName,
code = getDBVgInfoImpl(pCxt, pName, &vgroupList);
}
if (TSDB_CODE_SUCCESS == code) {
if (TSDB_CODE_SUCCESS == code && 0 == strcmp(pRealTable->table.tableName, TSDB_INS_TABLE_USER_TABLES)) {
code = addMnodeToVgroupList(&pCxt->pParseCxt->mgmtEpSet, &vgroupList);
}
@ -1786,7 +1791,8 @@ static bool isSingleTable(SRealTableNode* pRealTable) {
int8_t tableType = pRealTable->pMeta->tableType;
if (TSDB_SYSTEM_TABLE == tableType) {
return 0 != strcmp(pRealTable->table.tableName, TSDB_INS_TABLE_USER_TABLES) &&
0 != strcmp(pRealTable->table.tableName, TSDB_INS_TABLE_USER_TABLE_DISTRIBUTED);
0 != strcmp(pRealTable->table.tableName, TSDB_INS_TABLE_USER_TABLE_DISTRIBUTED) &&
0 != strcmp(pRealTable->table.tableName, TSDB_INS_TABLE_USER_TAGS);
}
return (TSDB_CHILD_TABLE == tableType || TSDB_NORMAL_TABLE == tableType);
}
@ -5059,6 +5065,8 @@ static const char* getSysTableName(ENodeType type) {
return TSDB_INS_TABLE_USER_DATABASES;
case QUERY_NODE_SHOW_TABLES_STMT:
return TSDB_INS_TABLE_USER_TABLES;
case QUERY_NODE_SHOW_TAGS_STMT:
return TSDB_INS_TABLE_USER_TAGS;
case QUERY_NODE_SHOW_STABLES_STMT:
return TSDB_INS_TABLE_USER_STABLES;
case QUERY_NODE_SHOW_USERS_STMT:

View File

@ -282,7 +282,7 @@ static int32_t createScanLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pSelect
pScan->hasNormalCols = true;
}
if (TSDB_CODE_SUCCESS == code) {
if (TSDB_CODE_SUCCESS == code && SCAN_TYPE_SYSTEM_TABLE != pScan->scanType) {
code = addPrimaryKeyCol(pScan->tableId, &pScan->pScanCols);
}

View File

@ -1050,8 +1050,11 @@ static int32_t sortPriKeyOptApply(SOptimizeContext* pCxt, SLogicSubplan* pLogicS
}
}
int32_t code =
replaceLogicNode(pLogicSubplan, (SLogicNode*)pSort, (SLogicNode*)nodesListGetNode(pSort->node.pChildren, 0));
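// before removing the sort node, a parentless sort hands its output targets to the child that replaces it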
SLogicNode* pChild = (SLogicNode*)nodesListGetNode(pSort->node.pChildren, 0);
if (NULL == pSort->node.pParent) {
TSWAP(pSort->node.pTargets, pChild->pTargets);
}
int32_t code = replaceLogicNode(pLogicSubplan, (SLogicNode*)pSort, pChild);
if (TSDB_CODE_SUCCESS == code) {
NODES_CLEAR_LIST(pSort->node.pChildren);
nodesDestroyNode((SNode*)pSort);
@ -1982,11 +1985,15 @@ static int32_t rewriteUniqueOptimize(SOptimizeContext* pCxt, SLogicSubplan* pLog
}
static bool lastRowScanOptMayBeOptimized(SLogicNode* pNode) {
if (QUERY_NODE_LOGIC_PLAN_AGG != nodeType(pNode) || !(((SAggLogicNode*)pNode)->hasLastRow) ||
NULL != ((SAggLogicNode*)pNode)->pGroupKeys || 1 != LIST_LENGTH(pNode->pChildren) ||
QUERY_NODE_LOGIC_PLAN_SCAN != nodeType(nodesListGetNode(pNode->pChildren, 0)) ||
NULL != ((SScanLogicNode*)nodesListGetNode(pNode->pChildren, 0))->node.pConditions ||
0 == ((SScanLogicNode*)nodesListGetNode(pNode->pChildren, 0))->cacheLastMode) {
if (QUERY_NODE_LOGIC_PLAN_AGG != nodeType(pNode) || 1 != LIST_LENGTH(pNode->pChildren) ||
QUERY_NODE_LOGIC_PLAN_SCAN != nodeType(nodesListGetNode(pNode->pChildren, 0))) {
return false;
}
SAggLogicNode* pAgg = (SAggLogicNode*)pNode;
SScanLogicNode* pScan = (SScanLogicNode*)nodesListGetNode(pNode->pChildren, 0);
if (!pAgg->hasLastRow || NULL != pAgg->pGroupKeys || NULL != pScan->node.pConditions || 0 == pScan->cacheLastMode ||
IS_TSWINDOW_SPECIFIED(pScan->scanRange)) {
return false;
}

View File

@ -570,7 +570,8 @@ static int32_t createSystemTableScanPhysiNode(SPhysiPlanContext* pCxt, SSubplan*
pScan->showRewrite = pScanLogicNode->showRewrite;
pScan->accountId = pCxt->pPlanCxt->acctId;
if (0 == strcmp(pScanLogicNode->tableName.tname, TSDB_INS_TABLE_USER_TABLES) ||
0 == strcmp(pScanLogicNode->tableName.tname, TSDB_INS_TABLE_USER_TABLE_DISTRIBUTED)) {
0 == strcmp(pScanLogicNode->tableName.tname, TSDB_INS_TABLE_USER_TABLE_DISTRIBUTED) ||
0 == strcmp(pScanLogicNode->tableName.tname, TSDB_INS_TABLE_USER_TAGS)) {
vgroupInfoToNodeAddr(pScanLogicNode->pVgroupList->vgroups, &pSubplan->execNode);
} else {
pSubplan->execNode.nodeId = MNODE_HANDLE;

View File

@ -45,6 +45,12 @@ TEST_F(PlanJoinTest, withWhere) {
"WHERE t1.c1 > t2.c1 AND t1.c2 = 'abc' AND t2.c2 = 'qwe'");
}
TEST_F(PlanJoinTest, withAggAndOrderBy) {
useDb("root", "test");
run("SELECT t1.ts, TOP(t2.c1, 10) FROM st1s1 t1 JOIN st1s2 t2 ON t1.ts = t2.ts ORDER BY t2.ts");
}
TEST_F(PlanJoinTest, multiJoin) {
useDb("root", "test");

View File

@ -84,6 +84,12 @@ TEST_F(PlanOptimizeTest, eliminateProjection) {
// run("select 1-abs(c1) from (select unique(c1) c1 from st1s3) order by 1 nulls first");
}
TEST_F(PlanOptimizeTest, mergeProjects) {
useDb("root", "test");
run("SELECT * FROM (SELECT * FROM t1 WHERE c1 > 10 ORDER BY ts) ORDER BY ts");
}
TEST_F(PlanOptimizeTest, pushDownProjectCond) {
useDb("root", "test");
run("select 1-abs(c1) from (select unique(c1) c1 from st1s3) where 1-c1>5 order by 1 nulls first");

View File

@ -75,7 +75,9 @@ typedef struct SQWDebug {
bool lockEnable;
bool statusEnable;
bool dumpEnable;
bool tmp;
bool sleepSimulate;
bool deadSimulate;
bool redirectSimulate;
} SQWDebug;
extern SQWDebug gQWDebug;
@ -130,12 +132,11 @@ typedef struct SQWTaskCtx {
int8_t taskType;
int8_t explain;
int8_t needFetch;
int32_t queryType;
int32_t msgType;
int32_t fetchType;
int32_t execId;
bool queryRsped;
bool queryFetched;
bool queryEnd;
bool queryContinue;
bool queryInQueue;
@ -228,6 +229,7 @@ typedef struct SQWorkerMgmt {
#define QW_SET_EVENT_PROCESSED(ctx, event) atomic_store_8(&(ctx)->events[event], QW_EVENT_PROCESSED)
#define QW_GET_PHASE(ctx) atomic_load_8(&(ctx)->phase)
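// QW_SET_PHASE skips the transient fetch phases so they never overwrite the recorded query phase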
#define QW_SET_PHASE(ctx, _value) do { if ((_value) != QW_PHASE_PRE_FETCH && (_value) != QW_PHASE_POST_FETCH) { atomic_store_8(&(ctx)->phase, _value); } } while (0)
#define QW_SET_RSP_CODE(ctx, code) atomic_store_32(&(ctx)->rspCode, code)
#define QW_UPDATE_RSP_CODE(ctx, code) atomic_val_compare_exchange_32(&(ctx)->rspCode, 0, code)
@ -362,7 +364,7 @@ int32_t qwAcquireTaskCtx(QW_FPARAMS_DEF, SQWTaskCtx **ctx);
int32_t qwGetTaskCtx(QW_FPARAMS_DEF, SQWTaskCtx **ctx);
int32_t qwAddAcquireTaskCtx(QW_FPARAMS_DEF, SQWTaskCtx **ctx);
void qwReleaseTaskCtx(SQWorker *mgmt, void *ctx);
int32_t qwKillTaskHandle(QW_FPARAMS_DEF, SQWTaskCtx *ctx);
int32_t qwKillTaskHandle(SQWTaskCtx *ctx);
int32_t qwUpdateTaskStatus(QW_FPARAMS_DEF, int8_t status);
int32_t qwDropTask(QW_FPARAMS_DEF);
void qwSaveTbVersionInfo(qTaskInfo_t pTaskInfo, SQWTaskCtx *ctx);
@ -372,13 +374,15 @@ int32_t qwUpdateTimeInQueue(SQWorker *mgmt, int64_t ts, EQueueType type);
int64_t qwGetTimeInQueue(SQWorker *mgmt, EQueueType type);
void qwClearExpiredSch(SQWorker *mgmt, SArray* pExpiredSch);
int32_t qwAcquireScheduler(SQWorker *mgmt, uint64_t sId, int32_t rwType, SQWSchStatus **sch);
void qwFreeTaskCtx(QW_FPARAMS_DEF, SQWTaskCtx *ctx);
void qwFreeTaskCtx(SQWTaskCtx *ctx);
void qwDbgDumpMgmtInfo(SQWorker *mgmt);
int32_t qwDbgValidateStatus(QW_FPARAMS_DEF, int8_t oriStatus, int8_t newStatus, bool *ignore);
int32_t qwDbgBuildAndSendRedirectRsp(int32_t rspType, SRpcHandleInfo *pConn, int32_t code, SEpSet *pEpSet);
int32_t qwAddTaskCtx(QW_FPARAMS_DEF);
int32_t qwDbgResponseRedirect(SQWMsg *qwMsg, SQWTaskCtx *ctx);
void qwDbgSimulateRedirect(SQWMsg *qwMsg, SQWTaskCtx *ctx, bool *rsped);
void qwDbgSimulateSleep(void);
void qwDbgSimulateDead(QW_FPARAMS_DEF, SQWTaskCtx *ctx, bool *rsped);
#ifdef __cplusplus

View File

@ -40,11 +40,13 @@ void qwBuildFetchRsp(void *msg, SOutputData *input, int32_t len, bool qComple
int32_t qwBuildAndSendCQueryMsg(QW_FPARAMS_DEF, SRpcHandleInfo *pConn);
int32_t qwBuildAndSendQueryRsp(int32_t rspType, SRpcHandleInfo *pConn, int32_t code, SQWTaskCtx *ctx);
int32_t qwBuildAndSendExplainRsp(SRpcHandleInfo *pConn, SExplainExecInfo *execInfo, int32_t num);
int32_t qwBuildAndSendErrorRsp(int32_t rspType, SRpcHandleInfo *pConn, int32_t code);
void qwFreeFetchRsp(void *msg);
int32_t qwMallocFetchRsp(int32_t length, SRetrieveTableRsp **rsp);
int32_t qwBuildAndSendHbRsp(SRpcHandleInfo *pConn, SSchedulerHbRsp *rsp, int32_t code);
int32_t qwRegisterQueryBrokenLinkArg(QW_FPARAMS_DEF, SRpcHandleInfo *pConn);
int32_t qwRegisterHbBrokenLinkArg(SQWorker *mgmt, uint64_t sId, SRpcHandleInfo *pConn);
int32_t qwBuildAndSendDropMsg(QW_FPARAMS_DEF, SRpcHandleInfo *pConn);
#ifdef __cplusplus
}

View File

@ -9,7 +9,7 @@
#include "tmsg.h"
#include "tname.h"
SQWDebug gQWDebug = {.statusEnable = true, .dumpEnable = false, .tmp = false};
SQWDebug gQWDebug = {.statusEnable = true, .dumpEnable = false, .redirectSimulate = false, .deadSimulate = false, .sleepSimulate = false};
int32_t qwDbgValidateStatus(QW_FPARAMS_DEF, int8_t oriStatus, int8_t newStatus, bool *ignore) {
if (!gQWDebug.statusEnable) {
@ -147,8 +147,17 @@ int32_t qwDbgBuildAndSendRedirectRsp(int32_t rspType, SRpcHandleInfo *pConn, int
return TSDB_CODE_SUCCESS;
}
int32_t qwDbgResponseRedirect(SQWMsg *qwMsg, SQWTaskCtx *ctx) {
if (gQWDebug.tmp) {
void qwDbgSimulateRedirect(SQWMsg *qwMsg, SQWTaskCtx *ctx, bool *rsped) {
static int32_t ignoreTime = 0;
if (*rsped) {
return;
}
if (gQWDebug.redirectSimulate) {
if (++ignoreTime <= 10) {
return;
}
if (TDMT_SCH_QUERY == qwMsg->msgType && (0 == taosRand() % 3)) {
SEpSet epSet = {0};
epSet.inUse = 1;
@ -162,42 +171,94 @@ int32_t qwDbgResponseRedirect(SQWMsg *qwMsg, SQWTaskCtx *ctx) {
ctx->phase = QW_PHASE_POST_QUERY;
qwDbgBuildAndSendRedirectRsp(qwMsg->msgType + 1, &qwMsg->connInfo, TSDB_CODE_RPC_REDIRECT, &epSet);
return TSDB_CODE_SUCCESS;
*rsped = true;
return;
}
if (TDMT_SCH_MERGE_QUERY == qwMsg->msgType && (0 == taosRand() % 3)) {
ctx->phase = QW_PHASE_POST_QUERY;
QW_SET_PHASE(ctx, QW_PHASE_POST_QUERY);
qwDbgBuildAndSendRedirectRsp(qwMsg->msgType + 1, &qwMsg->connInfo, TSDB_CODE_RPC_REDIRECT, NULL);
return TSDB_CODE_SUCCESS;
*rsped = true;
return;
}
if ((TDMT_SCH_FETCH == qwMsg->msgType) && (0 == taosRand() % 9)) {
qwDbgBuildAndSendRedirectRsp(qwMsg->msgType + 1, &qwMsg->connInfo, TSDB_CODE_RPC_REDIRECT, NULL);
*rsped = true;
return;
}
}
return TSDB_CODE_SUCCESS;
}
void qwDbgSimulateSleep(void) {
if (!gQWDebug.sleepSimulate) {
return;
}
static int32_t ignoreTime = 0;
if (++ignoreTime > 10) {
taosSsleep(taosRand() % 20);
}
}
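// debug hook: after the first 10 calls, randomly (about 1 in 9) reply with a broken-link error and enqueue a drop for the task to mimic a dying worker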
void qwDbgSimulateDead(QW_FPARAMS_DEF, SQWTaskCtx *ctx, bool *rsped) {
if (!gQWDebug.deadSimulate) {
return;
}
if (*rsped) {
return;
}
static int32_t ignoreTime = 0;
if (++ignoreTime > 10 && 0 == taosRand() % 9) {
SRpcHandleInfo *pConn = ((ctx->msgType == TDMT_SCH_FETCH || ctx->msgType == TDMT_SCH_MERGE_FETCH) ? &ctx->dataConnInfo : &ctx->ctrlConnInfo);
qwBuildAndSendErrorRsp(ctx->msgType + 1, pConn, TSDB_CODE_RPC_BROKEN_LINK);
qwBuildAndSendDropMsg(QW_FPARAMS(), pConn);
*rsped = true;
return;
}
}
int32_t qwDbgEnableDebug(char *option) {
if (0 == strcasecmp(option, "lock")) {
gQWDebug.lockEnable = true;
qDebug("qw lock debug enabled");
qError("qw lock debug enabled");
return TSDB_CODE_SUCCESS;
}
if (0 == strcasecmp(option, "status")) {
gQWDebug.statusEnable = true;
qDebug("qw status debug enabled");
qError("qw status debug enabled");
return TSDB_CODE_SUCCESS;
}
if (0 == strcasecmp(option, "dump")) {
gQWDebug.dumpEnable = true;
qDebug("qw dump debug enabled");
qError("qw dump debug enabled");
return TSDB_CODE_SUCCESS;
}
if (0 == strcasecmp(option, "tmp")) {
gQWDebug.tmp = true;
qDebug("qw tmp debug enabled");
if (0 == strcasecmp(option, "sleep")) {
gQWDebug.sleepSimulate = true;
qError("qw sleep debug enabled");
return TSDB_CODE_SUCCESS;
}
if (0 == strcasecmp(option, "dead")) {
gQWDebug.deadSimulate = true;
qError("qw dead debug enabled");
return TSDB_CODE_SUCCESS;
}
if (0 == strcasecmp(option, "redirect")) {
gQWDebug.redirectSimulate = true;
qError("qw redirect debug enabled");
return TSDB_CODE_SUCCESS;
}

View File

@ -43,6 +43,20 @@ void qwFreeFetchRsp(void *msg) {
}
}
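// reply with an empty message that carries only the error code on the given connection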
int32_t qwBuildAndSendErrorRsp(int32_t rspType, SRpcHandleInfo *pConn, int32_t code) {
SRpcMsg rpcRsp = {
.msgType = rspType,
.pCont = NULL,
.contLen = 0,
.code = code,
.info = *pConn,
};
tmsgSendRsp(&rpcRsp);
return TSDB_CODE_SUCCESS;
}
int32_t qwBuildAndSendQueryRsp(int32_t rspType, SRpcHandleInfo *pConn, int32_t code, SQWTaskCtx *ctx) {
STbVerInfo* tbInfo = ctx ? &ctx->tbInfo : NULL;
int64_t affectedRows = ctx ? ctx->affectedRows : 0;
@ -184,7 +198,6 @@ int32_t qwBuildAndSendDropMsg(QW_FPARAMS_DEF, SRpcHandleInfo *pConn) {
int32_t code = tmsgPutToQueue(&mgmt->msgCb, FETCH_QUEUE, &pNewMsg);
if (TSDB_CODE_SUCCESS != code) {
QW_SCH_TASK_ELOG("put drop task msg to queue failed, vgId:%d, code:%s", mgmt->nodeId, tstrerror(code));
rpcFreeCont(req);
QW_ERR_RET(code);
}
@ -374,8 +387,7 @@ int32_t qWorkerProcessQueryMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int
qwMsg.msgInfo.needFetch = msg->needFetch;
char * sql = strndup(msg->msg, msg->sqlLen);
QW_SCH_TASK_DLOG("processQuery start, node:%p, type:%s, handle:%p, sql:%s", node, TMSG_INFO(pMsg->msgType), pMsg->info.handle, sql);
QW_SCH_TASK_DLOG("processQuery start, node:%p, type:%s, handle:%p, SQL:%s", node, TMSG_INFO(pMsg->msgType), pMsg->info.handle, sql);
QW_ERR_RET(qwProcessQuery(QW_FPARAMS(), &qwMsg, sql));
QW_SCH_TASK_DLOG("processQuery end, node:%p", node);

View File

@ -270,7 +270,7 @@ int32_t qwAddAcquireTaskCtx(QW_FPARAMS_DEF, SQWTaskCtx **ctx) { return qwAddTask
void qwReleaseTaskCtx(SQWorker *mgmt, void *ctx) { taosHashRelease(mgmt->ctxHash, ctx); }
void qwFreeTaskHandle(QW_FPARAMS_DEF, qTaskInfo_t *taskHandle) {
void qwFreeTaskHandle(qTaskInfo_t *taskHandle) {
// Note: free and kill may race with each other
qTaskInfo_t otaskHandle = atomic_load_ptr(taskHandle);
if (otaskHandle && atomic_val_compare_exchange_ptr(taskHandle, otaskHandle, NULL)) {
@ -278,7 +278,7 @@ void qwFreeTaskHandle(QW_FPARAMS_DEF, qTaskInfo_t *taskHandle) {
}
}
int32_t qwKillTaskHandle(QW_FPARAMS_DEF, SQWTaskCtx *ctx) {
int32_t qwKillTaskHandle(SQWTaskCtx *ctx) {
int32_t code = 0;
// Note: free and kill may race with each other
qTaskInfo_t taskHandle = atomic_load_ptr(&ctx->taskHandle);
@ -290,7 +290,7 @@ int32_t qwKillTaskHandle(QW_FPARAMS_DEF, SQWTaskCtx *ctx) {
QW_RET(code);
}
void qwFreeTaskCtx(QW_FPARAMS_DEF, SQWTaskCtx *ctx) {
void qwFreeTaskCtx(SQWTaskCtx *ctx) {
if (ctx->ctrlConnInfo.handle) {
tmsgReleaseHandle(&ctx->ctrlConnInfo, TAOS_CONN_SERVER);
}
@ -300,7 +300,7 @@ void qwFreeTaskCtx(QW_FPARAMS_DEF, SQWTaskCtx *ctx) {
// NO need to release dataConnInfo
qwFreeTaskHandle(QW_FPARAMS(), &ctx->taskHandle);
qwFreeTaskHandle(&ctx->taskHandle);
if (ctx->sinkHandle) {
dsDestroyDataSinker(ctx->sinkHandle);
@ -336,7 +336,7 @@ int32_t qwDropTaskCtx(QW_FPARAMS_DEF) {
QW_ERR_RET(TSDB_CODE_QRY_TASK_CTX_NOT_EXIST);
}
qwFreeTaskCtx(QW_FPARAMS(), &octx);
qwFreeTaskCtx(&octx);
QW_TASK_DLOG_E("task ctx dropped");
@ -463,13 +463,21 @@ void qwDestroyImpl(void *pMgmt) {
mgmt->hbTimer = NULL;
taosTmrCleanUp(mgmt->timer);
// TODO STOP ALL QUERY
// TODO FREE ALL
uint64_t qId, tId;
int32_t eId;
void *pIter = taosHashIterate(mgmt->ctxHash, NULL);
while (pIter) {
SQWTaskCtx *ctx = (SQWTaskCtx *)pIter;
void *key = taosHashGetKey(pIter, NULL);
QW_GET_QTID(key, qId, tId, eId);
qwFreeTaskCtx(ctx);
QW_TASK_DLOG_E("task ctx freed");
pIter = taosHashIterate(mgmt->ctxHash, pIter);
}
taosHashCleanup(mgmt->ctxHash);
void *pIter = taosHashIterate(mgmt->schHash, NULL);
pIter = taosHashIterate(mgmt->schHash, NULL);
while (pIter) {
SQWSchStatus *sch = (SQWSchStatus *)pIter;
qwDestroySchStatus(sch);

View File

@ -83,6 +83,7 @@ int32_t qwExecTask(QW_FPARAMS_DEF, SQWTaskCtx *ctx, bool *queryEnd) {
// if *taskHandle is NULL, it's killed right now
if (taskHandle) {
qwDbgSimulateSleep();
code = qExecTask(taskHandle, &pRes, &useconds);
if (code) {
if (code != TSDB_CODE_OPS_NOT_SUPPORT) {
@ -293,11 +294,7 @@ int32_t qwHandlePrePhaseEvents(QW_FPARAMS_DEF, int8_t phase, SQWPhaseInput *inpu
QW_LOCK(QW_WRITE, &ctx->lock);
if (QW_PHASE_PRE_FETCH == phase) {
atomic_store_8((int8_t *)&ctx->queryFetched, true);
} else {
atomic_store_8(&ctx->phase, phase);
}
QW_SET_PHASE(ctx, phase);
if (atomic_load_8((int8_t *)&ctx->queryEnd)) {
QW_TASK_ELOG_E("query already end");
@ -370,6 +367,7 @@ int32_t qwHandlePrePhaseEvents(QW_FPARAMS_DEF, int8_t phase, SQWPhaseInput *inpu
}
_return:
if (ctx) {
QW_UPDATE_RSP_CODE(ctx, code);
@ -390,7 +388,6 @@ int32_t qwHandlePostPhaseEvents(QW_FPARAMS_DEF, int8_t phase, SQWPhaseInput *inp
int32_t code = 0;
SQWTaskCtx *ctx = NULL;
SRpcHandleInfo connInfo = {0};
SRpcHandleInfo *rspConnection = NULL;
QW_TASK_DLOG("start to handle event at phase %s", qwPhaseStr(phase));
@ -403,13 +400,6 @@ int32_t qwHandlePostPhaseEvents(QW_FPARAMS_DEF, int8_t phase, SQWPhaseInput *inp
QW_ERR_JRET(TSDB_CODE_QRY_TASK_DROPPED);
}
if (QW_PHASE_POST_QUERY == phase) {
connInfo = ctx->ctrlConnInfo;
rspConnection = &connInfo;
ctx->queryRsped = true;
}
if (QW_EVENT_RECEIVED(ctx, QW_EVENT_DROP)) {
if (QW_PHASE_POST_FETCH == phase) {
QW_TASK_WLOG("drop received at wrong phase %s", qwPhaseStr(phase));
@ -437,17 +427,23 @@ _return:
qwUpdateTaskStatus(QW_FPARAMS(), JOB_TASK_STATUS_PART_SUCC);
}
if (rspConnection) {
qwBuildAndSendQueryRsp(input->msgType + 1, rspConnection, code, ctx);
QW_TASK_DLOG("query msg rsped, handle:%p, code:%x - %s", rspConnection->handle, code, tstrerror(code));
if (QW_PHASE_POST_QUERY == phase && ctx) {
ctx->queryRsped = true;
bool rsped = false;
SQWMsg qwMsg = {.msgType = ctx->msgType, .connInfo = ctx->ctrlConnInfo};
qwDbgSimulateRedirect(&qwMsg, ctx, &rsped);
qwDbgSimulateDead(QW_FPARAMS(), ctx, &rsped);
if (!rsped) {
qwBuildAndSendQueryRsp(input->msgType + 1, &ctx->ctrlConnInfo, code, ctx);
QW_TASK_DLOG("query msg rsped, handle:%p, code:%x - %s", ctx->ctrlConnInfo.handle, code, tstrerror(code));
}
}
if (ctx) {
QW_UPDATE_RSP_CODE(ctx, code);
if (QW_PHASE_POST_FETCH != phase) {
atomic_store_8(&ctx->phase, phase);
}
QW_SET_PHASE(ctx, phase);
QW_UNLOCK(QW_WRITE, &ctx->lock);
qwReleaseTaskCtx(mgmt, ctx);
@ -488,8 +484,6 @@ int32_t qwPreprocessQuery(QW_FPARAMS_DEF, SQWMsg *qwMsg) {
QW_ERR_JRET(qwAddTaskStatus(QW_FPARAMS(), JOB_TASK_STATUS_INIT));
qwDbgResponseRedirect(qwMsg, ctx);
_return:
if (ctx) {
@ -517,7 +511,7 @@ int32_t qwProcessQuery(QW_FPARAMS_DEF, SQWMsg *qwMsg, const char* sql) {
ctx->taskType = qwMsg->msgInfo.taskType;
ctx->explain = qwMsg->msgInfo.explain;
ctx->needFetch = qwMsg->msgInfo.needFetch;
ctx->queryType = qwMsg->msgType;
ctx->msgType = qwMsg->msgType;
//QW_TASK_DLOGL("subplan json string, len:%d, %s", qwMsg->msgLen, qwMsg->msg);
@ -636,8 +630,8 @@ int32_t qwProcessCQuery(QW_FPARAMS_DEF, SQWMsg *qwMsg) {
QW_LOCK(QW_WRITE, &ctx->lock);
if (queryEnd || code || 0 == atomic_load_8((int8_t *)&ctx->queryContinue)) {
// Note: if necessary, fetch need to put cquery to queue again
atomic_store_8(&ctx->phase, 0);
// Note: query is not running anymore
QW_SET_PHASE(ctx, 0);
QW_UNLOCK(QW_WRITE, &ctx->lock);
break;
}
@ -662,14 +656,13 @@ int32_t qwProcessFetch(QW_FPARAMS_DEF, SQWMsg *qwMsg) {
QW_ERR_JRET(qwGetTaskCtx(QW_FPARAMS(), &ctx));
ctx->queryType = qwMsg->msgType;
ctx->msgType = qwMsg->msgType;
ctx->dataConnInfo = qwMsg->connInfo;
SOutputData sOutput = {0};
QW_ERR_JRET(qwGetQueryResFromSink(QW_FPARAMS(), ctx, &dataLen, &rsp, &sOutput));
if (NULL == rsp) {
ctx->dataConnInfo = qwMsg->connInfo;
QW_SET_EVENT_RECEIVED(ctx, QW_EVENT_FETCH);
} else {
bool qComplete = (DS_BUF_EMPTY == sOutput.bufStatus && sOutput.queryEnd);
@ -714,9 +707,16 @@ _return:
}
if (code || rsp) {
qwBuildAndSendFetchRsp(qwMsg->msgType + 1, &qwMsg->connInfo, rsp, dataLen, code);
QW_TASK_DLOG("%s send, handle:%p, code:%x - %s, dataLen:%d", TMSG_INFO(qwMsg->msgType + 1), qwMsg->connInfo.handle, code, tstrerror(code),
dataLen);
bool rsped = false;
if (ctx) {
qwDbgSimulateRedirect(qwMsg, ctx, &rsped);
qwDbgSimulateDead(QW_FPARAMS(), ctx, &rsped);
}
if (!rsped) {
qwBuildAndSendFetchRsp(qwMsg->msgType + 1, &qwMsg->connInfo, rsp, dataLen, code);
QW_TASK_DLOG("%s send, handle:%p, code:%x - %s, dataLen:%d", TMSG_INFO(qwMsg->msgType + 1), qwMsg->connInfo.handle, code, tstrerror(code),
dataLen);
}
}
QW_RET(TSDB_CODE_SUCCESS);
@ -724,7 +724,7 @@ _return:
int32_t qwProcessDrop(QW_FPARAMS_DEF, SQWMsg *qwMsg) {
int32_t code = 0;
bool rsped = false;
bool dropped = false;
SQWTaskCtx *ctx = NULL;
bool locked = false;
@ -740,18 +740,14 @@ int32_t qwProcessDrop(QW_FPARAMS_DEF, SQWMsg *qwMsg) {
}
if (QW_QUERY_RUNNING(ctx)) {
QW_ERR_JRET(qwKillTaskHandle(QW_FPARAMS(), ctx));
QW_ERR_JRET(qwKillTaskHandle(ctx));
qwUpdateTaskStatus(QW_FPARAMS(), JOB_TASK_STATUS_DROP);
} else if (ctx->phase > 0) {
QW_ERR_JRET(qwDropTask(QW_FPARAMS()));
rsped = true;
} else {
// task not started
QW_ERR_JRET(qwDropTask(QW_FPARAMS()));
dropped = true;
}
if (!rsped) {
ctx->ctrlConnInfo = qwMsg->connInfo;
if (!dropped) {
QW_SET_EVENT_RECEIVED(ctx, QW_EVENT_DROP);
}
@ -954,7 +950,7 @@ int32_t qwProcessDelete(QW_FPARAMS_DEF, SQWMsg *qwMsg, SDeleteRes *pRes) {
_return:
qwFreeTaskCtx(QW_FPARAMS(), &ctx);
qwFreeTaskCtx(&ctx);
QW_RET(TSDB_CODE_SUCCESS);
}

View File

@ -55,13 +55,11 @@ typedef enum {
#define SCHEDULE_DEFAULT_MAX_TASK_NUM 1000
#define SCHEDULE_DEFAULT_MAX_NODE_TABLE_NUM 200 // unit is TSDB_TABLE_NUM_UNIT
#define SCHEDULE_DEFAULT_POLICY SCH_LOAD_SEQ
#define SCHEDULE_DEFAULT_MAX_NODE_NUM 20
#define SCH_DEFAULT_TASK_TIMEOUT_USEC 10000000
#define SCH_MAX_TASK_TIMEOUT_USEC 60000000
#define SCH_MAX_CANDIDATE_EP_NUM TSDB_MAX_REPLICA
#define SCH_DEFAULT_MAX_RETRY_NUM 6
typedef struct SSchDebug {
bool lockEnable;
@ -211,6 +209,7 @@ typedef struct SSchTask {
int32_t maxExecTimes; // task max exec times
int32_t maxRetryTimes; // task max retry times
int32_t retryTimes; // task retry times
bool waitRetry; // wait for retry
int32_t execId; // task current execute index
SSchLevel *level; // level
SRWLatch planLock; // task update plan lock
@ -274,7 +273,8 @@ typedef struct SSchJob {
int32_t errCode;
SRWLatch resLock;
SExecResult execRes;
void *resData; //TODO free it or not
void *fetchRes; // TODO: decide whether it should be freed here
bool fetched;
int32_t resNumOfRows;
SSchResInfo userRes;
const char *sql;
@ -326,7 +326,7 @@ extern SSchedulerMgmt schMgmt;
#define SCH_IS_EXPLAIN_JOB(_job) (EXPLAIN_MODE_ANALYZE == (_job)->attr.explainMode)
#define SCH_NETWORK_ERR(_code) ((_code) == TSDB_CODE_RPC_BROKEN_LINK || (_code) == TSDB_CODE_RPC_NETWORK_UNAVAIL)
#define SCH_MERGE_TASK_NETWORK_ERR(_task, _code, _len) (SCH_NETWORK_ERR(_code) && (((_len) > 0) || (!SCH_IS_DATA_BIND_TASK(_task))))
#define SCH_REDIRECT_MSGTYPE(_msgType) ((_msgType) == TDMT_SCH_QUERY || (_msgType) == TDMT_SCH_MERGE_QUERY || (_msgType) == TDMT_SCH_FETCH || (_msgType) == TDMT_SCH_MERGE_FETCH)
#define SCH_REDIRECT_MSGTYPE(_msgType) ((_msgType) == TDMT_SCH_LINK_BROKEN || (_msgType) == TDMT_SCH_QUERY || (_msgType) == TDMT_SCH_MERGE_QUERY || (_msgType) == TDMT_SCH_FETCH || (_msgType) == TDMT_SCH_MERGE_FETCH)
#define SCH_TASK_NEED_REDIRECT(_task, _msgType, _code, _rspLen) (SCH_REDIRECT_MSGTYPE(_msgType) && (NEED_SCHEDULER_REDIRECT_ERROR(_code) || SCH_MERGE_TASK_NETWORK_ERR((_task), (_code), (_rspLen))))
#define SCH_NEED_RETRY(_msgType, _code) ((SCH_NETWORK_ERR(_code) && SCH_REDIRECT_MSGTYPE(_msgType)) || (_code) == TSDB_CODE_SCH_TIMEOUT_ERROR)
@ -368,6 +368,8 @@ extern SSchedulerMgmt schMgmt;
qError("QID:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, pJob->queryId, SCH_TASK_ID(pTask), SCH_TASK_EID(pTask),__VA_ARGS__)
#define SCH_TASK_DLOG(param, ...) \
qDebug("QID:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, pJob->queryId, SCH_TASK_ID(pTask), SCH_TASK_EID(pTask),__VA_ARGS__)
#define SCH_TASK_TLOG(param, ...) \
qTrace("QID:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, pJob->queryId, SCH_TASK_ID(pTask), SCH_TASK_EID(pTask),__VA_ARGS__)
#define SCH_TASK_DLOGL(param, ...) \
qDebugL("QID:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d " param, pJob->queryId, SCH_TASK_ID(pTask), SCH_TASK_EID(pTask),__VA_ARGS__)
#define SCH_TASK_WLOG(param, ...) \
@ -441,7 +443,7 @@ void schFreeRpcCtx(SRpcCtx *pCtx);
int32_t schGetCallbackFp(int32_t msgType, __async_send_cb_fn_t *fp);
bool schJobNeedToStop(SSchJob *pJob, int8_t *pStatus);
int32_t schProcessOnTaskSuccess(SSchJob *pJob, SSchTask *pTask);
int32_t schSaveJobQueryRes(SSchJob *pJob, SQueryTableRsp *rsp);
int32_t schSaveJobExecRes(SSchJob *pJob, SQueryTableRsp *rsp);
int32_t schProcessOnExplainDone(SSchJob *pJob, SSchTask *pTask, SRetrieveTableRsp *pRsp);
void schProcessOnDataFetched(SSchJob *job);
int32_t schGetTaskInJob(SSchJob *pJob, uint64_t taskId, SSchTask **pTask);
@ -492,7 +494,7 @@ int32_t schSwitchTaskCandidateAddr(SSchJob *pJob, SSchTask *pTask);
void schDirectPostJobRes(SSchedulerReq* pReq, int32_t errCode);
int32_t schHandleJobFailure(SSchJob *pJob, int32_t errCode);
int32_t schHandleJobDrop(SSchJob *pJob, int32_t errCode);
bool schChkCurrentOp(SSchJob *pJob, int32_t op, bool sync);
bool schChkCurrentOp(SSchJob *pJob, int32_t op, int8_t sync);
extern SSchDebug gSCHDebug;

View File

@ -110,7 +110,7 @@ int32_t schUpdateJobStatus(SSchJob *pJob, int8_t newStatus) {
break;
case JOB_TASK_STATUS_PART_SUCC:
if (newStatus != JOB_TASK_STATUS_FAIL && newStatus != JOB_TASK_STATUS_SUCC &&
newStatus != JOB_TASK_STATUS_DROP) {
newStatus != JOB_TASK_STATUS_DROP && newStatus != JOB_TASK_STATUS_EXEC) {
SCH_ERR_JRET(TSDB_CODE_QRY_APP_ERROR);
}
@ -389,13 +389,18 @@ int32_t schDumpJobExecRes(SSchJob* pJob, SExecResult* pRes) {
int32_t schDumpJobFetchRes(SSchJob* pJob, void** pData) {
int32_t code = 0;
if (pJob->resData && ((SRetrieveTableRsp *)pJob->resData)->completed) {
SCH_ERR_RET(schSwitchJobStatus(pJob, JOB_TASK_STATUS_SUCC, NULL));
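// mark the job as fetched under the result lock; redirect handling checks this flag before retrying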
SCH_LOCK(SCH_WRITE, &pJob->resLock);
pJob->fetched = true;
if (pJob->fetchRes && ((SRetrieveTableRsp *)pJob->fetchRes)->completed) {
SCH_ERR_JRET(schSwitchJobStatus(pJob, JOB_TASK_STATUS_SUCC, NULL));
}
while (true) {
*pData = atomic_load_ptr(&pJob->resData);
if (*pData != atomic_val_compare_exchange_ptr(&pJob->resData, *pData, NULL)) {
*pData = atomic_load_ptr(&pJob->fetchRes);
if (*pData != atomic_val_compare_exchange_ptr(&pJob->fetchRes, *pData, NULL)) {
continue;
}
@ -414,7 +419,11 @@ int32_t schDumpJobFetchRes(SSchJob* pJob, void** pData) {
SCH_JOB_DLOG("fetch done, totalRows:%d", pJob->resNumOfRows);
return TSDB_CODE_SUCCESS;
_return:
SCH_UNLOCK(SCH_WRITE, &pJob->resLock);
return code;
}
int32_t schNotifyUserExecRes(SSchJob* pJob) {
@ -512,8 +521,12 @@ int32_t schHandleJobDrop(SSchJob *pJob, int32_t errCode) {
}
int32_t schProcessOnJobPartialSuccess(SSchJob *pJob) {
schPostJobRes(pJob, SCH_OP_EXEC);
int32_t schProcessOnJobPartialSuccess(SSchJob *pJob) {
if (schChkCurrentOp(pJob, SCH_OP_FETCH, -1)) {
SCH_ERR_RET(schLaunchFetchTask(pJob));
} else {
schPostJobRes(pJob, 0);
}
return TSDB_CODE_SUCCESS;
}
@ -526,7 +539,7 @@ int32_t schProcessOnExplainDone(SSchJob *pJob, SSchTask *pTask, SRetrieveTableRs
SCH_TASK_DLOG("got explain rsp, rows:%d, complete:%d", htonl(pRsp->numOfRows), pRsp->completed);
atomic_store_32(&pJob->resNumOfRows, htonl(pRsp->numOfRows));
atomic_store_ptr(&pJob->resData, pRsp);
atomic_store_ptr(&pJob->fetchRes, pRsp);
SCH_SET_TASK_STATUS(pTask, JOB_TASK_STATUS_SUCC);
@ -561,7 +574,7 @@ int32_t schLaunchJobLowerLevel(SSchJob *pJob, SSchTask *pTask) {
return TSDB_CODE_SUCCESS;
}
int32_t schSaveJobQueryRes(SSchJob *pJob, SQueryTableRsp *rsp) {
int32_t schSaveJobExecRes(SSchJob *pJob, SQueryTableRsp *rsp) {
if (rsp->tbFName[0]) {
SCH_LOCK(SCH_WRITE, &pJob->resLock);
@ -600,7 +613,7 @@ int32_t schGetTaskInJob(SSchJob *pJob, uint64_t taskId, SSchTask **pTask) {
int32_t schLaunchJob(SSchJob *pJob) {
if (EXPLAIN_MODE_STATIC == pJob->attr.explainMode) {
SCH_ERR_RET(qExecStaticExplain(pJob->pDag, (SRetrieveTableRsp **)&pJob->resData));
SCH_ERR_RET(qExecStaticExplain(pJob->pDag, (SRetrieveTableRsp **)&pJob->fetchRes));
SCH_ERR_RET(schSwitchJobStatus(pJob, JOB_TASK_STATUS_PART_SUCC, NULL));
} else {
SSchLevel *level = taosArrayGet(pJob->levels, pJob->levelIdx);
@ -661,7 +674,7 @@ void schFreeJobImpl(void *job) {
qDestroyQueryPlan(pJob->pDag);
taosMemoryFreeClear(pJob->userRes.execRes);
taosMemoryFreeClear(pJob->resData);
taosMemoryFreeClear(pJob->fetchRes);
taosMemoryFree(pJob);
int32_t jobNum = atomic_sub_fetch_32(&schMgmt.jobNum, 1);
@ -795,9 +808,14 @@ void schDirectPostJobRes(SSchedulerReq* pReq, int32_t errCode) {
}
}
bool schChkCurrentOp(SSchJob *pJob, int32_t op, bool sync) {
bool schChkCurrentOp(SSchJob *pJob, int32_t op, int8_t sync) {
bool r = false;
SCH_LOCK(SCH_READ, &pJob->opStatus.lock);
bool r = (pJob->opStatus.op == op) && (pJob->opStatus.syncReq == sync);
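// a negative sync value matches the current op regardless of whether it was a sync or async request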
if (sync >= 0) {
r = (pJob->opStatus.op == op) && (pJob->opStatus.syncReq == sync);
} else {
r = (pJob->opStatus.op == op);
}
SCH_UNLOCK(SCH_READ, &pJob->opStatus.lock);
return r;

View File

@ -256,7 +256,7 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t execId, SDa
SCH_ERR_JRET(rsp->code);
SCH_ERR_JRET(schSaveJobQueryRes(pJob, rsp));
SCH_ERR_JRET(schSaveJobExecRes(pJob, rsp));
atomic_add_fetch_32(&pJob->resNumOfRows, rsp->affectedRows);
@ -277,8 +277,8 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t execId, SDa
SCH_ERR_JRET(TSDB_CODE_QRY_INVALID_INPUT);
}
if (pJob->resData) {
SCH_TASK_ELOG("explain result is already generated, res:%p", pJob->resData);
if (pJob->fetchRes) {
SCH_TASK_ELOG("explain result is already generated, res:%p", pJob->fetchRes);
SCH_ERR_JRET(TSDB_CODE_SCH_STATUS_ERROR);
}
@ -325,13 +325,13 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t execId, SDa
return TSDB_CODE_SUCCESS;
}
if (pJob->resData) {
SCH_TASK_ELOG("got fetch rsp while res already exists, res:%p", pJob->resData);
if (pJob->fetchRes) {
SCH_TASK_ELOG("got fetch rsp while res already exists, res:%p", pJob->fetchRes);
taosMemoryFreeClear(rsp);
SCH_ERR_JRET(TSDB_CODE_SCH_STATUS_ERROR);
}
atomic_store_ptr(&pJob->resData, rsp);
atomic_store_ptr(&pJob->fetchRes, rsp);
atomic_add_fetch_32(&pJob->resNumOfRows, htonl(rsp->numOfRows));
if (rsp->completed) {
@ -1010,6 +1010,7 @@ int32_t schBuildAndSendMsg(SSchJob *pJob, SSchTask *pTask, SQueryNodeAddr *addr,
memcpy(pMsg->msg + len, pTask->msg, pTask->msgLen);
persistHandle = true;
SCH_SET_TASK_HANDLE(pTask, rpcAllocHandle());
break;
}
case TDMT_SCH_FETCH:

View File

@ -47,10 +47,10 @@ void schFreeTask(SSchJob *pJob, SSchTask *pTask) {
void schInitTaskRetryTimes(SSchJob *pJob, SSchTask *pTask, SSchLevel *pLevel) {
if (SCH_IS_DATA_BIND_TASK(pTask) || (!SCH_IS_QUERY_JOB(pJob)) || (SCH_ALL != schMgmt.cfg.schPolicy)) {
pTask->maxRetryTimes = SCH_MAX_CANDIDATE_EP_NUM;
pTask->maxRetryTimes = SCH_DEFAULT_MAX_RETRY_NUM;
} else {
int32_t nodeNum = taosArrayGetSize(pJob->nodeList);
pTask->maxRetryTimes = TMAX(nodeNum, SCH_MAX_CANDIDATE_EP_NUM);
pTask->maxRetryTimes = TMAX(nodeNum, SCH_DEFAULT_MAX_RETRY_NUM);
}
pTask->maxExecTimes = pTask->maxRetryTimes * (pLevel->level + 1);
@ -64,11 +64,11 @@ int32_t schInitTask(SSchJob *pJob, SSchTask *pTask, SSubplan *pPlan, SSchLevel *
pTask->execId = -1;
pTask->timeoutUsec = SCH_DEFAULT_TASK_TIMEOUT_USEC;
pTask->taskId = schGenTaskId();
pTask->execNodes =
taosHashInit(SCH_MAX_CANDIDATE_EP_NUM, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_NO_LOCK);
schInitTaskRetryTimes(pJob, pTask, pLevel);
pTask->execNodes =
taosHashInit(pTask->maxExecTimes, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_NO_LOCK);
pTask->profile.execTime = taosArrayInit(pTask->maxExecTimes, sizeof(int64_t));
if (NULL == pTask->execNodes || NULL == pTask->profile.execTime) {
SCH_ERR_JRET(TSDB_CODE_QRY_OUT_OF_MEMORY);
@ -125,8 +125,8 @@ int32_t schDropTaskExecNode(SSchJob *pJob, SSchTask *pTask, void *handle, int32_
SCH_TASK_DLOG("execId %d removed from execNodeList", execId);
}
if (execId != pTask->execId) { // ignore it
SCH_TASK_DLOG("execId %d is not current execId %d", execId, pTask->execId);
if ((execId != pTask->execId) || pTask->waitRetry) { // ignore it
SCH_TASK_DLOG("execId %d is already not current execId %d, waitRetry %d", execId, pTask->execId, pTask->waitRetry);
SCH_ERR_RET(TSDB_CODE_SCH_IGNORE_ERROR);
}
@ -138,7 +138,17 @@ int32_t schUpdateTaskExecNode(SSchJob *pJob, SSchTask *pTask, void *handle, int3
return TSDB_CODE_SUCCESS;
}
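// ignore handle updates that arrive for a stale execId or while the task is waiting to be retried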
if ((execId != pTask->execId) || pTask->waitRetry) { // ignore it
SCH_TASK_DLOG("handle not updated since execId %d is already not current execId %d, waitRetry %d", execId, pTask->execId, pTask->waitRetry);
return TSDB_CODE_SUCCESS;
}
SSchNodeInfo *nodeInfo = taosHashGet(pTask->execNodes, &execId, sizeof(execId));
if (NULL == nodeInfo) { // ignore it
SCH_TASK_DLOG("handle not updated since execId %d already not exist, current execId %d, waitRetry %d", execId, pTask->execId, pTask->waitRetry);
return TSDB_CODE_SUCCESS;
}
nodeInfo->handle = handle;
SCH_TASK_DLOG("handle updated to %p for execId %d", handle, execId);
@ -335,6 +345,7 @@ int32_t schDoTaskRedirect(SSchJob *pJob, SSchTask *pTask, SDataBuf *pData, int32
return TSDB_CODE_SUCCESS;
}
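// flag the task as waiting for retry so late responses from the old exec nodes are ignored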
pTask->waitRetry = true;
schDropTaskOnExecNode(pJob, pTask);
taosHashClear(pTask->execNodes);
SCH_ERR_JRET(schRemoveTaskFromExecList(pJob, pTask));
@ -394,6 +405,18 @@ _return:
int32_t schHandleRedirect(SSchJob *pJob, SSchTask *pTask, SDataBuf *pData, int32_t rspCode) {
int32_t code = 0;
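// once the fetch result has been consumed the task can no longer be redirected; return the original error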
if (JOB_TASK_STATUS_PART_SUCC == pJob->status) {
SCH_LOCK(SCH_WRITE, &pJob->resLock);
if (pJob->fetched) {
SCH_UNLOCK(SCH_WRITE, &pJob->resLock);
SCH_TASK_ELOG("already fetched while got error %s", tstrerror(rspCode));
SCH_ERR_RET(rspCode);
}
SCH_UNLOCK(SCH_WRITE, &pJob->resLock);
schUpdateJobStatus(pJob, JOB_TASK_STATUS_EXEC);
}
if (SCH_IS_DATA_BIND_TASK(pTask)) {
if (NULL == pData->pEpSet) {
SCH_TASK_ELOG("no epset updated while got error %s", tstrerror(rspCode));
@ -591,7 +614,7 @@ int32_t schSetAddrsFromNodeList(SSchJob *pJob, SSchTask *pTask) {
if (pJob->nodeList) {
nodeNum = taosArrayGetSize(pJob->nodeList);
for (int32_t i = 0; i < nodeNum && addNum < SCH_MAX_CANDIDATE_EP_NUM; ++i) {
for (int32_t i = 0; i < nodeNum; ++i) {
SQueryNodeLoad *nload = taosArrayGet(pJob->nodeList, i);
SQueryNodeAddr *naddr = &nload->addr;
@ -600,8 +623,8 @@ int32_t schSetAddrsFromNodeList(SSchJob *pJob, SSchTask *pTask) {
SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY);
}
SCH_TASK_DLOG("set %dth candidate addr, id %d, fqdn:%s, port:%d", i, naddr->nodeId, SCH_GET_CUR_EP(naddr)->fqdn,
SCH_GET_CUR_EP(naddr)->port);
SCH_TASK_TLOG("set %dth candidate addr, id %d, inUse:%d/%d, fqdn:%s, port:%d", i, naddr->nodeId, naddr->epSet.inUse, naddr->epSet.numOfEps,
SCH_GET_CUR_EP(naddr)->fqdn, SCH_GET_CUR_EP(naddr)->port);
++addNum;
}
@ -621,9 +644,9 @@ int32_t schSetTaskCandidateAddrs(SSchJob *pJob, SSchTask *pTask) {
}
pTask->candidateIdx = 0;
pTask->candidateAddrs = taosArrayInit(SCH_MAX_CANDIDATE_EP_NUM, sizeof(SQueryNodeAddr));
pTask->candidateAddrs = taosArrayInit(SCHEDULE_DEFAULT_MAX_NODE_NUM, sizeof(SQueryNodeAddr));
if (NULL == pTask->candidateAddrs) {
SCH_TASK_ELOG("taosArrayInit %d condidate addrs failed", SCH_MAX_CANDIDATE_EP_NUM);
SCH_TASK_ELOG("taosArrayInit %d condidate addrs failed", SCHEDULE_DEFAULT_MAX_NODE_NUM);
SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY);
}
@ -790,6 +813,7 @@ int32_t schLaunchTaskImpl(SSchJob *pJob, SSchTask *pTask) {
atomic_add_fetch_32(&pTask->level->taskLaunchedNum, 1);
pTask->execId++;
pTask->retryTimes++;
pTask->waitRetry = false;
SCH_TASK_DLOG("start to launch task, execId %d, retry %d", pTask->execId, pTask->retryTimes);
@ -885,9 +909,9 @@ void schDropTaskInHashList(SSchJob *pJob, SHashObj *list) {
int32_t schLaunchFetchTask(SSchJob *pJob) {
int32_t code = 0;
void *resData = atomic_load_ptr(&pJob->resData);
if (resData) {
SCH_JOB_DLOG("res already fetched, res:%p", resData);
void *fetchRes = atomic_load_ptr(&pJob->fetchRes);
if (fetchRes) {
SCH_JOB_DLOG("res already fetched, res:%p", fetchRes);
return TSDB_CODE_SUCCESS;
}

View File

@ -170,7 +170,7 @@ void rpcSetDefaultAddr(void* thandle, const char* ip, const char* fqdn) {
transSetDefaultAddr(thandle, ip, fqdn);
}
int64_t rpcAllocHandle() { return transAllocHandle(); }
void* rpcAllocHandle() { return (void*)transAllocHandle(); }
int32_t rpcInit() {
transInit();

View File

@ -1,4 +1,5 @@
/** Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
@ -809,7 +810,7 @@ SCliConn* cliGetConn(SCliMsg* pMsg, SCliThrd* pThrd, bool* ignore) {
conn = exh->handle;
if (conn == NULL) {
conn = getConnFromPool(pThrd->pool, EPSET_GET_INUSE_IP(&pCtx->epSet), EPSET_GET_INUSE_PORT(&pCtx->epSet));
*ignore = (conn && 0 == specifyConnRef(conn, true, refId)) ? false : true;
if (conn != NULL) specifyConnRef(conn, true, refId);
}
transReleaseExHandle(transGetRefMgt(), refId);
}
@ -849,14 +850,20 @@ void cliHandleReq(SCliMsg* pMsg, SCliThrd* pThrd) {
bool ignore = false;
SCliConn* conn = cliGetConn(pMsg, pThrd, &ignore);
if (ignore == true) {
tError("ignore msg");
return;
}
if (conn != NULL) {
transCtxMerge(&conn->ctx, &pCtx->appCtx);
transQueuePush(&conn->cliMsgs, pMsg);
cliSend(conn);
} else {
conn = cliCreateConn(pThrd);
int64_t refId = (int64_t)pMsg->msg.info.handle;
if (refId != 0) specifyConnRef(conn, true, refId);
transCtxMerge(&conn->ctx, &pCtx->appCtx);
transQueuePush(&conn->cliMsgs, pMsg);
@ -1206,7 +1213,13 @@ SCliThrd* transGetWorkThrd(STrans* trans, int64_t handle, bool* validHandle) {
if (idx < 0) return NULL;
return ((SCliObj*)trans->tcphandle)->pThreadObj[idx];
}
return transGetWorkThrdFromHandle(handle, validHandle);
SCliThrd* pThrd = transGetWorkThrdFromHandle(handle, validHandle);
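// handle is valid but not yet bound to a worker thread; fall back to round-robin selection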
if (*validHandle == true && pThrd == NULL) {
int idx = cliRBChoseIdx(trans);
if (idx < 0) return NULL;
pThrd = ((SCliObj*)trans->tcphandle)->pThreadObj[idx];
}
return pThrd;
}
void transReleaseCliHandle(void* handle) {
int idx = -1;

View File

@ -266,26 +266,27 @@
./test.sh -f tsim/tmq/snapshot1.sim
# --- stable
./test.sh -f tsim/stable/alter_comment.sim
./test.sh -f tsim/stable/alter_count.sim
./test.sh -f tsim/stable/alter_import.sim
./test.sh -f tsim/stable/alter_insert1.sim
./test.sh -f tsim/stable/alter_insert2.sim
./test.sh -f tsim/stable/alter_metrics.sim
./test.sh -f tsim/stable/column_add.sim
./test.sh -f tsim/stable/column_drop.sim
./test.sh -f tsim/stable/column_modify.sim
./test.sh -f tsim/stable/disk.sim
./test.sh -f tsim/stable/dnode3.sim
./test.sh -f tsim/stable/metrics.sim
./test.sh -f tsim/stable/refcount.sim
./test.sh -f tsim/stable/show.sim
./test.sh -f tsim/stable/values.sim
./test.sh -f tsim/stable/vnode3.sim
./test.sh -f tsim/stable/column_add.sim
./test.sh -f tsim/stable/column_drop.sim
./test.sh -f tsim/stable/column_modify.sim
./test.sh -f tsim/stable/tag_add.sim
./test.sh -f tsim/stable/tag_drop.sim
./test.sh -f tsim/stable/tag_filter.sim
./test.sh -f tsim/stable/tag_modify.sim
./test.sh -f tsim/stable/tag_rename.sim
./test.sh -f tsim/stable/alter_comment.sim
./test.sh -f tsim/stable/alter_count.sim
./test.sh -f tsim/stable/alter_insert1.sim
./test.sh -f tsim/stable/alter_insert2.sim
./test.sh -f tsim/stable/alter_import.sim
./test.sh -f tsim/stable/tag_filter.sim
./test.sh -f tsim/stable/values.sim
./test.sh -f tsim/stable/vnode3.sim
# --- for multi process mode
./test.sh -f tsim/user/basic.sim -m
@ -309,6 +310,7 @@
./test.sh -f tsim/valgrind/checkError1.sim
./test.sh -f tsim/valgrind/checkError2.sim
./test.sh -f tsim/valgrind/checkError3.sim
./test.sh -f tsim/valgrind/checkError4.sim
# --- vnode
# unsupport ./test.sh -f tsim/vnode/replica3_basic.sim
@ -366,7 +368,7 @@
./test.sh -f tsim/compute/diff2.sim
./test.sh -f tsim/compute/first.sim
./test.sh -f tsim/compute/interval.sim
# jira ./test.sh -f tsim/compute/last_row.sim
./test.sh -f tsim/compute/last_row.sim
./test.sh -f tsim/compute/last.sim
./test.sh -f tsim/compute/leastsquare.sim
./test.sh -f tsim/compute/max.sim
@ -415,7 +417,7 @@
./test.sh -f tsim/tag/4.sim
./test.sh -f tsim/tag/5.sim
# jira ./test.sh -f tsim/tag/6.sim
# jira ./test.sh -f tsim/tag/add.sim
./test.sh -f tsim/tag/add.sim
./test.sh -f tsim/tag/bigint.sim
./test.sh -f tsim/tag/binary_binary.sim
./test.sh -f tsim/tag/binary.sim

View File

@ -65,8 +65,6 @@ if $data00 != 19 then
return -1
endi
print =============== step7
sql select last_row(tbcol) from $mt
print ===> $data00

View File

@ -99,7 +99,7 @@ if $rows != 1 then
endi
#sql select * from information_schema.`streams`
sql select * from information_schema.user_tables
if $rows != 30 then
if $rows != 31 then
return -1
endi
#sql select * from information_schema.user_table_distributed
@ -197,7 +197,7 @@ if $rows != 1 then
endi
#sql select * from performance_schema.`streams`
sql select * from information_schema.user_tables
if $rows != 30 then
if $rows != 31 then
return -1
endi
#sql select * from information_schema.user_table_distributed

View File

@ -934,11 +934,8 @@ if $data79 != null then
endi
print ======== step9
system sh/exec.sh -n dnode1 -s stop -x SIGINT
sleep 3000
system sh/exec.sh -n dnode1 -s start
sleep 3000
sql select * from tb order by ts asc
if $rows != 8 then

View File

@ -604,9 +604,7 @@ sql_error alter table tb drop column a
print ======== step9
system sh/exec.sh -n dnode1 -s stop -x SIGINT
sleep 3000
system sh/exec.sh -n dnode1 -s start
sleep 3000
sql select * from tb order by ts desc
if $rows != 7 then

View File

@ -69,8 +69,6 @@ if $rows != 2 then
return -1
endi
sleep 100
print =============== step2
$i = 1
$tb = $tbPrefix . $i

View File

@ -24,7 +24,7 @@ sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol1 tinyint, tgcol2 int,
$i = 0
while $i < 5
$tb = $tbPrefix . $i
sql create table $tb using $mt tags( 0, 0, 0, 0, 0 )
sql create table $tb using $mt tags( 0, 0, 0, 0, '0' )
$x = 0
while $x < $rowNum
$ms = $x . m
@ -35,7 +35,7 @@ while $i < 5
endw
while $i < 10
$tb = $tbPrefix . $i
sql create table $tb using $mt tags( 1, 1, 1, 1, 1 )
sql create table $tb using $mt tags( 1, 1, 1, 1, '1' )
$x = 0
while $x < $rowNum
$ms = $x . m

View File

@ -250,8 +250,8 @@ sql alter table $mt add tag tgcol6 binary(10)
sql reset query cache
sql alter table $tb set tag tgcol4=false
sql alter table $tb set tag tgcol5=5
sql alter table $tb set tag tgcol6=6
sql alter table $tb set tag tgcol5='5'
sql alter table $tb set tag tgcol6='6'
sql reset query cache
sql select * from $mt where tgcol5 = '5'
@ -390,8 +390,8 @@ sql alter table $mt add tag tgcol5 binary(17)
sql alter table $mt add tag tgcol6 bool
sql reset query cache
sql alter table $tb set tag tgcol4=4
sql alter table $tb set tag tgcol5=5
sql alter table $tb set tag tgcol6=1
sql alter table $tb set tag tgcol5='5'
sql alter table $tb set tag tgcol6='1'
sql reset query cache
sql select * from $mt where tgcol5 = '5'
@ -409,7 +409,7 @@ endi
if $data03 != 5 then
return -1
endi
if $data04 != 1 then
if $data04 != 0 then
return -1
endi
@ -425,7 +425,7 @@ $i = 9
$mt = $mtPrefix . $i
$tb = $tbPrefix . $i
sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol1 double, tgcol2 binary(10), tgcol3 binary(10))
sql create table $tb using $mt tags( 1, 2, '3' )
sql create table $tb using $mt tags( 1, '2', '3' )
sql insert into $tb values(now, 1)
sql select * from $mt where tgcol2 = '2'
if $rows != 1 then
@ -520,7 +520,7 @@ sql alter table $mt add tag tgcol4 binary(10)
sql alter table $mt add tag tgcol5 bool
sql reset query cache
sql alter table $tb set tag tgcol4=4
sql alter table $tb set tag tgcol4='4'
sql alter table $tb set tag tgcol5=false
sql reset query cache
@ -598,9 +598,9 @@ sql alter table $mt add tag tgcol7 bigint
sql alter table $mt add tag tgcol8 smallint
sql reset query cache
sql alter table $tb set tag tgcol4=4
sql alter table $tb set tag tgcol4='4'
sql alter table $tb set tag tgcol5=5
sql alter table $tb set tag tgcol6=6
sql alter table $tb set tag tgcol6='6'
sql alter table $tb set tag tgcol7=7
sql alter table $tb set tag tgcol8=8
sql reset query cache
@ -687,11 +687,11 @@ sql alter table $mt add tag tgcol5 bigint
sql reset query cache
sql alter table $tb set tag tgcol1=false
sql alter table $tb set tag tgcol2=5
sql alter table $tb set tag tgcol2='5'
sql alter table $tb set tag tgcol3=4
sql alter table $tb set tag tgcol4=3
sql alter table $tb set tag tgcol4='3'
sql alter table $tb set tag tgcol5=2
sql alter table $tb set tag tgcol6=1
sql alter table $tb set tag tgcol6='1'
sql reset query cache
sql select * from $mt where tgcol4 = '3'
@ -783,8 +783,8 @@ sql alter table $mt add tag tgcol4 int
sql alter table $mt add tag tgcol6 bigint
sql reset query cache
sql alter table $tb set tag tgcol1=7
sql alter table $tb set tag tgcol2=8
sql alter table $tb set tag tgcol1='7'
sql alter table $tb set tag tgcol2='8'
sql alter table $tb set tag tgcol3=9
sql alter table $tb set tag tgcol4=10
sql alter table $tb set tag tgcol5=11

View File

@ -40,19 +40,19 @@ if $data03 != 2 then
return -1
endi
sql alter table $mt change tag tagcx tgcol3 -x step21
sql alter table $mt rename tag tagcx tgcol3 -x step21
return -1
step21:
sql alter table $mt change tag tgcol1 tgcol2 -x step22
sql alter table $mt rename tag tgcol1 tgcol2 -x step22
return -1
step22:
#sql alter table $mt change tag tgcol1 xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx -x step20
#sql alter table $mt rename tag tgcol1 xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx -x step20
# return -1
#step20:
sql alter table $mt change tag tgcol1 tgcol3
sql alter table $mt change tag tgcol2 tgcol4
sql alter table $mt change tag tgcol4 tgcol3 -x step23
sql alter table $mt rename tag tgcol1 tgcol3
sql alter table $mt rename tag tgcol2 tgcol4
sql alter table $mt rename tag tgcol4 tgcol3 -x step23
return -1
step23:
@ -77,8 +77,8 @@ if $data03 != 2 then
return -1
endi
sql alter table $mt change tag tgcol1 tgcol3
sql alter table $mt change tag tgcol2 tgcol4
sql alter table $mt rename tag tgcol1 tgcol3
sql alter table $mt rename tag tgcol2 tgcol4
print =============== step4
$i = 4
@ -101,8 +101,8 @@ if $data03 != 2.00000 then
return -1
endi
sql alter table $mt change tag tgcol1 tgcol3
sql alter table $mt change tag tgcol2 tgcol4
sql alter table $mt rename tag tgcol1 tgcol3
sql alter table $mt rename tag tgcol2 tgcol4
print =============== step5
$i = 5
@ -125,8 +125,8 @@ if $data03 != 2 then
return -1
endi
sql alter table $mt change tag tgcol1 tgcol3
sql alter table $mt change tag tgcol2 tgcol4
sql alter table $mt rename tag tgcol1 tgcol3
sql alter table $mt rename tag tgcol2 tgcol4
print =============== step6
$i = 6
@ -163,15 +163,14 @@ endi
sql alter table $mt drop tag tgcol3
sql reset query cache
sql alter table $mt change tag tgcol4 tgcol3
sql alter table $mt change tag tgcol1 tgcol7
sql alter table $mt change tag tgcol2 tgcol8
sql alter table $mt rename tag tgcol4 tgcol3
sql alter table $mt rename tag tgcol1 tgcol7
sql alter table $mt rename tag tgcol2 tgcol8
sql reset query cache
sql alter table $mt change tag tgcol3 tgcol9
sql alter table $mt change tag tgcol5 tgcol10
sql alter table $mt change tag tgcol6 tgcol11
sql alter table $mt rename tag tgcol3 tgcol9
sql alter table $mt rename tag tgcol5 tgcol10
sql alter table $mt rename tag tgcol6 tgcol11
sleep 3000
sql reset query cache
print =============== step2

View File

@ -1,28 +1,13 @@
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/deploy.sh -n dnode2 -i 2
system sh/cfg.sh -n dnode1 -c debugflag -v 131
system sh/cfg.sh -n dnode2 -c debugflag -v 131
system sh/exec.sh -n dnode1 -s start -v
system sh/exec.sh -n dnode2 -s start -v
sql connect
print =============== step1: show dnodes
$x = 0
step1:
$x = $x + 1
sleep 1000
if $x == 10 then
print ---> dnode not ready!
return -1
endi
sql show dnodes
print ---> $data00 $data01 $data02 $data03 $data04 $data05
if $rows != 1 then
return -1
endi
if $data(1)[4] != ready then
goto step1
endi
print =============== step2: create alter drop show user
print =============== step1: create alter drop show user
sql create user u1 pass 'taosdata'
sql show users
sql alter user u1 sysinfo 1
@ -31,11 +16,34 @@ sql alter user u1 pass 'taosdata'
sql drop user u1
sql_error alter user u2 sysinfo 0
print =============== step3: create drop dnode
print =============== step2 create drop dnode
sql create dnode $hostname port 7200
sql drop dnode 2
sql create dnode $hostname port 7300
sql drop dnode 3
sql alter dnode 1 'debugflag 131'
print =============== step3: show dnodes
$x = 0
step3:
$x = $x + 1
sleep 1000
if $x == 60 then
print ====> dnode not ready!
return -1
endi
sql show dnodes
print ===> $data00 $data01 $data02 $data03 $data04 $data05
print ===> $data10 $data11 $data12 $data13 $data14 $data15
if $rows != 2 then
return -1
endi
if $data(1)[4] != ready then
goto step3
endi
if $data(2)[4] != ready then
goto step3
endi
print =============== create database, stable, table
sql create database db vgroups 3
sql use db
@ -45,7 +53,7 @@ sql create table tba (ts timestamp, c1 binary(10), c2 nchar(10));
print =============== run show xxxx
sql show dnodes
if $rows != 1 then
if $rows != 2 then
return -1
endi
@ -76,7 +84,7 @@ endi
print =============== run select * from information_schema.xxxx
sql select * from information_schema.`dnodes`
if $rows != 1 then
if $rows != 2 then
return -1
endi
@ -96,7 +104,7 @@ if $rows != 1 then
endi
sql select * from information_schema.user_tables
if $rows != 30 then
if $rows != 31 then
return -1
endi
@ -127,16 +135,27 @@ endi
print =============== stop
system sh/exec.sh -n dnode1 -s stop -x SIGINT
system sh/exec.sh -n dnode2 -s stop -x SIGINT
print =============== check
$null=
system_content sh/checkValgrind.sh -n dnode1
print cmd return result ----> [ $system_content ]
if $system_content > 1 then
if $system_content > 0 then
return -1
endi
if $system_content == $null then
return -1
endi
system_content sh/checkValgrind.sh -n dnode2
print cmd return result ----> [ $system_content ]
if $system_content > 2 then
return -1
endi
if $system_content == $null then
return -1
endi

View File

@ -98,7 +98,7 @@ $null=
system_content sh/checkValgrind.sh -n dnode1
print cmd return result ----> [ $system_content ]
if $system_content > 1 then
if $system_content > 0 then
return -1
endi

View File

@ -90,7 +90,7 @@ $null=
system_content sh/checkValgrind.sh -n dnode1
print cmd return result ----> [ $system_content ]
if $system_content > 1 then
if $system_content > 0 then
return -1
endi

View File

@ -0,0 +1,94 @@
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c debugflag -v 131
system sh/exec.sh -n dnode1 -s start -v
sql connect
print =============== step1: create drop show dnodes
$x = 0
step1:
$x = $x + 1
sleep 1000
if $x == 10 then
print ---> dnode not ready!
return -1
endi
sql show dnodes
print ---> $data00 $data01 $data02 $data03 $data04 $data05
if $rows != 1 then
return -1
endi
if $data(1)[4] != ready then
goto step1
endi
print =============== step2: create db
sql create database d1 vgroups 2 buffer 3
sql create database d2 vgroups 2 buffer 3
sql show databases;
sql show d1.vgroups;
print =============== step3: create show stable
sql create table if not exists d1.stb1 (ts timestamp, c1 int, c2 float, c3 double) tags (t1 int unsigned)
sql create table if not exists d1.stb2 (ts timestamp, c1 int, c2 float, c3 double) tags (t1 int unsigned)
sql create table if not exists d2.stb1 (ts timestamp, c1 int, c2 float, c3 double) tags (t1 int unsigned)
sql show d1.stables
sql show d2.stables
print =============== step4: create show table
sql create table d1.ct1 using d1.stb1 tags(1000)
sql create table d1.ct2 using d1.stb1 tags(2000)
sql create table d1.ct3 using d1.stb2 tags(3000)
sql create table d2.ct1 using d2.stb1 tags(1000)
sql create table d2.ct2 using d2.stb1 tags(2000)
sql show d1.tables
sql show d2.tables
print =============== step5: insert data
sql insert into d1.ct1 values(now+0s, 10, 2.0, 3.0)
sql insert into d2.ct1 values(now+1s, 11, 2.1, 3.1)(now+2s, -12, -2.2, -3.2)(now+3s, -13, -2.3, -3.3)
sql insert into d1.ct2 values(now+0s, 10, 2.0, 3.0)
sql insert into d2.ct2 values(now+1s, 11, 2.1, 3.1)(now+2s, -12, -2.2, -3.2)(now+3s, -13, -2.3, -3.3)
sql insert into d1.ct3 values('2021-01-01 00:00:00.000', 10, 2.0, 3.0)
print =============== step6: create db
sql drop table d1.ct1
sql drop table d1.stb2
sql drop database d1
sql drop database d2
print =============== step7: repeat
sql create database d1 vgroups 2 buffer 3
sql create database d2 vgroups 2 buffer 3
sql create table if not exists d1.stb1 (ts timestamp, c1 int, c2 float, c3 double) tags (t1 int unsigned)
sql create table if not exists d1.stb2 (ts timestamp, c1 int, c2 float, c3 double) tags (t1 int unsigned)
sql create table if not exists d2.stb1 (ts timestamp, c1 int, c2 float, c3 double) tags (t1 int unsigned)
sql create table d1.ct1 using d1.stb1 tags(1000)
sql create table d1.ct2 using d1.stb1 tags(2000)
sql create table d1.ct3 using d1.stb2 tags(3000)
sql create table d2.ct1 using d2.stb1 tags(1000)
sql create table d2.ct2 using d2.stb1 tags(2000)
sql insert into d1.ct1 values(now+0s, 10, 2.0, 3.0)
sql insert into d2.ct1 values(now+1s, 11, 2.1, 3.1)(now+2s, -12, -2.2, -3.2)(now+3s, -13, -2.3, -3.3)
sql insert into d1.ct2 values(now+0s, 10, 2.0, 3.0)
sql insert into d2.ct2 values(now+1s, 11, 2.1, 3.1)(now+2s, -12, -2.2, -3.2)(now+3s, -13, -2.3, -3.3)
sql insert into d1.ct3 values('2021-01-01 00:00:00.000', 10, 2.0, 3.0)
sql drop table d1.ct1
sql drop table d1.stb2
sql drop database d1
sql drop database d2
_OVER:
system sh/exec.sh -n dnode1 -s stop -x SIGINT
print =============== check
$null=
system_content sh/checkValgrind.sh -n dnode1
print cmd return result ----> [ $system_content ]
if $system_content > 0 then
return -1
endi
if $system_content == $null then
return -1
endi

View File

@ -0,0 +1,82 @@
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c debugflag -v 131
system sh/exec.sh -n dnode1 -s start -v
sql connect
print =============== step1: create drop show dnodes
$x = 0
step1:
$x = $x + 1
sleep 1000
if $x == 10 then
print ---> dnode not ready!
return -1
endi
sql show dnodes
print ---> $data00 $data01 $data02 $data03 $data04 $data05
if $rows != 1 then
return -1
endi
if $data(1)[4] != ready then
goto step1
endi
print =============== step2: create db
sql create database db
sql use db
sql create table db.stb (ts timestamp, c1 int, c2 binary(4)) tags(t1 int, t2 float, t3 binary(16)) comment "abd"
print =============== step3: alter stb
sql_error alter table db.stb add column ts int
sql alter table db.stb add column c3 int
sql alter table db.stb add column c4 bigint
sql alter table db.stb add column c5 binary(12)
sql alter table db.stb drop column c1
sql alter table db.stb drop column c4
sql alter table db.stb MODIFY column c2 binary(32)
sql alter table db.stb add tag t4 bigint
sql alter table db.stb add tag c1 int
sql alter table db.stb add tag t5 binary(12)
sql alter table db.stb drop tag c1
sql alter table db.stb drop tag t5
sql alter table db.stb MODIFY tag t3 binary(32)
sql alter table db.stb rename tag t1 tx
sql alter table db.stb comment 'abcde' ;
goto _OVER
print =============== step4: alter tb
sql create table tb (ts timestamp, a int)
sql insert into tb values(now-28d, -28)
sql select count(a) from tb
sql alter table tb add column b smallint
sql insert into tb values(now-25d, -25, 0)
sql select count(b) from tb
sql alter table tb add column c tinyint
sql insert into tb values(now-22d, -22, 3, 0)
sql select count(c) from tb
sql alter table tb add column d int
sql insert into tb values(now-19d, -19, 6, 0, 0)
sql select count(d) from tb
sql alter table tb add column e bigint
sql alter table tb add column f float
sql alter table tb add column g double
sql alter table tb add column h binary(10)
sql select count(a), count(b), count(c), count(d), count(e), count(f), count(g), count(h) from tb
sql select * from tb order by ts desc
_OVER:
system sh/exec.sh -n dnode1 -s stop -x SIGINT
print =============== check
$null=
system_content sh/checkValgrind.sh -n dnode1
print cmd return result ----> [ $system_content ]
if $system_content > 0 then
return -1
endi
if $system_content == $null then
return -1
endi

View File

@ -0,0 +1,77 @@
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c debugflag -v 131
system sh/exec.sh -n dnode1 -s start -v
sql connect
print =============== step1: create drop show dnodes
$x = 0
step1:
$x = $x + 1
sleep 1000
if $x == 10 then
print ---> dnode not ready!
return -1
endi
sql show dnodes
print ---> $data00 $data01 $data02 $data03 $data04 $data05
if $rows != 1 then
return -1
endi
if $data(1)[4] != ready then
goto step1
endi
$tbPrefix = tb
$tbNum = 5
$rowNum = 10
print =============== step2: prepare data
sql create database db vgroups 2
sql use db
sql create table if not exists stb (ts timestamp, tbcol int, tbcol2 float, tbcol3 double) tags (tgcol int unsigned)
$i = 0
while $i < $tbNum
$tb = $tbPrefix . $i
sql create table $tb using stb tags( $i )
$x = 0
while $x < $rowNum
$cc = $x * 60000
$ms = 1601481600000 + $cc
sql insert into $tb values ($ms , $x , $x , $x )
$x = $x + 1
endw
$i = $i + 1
endw
print =============== step3: avg
sql select avg(tbcol) from tb1
sql select avg(tbcol) from tb1 where ts <= 1601481840000
sql select avg(tbcol) as b from tb1
sql select avg(tbcol) as b from tb1 interval(1d)
goto _OVER
sql select avg(tbcol) as b from tb1 where ts <= 1601481840000s interval(1m)
sql select avg(tbcol) as c from stb
sql select avg(tbcol) as c from stb where ts <= 1601481840000
sql select avg(tbcol) as c from stb where tgcol < 5 and ts <= 1601481840000
sql select avg(tbcol) as c from stb interval(1m)
sql select avg(tbcol) as c from stb interval(1d)
sql select avg(tbcol) as b from stb where ts <= 1601481840000s interval(1m)
sql select avg(tbcol) as c from stb group by tgcol
sql select avg(tbcol) as b from stb where ts <= 1601481840000s partition by tgcol interval(1m)
_OVER:
system sh/exec.sh -n dnode1 -s stop -x SIGINT
print =============== check
$null=
system_content sh/checkValgrind.sh -n dnode1
print cmd return result ----> [ $system_content ]
if $system_content > 0 then
return -1
endi
if $system_content == $null then
return -1
endi

View File

@ -0,0 +1,305 @@
from distutils.log import error
import taos
import sys
import time
import os
import platform
from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import *
import subprocess
class TDTestCase:
updatecfgDict = {'debugFlag': 143, "cDebugFlag": 143, "uDebugFlag": 143, "rpcDebugFlag": 143, "tmrDebugFlag": 143,
"jniDebugFlag": 143, "simDebugFlag": 143, "dDebugFlag": 143, "dDebugFlag": 143, "vDebugFlag": 143, "mDebugFlag": 143, "qDebugFlag": 143,
"wDebugFlag": 143, "sDebugFlag": 143, "tsdbDebugFlag": 143, "tqDebugFlag": 143, "fsDebugFlag": 143, "fnDebugFlag": 143 ,"udf":0}
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), logSql)
def getBuildPath(self):
selfPath = os.path.dirname(os.path.realpath(__file__))
if ("community" in selfPath):
projPath = selfPath[:selfPath.find("community")]
else:
projPath = selfPath[:selfPath.find("tests")]
for root, dirs, files in os.walk(projPath):
if ("taosd" in files or "taosd.exe" in files):
rootRealPath = os.path.dirname(os.path.realpath(root))
if ("packaging" not in rootRealPath):
buildPath = root[:len(root) - len("/build/bin")]
break
return buildPath
def prepare_udf_so(self):
selfPath = os.path.dirname(os.path.realpath(__file__))
if ("community" in selfPath):
projPath = selfPath[:selfPath.find("community")]
else:
projPath = selfPath[:selfPath.find("tests")]
print(projPath)
if platform.system().lower() == 'windows':
self.libudf1 = subprocess.Popen('(for /r %s %%i in ("udf1.d*") do @echo %%i)|grep lib|head -n1'%projPath , shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8")
self.libudf2 = subprocess.Popen('(for /r %s %%i in ("udf2.d*") do @echo %%i)|grep lib|head -n1'%projPath , shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8")
if (not tdDnodes.dnodes[0].remoteIP == ""):
tdDnodes.dnodes[0].remote_conn.get(tdDnodes.dnodes[0].config["path"]+'/debug/build/lib/libudf1.so',projPath+"\\debug\\build\\lib\\")
tdDnodes.dnodes[0].remote_conn.get(tdDnodes.dnodes[0].config["path"]+'/debug/build/lib/libudf2.so',projPath+"\\debug\\build\\lib\\")
self.libudf1 = self.libudf1.replace('udf1.dll','libudf1.so')
self.libudf2 = self.libudf2.replace('udf2.dll','libudf2.so')
else:
self.libudf1 = subprocess.Popen('find %s -name "libudf1.so"|grep lib|head -n1'%projPath , shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8")
self.libudf2 = subprocess.Popen('find %s -name "libudf2.so"|grep lib|head -n1'%projPath , shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8")
self.libudf1 = self.libudf1.replace('\r','').replace('\n','')
self.libudf2 = self.libudf2.replace('\r','').replace('\n','')
def prepare_data(self):
tdSql.execute("drop database if exists db ")
tdSql.execute("create database if not exists db duration 300")
tdSql.execute("use db")
tdSql.execute(
'''create table stb1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
tags (t1 int)
'''
)
tdSql.execute(
'''
create table t1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
'''
)
for i in range(4):
tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
for i in range(9):
tdSql.execute(
f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
tdSql.execute(
f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute(
f'''insert into t1 values
( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a )
( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a )
( '2021-01-01 01:01:06.000', 3, 33333, 333, 33, 3.33, 33.33, 0, "binary3", "nchar3", now()+3a )
( '2021-05-07 01:01:10.000', 4, 44444, 444, 44, 4.44, 44.44, 1, "binary4", "nchar4", now()+4a )
( '2021-07-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( '2021-09-30 01:01:16.000', 5, 55555, 555, 55, 5.55, 55.55, 0, "binary5", "nchar5", now()+5a )
( '2022-02-01 01:01:20.000', 6, 66666, 666, 66, 6.66, 66.66, 1, "binary6", "nchar6", now()+6a )
( '2022-10-28 01:01:26.000', 7, 00000, 000, 00, 0.00, 00.00, 1, "binary7", "nchar7", "1970-01-01 08:00:00.000" )
( '2022-12-01 01:01:30.000', 8, -88888, -888, -88, -8.88, -88.88, 0, "binary8", "nchar8", "1969-01-01 01:00:00.000" )
( '2022-12-31 01:01:36.000', 9, -99999999999999999, -999, -99, -9.99, -999999999999999999999.99, 1, "binary9", "nchar9", "1900-01-01 00:00:00.000" )
( '2023-02-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
'''
)
tdSql.execute("create table tb (ts timestamp , num1 int , num2 int, num3 double , num4 binary(30))")
tdSql.execute(
f'''insert into tb values
( '2020-04-21 01:01:01.000', NULL, 1, 1, "binary1" )
( '2020-10-21 01:01:01.000', 1, 1, 1.11, "binary1" )
( '2020-12-31 01:01:01.000', 2, 22222, 22, "binary1" )
( '2021-01-01 01:01:06.000', 3, 33333, 33, "binary1" )
( '2021-05-07 01:01:10.000', 4, 44444, 44, "binary1" )
( '2021-07-21 01:01:01.000', NULL, NULL, NULL, "binary1" )
( '2021-09-30 01:01:16.000', 5, 55555, 55, "binary1" )
( '2022-02-01 01:01:20.000', 6, 66666, 66, "binary1" )
( '2022-10-28 01:01:26.000', 0, 00000, 00, "binary1" )
( '2022-12-01 01:01:30.000', 8, -88888, -88, "binary1" )
( '2022-12-31 01:01:36.000', 9, -9999999, -99, "binary1" )
( '2023-02-21 01:01:01.000', NULL, NULL, NULL, "binary1" )
'''
)
# udf functions with join
ts_start = 1652517451000
tdSql.execute("create stable st (ts timestamp , c1 int , c2 int ,c3 double ,c4 double ) tags(ind int)")
tdSql.execute("create table sub1 using st tags(1)")
tdSql.execute("create table sub2 using st tags(2)")
for i in range(10):
ts = ts_start + i *1000
tdSql.execute(" insert into sub1 values({} , {},{},{},{})".format(ts,i ,i*10,i*100.0,i*1000.0))
tdSql.execute(" insert into sub2 values({} , {},{},{},{})".format(ts,i ,i*10,i*100.0,i*1000.0))
def create_udf_function(self):
for i in range(5):
# create scalar functions
tdSql.execute("create function udf1 as '%s' outputtype int bufSize 8;"%self.libudf1)
# create aggregate functions
tdSql.execute("create aggregate function udf2 as '%s' outputtype double bufSize 8;"%self.libudf2)
functions = tdSql.getResult("show functions")
function_nums = len(functions)
if function_nums == 2:
tdLog.info("create two udf functions success ")
# drop functions
tdSql.execute("drop function udf1")
tdSql.execute("drop function udf2")
functions = tdSql.getResult("show functions")
for function in functions:
if "udf1" in function[0] or "udf2" in function[0]:
tdLog.info("drop udf functions failed ")
tdLog.exit("drop udf functions failed")
tdLog.info("drop two udf functions success ")
# create scalar functions
tdSql.execute("create function udf1 as '%s' outputtype int bufSize 8;"%self.libudf1)
# create aggregate functions
tdSql.execute("create aggregate function udf2 as '%s' outputtype double bufSize 8;"%self.libudf2)
functions = tdSql.getResult("show functions")
function_nums = len(functions)
if function_nums == 2:
tdLog.info("create two udf functions success ")
def basic_udf_query(self):
# scalar functions
tdSql.execute("use db ")
tdSql.error("select num1 , udf1(num1) ,num2 ,udf1(num2),num3 ,udf1(num3),num4 ,udf1(num4) from tb")
tdSql.error("select c1 , udf1(c1) ,c2 ,udf1(c2), c3 ,udf1(c3), c4 ,udf1(c4) from stb1 order by c1")
# aggregate functions
tdSql.error("select udf2(num1) ,udf2(num2), udf2(num3) from tb")
# Arithmetic compute
tdSql.error("select udf2(num1)+100 ,udf2(num2)-100, udf2(num3)*100 ,udf2(num3)/100 from tb")
tdSql.error("select udf2(c1) ,udf2(c6) from stb1 ")
tdSql.error("select udf2(c1)+100 ,udf2(c6)-100 ,udf2(c1)*100 ,udf2(c6)/100 from stb1 ")
# # bug for crash when query sub table
tdSql.error("select udf2(c1+100) ,udf2(c6-100) ,udf2(c1*100) ,udf2(c6/100) from ct1")
tdSql.error("select udf2(c1+100) ,udf2(c6-100) ,udf2(c1*100) ,udf2(c6/100) from stb1 ")
# regular table with aggregate functions
tdSql.error("select udf1(num1) , count(num1) from tb;")
tdSql.error("select udf1(num1) , avg(num1) from tb;")
tdSql.error("select udf1(num1) , twa(num1) from tb;")
tdSql.error("select udf1(num1) , irate(num1) from tb;")
tdSql.error("select udf1(num1) , sum(num1) from tb;")
tdSql.error("select udf1(num1) , stddev(num1) from tb;")
tdSql.error("select udf1(num1) , mode(num1) from tb;")
tdSql.error("select udf1(num1) , HYPERLOGLOG(num1) from tb;")
# stable
tdSql.error("select udf1(c1) , count(c1) from stb1;")
tdSql.error("select udf1(c1) , avg(c1) from stb1;")
tdSql.error("select udf1(c1) , twa(c1) from stb1;")
tdSql.error("select udf1(c1) , irate(c1) from stb1;")
tdSql.error("select udf1(c1) , sum(c1) from stb1;")
tdSql.error("select udf1(c1) , stddev(c1) from stb1;")
tdSql.error("select udf1(c1) , mode(c1) from stb1;")
tdSql.error("select udf1(c1) , HYPERLOGLOG(c1) from stb1;")
# regular table with select functions
tdSql.error("select udf1(num1) , max(num1) from tb;")
tdSql.error("select udf1(num1) , min(num1) from tb;")
tdSql.error("select udf1(num1) , first(num1) from tb;")
tdSql.error("select udf1(num1) , last(num1) from tb;")
tdSql.error("select udf1(num1) , top(num1,1) from tb;")
tdSql.error("select udf1(num1) , bottom(num1,1) from tb;")
# stable
tdSql.error("select udf1(c1) , max(c1) from stb1;")
tdSql.error("select udf1(c1) , min(c1) from stb1;")
tdSql.error("select udf1(c1) , first(c1) from stb1;")
tdSql.error("select udf1(c1) , last(c1) from stb1;")
tdSql.error("select udf1(c1) , top(c1 ,1) from stb1;")
tdSql.error("select udf1(c1) , bottom(c1,1) from stb1;")
# regular table with compute functions
tdSql.error("select udf1(num1) , abs(num1) from tb;")
# stable with compute functions
tdSql.error("select udf1(c1) , abs(c1) from stb1;")
# nest query
tdSql.error("select abs(udf1(c1)) , abs(ceil(c1)) from stb1 order by ts;")
tdSql.error("select abs(udf1(c1)) , abs(ceil(c1)) from ct1 order by ts;")
# bug fix for crash
# order by udf function result
for _ in range(50):
tdSql.error("select udf2(c1) from stb1 group by 1-udf1(c1)")
print(tdSql.queryResult)
# udf functions with filter
tdSql.error("select c1 ,udf1(c1) , c6 ,udf1(c6) from stb1 where c1 > 8 order by ts")
tdSql.error("select udf1(sub1.c1), udf1(sub2.c2) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null")
tdSql.error("select sub1.c1 , udf1(sub1.c1), sub2.c2 ,udf1(sub2.c2) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null")
tdSql.error("select udf2(sub1.c1), udf2(sub2.c2) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null")
tdSql.error("select sub1.c1 , udf2(sub1.c1), sub2.c2 ,udf2(sub2.c2) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null")
# udf functions with group by
tdSql.error("select udf1(c1) from ct1 group by c1")
tdSql.error("select udf1(c1) from stb1 group by c1")
tdSql.error("select c1,c2, udf1(c1,c2) from ct1 group by c1,c2")
tdSql.error("select c1,c2, udf1(c1,c2) from stb1 group by c1,c2")
tdSql.error("select udf2(c1) from ct1 group by c1")
tdSql.error("select udf2(c1) from stb1 group by c1")
tdSql.error("select c1,c2, udf2(c1,c6) from ct1 group by c1,c2")
tdSql.error("select c1,c2, udf2(c1,c6) from stb1 group by c1,c2")
tdSql.error("select udf2(c1) from stb1 group by udf1(c1)")
tdSql.error("select udf2(c1) from stb1 group by floor(c1)")
# udf mix with order by
tdSql.error("select udf2(c1) from stb1 group by floor(c1) order by udf2(c1)")
def run(self): # sourcery skip: extract-duplicate-method, remove-redundant-fstring
print(" env is ok for all ")
self.prepare_udf_so()
self.prepare_data()
self.create_udf_function()
self.basic_udf_query()
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())

View File

@ -0,0 +1,667 @@
from distutils.log import error
import taos
import sys
import time
import os
import platform
from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import *
import subprocess
class TDTestCase:
updatecfgDict = {'debugFlag': 143, "cDebugFlag": 143, "uDebugFlag": 143, "rpcDebugFlag": 143, "tmrDebugFlag": 143,
"jniDebugFlag": 143, "simDebugFlag": 143, "dDebugFlag": 143, "dDebugFlag": 143, "vDebugFlag": 143, "mDebugFlag": 143, "qDebugFlag": 143,
"wDebugFlag": 143, "sDebugFlag": 143, "tsdbDebugFlag": 143, "tqDebugFlag": 143, "fsDebugFlag": 143, "fnDebugFlag": 143 ,"udf":1}
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), logSql)
def getBuildPath(self):
selfPath = os.path.dirname(os.path.realpath(__file__))
if ("community" in selfPath):
projPath = selfPath[:selfPath.find("community")]
else:
projPath = selfPath[:selfPath.find("tests")]
for root, dirs, files in os.walk(projPath):
if ("taosd" in files or "taosd.exe" in files):
rootRealPath = os.path.dirname(os.path.realpath(root))
if ("packaging" not in rootRealPath):
buildPath = root[:len(root) - len("/build/bin")]
break
return buildPath
def prepare_udf_so(self):
selfPath = os.path.dirname(os.path.realpath(__file__))
if ("community" in selfPath):
projPath = selfPath[:selfPath.find("community")]
else:
projPath = selfPath[:selfPath.find("tests")]
print(projPath)
if platform.system().lower() == 'windows':
self.libudf1 = subprocess.Popen('(for /r %s %%i in ("udf1.d*") do @echo %%i)|grep lib|head -n1'%projPath , shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8")
self.libudf2 = subprocess.Popen('(for /r %s %%i in ("udf2.d*") do @echo %%i)|grep lib|head -n1'%projPath , shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8")
if (not tdDnodes.dnodes[0].remoteIP == ""):
tdDnodes.dnodes[0].remote_conn.get(tdDnodes.dnodes[0].config["path"]+'/debug/build/lib/libudf1.so',projPath+"\\debug\\build\\lib\\")
tdDnodes.dnodes[0].remote_conn.get(tdDnodes.dnodes[0].config["path"]+'/debug/build/lib/libudf2.so',projPath+"\\debug\\build\\lib\\")
self.libudf1 = self.libudf1.replace('udf1.dll','libudf1.so')
self.libudf2 = self.libudf2.replace('udf2.dll','libudf2.so')
else:
self.libudf1 = subprocess.Popen('find %s -name "libudf1.so"|grep lib|head -n1'%projPath , shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8")
self.libudf2 = subprocess.Popen('find %s -name "libudf2.so"|grep lib|head -n1'%projPath , shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8")
self.libudf1 = self.libudf1.replace('\r','').replace('\n','')
self.libudf2 = self.libudf2.replace('\r','').replace('\n','')
def prepare_data(self):
tdSql.execute("drop database if exists db ")
tdSql.execute("create database if not exists db duration 300")
tdSql.execute("use db")
tdSql.execute(
'''create table stb1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
tags (t1 int)
'''
)
tdSql.execute(
'''
create table t1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
'''
)
for i in range(4):
tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
for i in range(9):
tdSql.execute(
f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
tdSql.execute(
f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute(
f'''insert into t1 values
( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a )
( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a )
( '2021-01-01 01:01:06.000', 3, 33333, 333, 33, 3.33, 33.33, 0, "binary3", "nchar3", now()+3a )
( '2021-05-07 01:01:10.000', 4, 44444, 444, 44, 4.44, 44.44, 1, "binary4", "nchar4", now()+4a )
( '2021-07-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( '2021-09-30 01:01:16.000', 5, 55555, 555, 55, 5.55, 55.55, 0, "binary5", "nchar5", now()+5a )
( '2022-02-01 01:01:20.000', 6, 66666, 666, 66, 6.66, 66.66, 1, "binary6", "nchar6", now()+6a )
( '2022-10-28 01:01:26.000', 7, 00000, 000, 00, 0.00, 00.00, 1, "binary7", "nchar7", "1970-01-01 08:00:00.000" )
( '2022-12-01 01:01:30.000', 8, -88888, -888, -88, -8.88, -88.88, 0, "binary8", "nchar8", "1969-01-01 01:00:00.000" )
( '2022-12-31 01:01:36.000', 9, -99999999999999999, -999, -99, -9.99, -999999999999999999999.99, 1, "binary9", "nchar9", "1900-01-01 00:00:00.000" )
( '2023-02-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
'''
)
tdSql.execute("create table tb (ts timestamp , num1 int , num2 int, num3 double , num4 binary(30))")
tdSql.execute(
f'''insert into tb values
( '2020-04-21 01:01:01.000', NULL, 1, 1, "binary1" )
( '2020-10-21 01:01:01.000', 1, 1, 1.11, "binary1" )
( '2020-12-31 01:01:01.000', 2, 22222, 22, "binary1" )
( '2021-01-01 01:01:06.000', 3, 33333, 33, "binary1" )
( '2021-05-07 01:01:10.000', 4, 44444, 44, "binary1" )
( '2021-07-21 01:01:01.000', NULL, NULL, NULL, "binary1" )
( '2021-09-30 01:01:16.000', 5, 55555, 55, "binary1" )
( '2022-02-01 01:01:20.000', 6, 66666, 66, "binary1" )
( '2022-10-28 01:01:26.000', 0, 00000, 00, "binary1" )
( '2022-12-01 01:01:30.000', 8, -88888, -88, "binary1" )
( '2022-12-31 01:01:36.000', 9, -9999999, -99, "binary1" )
( '2023-02-21 01:01:01.000', NULL, NULL, NULL, "binary1" )
'''
)
# udf functions with join
ts_start = 1652517451000
tdSql.execute("create stable st (ts timestamp , c1 int , c2 int ,c3 double ,c4 double ) tags(ind int)")
tdSql.execute("create table sub1 using st tags(1)")
tdSql.execute("create table sub2 using st tags(2)")
for i in range(10):
ts = ts_start + i *1000
tdSql.execute(" insert into sub1 values({} , {},{},{},{})".format(ts,i ,i*10,i*100.0,i*1000.0))
tdSql.execute(" insert into sub2 values({} , {},{},{},{})".format(ts,i ,i*10,i*100.0,i*1000.0))
def create_udf_function(self):
for i in range(5):
# create scalar functions
tdSql.execute("create function udf1 as '%s' outputtype int bufSize 8;"%self.libudf1)
# create aggregate functions
tdSql.execute("create aggregate function udf2 as '%s' outputtype double bufSize 8;"%self.libudf2)
functions = tdSql.getResult("show functions")
function_nums = len(functions)
if function_nums == 2:
tdLog.info("create two udf functions success ")
# drop functions
tdSql.execute("drop function udf1")
tdSql.execute("drop function udf2")
functions = tdSql.getResult("show functions")
for function in functions:
if "udf1" in function[0] or "udf2" in function[0]:
tdLog.info("drop udf functions failed ")
tdLog.exit("drop udf functions failed")
tdLog.info("drop two udf functions success ")
# create scalar functions
tdSql.execute("create function udf1 as '%s' outputtype int bufSize 8;"%self.libudf1)
# create aggregate functions
tdSql.execute("create aggregate function udf2 as '%s' outputtype double bufSize 8;"%self.libudf2)
functions = tdSql.getResult("show functions")
function_nums = len(functions)
if function_nums == 2:
tdLog.info("create two udf functions success ")
def basic_udf_query(self):
# scalar functions
tdSql.execute("use db ")
tdSql.query("select num1 , udf1(num1) ,num2 ,udf1(num2),num3 ,udf1(num3),num4 ,udf1(num4) from tb")
tdSql.checkData(0,0,None)
tdSql.checkData(0,1,None)
tdSql.checkData(0,2,1)
tdSql.checkData(0,3,88)
tdSql.checkData(0,4,1.000000000)
tdSql.checkData(0,5,88)
tdSql.checkData(0,6,"binary1")
tdSql.checkData(0,7,88)
tdSql.checkData(3,0,3)
tdSql.checkData(3,1,88)
tdSql.checkData(3,2,33333)
tdSql.checkData(3,3,88)
tdSql.checkData(3,4,33.000000000)
tdSql.checkData(3,5,88)
tdSql.checkData(3,6,"binary1")
tdSql.checkData(3,7,88)
tdSql.checkData(11,0,None)
tdSql.checkData(11,1,None)
tdSql.checkData(11,2,None)
tdSql.checkData(11,3,None)
tdSql.checkData(11,4,None)
tdSql.checkData(11,5,None)
tdSql.checkData(11,6,"binary1")
tdSql.checkData(11,7,88)
tdSql.query("select c1 , udf1(c1) ,c2 ,udf1(c2), c3 ,udf1(c3), c4 ,udf1(c4) from stb1 order by c1")
tdSql.checkData(0,0,None)
tdSql.checkData(0,1,None)
tdSql.checkData(0,2,None)
tdSql.checkData(0,3,None)
tdSql.checkData(0,4,None)
tdSql.checkData(0,5,None)
tdSql.checkData(0,6,None)
tdSql.checkData(0,7,None)
tdSql.checkData(20,0,8)
tdSql.checkData(20,1,88)
tdSql.checkData(20,2,88888)
tdSql.checkData(20,3,88)
tdSql.checkData(20,4,888)
tdSql.checkData(20,5,88)
tdSql.checkData(20,6,88)
tdSql.checkData(20,7,88)
# aggregate functions
tdSql.query("select udf2(num1) ,udf2(num2), udf2(num3) from tb")
tdSql.checkData(0,0,15.362291496)
tdSql.checkData(0,1,10000949.553189287)
tdSql.checkData(0,2,168.633425216)
# Arithmetic compute
tdSql.query("select udf2(num1)+100 ,udf2(num2)-100, udf2(num3)*100 ,udf2(num3)/100 from tb")
tdSql.checkData(0,0,115.362291496)
tdSql.checkData(0,1,10000849.553189287)
tdSql.checkData(0,2,16863.342521576)
tdSql.checkData(0,3,1.686334252)
tdSql.query("select udf2(c1) ,udf2(c6) from stb1 ")
tdSql.checkData(0,0,25.514701644)
tdSql.checkData(0,1,265.247614504)
tdSql.query("select udf2(c1)+100 ,udf2(c6)-100 ,udf2(c1)*100 ,udf2(c6)/100 from stb1 ")
tdSql.checkData(0,0,125.514701644)
tdSql.checkData(0,1,165.247614504)
tdSql.checkData(0,2,2551.470164435)
tdSql.checkData(0,3,2.652476145)
# bug check for a crash when querying a sub table
tdSql.query("select udf2(c1+100) ,udf2(c6-100) ,udf2(c1*100) ,udf2(c6/100) from ct1")
tdSql.checkData(0,0,378.215547010)
tdSql.checkData(0,1,353.808067460)
tdSql.checkData(0,2,2114.237451187)
tdSql.checkData(0,3,2.125468151)
tdSql.query("select udf2(c1+100) ,udf2(c6-100) ,udf2(c1*100) ,udf2(c6/100) from stb1 ")
tdSql.checkData(0,0,490.358032462)
tdSql.checkData(0,1,400.460106627)
tdSql.checkData(0,2,2551.470164435)
tdSql.checkData(0,3,2.652476145)
# regular table with aggregate functions
tdSql.error("select udf1(num1) , count(num1) from tb;")
tdSql.error("select udf1(num1) , avg(num1) from tb;")
tdSql.error("select udf1(num1) , twa(num1) from tb;")
tdSql.error("select udf1(num1) , irate(num1) from tb;")
tdSql.error("select udf1(num1) , sum(num1) from tb;")
tdSql.error("select udf1(num1) , stddev(num1) from tb;")
tdSql.error("select udf1(num1) , mode(num1) from tb;")
tdSql.error("select udf1(num1) , HYPERLOGLOG(num1) from tb;")
# stable
tdSql.error("select udf1(c1) , count(c1) from stb1;")
tdSql.error("select udf1(c1) , avg(c1) from stb1;")
tdSql.error("select udf1(c1) , twa(c1) from stb1;")
tdSql.error("select udf1(c1) , irate(c1) from stb1;")
tdSql.error("select udf1(c1) , sum(c1) from stb1;")
tdSql.error("select udf1(c1) , stddev(c1) from stb1;")
tdSql.error("select udf1(c1) , mode(c1) from stb1;")
tdSql.error("select udf1(c1) , HYPERLOGLOG(c1) from stb1;")
# regular table with select functions
tdSql.query("select udf1(num1) , max(num1) from tb;")
tdSql.checkRows(1)
tdSql.query("select floor(num1) , max(num1) from tb;")
tdSql.checkRows(1)
tdSql.query("select udf1(num1) , min(num1) from tb;")
tdSql.checkRows(1)
tdSql.query("select ceil(num1) , min(num1) from tb;")
tdSql.checkRows(1)
tdSql.query("select udf1(num1) , first(num1) from tb;")
tdSql.query("select abs(num1) , first(num1) from tb;")
tdSql.query("select udf1(num1) , last(num1) from tb;")
tdSql.query("select round(num1) , last(num1) from tb;")
tdSql.query("select udf1(num1) , top(num1,1) from tb;")
tdSql.checkRows(1)
tdSql.query("select udf1(num1) , bottom(num1,1) from tb;")
tdSql.checkRows(1)
# tdSql.query("select udf1(num1) , last_row(num1) from tb;")
# tdSql.checkRows(1)
# tdSql.query("select round(num1) , last_row(num1) from tb;")
# tdSql.checkRows(1)
# stable
tdSql.query("select udf1(c1) , max(c1) from stb1;")
tdSql.checkRows(1)
tdSql.query("select abs(c1) , max(c1) from stb1;")
tdSql.checkRows(1)
tdSql.query("select udf1(c1) , min(c1) from stb1;")
tdSql.checkRows(1)
tdSql.query("select floor(c1) , min(c1) from stb1;")
tdSql.checkRows(1)
tdSql.query("select udf1(c1) , first(c1) from stb1;")
tdSql.query("select udf1(c1) , last(c1) from stb1;")
tdSql.query("select udf1(c1) , top(c1 ,1) from stb1;")
tdSql.checkRows(1)
tdSql.query("select abs(c1) , top(c1 ,1) from stb1;")
tdSql.checkRows(1)
tdSql.query("select udf1(c1) , bottom(c1,1) from stb1;")
tdSql.checkRows(1)
tdSql.query("select ceil(c1) , bottom(c1,1) from stb1;")
tdSql.checkRows(1)
# tdSql.query("select udf1(c1) , last_row(c1) from stb1;")
# tdSql.checkRows(1)
# tdSql.query("select ceil(c1) , last_row(c1) from stb1;")
# tdSql.checkRows(1)
# regular table with compute functions
tdSql.query("select udf1(num1) , abs(num1) from tb;")
tdSql.checkRows(12)
tdSql.query("select floor(num1) , abs(num1) from tb;")
tdSql.checkRows(12)
# bug needs fix: csum combined with udf/scalar functions is disabled below
#tdSql.query("select udf1(num1) , csum(num1) from tb;")
#tdSql.checkRows(9)
#tdSql.query("select ceil(num1) , csum(num1) from tb;")
#tdSql.checkRows(9)
#tdSql.query("select udf1(c1) , csum(c1) from stb1;")
#tdSql.checkRows(22)
#tdSql.query("select floor(c1) , csum(c1) from stb1;")
#tdSql.checkRows(22)
# stable with compute functions
tdSql.query("select udf1(c1) , abs(c1) from stb1;")
tdSql.checkRows(25)
tdSql.query("select abs(c1) , ceil(c1) from stb1;")
tdSql.checkRows(25)
# nest query
tdSql.query("select abs(udf1(c1)) , abs(ceil(c1)) from stb1 order by ts;")
tdSql.checkRows(25)
tdSql.checkData(0,0,None)
tdSql.checkData(0,1,None)
tdSql.checkData(1,0,88)
tdSql.checkData(1,1,8)
tdSql.query("select abs(udf1(c1)) , abs(ceil(c1)) from ct1 order by ts;")
tdSql.checkRows(13)
tdSql.checkData(0,0,88)
tdSql.checkData(0,1,8)
tdSql.checkData(1,0,88)
tdSql.checkData(1,1,7)
# bug fix for a crash when repeatedly grouping by an expression of the udf result
for _ in range(50):
tdSql.query("select udf2(c1) from stb1 group by 1-udf1(c1)")
print(tdSql.queryResult)
# udf functions with filter
tdSql.query("select abs(udf1(c1)) , abs(ceil(c1)) from stb1 where c1 is null order by ts;")
tdSql.checkRows(3)
tdSql.checkData(0,0,None)
tdSql.checkData(0,1,None)
tdSql.query("select c1 ,udf1(c1) , c6 ,udf1(c6) from stb1 where c1 > 8 order by ts")
tdSql.checkRows(3)
tdSql.checkData(0,0,9)
tdSql.checkData(0,1,88)
tdSql.checkData(0,2,-99.990000000)
tdSql.checkData(0,3,88)
tdSql.query("select sub1.c1, sub2.c2 from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null")
tdSql.checkData(0,0,0)
tdSql.checkData(0,1,0)
tdSql.checkData(1,0,1)
tdSql.checkData(1,1,10)
tdSql.query("select udf1(sub1.c1), udf1(sub2.c2) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null")
tdSql.checkData(0,0,88)
tdSql.checkData(0,1,88)
tdSql.checkData(1,0,88)
tdSql.checkData(1,1,88)
tdSql.query("select sub1.c1 , udf1(sub1.c1), sub2.c2 ,udf1(sub2.c2) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null")
tdSql.checkData(0,0,0)
tdSql.checkData(0,1,88)
tdSql.checkData(0,2,0)
tdSql.checkData(0,3,88)
tdSql.checkData(1,0,1)
tdSql.checkData(1,1,88)
tdSql.checkData(1,2,10)
tdSql.checkData(1,3,88)
tdSql.query("select udf2(sub1.c1), udf2(sub2.c2) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null")
tdSql.checkData(0,0,16.881943016)
tdSql.checkData(0,1,168.819430161)
tdSql.error("select sub1.c1 , udf2(sub1.c1), sub2.c2 ,udf2(sub2.c2) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null")
# udf functions with group by
tdSql.query("select udf1(c1) from ct1 group by c1")
tdSql.checkRows(10)
tdSql.query("select udf1(c1) from stb1 group by c1")
tdSql.checkRows(11)
tdSql.query("select c1,c2, udf1(c1,c2) from ct1 group by c1,c2")
tdSql.checkRows(10)
tdSql.query("select c1,c2, udf1(c1,c2) from stb1 group by c1,c2")
tdSql.checkRows(11)
tdSql.query("select udf2(c1) from ct1 group by c1")
tdSql.checkRows(10)
tdSql.query("select udf2(c1) from stb1 group by c1")
tdSql.checkRows(11)
tdSql.query("select c1,c2, udf2(c1,c6) from ct1 group by c1,c2")
tdSql.checkRows(10)
tdSql.query("select c1,c2, udf2(c1,c6) from stb1 group by c1,c2")
tdSql.checkRows(11)
tdSql.query("select udf2(c1) from stb1 group by udf1(c1)")
tdSql.checkRows(2)
tdSql.query("select udf2(c1) from stb1 group by floor(c1)")
tdSql.checkRows(11)
# udf mix with order by
tdSql.query("select udf2(c1) from stb1 group by floor(c1) order by udf2(c1)")
tdSql.checkRows(11)
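# call udf1/udf2 with multiple input columns, including join queries between sub1 and sub2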
def multi_cols_udf(self):
tdSql.query("select num1,num2,num3,udf1(num1,num2,num3) from tb")
tdSql.checkData(0,0,None)
tdSql.checkData(0,1,1)
tdSql.checkData(0,2,1.000000000)
tdSql.checkData(0,3,None)
tdSql.checkData(1,0,1)
tdSql.checkData(1,1,1)
tdSql.checkData(1,2,1.110000000)
tdSql.checkData(1,3,88)
tdSql.query("select c1,c6,udf1(c1,c6) from stb1 order by ts")
tdSql.checkData(1,0,8)
tdSql.checkData(1,1,88.880000000)
tdSql.checkData(1,2,88)
tdSql.query("select abs(udf1(c1,c6,c1,c6)) , abs(ceil(c1)) from stb1 where c1 is not null order by ts;")
tdSql.checkRows(22)
tdSql.query("select udf2(sub1.c1 ,sub1.c2), udf2(sub2.c2 ,sub2.c1) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null")
tdSql.checkData(0,0,169.661427555)
tdSql.checkData(0,1,169.661427555)
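# build the lists of scalar (udf1) and aggregate (udf2) query statements reused by the later negative cases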
def try_query_sql(self):
udf1_sqls = [
"select num1 , udf1(num1) ,num2 ,udf1(num2),num3 ,udf1(num3),num4 ,udf1(num4) from tb" ,
"select c1 , udf1(c1) ,c2 ,udf1(c2), c3 ,udf1(c3), c4 ,udf1(c4) from stb1 order by c1" ,
"select udf1(num1) , max(num1) from tb;" ,
"select udf1(num1) , min(num1) from tb;" ,
#"select udf1(num1) , top(num1,1) from tb;" ,
#"select udf1(num1) , bottom(num1,1) from tb;" ,
"select udf1(c1) , max(c1) from stb1;" ,
"select udf1(c1) , min(c1) from stb1;" ,
#"select udf1(c1) , top(c1 ,1) from stb1;" ,
#"select udf1(c1) , bottom(c1,1) from stb1;" ,
"select udf1(num1) , abs(num1) from tb;" ,
#"select udf1(num1) , csum(num1) from tb;" ,
#"select udf1(c1) , csum(c1) from stb1;" ,
"select udf1(c1) , abs(c1) from stb1;" ,
"select abs(udf1(c1)) , abs(ceil(c1)) from stb1 order by ts;" ,
"select abs(udf1(c1)) , abs(ceil(c1)) from ct1 order by ts;" ,
"select abs(udf1(c1)) , abs(ceil(c1)) from stb1 where c1 is null order by ts;" ,
"select c1 ,udf1(c1) , c6 ,udf1(c6) from stb1 where c1 > 8 order by ts" ,
"select udf1(sub1.c1), udf1(sub2.c2) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null" ,
"select sub1.c1 , udf1(sub1.c1), sub2.c2 ,udf1(sub2.c2) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null" ,
"select udf1(c1) from ct1 group by c1" ,
"select udf1(c1) from stb1 group by c1" ,
"select c1,c2, udf1(c1,c2) from ct1 group by c1,c2" ,
"select c1,c2, udf1(c1,c2) from stb1 group by c1,c2" ,
"select num1,num2,num3,udf1(num1,num2,num3) from tb" ,
"select c1,c6,udf1(c1,c6) from stb1 order by ts" ,
"select abs(udf1(c1,c6,c1,c6)) , abs(ceil(c1)) from stb1 where c1 is not null order by ts;"
]
udf2_sqls = ["select udf2(sub1.c1), udf2(sub2.c2) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null" ,
"select udf2(c1) from stb1 group by 1-udf1(c1)" ,
"select udf2(num1) ,udf2(num2), udf2(num3) from tb" ,
"select udf2(num1)+100 ,udf2(num2)-100, udf2(num3)*100 ,udf2(num3)/100 from tb" ,
"select udf2(c1) ,udf2(c6) from stb1 " ,
"select udf2(c1)+100 ,udf2(c6)-100 ,udf2(c1)*100 ,udf2(c6)/100 from stb1 " ,
"select udf2(c1+100) ,udf2(c6-100) ,udf2(c1*100) ,udf2(c6/100) from ct1" ,
"select udf2(c1+100) ,udf2(c6-100) ,udf2(c1*100) ,udf2(c6/100) from stb1 " ,
"select udf2(c1) from ct1 group by c1" ,
"select udf2(c1) from stb1 group by c1" ,
"select c1,c2, udf2(c1,c6) from ct1 group by c1,c2" ,
"select c1,c2, udf2(c1,c6) from stb1 group by c1,c2" ,
"select udf2(c1) from stb1 group by udf1(c1)" ,
"select udf2(c1) from stb1 group by floor(c1)" ,
"select udf2(c1) from stb1 group by floor(c1) order by udf2(c1)" ,
"select udf2(sub1.c1 ,sub1.c2), udf2(sub2.c2 ,sub2.c1) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null" ,
"select udf2(sub1.c1 ,sub1.c2), udf2(sub2.c2 ,sub2.c1) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null" ,
"select udf2(sub1.c1 ,sub1.c2), udf2(sub2.c2 ,sub2.c1) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null" ,
"select udf2(sub1.c1 ,sub1.c2), udf2(sub2.c2 ,sub2.c1) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null"]
return udf1_sqls ,udf2_sqls
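# create udfs with a missing bufSize or with swapped scalar/aggregate types, then verify which queries still succeed and which fail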
def unexpected_create(self):
tdLog.info(" create function with out bufsize ")
tdSql.query("drop function udf1 ")
tdSql.query("drop function udf2 ")
# create function without buffer
tdSql.execute("create function udf1 as '%s' outputtype int"%self.libudf1)
tdSql.execute("create aggregate function udf2 as '%s' outputtype double"%self.libudf2)
udf1_sqls ,udf2_sqls = self.try_query_sql()
for scalar_sql in udf1_sqls:
tdSql.query(scalar_sql)
for aggregate_sql in udf2_sqls:
tdSql.error(aggregate_sql)
# create scalar function as aggregate and aggregate function as scalar
tdLog.info(" create function with mismatched scalar/aggregate type ")
tdSql.query("drop function udf1 ")
tdSql.query("drop function udf2 ")
# swap the scalar/aggregate definitions of udf1 and udf2
tdSql.execute("create aggregate function udf1 as '%s' outputtype int bufSize 8 "%self.libudf1)
tdSql.execute("create function udf2 as '%s' outputtype double bufSize 8"%self.libudf2)
udf1_sqls ,udf2_sqls = self.try_query_sql()
for scalar_sql in udf1_sqls:
tdSql.error(scalar_sql)
for aggregate_sql in udf2_sqls:
tdSql.error(aggregate_sql)
tdSql.execute(" create function db as '%s' outputtype int bufSize 8 "%self.libudf1)
tdSql.execute(" create aggregate function test as '%s' outputtype int bufSize 8 "%self.libudf1)
tdSql.error(" select db(c1) from stb1 ")
tdSql.error(" select db(c1,c6), db(c6) from stb1 ")
tdSql.error(" select db(num1,num2), db(num1) from tb ")
tdSql.error(" select test(c1) from stb1 ")
tdSql.error(" select test(c1,c6), test(c6) from stb1 ")
tdSql.error(" select test(num1,num2), test(num1) from tb ")
def loop_kill_udfd(self):
buildPath = self.getBuildPath()
if (buildPath == ""):
tdLog.exit("taosd not found!")
else:
tdLog.info("taosd found in %s" % buildPath)
cfgPath = buildPath + "/../sim/dnode1/cfg"
udfdPath = buildPath +'/build/bin/udfd'
for i in range(3):
tdLog.info(" loop restart udfd %d_th" % i)
tdSql.query("select udf2(sub1.c1 ,sub1.c2), udf2(sub2.c2 ,sub2.c1) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null")
tdSql.checkData(0,0,169.661427555)
tdSql.checkData(0,1,169.661427555)
# stop udfd: find its pid and kill -9 it to simulate a udfd crash
get_processID = "ps -ef | grep -w udfd | grep -v grep| grep -v defunct | awk '{print $2}'"
processID = subprocess.check_output(get_processID, shell=True).decode("utf-8")
stop_udfd = " kill -9 %s" % processID
os.system(stop_udfd)
time.sleep(2)
tdSql.query("select udf2(sub1.c1 ,sub1.c2), udf2(sub2.c2 ,sub2.c1) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null")
tdSql.checkData(0,0,169.661427555)
tdSql.checkData(0,1,169.661427555)
# start udfd manually if needed
# start_udfd = "nohup " + udfdPath + " -c " + cfgPath + " > /dev/null 2>&1 &"
# tdLog.info("start udfd : %s " % start_udfd)
def test_function_name(self):
tdLog.info(" create function name is not build_in functions ")
tdSql.execute(" drop function udf1 ")
tdSql.execute(" drop function udf2 ")
tdSql.error("create function max as '%s' outputtype int bufSize 8"%self.libudf1)
tdSql.error("create aggregate function sum as '%s' outputtype double bufSize 8"%self.libudf2)
tdSql.error("create function max as '%s' outputtype int bufSize 8"%self.libudf1)
tdSql.error("create aggregate function sum as '%s' outputtype double bufSize 8"%self.libudf2)
tdSql.error("create aggregate function tbname as '%s' outputtype double bufSize 8"%self.libudf2)
tdSql.error("create aggregate function function as '%s' outputtype double bufSize 8"%self.libudf2)
tdSql.error("create aggregate function stable as '%s' outputtype double bufSize 8"%self.libudf2)
tdSql.error("create aggregate function union as '%s' outputtype double bufSize 8"%self.libudf2)
tdSql.error("create aggregate function 123 as '%s' outputtype double bufSize 8"%self.libudf2)
tdSql.error("create aggregate function 123db as '%s' outputtype double bufSize 8"%self.libudf2)
tdSql.error("create aggregate function mnode as '%s' outputtype double bufSize 8"%self.libudf2)
def restart_taosd_query_udf(self):
self.create_udf_function()
for i in range(5):
tdLog.info(" this is %d_th restart taosd " %i)
tdSql.execute("use db ")
tdSql.query("select count(*) from stb1")
tdSql.checkRows(1)
tdSql.query("select udf2(sub1.c1 ,sub1.c2), udf2(sub2.c2 ,sub2.c1) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null")
tdSql.checkData(0,0,169.661427555)
tdSql.checkData(0,1,169.661427555)
tdDnodes.stop(1)
tdDnodes.start(1)
time.sleep(2)
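# test entry: prepare the udf shared objects and test data, create the udfs and run the basic query checks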
def run(self): # sourcery skip: extract-duplicate-method, remove-redundant-fstring
print(" env is ok for all ")
self.prepare_udf_so()
self.prepare_data()
self.create_udf_function()
self.basic_udf_query()
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())

View File

@ -11,6 +11,9 @@ python3 ./test.py -f 0-others/udfTest.py
python3 ./test.py -f 0-others/udf_create.py
python3 ./test.py -f 0-others/udf_restart_taosd.py
python3 ./test.py -f 0-others/cachelast.py
python3 ./test.py -f 0-others/udf_cfg1.py
python3 ./test.py -f 0-others/udf_cfg2.py
python3 ./test.py -f 0-others/sysinfo.py
python3 ./test.py -f 0-others/user_control.py
python3 ./test.py -f 0-others/fsync.py

@ -1 +1 @@
Subproject commit bd496f76b64931c66da2f8b0f24143a98a881cde
Subproject commit b7b922268c4a06d9db77ffdfde0726f3d9900b72