Merge branch '3.0' of https://github.com/taosdata/TDengine into feature/3.0_mhli
commit 3327a0692f
@@ -479,12 +479,8 @@ int32_t tDecodeSEpSet(SDecoder* pDecoder, SEpSet* pEp);
int32_t taosEncodeSEpSet(void** buf, const SEpSet* pEp);
void*   taosDecodeSEpSet(const void* buf, SEpSet* pEp);

typedef struct {
  SEpSet epSet;
} SMEpSet;

int32_t tSerializeSMEpSet(void* buf, int32_t bufLen, SMEpSet* pReq);
int32_t tDeserializeSMEpSet(void* buf, int32_t buflen, SMEpSet* pReq);
int32_t tSerializeSEpSet(void* buf, int32_t bufLen, const SEpSet* pEpset);
int32_t tDeserializeSEpSet(void* buf, int32_t buflen, SEpSet* pEpset);

typedef struct {
  int8_t connType;

@@ -656,6 +652,9 @@ typedef struct {

typedef struct {
  int32_t code;
  char    tbFName[TSDB_TABLE_FNAME_LEN];
  int32_t sversion;
  int32_t tversion;
} SQueryTableRsp;

int32_t tSerializeSQueryTableRsp(void* buf, int32_t bufLen, SQueryTableRsp* pRsp);

@@ -182,8 +182,6 @@ enum {
  TD_DEF_MSG_TYPE(TDMT_VND_MQ_DISCONNECT, "vnode-mq-disconnect", NULL, NULL)
  TD_DEF_MSG_TYPE(TDMT_VND_MQ_VG_CHANGE, "vnode-mq-vg-change", SMqRebVgReq, SMqRebVgRsp)
  TD_DEF_MSG_TYPE(TDMT_VND_MQ_VG_DELETE, "vnode-mq-vg-delete", SMqVDeleteReq, SMqVDeleteRsp)
  TD_DEF_MSG_TYPE(TDMT_VND_RES_READY, "vnode-res-ready", NULL, NULL)
  TD_DEF_MSG_TYPE(TDMT_VND_TASKS_STATUS, "vnode-tasks-status", NULL, NULL)
  TD_DEF_MSG_TYPE(TDMT_VND_CANCEL_TASK, "vnode-cancel-task", NULL, NULL)
  TD_DEF_MSG_TYPE(TDMT_VND_DROP_TASK, "vnode-drop-task", NULL, NULL)
  TD_DEF_MSG_TYPE(TDMT_VND_CREATE_TOPIC, "vnode-create-topic", NULL, NULL)

@@ -322,21 +322,22 @@ typedef enum EQueryExecMode {
} EQueryExecMode;

typedef struct SQuery {
  ENodeType      type;
  EQueryExecMode execMode;
  bool           haveResultSet;
  SNode*         pRoot;
  int32_t        numOfResCols;
  SSchema*       pResSchema;
  int8_t         precision;
  SCmdMsgInfo*   pCmdMsg;
  int32_t        msgType;
  SArray*        pDbList;
  SArray*        pTableList;
  bool           showRewrite;
  int32_t        placeholderNum;
  SArray*        pPlaceholderValues;
  SNode*         pPrepareRoot;
  ENodeType      type;
  EQueryExecMode execMode;
  bool           haveResultSet;
  SNode*         pRoot;
  int32_t        numOfResCols;
  SSchema*       pResSchema;
  int8_t         precision;
  SCmdMsgInfo*   pCmdMsg;
  int32_t        msgType;
  SArray*        pDbList;
  SArray*        pTableList;
  bool           showRewrite;
  int32_t        placeholderNum;
  SArray*        pPlaceholderValues;
  SNode*         pPrepareRoot;
  struct SParseMetaCache* pMetaCache;
} SQuery;

void nodesWalkSelectStmt(SSelectStmt* pSelect, ESqlClause clause, FNodeWalker walker, void* pContext);
@@ -23,6 +23,9 @@ extern "C" {
#include "query.h"
#include "querynodes.h"

struct SCatalogReq;
struct SMetaData;

typedef struct SStmtCallback {
  TAOS_STMT* pStmt;
  int32_t (*getTbNameFn)(TAOS_STMT*, char**);

@@ -45,11 +48,17 @@ typedef struct SParseContext {
  SStmtCallback* pStmtCb;
  const char*    pUser;
  bool           isSuperUser;
  bool           async;
} SParseContext;

int32_t qParseSql(SParseContext* pCxt, SQuery** pQuery);
bool    qIsInsertSql(const char* pStr, size_t length);

// for async mode
int32_t qSyntaxParseSql(SParseContext* pCxt, SQuery** pQuery, struct SCatalogReq* pCatalogReq);
int32_t qSemanticAnalysisSql(SParseContext* pCxt, const struct SCatalogReq* pCatalogReq,
                             const struct SMetaData* pMetaData, SQuery* pQuery);

void qDestroyQuery(SQuery* pQueryNode);

int32_t qExtractResultSchema(const SNode* pRoot, int32_t* numOfCols, SSchema** pSchema);
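A minimal sketch of how these two async entry points are meant to be chained (not part of the commit; error handling is omitted and the step that resolves the SCatalogReq into an SMetaData is left to the caller):

  SQuery     *pQuery = NULL;
  SCatalogReq catalogReq = {0};
  int32_t code = qSyntaxParseSql(pCxt, &pQuery, &catalogReq);           // phase 1: parse and collect required metadata
  if (TSDB_CODE_SUCCESS == code) {
    SMetaData metaData = {0};
    // ... caller fetches everything listed in catalogReq from the catalog into metaData ...
    code = qSemanticAnalysisSql(pCxt, &catalogReq, &metaData, pQuery);  // phase 2: translate using the fetched meta
  }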
@@ -56,12 +56,6 @@ int32_t qWorkerProcessQueryMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg);

int32_t qWorkerProcessCQueryMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg);

int32_t qWorkerProcessDataSinkMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg);

int32_t qWorkerProcessReadyMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg);

int32_t qWorkerProcessStatusMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg);

int32_t qWorkerProcessFetchMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg);

int32_t qWorkerProcessFetchRsp(void *node, void *qWorkerMgmt, SRpcMsg *pMsg);

@@ -72,10 +66,6 @@ int32_t qWorkerProcessDropMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg);

int32_t qWorkerProcessHbMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg);

int32_t qWorkerProcessShowMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg);

int32_t qWorkerProcessShowFetchMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg);

void qWorkerDestroy(void **qWorkerMgmt);

#ifdef __cplusplus

@@ -643,6 +643,7 @@ int32_t* taosGetErrno();
#define TSDB_CODE_PAR_INVALID_DROP_COL TAOS_DEF_ERROR_CODE(0, 0x2651)
#define TSDB_CODE_PAR_INVALID_COL_JSON TAOS_DEF_ERROR_CODE(0, 0x2652)
#define TSDB_CODE_PAR_VALUE_TOO_LONG   TAOS_DEF_ERROR_CODE(0, 0x2653)
#define TSDB_CODE_PAR_INTERNAL_ERROR   TAOS_DEF_ERROR_CODE(0, 0x2654)

//planner
#define TSDB_CODE_PLAN_INTERNAL_ERROR  TAOS_DEF_ERROR_CODE(0, 0x2700)

@@ -247,7 +247,7 @@ typedef enum ELogicConditionType {
#define TSDB_EP_LEN            (TSDB_FQDN_LEN + 6)
#define TSDB_IPv4ADDR_LEN      16
#define TSDB_FILENAME_LEN      128
#define TSDB_SHOW_SQL_LEN      512
#define TSDB_SHOW_SQL_LEN      1024
#define TSDB_SLOW_QUERY_SQL_LEN 512
#define TSDB_SHOW_SUBQUERY_LEN 1000
@@ -394,8 +394,8 @@ int32_t validateSversion(SRequestObj* pRequest, void* res) {
      if (NULL == blk->tblFName || 0 == blk->tblFName[0]) {
        continue;
      }

      STbSVersion tbSver = {.tbFName = blk->tblFName, .sver = blk->sver};

      STbSVersion tbSver = {.tbFName = blk->tblFName, .sver = blk->sver};
      taosArrayPush(pArray, &tbSver);
    }
  } else if (TDMT_VND_QUERY == pRequest->type) {

@@ -552,12 +552,12 @@ int32_t refreshMeta(STscObj* pTscObj, SRequestObj* pRequest) {

int32_t removeMeta(STscObj* pTscObj, SArray* tbList) {
  SCatalog* pCatalog = NULL;
  int32_t tbNum = taosArrayGetSize(tbList);
  int32_t code = catalogGetHandle(pTscObj->pAppInfo->clusterId, &pCatalog);
  int32_t tbNum = taosArrayGetSize(tbList);
  int32_t code = catalogGetHandle(pTscObj->pAppInfo->clusterId, &pCatalog);
  if (code != TSDB_CODE_SUCCESS) {
    return code;
  }


  for (int32_t i = 0; i < tbNum; ++i) {
    SName* pTbName = taosArrayGet(tbList, i);
    catalogRemoveTableMeta(pCatalog, pTbName);

@@ -566,7 +566,6 @@ int32_t removeMeta(STscObj* pTscObj, SArray* tbList) {
  return TSDB_CODE_SUCCESS;
}


SRequestObj* execQuery(STscObj* pTscObj, const char* sql, int sqlLen) {
  SRequestObj* pRequest = NULL;
  int32_t      retryNum = 0;

@@ -589,7 +588,7 @@ SRequestObj* execQuery(STscObj* pTscObj, const char* sql, int sqlLen) {
  if (NEED_CLIENT_RM_TBLMETA_REQ(pRequest->type)) {
    removeMeta(pTscObj, pRequest->tableList);
  }


  return pRequest;
}


@@ -730,11 +729,6 @@ static void destroySendMsgInfo(SMsgSendInfo* pMsgBody) {
  taosMemoryFreeClear(pMsgBody);
}

bool persistConnForSpecificMsg(void* parenct, tmsg_t msgType) {
  return msgType == TDMT_VND_QUERY_RSP || msgType == TDMT_VND_FETCH_RSP || msgType == TDMT_VND_RES_READY_RSP ||
         msgType == TDMT_VND_QUERY_HEARTBEAT_RSP;
}

void processMsgFromServer(void* parent, SRpcMsg* pMsg, SEpSet* pEpSet) {
  SMsgSendInfo* pSendInfo = (SMsgSendInfo*)pMsg->info.ahandle;
  assert(pMsg->info.ahandle != NULL);
@@ -125,11 +125,15 @@ static const SSysDbTableSchema userStbsSchema[] = {

static const SSysDbTableSchema streamSchema[] = {
    {.name = "stream_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
    {.name = "user_name", .bytes = 23, .type = TSDB_DATA_TYPE_VARCHAR},
    {.name = "dest_table", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
    {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
    {.name = "sql", .bytes = 1024, .type = TSDB_DATA_TYPE_VARCHAR},
};
    {.name = "sql", .bytes = TSDB_SHOW_SQL_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
    {.name = "status", .bytes = 20 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY},
    {.name = "source_db", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
    {.name = "target_db", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
    {.name = "target_table", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
    {.name = "watermark", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT},
    {.name = "trigger", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
};

static const SSysDbTableSchema userTblsSchema[] = {
    {.name = "table_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
@@ -665,22 +665,24 @@ void tFreeSMAltertbReq(SMAlterStbReq *pReq) {
  taosArrayDestroy(pReq->pFields);
  pReq->pFields = NULL;
}
int32_t tSerializeSMEpSet(void *buf, int32_t bufLen, SMEpSet *pReq) {

int32_t tSerializeSEpSet(void *buf, int32_t bufLen, const SEpSet *pEpset) {
  SEncoder encoder = {0};
  tEncoderInit(&encoder, buf, bufLen);
  if (tStartEncode(&encoder) < 0) return -1;
  if (tEncodeSEpSet(&encoder, &pReq->epSet) < 0) return -1;
  if (tEncodeSEpSet(&encoder, pEpset) < 0) return -1;

  tEndEncode(&encoder);
  int32_t tlen = encoder.pos;
  tEncoderClear(&encoder);
  return tlen;
}
int32_t tDeserializeSMEpSet(void *buf, int32_t bufLen, SMEpSet *pReq) {

int32_t tDeserializeSEpSet(void *buf, int32_t bufLen, SEpSet *pEpset) {
  SDecoder decoder = {0};
  tDecoderInit(&decoder, buf, bufLen);
  if (tStartDecode(&decoder) < 0) return -1;
  if (tDecodeSEpSet(&decoder, &pReq->epSet) < 0) return -1;
  if (tDecodeSEpSet(&decoder, pEpset) < 0) return -1;

  tEndDecode(&decoder);
  tDecoderClear(&decoder);
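A hedged round-trip sketch of the renamed helpers above (not part of the commit): the first call with a NULL buffer only computes the length, the second call fills the buffer, and the peer decodes it back into an SEpSet.

  SEpSet  epSet = {0};            // assume it was filled by addEpIntoEpSet()/dmGetMnodeEpSetForRedirect()
  int32_t contLen = tSerializeSEpSet(NULL, 0, &epSet);
  void   *pCont = rpcMallocCont(contLen);
  tSerializeSEpSet(pCont, contLen, &epSet);

  SEpSet decoded = {0};
  tDeserializeSEpSet(pCont, contLen, &decoded);   // receiver recovers numOfEps/inUse/eps[]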
@@ -3507,31 +3509,6 @@ int32_t tDeserializeSSchedulerHbRsp(void *buf, int32_t bufLen, SSchedulerHbRsp *

void tFreeSSchedulerHbRsp(SSchedulerHbRsp *pRsp) { taosArrayDestroy(pRsp->taskStatus); }

int32_t tSerializeSQueryTableRsp(void *buf, int32_t bufLen, SQueryTableRsp *pRsp) {
  SEncoder encoder = {0};
  tEncoderInit(&encoder, buf, bufLen);

  if (tStartEncode(&encoder) < 0) return -1;
  if (tEncodeI32(&encoder, pRsp->code) < 0) return -1;
  tEndEncode(&encoder);

  int32_t tlen = encoder.pos;
  tEncoderClear(&encoder);
  return tlen;
}

int32_t tDeserializeSQueryTableRsp(void *buf, int32_t bufLen, SQueryTableRsp *pRsp) {
  SDecoder decoder = {0};
  tDecoderInit(&decoder, buf, bufLen);

  if (tStartDecode(&decoder) < 0) return -1;
  if (tDecodeI32(&decoder, &pRsp->code) < 0) return -1;
  tEndDecode(&decoder);

  tDecoderClear(&decoder);
  return 0;
}

int32_t tSerializeSVCreateTbBatchRsp(void *buf, int32_t bufLen, SVCreateTbBatchRsp *pRsp) {
  // SEncoder encoder = {0};
  // tEncoderInit(&encoder, buf, bufLen);
@@ -101,8 +101,6 @@ SArray *qmGetMsgHandles() {
  if (dmSetMgmtHandle(pArray, TDMT_VND_FETCH_RSP, qmPutNodeMsgToFetchQueue, 1) == NULL) goto _OVER;
  if (dmSetMgmtHandle(pArray, TDMT_VND_QUERY_HEARTBEAT, qmPutNodeMsgToFetchQueue, 1) == NULL) goto _OVER;

  if (dmSetMgmtHandle(pArray, TDMT_VND_RES_READY, qmPutNodeMsgToFetchQueue, 1) == NULL) goto _OVER;
  if (dmSetMgmtHandle(pArray, TDMT_VND_TASKS_STATUS, qmPutNodeMsgToFetchQueue, 1) == NULL) goto _OVER;
  if (dmSetMgmtHandle(pArray, TDMT_VND_CANCEL_TASK, qmPutNodeMsgToFetchQueue, 1) == NULL) goto _OVER;
  if (dmSetMgmtHandle(pArray, TDMT_VND_DROP_TASK, qmPutNodeMsgToFetchQueue, 1) == NULL) goto _OVER;

@@ -292,8 +292,6 @@ SArray *vmGetMsgHandles() {
  if (dmSetMgmtHandle(pArray, TDMT_VND_MQ_CONNECT, vmPutNodeMsgToWriteQueue, 0) == NULL) goto _OVER;
  if (dmSetMgmtHandle(pArray, TDMT_VND_MQ_DISCONNECT, vmPutNodeMsgToWriteQueue, 0) == NULL) goto _OVER;
  // if (dmSetMgmtHandle(pArray, TDMT_VND_MQ_SET_CUR, vmPutNodeMsgToWriteQueue, 0)== NULL) goto _OVER;
  if (dmSetMgmtHandle(pArray, TDMT_VND_RES_READY, vmPutNodeMsgToFetchQueue, 0) == NULL) goto _OVER;
  if (dmSetMgmtHandle(pArray, TDMT_VND_TASKS_STATUS, vmPutNodeMsgToFetchQueue, 0) == NULL) goto _OVER;
  if (dmSetMgmtHandle(pArray, TDMT_VND_CANCEL_TASK, vmPutNodeMsgToFetchQueue, 0) == NULL) goto _OVER;
  if (dmSetMgmtHandle(pArray, TDMT_VND_DROP_TASK, vmPutNodeMsgToFetchQueue, 0) == NULL) goto _OVER;
  if (dmSetMgmtHandle(pArray, TDMT_VND_CREATE_STB, vmPutNodeMsgToWriteQueue, 0) == NULL) goto _OVER;
@@ -64,6 +64,8 @@ static void dmProcessRpcMsg(SDnode *pDnode, SRpcMsg *pRpc, SEpSet *pEpSet) {
  } else if (pRpc->msgType == TDMT_MND_SYSTABLE_RETRIEVE_RSP || pRpc->msgType == TDMT_VND_FETCH_RSP) {
    qWorkerProcessFetchRsp(NULL, NULL, pRpc);
    return;
  } else if (pRpc->msgType == TDMT_MND_STATUS_RSP && pEpSet != NULL) {
    dmSetMnodeEpSet(&pDnode->data, pEpSet);
  } else {
  }

@@ -204,29 +206,28 @@ static inline void dmSendRsp(SRpcMsg *pMsg) {
}

static void dmBuildMnodeRedirectRsp(SDnode *pDnode, SRpcMsg *pMsg) {
  SMEpSet msg = {0};
  dmGetMnodeEpSetForRedirect(&pDnode->data, pMsg, &msg.epSet);
  SEpSet epSet = {0};
  dmGetMnodeEpSetForRedirect(&pDnode->data, pMsg, &epSet);

  int32_t contLen = tSerializeSMEpSet(NULL, 0, &msg);
  int32_t contLen = tSerializeSEpSet(NULL, 0, &epSet);
  pMsg->pCont = rpcMallocCont(contLen);
  if (pMsg->pCont == NULL) {
    pMsg->code = TSDB_CODE_OUT_OF_MEMORY;
  } else {
    tSerializeSMEpSet(pMsg->pCont, contLen, &msg);
    tSerializeSEpSet(pMsg->pCont, contLen, &epSet);
    pMsg->contLen = contLen;
  }
}

static inline void dmSendRedirectRsp(SRpcMsg *pMsg, const SEpSet *pNewEpSet) {
  SRpcMsg rsp = {.code = TSDB_CODE_RPC_REDIRECT, .info = pMsg->info};
  SMEpSet msg = {.epSet = *pNewEpSet};
  int32_t contLen = tSerializeSMEpSet(NULL, 0, &msg);
  int32_t contLen = tSerializeSEpSet(NULL, 0, pNewEpSet);

  rsp.pCont = rpcMallocCont(contLen);
  if (rsp.pCont == NULL) {
    terrno = TSDB_CODE_OUT_OF_MEMORY;
  } else {
    tSerializeSMEpSet(rsp.pCont, contLen, &msg);
    tSerializeSEpSet(rsp.pCont, contLen, pNewEpSet);
    rsp.contLen = contLen;
  }
  dmSendRsp(&rsp);

@@ -326,6 +326,7 @@ void dmGetMnodeEpSetForRedirect(SDnodeData *pData, SRpcMsg *pMsg, SEpSet *pEpSet
}

void dmSetMnodeEpSet(SDnodeData *pData, SEpSet *pEpSet) {
  if (memcmp(pEpSet, &pData->mnodeEps, sizeof(SEpSet)) == 0) return;
  taosThreadRwlockWrlock(&pData->lock);
  pData->mnodeEps = *pEpSet;
  taosThreadRwlockUnlock(&pData->lock);
@@ -124,6 +124,11 @@ typedef enum {
  TRN_POLICY_RETRY = 1,
} ETrnPolicy;

typedef enum {
  TRN_EXEC_PARALLEL = 0,
  TRN_EXEC_ONE_BY_ONE = 1,
} ETrnExecType;

typedef enum {
  DND_REASON_ONLINE = 0,
  DND_REASON_STATUS_MSG_TIMEOUT,

@@ -152,6 +157,7 @@ typedef struct {
  ETrnStage   stage;
  ETrnPolicy  policy;
  ETrnType    type;
  ETrnExecType parallel;
  int32_t     code;
  int32_t     failedTimes;
  SRpcHandleInfo rpcInfo;

@@ -81,6 +81,7 @@ typedef struct {
  bool    standby;
  bool    restored;
  int32_t errCode;
  int32_t transId;
} SSyncMgmt;

typedef struct {

@@ -25,7 +25,7 @@ extern "C" {
int32_t mndInitSync(SMnode *pMnode);
void    mndCleanupSync(SMnode *pMnode);
bool    mndIsMaster(SMnode *pMnode);
int32_t mndSyncPropose(SMnode *pMnode, SSdbRaw *pRaw);
int32_t mndSyncPropose(SMnode *pMnode, SSdbRaw *pRaw, int32_t transId);
void    mndSyncStart(SMnode *pMnode);
void    mndSyncStop(SMnode *pMnode);

@@ -57,6 +57,7 @@ int32_t mndTransAppendUndoAction(STrans *pTrans, STransAction *pAction);
void    mndTransSetRpcRsp(STrans *pTrans, void *pCont, int32_t contLen);
void    mndTransSetCb(STrans *pTrans, ETrnFuncType startFunc, ETrnFuncType stopFunc, void *param, int32_t paramLen);
void    mndTransSetDbInfo(STrans *pTrans, SDbObj *pDb);
void    mndTransSetExecOneByOne(STrans *pTrans);

int32_t mndTransPrepare(SMnode *pMnode, STrans *pTrans);
void    mndTransProcessRsp(SRpcMsg *pRsp);

@@ -20,6 +20,7 @@
#include "mndShow.h"
#include "mndTrans.h"
#include "mndUser.h"
#include "mndSync.h"

#define MNODE_VER_NUMBER 1
#define MNODE_RESERVE_SIZE 64
@@ -222,23 +223,24 @@ bool mndIsMnode(SMnode *pMnode, int32_t dnodeId) {
}

void mndGetMnodeEpSet(SMnode *pMnode, SEpSet *pEpSet) {
  SSdb *pSdb = pMnode->pSdb;
  pEpSet->numOfEps = 0;
  SSdb   *pSdb = pMnode->pSdb;
  int32_t totalMnodes = sdbGetSize(pSdb, SDB_MNODE);
  void *pIter = NULL;

  void *pIter = NULL;
  while (1) {
    SMnodeObj *pObj = NULL;
    pIter = sdbFetch(pSdb, SDB_MNODE, pIter, (void **)&pObj);
    if (pIter == NULL) break;
    if (pObj->pDnode == NULL) {
      mError("mnode:%d, no corresponding dnode exists", pObj->id);
    } else {
      if (pObj->id == pMnode->selfDnodeId || pObj->state == TAOS_SYNC_STATE_LEADER) {

      if (pObj->id == pMnode->selfDnodeId) {
        if (mndIsMaster(pMnode)) {
          pEpSet->inUse = pEpSet->numOfEps;
        } else {
          pEpSet->inUse = (pEpSet->numOfEps + 1) % totalMnodes;
        }
        addEpIntoEpSet(pEpSet, pObj->pDnode->fqdn, pObj->pDnode->port);
        sdbRelease(pSdb, pObj);
      }
      addEpIntoEpSet(pEpSet, pObj->pDnode->fqdn, pObj->pDnode->port);
      sdbRelease(pSdb, pObj);
    }
  }
@@ -312,25 +314,6 @@ static int32_t mndSetCreateMnodeRedoActions(SMnode *pMnode, STrans *pTrans, SDno
  createEpset.eps[0].port = pDnode->port;
  memcpy(createEpset.eps[0].fqdn, pDnode->fqdn, TSDB_FQDN_LEN);

  {
    int32_t contLen = tSerializeSDCreateMnodeReq(NULL, 0, &alterReq);
    void   *pReq = taosMemoryMalloc(contLen);
    tSerializeSDCreateMnodeReq(pReq, contLen, &alterReq);

    STransAction action = {
        .epSet = alterEpset,
        .pCont = pReq,
        .contLen = contLen,
        .msgType = TDMT_DND_ALTER_MNODE,
        .acceptableCode = 0,
    };

    if (mndTransAppendRedoAction(pTrans, &action) != 0) {
      taosMemoryFree(pReq);
      return -1;
    }
  }

  {
    int32_t contLen = tSerializeSDCreateMnodeReq(NULL, 0, &createReq);
    void   *pReq = taosMemoryMalloc(contLen);

@@ -350,6 +333,25 @@ static int32_t mndSetCreateMnodeRedoActions(SMnode *pMnode, STrans *pTrans, SDno
    }
  }

  {
    int32_t contLen = tSerializeSDCreateMnodeReq(NULL, 0, &alterReq);
    void   *pReq = taosMemoryMalloc(contLen);
    tSerializeSDCreateMnodeReq(pReq, contLen, &alterReq);

    STransAction action = {
        .epSet = alterEpset,
        .pCont = pReq,
        .contLen = contLen,
        .msgType = TDMT_DND_ALTER_MNODE,
        .acceptableCode = 0,
    };

    if (mndTransAppendRedoAction(pTrans, &action) != 0) {
      taosMemoryFree(pReq);
      return -1;
    }
  }

  return 0;
}
@@ -365,6 +367,7 @@ static int32_t mndCreateMnode(SMnode *pMnode, SRpcMsg *pReq, SDnodeObj *pDnode,
  if (pTrans == NULL) goto _OVER;

  mDebug("trans:%d, used to create mnode:%d", pTrans->id, pCreate->dnodeId);
  mndTransSetExecOneByOne(pTrans);
  if (mndSetCreateMnodeRedoLogs(pMnode, pTrans, &mnodeObj) != 0) goto _OVER;
  if (mndSetCreateMnodeCommitLogs(pMnode, pTrans, &mnodeObj) != 0) goto _OVER;
  if (mndSetCreateMnodeRedoActions(pMnode, pTrans, pDnode, &mnodeObj) != 0) goto _OVER;

@@ -536,7 +539,7 @@ static int32_t mndDropMnode(SMnode *pMnode, SRpcMsg *pReq, SMnodeObj *pObj) {
  if (pTrans == NULL) goto _OVER;

  mDebug("trans:%d, used to drop mnode:%d", pTrans->id, pObj->id);

  mndTransSetExecOneByOne(pTrans);
  if (mndSetDropMnodeRedoLogs(pMnode, pTrans, pObj) != 0) goto _OVER;
  if (mndSetDropMnodeCommitLogs(pMnode, pTrans, pObj) != 0) goto _OVER;
  if (mndSetDropMnodeRedoActions(pMnode, pTrans, pObj->pDnode, pObj) != 0) goto _OVER;

@@ -701,14 +704,17 @@ static int32_t mndProcessAlterMnodeReq(SRpcMsg *pReq) {
    }
  }

  mTrace("trans:-1, sync reconfig will be proposed");

  SSyncMgmt *pMgmt = &pMnode->syncMgmt;
  pMgmt->standby = 0;
  int32_t code = syncReconfig(pMgmt->sync, &cfg);
  if (code != 0) {
    mError("failed to alter mnode sync since %s", terrstr());
    mError("trans:-1, failed to propose sync reconfig since %s", terrstr());
    return code;
  } else {
    pMgmt->errCode = 0;
    pMgmt->transId = -1;
    tsem_wait(&pMgmt->syncSem);
    mInfo("alter mnode sync result:%s", tstrerror(pMgmt->errCode));
    terrno = pMgmt->errCode;
@@ -507,6 +507,7 @@ static int32_t mndCreateSma(SMnode *pMnode, SRpcMsg *pReq, SMCreateSmaReq *pCrea

  mDebug("trans:%d, used to create sma:%s", pTrans->id, pCreate->name);
  mndTransSetDbInfo(pTrans, pDb);
  mndTransSetExecOneByOne(pTrans);

  if (mndSetCreateSmaRedoLogs(pMnode, pTrans, &smaObj) != 0) goto _OVER;
  if (mndSetCreateSmaVgroupRedoLogs(pMnode, pTrans, &streamObj.fixedSinkVg) != 0) goto _OVER;
@@ -28,16 +28,26 @@ int32_t mndSyncEqMsg(const SMsgCb *msgcb, SRpcMsg *pMsg) {
int32_t mndSyncSendMsg(const SEpSet *pEpSet, SRpcMsg *pMsg) { return tmsgSendReq(pEpSet, pMsg); }

void mndSyncCommitMsg(struct SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsmCbMeta cbMeta) {
  SMnode  *pMnode = pFsm->data;
  SSdbRaw *pRaw = pMsg->pCont;
  SMnode    *pMnode = pFsm->data;
  SSyncMgmt *pMgmt = &pMnode->syncMgmt;
  SSdbRaw   *pRaw = pMsg->pCont;

  mTrace("raw:%p, apply to sdb, ver:%" PRId64 " term:%" PRId64 " role:%s", pRaw, cbMeta.index, cbMeta.term,
         syncStr(cbMeta.state));
  sdbWriteWithoutFree(pMnode->pSdb, pRaw);
  sdbSetApplyIndex(pMnode->pSdb, cbMeta.index);
  sdbSetApplyTerm(pMnode->pSdb, cbMeta.term);
  if (cbMeta.state == TAOS_SYNC_STATE_LEADER) {
    tsem_post(&pMnode->syncMgmt.syncSem);
  int32_t transId = sdbGetIdFromRaw(pRaw);
  pMgmt->errCode = cbMeta.code;
  mTrace("trans:%d, is proposed, savedTransId:%d code:0x%x, ver:%" PRId64 " term:%" PRId64 " role:%s raw:%p", transId,
         pMgmt->transId, cbMeta.code, cbMeta.index, cbMeta.term, syncStr(cbMeta.state), pRaw);

  if (pMgmt->errCode == 0) {
    sdbWriteWithoutFree(pMnode->pSdb, pRaw);
    sdbSetApplyIndex(pMnode->pSdb, cbMeta.index);
    sdbSetApplyTerm(pMnode->pSdb, cbMeta.term);
  }

  if (pMgmt->transId == transId) {
    if (pMgmt->errCode != 0) {
      mError("trans:%d, failed to propose since %s", transId, tstrerror(pMgmt->errCode));
    }
    tsem_post(&pMgmt->syncSem);
  }
}

@@ -78,11 +88,19 @@ int32_t mndSnapshotApply(struct SSyncFSM* pFsm, const SSnapshot* pSnapshot, char
}

void mndReConfig(struct SSyncFSM *pFsm, SSyncCfg newCfg, SReConfigCbMeta cbMeta) {
  mInfo("mndReConfig cbMeta.code:%d, cbMeta.currentTerm:%" PRId64 ", cbMeta.term:%" PRId64 ", cbMeta.index:%" PRId64,
        cbMeta.code, cbMeta.currentTerm, cbMeta.term, cbMeta.index);
  SMnode *pMnode = pFsm->data;
  pMnode->syncMgmt.errCode = cbMeta.code;
  tsem_post(&pMnode->syncMgmt.syncSem);
  SMnode    *pMnode = pFsm->data;
  SSyncMgmt *pMgmt = &pMnode->syncMgmt;

  pMgmt->errCode = cbMeta.code;
  mInfo("trans:-1, sync reconfig is proposed, savedTransId:%d code:0x%x, curTerm:%" PRId64 " term:%" PRId64,
        pMgmt->transId, cbMeta.code, cbMeta.index, cbMeta.term);

  if (pMgmt->transId == -1) {
    if (pMgmt->errCode != 0) {
      mError("trans:-1, failed to propose sync reconfig since %s", tstrerror(pMgmt->errCode));
    }
    tsem_post(&pMgmt->syncSem);
  }
}

SSyncFSM *mndSyncMakeFsm(SMnode *pMnode) {
@@ -165,15 +183,17 @@ void mndCleanupSync(SMnode *pMnode) {
  memset(pMgmt, 0, sizeof(SSyncMgmt));
}

int32_t mndSyncPropose(SMnode *pMnode, SSdbRaw *pRaw) {
int32_t mndSyncPropose(SMnode *pMnode, SSdbRaw *pRaw, int32_t transId) {
  SSyncMgmt *pMgmt = &pMnode->syncMgmt;
  pMgmt->errCode = 0;

  SRpcMsg rsp = {.code = TDMT_MND_APPLY_MSG, .contLen = sdbGetRawTotalSize(pRaw)};
  SRpcMsg rsp = {.code = TDMT_MND_APPLY_MSG, .contLen = sdbGetRawTotalSize(pRaw)};
  rsp.pCont = rpcMallocCont(rsp.contLen);
  if (rsp.pCont == NULL) return -1;
  memcpy(rsp.pCont, pRaw, rsp.contLen);

  pMgmt->errCode = 0;
  pMgmt->transId = transId;
  mTrace("trans:%d, will be proposed", pMgmt->transId);

  const bool isWeak = false;
  int32_t    code = syncPropose(pMgmt->sync, &rsp, isWeak);
  if (code == 0) {

@@ -187,7 +207,11 @@ int32_t mndSyncPropose(SMnode *pMnode, SSdbRaw *pRaw) {
  }

  rpcFreeCont(rsp.pCont);
  if (code != 0) return code;
  if (code != 0) {
    mError("trans:%d, failed to propose, code:0x%x", pMgmt->transId, code);
    return code;
  }

  return pMgmt->errCode;
}
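Condensed view of the propose/apply handshake introduced by these hunks (a sketch assembled from the code above, not additional code): the proposer stamps the pending transaction id before replicating the raw entry, and the FSM commit callback matches that id back and wakes the waiter.

  pMgmt->errCode = 0;
  pMgmt->transId = transId;                          // recorded in mndSyncPropose()
  int32_t code = syncPropose(pMgmt->sync, &rsp, false);
  // ... later, in mndSyncCommitMsg():
  int32_t appliedId = sdbGetIdFromRaw(pRaw);
  if (pMgmt->transId == appliedId) {
    tsem_post(&pMgmt->syncSem);                      // release the blocked proposer
  }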
@@ -212,6 +236,17 @@ void mndSyncStop(SMnode *pMnode) {}

bool mndIsMaster(SMnode *pMnode) {
  SSyncMgmt *pMgmt = &pMnode->syncMgmt;

  ESyncState state = syncGetMyRole(pMgmt->sync);
  return (state == TAOS_SYNC_STATE_LEADER) && (pMnode->syncMgmt.restored);
  if (state != TAOS_SYNC_STATE_LEADER) {
    terrno = TSDB_CODE_SYN_NOT_LEADER;
    return false;
  }

  if (!pMgmt->restored) {
    terrno = TSDB_CODE_APP_NOT_READY;
    return false;
  }

  return true;
}
@@ -140,6 +140,7 @@ static SSdbRaw *mndTransActionEncode(STrans *pTrans) {
  SDB_SET_INT16(pRaw, dataPos, stage, _OVER)
  SDB_SET_INT16(pRaw, dataPos, pTrans->policy, _OVER)
  SDB_SET_INT16(pRaw, dataPos, pTrans->type, _OVER)
  SDB_SET_INT16(pRaw, dataPos, pTrans->parallel, _OVER)
  SDB_SET_INT64(pRaw, dataPos, pTrans->createdTime, _OVER)
  SDB_SET_INT64(pRaw, dataPos, pTrans->dbUid, _OVER)
  SDB_SET_BINARY(pRaw, dataPos, pTrans->dbname, TSDB_DB_FNAME_LEN, _OVER)

@@ -245,12 +246,15 @@ static SSdbRow *mndTransActionDecode(SSdbRaw *pRaw) {
  int16_t stage = 0;
  int16_t policy = 0;
  int16_t type = 0;
  int16_t parallel = 0;
  SDB_GET_INT16(pRaw, dataPos, &stage, _OVER)
  SDB_GET_INT16(pRaw, dataPos, &policy, _OVER)
  SDB_GET_INT16(pRaw, dataPos, &type, _OVER)
  SDB_GET_INT16(pRaw, dataPos, &parallel, _OVER)
  pTrans->stage = stage;
  pTrans->policy = policy;
  pTrans->type = type;
  pTrans->parallel = parallel;
  SDB_GET_INT64(pRaw, dataPos, &pTrans->createdTime, _OVER)
  SDB_GET_INT64(pRaw, dataPos, &pTrans->dbUid, _OVER)
  SDB_GET_BINARY(pRaw, dataPos, pTrans->dbname, TSDB_DB_FNAME_LEN, _OVER)

@@ -665,6 +669,8 @@ void mndTransSetDbInfo(STrans *pTrans, SDbObj *pDb) {
  memcpy(pTrans->dbname, pDb->name, TSDB_DB_FNAME_LEN);
}

void mndTransSetExecOneByOne(STrans *pTrans) { pTrans->parallel = TRN_EXEC_ONE_BY_ONE; }

static int32_t mndTransSync(SMnode *pMnode, STrans *pTrans) {
  SSdbRaw *pRaw = mndTransActionEncode(pTrans);
  if (pRaw == NULL) {

@@ -674,7 +680,7 @@ static int32_t mndTransSync(SMnode *pMnode, STrans *pTrans) {
  sdbSetRawStatus(pRaw, SDB_STATUS_READY);

  mDebug("trans:%d, sync to other nodes", pTrans->id);
  int32_t code = mndSyncPropose(pMnode, pRaw);
  int32_t code = mndSyncPropose(pMnode, pRaw, pTrans->id);
  if (code != 0) {
    mError("trans:%d, failed to sync since %s", pTrans->id, terrstr());
    sdbFreeRaw(pRaw);
@@ -970,7 +976,18 @@ static int32_t mndTransSendActionMsg(SMnode *pMnode, STrans *pTrans, SArray *pAr
  for (int32_t action = 0; action < numOfActions; ++action) {
    STransAction *pAction = taosArrayGet(pArray, action);
    if (pAction == NULL) continue;
    if (pAction->msgSent) continue;

    if (pAction->msgSent) {
      if (pAction->msgReceived) {
        continue;
      } else {
        if (pTrans->parallel == TRN_EXEC_ONE_BY_ONE) {
          break;
        } else {
          continue;
        }
      }
    }

    int64_t signature = pTrans->id;
    signature = (signature << 32);

@@ -990,6 +1007,9 @@ static int32_t mndTransSendActionMsg(SMnode *pMnode, STrans *pTrans, SArray *pAr
      pAction->msgSent = 1;
      pAction->msgReceived = 0;
      pAction->errCode = 0;
      if (pTrans->parallel == TRN_EXEC_ONE_BY_ONE) {
        break;
      }
    } else {
      pAction->msgSent = 0;
      pAction->msgReceived = 0;
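The gating above reduces to this decision (a sketch, not the full loop): an action that was sent but not yet answered blocks a TRN_EXEC_ONE_BY_ONE transaction, while a parallel transaction simply skips it and keeps dispatching.

  if (pAction->msgSent && !pAction->msgReceived) {
    if (pTrans->parallel == TRN_EXEC_ONE_BY_ONE) break;   // serial: wait for the outstanding reply
    continue;                                             // parallel: move on to the next action
  }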
@@ -408,46 +408,74 @@ int32_t mndProcessSyncMsg(SRpcMsg *pMsg) {
  return code;
}

int32_t mndProcessMsg(SRpcMsg *pMsg) {
  SMnode *pMnode = pMsg->info.node;
  void   *ahandle = pMsg->info.ahandle;
  mTrace("msg:%p, will be processed, type:%s app:%p", pMsg, TMSG_INFO(pMsg->msgType), ahandle);
static int32_t mndCheckMnodeMaster(SRpcMsg *pMsg) {
  if (!IsReq(pMsg)) return 0;
  if (mndIsMaster(pMsg->info.node)) return 0;

  if (IsReq(pMsg)) {
    if (!mndIsMaster(pMnode)) {
      terrno = TSDB_CODE_APP_NOT_READY;
      mDebug("msg:%p, failed to process since %s, app:%p", pMsg, terrstr(), ahandle);
      return -1;
    }
  if (pMsg->msgType == TDMT_MND_MQ_TIMER || pMsg->msgType == TDMT_MND_TELEM_TIMER ||
      pMsg->msgType == TDMT_MND_TRANS_TIMER) {
    return -1;
  }
  mError("msg:%p, failed to check master since %s, app:%p type:%s", pMsg, terrstr(), pMsg->info.ahandle,
         TMSG_INFO(pMsg->msgType));

    if (pMsg->contLen == 0 || pMsg->pCont == NULL) {
      terrno = TSDB_CODE_INVALID_MSG_LEN;
      mError("msg:%p, failed to process since %s, app:%p", pMsg, terrstr(), ahandle);
      return -1;
  SEpSet epSet = {0};
  mndGetMnodeEpSet(pMsg->info.node, &epSet);

#if 0
  mTrace("msg:%p, is redirected, num:%d use:%d", pMsg, epSet.numOfEps, epSet.inUse);
  for (int32_t i = 0; i < epSet.numOfEps; ++i) {
    mTrace("mnode index:%d %s:%u", i, epSet.eps[i].fqdn, epSet.eps[i].port);
    if (strcmp(epSet.eps[i].fqdn, tsLocalFqdn) == 0 && epSet.eps[i].port == tsServerPort) {
      epSet.inUse = (i + 1) % epSet.numOfEps;
    }
  }
#endif

  int32_t contLen = tSerializeSEpSet(NULL, 0, &epSet);
  pMsg->info.rsp = rpcMallocCont(contLen);
  if (pMsg->info.rsp != NULL) {
    tSerializeSEpSet(pMsg->info.rsp, contLen, &epSet);
    pMsg->info.rspLen = contLen;
    terrno = TSDB_CODE_RPC_REDIRECT;
  } else {
    terrno = TSDB_CODE_OUT_OF_MEMORY;
  }

  return -1;
}

static int32_t mndCheckRequestValid(SRpcMsg *pMsg) {
  if (!IsReq(pMsg)) return 0;
  if (pMsg->contLen != 0 && pMsg->pCont != NULL) return 0;

  mError("msg:%p, failed to valid request, app:%p type:%s", pMsg, pMsg->info.ahandle, TMSG_INFO(pMsg->msgType));
  terrno = TSDB_CODE_INVALID_MSG_LEN;
  return -1;
}

int32_t mndProcessMsg(SRpcMsg *pMsg) {
  if (mndCheckMnodeMaster(pMsg) != 0) return -1;
  if (mndCheckRequestValid(pMsg) != 0) return -1;

  SMnode   *pMnode = pMsg->info.node;
  MndMsgFp fp = pMnode->msgFp[TMSG_INDEX(pMsg->msgType)];
  if (fp == NULL) {
    mError("msg:%p, failed to get msg handle, app:%p type:%s", pMsg, pMsg->info.ahandle, TMSG_INFO(pMsg->msgType));
    terrno = TSDB_CODE_MSG_NOT_PROCESSED;
    mError("msg:%p, failed to process since no msg handle, app:%p", pMsg, ahandle);
    return -1;
  }

  mTrace("msg:%p, will be processed in mnode, app:%p type:%s", pMsg, pMsg->info.ahandle, TMSG_INFO(pMsg->msgType));
  int32_t code = (*fp)(pMsg);
  if (code == TSDB_CODE_ACTION_IN_PROGRESS) {
    terrno = code;
    mTrace("msg:%p, in progress, app:%p", pMsg, ahandle);
  } else if (code != 0) {
    if (terrno != TSDB_CODE_OPS_NOT_SUPPORT) {
      mError("msg:%p, failed to process since %s, app:%p", pMsg, terrstr(), ahandle);
    } else {
      mTrace("msg:%p, failed to process since %s, app:%p", pMsg, terrstr(), ahandle);
    }
    mTrace("msg:%p, won't response immediately since in progress", pMsg);
  } else if (code == 0) {
    mTrace("msg:%p, successfully processed and response", pMsg);
  } else {
    mTrace("msg:%p, is processed, app:%p", pMsg, ahandle);
    mError("msg:%p, failed to process since %s, app:%p type:%s", pMsg, terrstr(), pMsg->info.ahandle,
           TMSG_INFO(pMsg->msgType));
  }

  return code;
}
@@ -386,6 +386,8 @@ SSdbIter *sdbIterRead(SSdb *pSdb, SSdbIter *iter, char **ppBuf, int32_t *len);
const char *sdbTableName(ESdbType type);
void        sdbPrintOper(SSdb *pSdb, SSdbRow *pRow, const char *oper);

int32_t sdbGetIdFromRaw(SSdbRaw *pRaw);

#ifdef __cplusplus
}
#endif

@@ -16,6 +16,11 @@
#define _DEFAULT_SOURCE
#include "sdb.h"

int32_t sdbGetIdFromRaw(SSdbRaw *pRaw) {
  int32_t id = *((int32_t *)(pRaw->pData));
  return id;
}

SSdbRaw *sdbAllocRaw(ESdbType type, int8_t sver, int32_t dataLen) {
  SSdbRaw *pRaw = taosMemoryCalloc(1, dataLen + sizeof(SSdbRaw));
  if (pRaw == NULL) {
@@ -60,21 +60,12 @@ int32_t qndProcessQueryMsg(SQnode *pQnode, SRpcMsg *pMsg) {
    case TDMT_VND_FETCH_RSP:
      code = qWorkerProcessFetchRsp(pQnode, pQnode->pQuery, pMsg);
      break;
    case TDMT_VND_RES_READY:
      code = qWorkerProcessReadyMsg(pQnode, pQnode->pQuery, pMsg);
      break;
    case TDMT_VND_TASKS_STATUS:
      code = qWorkerProcessStatusMsg(pQnode, pQnode->pQuery, pMsg);
      break;
    case TDMT_VND_CANCEL_TASK:
      code = qWorkerProcessCancelMsg(pQnode, pQnode->pQuery, pMsg);
      break;
    case TDMT_VND_DROP_TASK:
      code = qWorkerProcessDropMsg(pQnode, pQnode->pQuery, pMsg);
      break;
    case TDMT_VND_TABLE_META:
      // code = vnodeGetTableMeta(pQnode, pMsg);
      // break;
    case TDMT_VND_CONSUME:
      // code = tqProcessConsumeReq(pQnode->pTq, pMsg);
      // break;

@@ -209,10 +209,6 @@ int vnodeProcessFetchMsg(SVnode *pVnode, SRpcMsg *pMsg, SQueueInfo *pInfo) {
      return qWorkerProcessFetchMsg(pVnode, pVnode->pQuery, pMsg);
    case TDMT_VND_FETCH_RSP:
      return qWorkerProcessFetchRsp(pVnode, pVnode->pQuery, pMsg);
    case TDMT_VND_RES_READY:
      return qWorkerProcessReadyMsg(pVnode, pVnode->pQuery, pMsg);
    case TDMT_VND_TASKS_STATUS:
      return qWorkerProcessStatusMsg(pVnode, pVnode->pQuery, pMsg);
    case TDMT_VND_CANCEL_TASK:
      return qWorkerProcessCancelMsg(pVnode, pVnode->pQuery, pMsg);
    case TDMT_VND_DROP_TASK:

@@ -882,6 +882,8 @@ static SSDataBlock* doStreamBlockScan(SOperatorInfo* pOperator) {
    // TODO temporarily used, when the statement of "partition by tbname" is ready, remove this
    if (pInfo->assignBlockUid) {
      pInfo->pRes->info.groupId = uid;
    } else {
      pInfo->pRes->info.groupId = groupId;
    }

    int32_t numOfCols = pInfo->pRes->info.numOfCols;

@@ -1128,7 +1128,7 @@ static SSDataBlock* doStreamIntervalAgg(SOperatorInfo* pOperator) {
      continue;
    }

    pUpdated = hashIntervalAgg(pOperator, &pInfo->binfo.resultRowInfo, pBlock, 0);
    pUpdated = hashIntervalAgg(pOperator, &pInfo->binfo.resultRowInfo, pBlock, pBlock->info.groupId);
  }

  finalizeUpdatedResult(pOperator->numOfExprs, pInfo->aggSup.pResultBuf, pUpdated, pInfo->binfo.rowCellInfoOffset);
@@ -489,7 +489,7 @@ static int32_t translateStateDuration(SFunctionNode* pFunc, char* pErrBuf, int32

static int32_t translateCsum(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
  if (1 != LIST_LENGTH(pFunc->pParameterList)) {
    return TSDB_CODE_SUCCESS;
    return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName);
  }

  SNode* pPara = nodesListGetNode(pFunc->pParameterList, 0);

@@ -621,8 +621,10 @@ static int32_t translateTail(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {

    SValueNode* pValue = (SValueNode*)pParamNode;

    if (pValue->datum.i < ((i > 1) ? 0 : 1) || pValue->datum.i > 1000) {
      return invaildFuncParaValueErrMsg(pErrBuf, len, pFunc->functionName);
    if (pValue->datum.i < ((i > 1) ? 0 : 1) || pValue->datum.i > 100) {
      return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR,
                             "TAIL function second parameter should be in range [1, 100], "
                             "third parameter should be in range [0, 100]");
    }

    pValue->notReserved = true;
@@ -678,17 +680,41 @@ static int32_t translateUnique(SFunctionNode* pFunc, char* pErrBuf, int32_t len)
}

static int32_t translateDiff(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
  int32_t paraLen = LIST_LENGTH(pFunc->pParameterList);
  if (paraLen == 0 || paraLen > 2) {
  int32_t numOfParams = LIST_LENGTH(pFunc->pParameterList);
  if (numOfParams == 0 || numOfParams > 2) {
    return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName);
  }

  SExprNode* p1 = (SExprNode*)nodesListGetNode(pFunc->pParameterList, 0);
  if (!IS_SIGNED_NUMERIC_TYPE(p1->resType.type) && !IS_FLOAT_TYPE(p1->resType.type) &&
      TSDB_DATA_TYPE_BOOL != p1->resType.type) {
  //param0
  SNode* pParamNode0 = nodesListGetNode(pFunc->pParameterList, 0);
  if (nodeType(pParamNode0) != QUERY_NODE_COLUMN) {
    return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR,
                           "The first parameter of DIFF function can only be column");
  }

  uint8_t colType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type;
  if (!IS_SIGNED_NUMERIC_TYPE(colType) && !IS_FLOAT_TYPE(colType) &&
      TSDB_DATA_TYPE_BOOL != colType) {
    return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
  }
  pFunc->node.resType = p1->resType;

  //param1
  if (numOfParams == 2) {
    SNode* pParamNode1 = nodesListGetNode(pFunc->pParameterList, 1);
    if (QUERY_NODE_VALUE != nodeType(pParamNode1)) {
      return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
    }

    SValueNode* pValue = (SValueNode*)pParamNode1;
    if (pValue->datum.i != 0 && pValue->datum.i != 1) {
      return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR,
                             "Second parameter of DIFF function should be only 0 or 1");
    }

    pValue->notReserved = true;
  }

  pFunc->node.resType = (SDataType){.bytes = tDataTypes[colType].bytes, .type = colType};
  return TSDB_CODE_SUCCESS;
}
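The DIFF validation above follows the same two-step pattern used throughout this file; restated as a sketch (the commit keeps the checks inline): the first argument must be a column of a signed, float, or bool type, and the optional second argument must be a VALUE node whose integer literal is 0 or 1, marked notReserved once accepted.

  // sketch of the core checks, using only nodes already fetched above
  if (QUERY_NODE_COLUMN != nodeType(pParamNode0)) {
    return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR,
                           "The first parameter of DIFF function can only be column");
  }
  if (numOfParams == 2 && QUERY_NODE_VALUE != nodeType(nodesListGetNode(pFunc->pParameterList, 1))) {
    return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
  }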
@@ -2349,7 +2349,7 @@ static void doHandleDiff(SDiffInfo* pDiffInfo, int32_t type, const char* pv, SCo
    case TSDB_DATA_TYPE_FLOAT: {
      float v = *(float*)pv;
      float delta = factor*(v - pDiffInfo->prev.d64); // direct previous may be null
      if (delta < 0 && pDiffInfo->ignoreNegative) {
      if ((delta < 0 && pDiffInfo->ignoreNegative) || isinf(delta) || isnan(delta)) { //check for overflow
        colDataSetNull_f(pOutput->nullbitmap, pos);
      } else {
        colDataAppendFloat(pOutput, pos, &delta);

@@ -2360,7 +2360,7 @@ static void doHandleDiff(SDiffInfo* pDiffInfo, int32_t type, const char* pv, SCo
    case TSDB_DATA_TYPE_DOUBLE: {
      double v = *(double*)pv;
      double delta = factor*(v - pDiffInfo->prev.d64); // direct previous may be null
      if (delta < 0 && pDiffInfo->ignoreNegative) {
      if ((delta < 0 && pDiffInfo->ignoreNegative) || isinf(delta) || isnan(delta)) { //check for overflow
        colDataSetNull_f(pOutput->nullbitmap, pos);
      } else {
        colDataAppendDouble(pOutput, pos, &delta);
@@ -3530,7 +3530,12 @@ int32_t csumFunction(SqlFunctionCtx* pCtx) {
      double v;
      GET_TYPED_DATA(v, double, type, data);
      pSumRes->dsum += v;
      colDataAppend(pOutput, pos, (char *)&pSumRes->dsum, false);
      //check for overflow
      if (isinf(pSumRes->dsum) || isnan(pSumRes->dsum)) {
        colDataAppendNULL(pOutput, pos);
      } else {
        colDataAppend(pOutput, pos, (char *)&pSumRes->dsum, false);
      }
    }

    //TODO: remove this after pTsOutput is handled

@@ -3604,7 +3609,12 @@ int32_t mavgFunction(SqlFunctionCtx* pCtx) {

    pInfo->points[pInfo->pos] = v;
    double result = pInfo->sum / pInfo->numOfPoints;
    colDataAppend(pOutput, pos, (char *)&result, false);
    //check for overflow
    if (isinf(result) || isnan(result)) {
      colDataAppendNULL(pOutput, pos);
    } else {
      colDataAppend(pOutput, pos, (char *)&result, false);
    }

    //TODO: remove this after pTsOutput is handled
    if (pTsOutput != NULL) {
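The same guard appears in both csumFunction() and mavgFunction(); factored out it would look like the helper below (a sketch, assuming pOutput is the SColumnInfoData* used above — the commit keeps the check inline):

  static void appendDoubleOrNull(SColumnInfoData *pOutput, int32_t pos, double v) {
    if (isinf(v) || isnan(v)) {
      colDataAppendNULL(pOutput, pos);                 // an overflowed sum/average becomes NULL
    } else {
      colDataAppend(pOutput, pos, (char *)&v, false);
    }
  }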
@@ -27,13 +27,14 @@ extern "C" {
#include "querynodes.h"

typedef struct SAstCreateContext {
  SParseContext* pQueryCxt;
  SMsgBuf        msgBuf;
  bool           notSupport;
  SNode*         pRootNode;
  int16_t        placeholderNo;
  SArray*        pPlaceholderValues;
  int32_t        errCode;
  SParseContext* pQueryCxt;
  SMsgBuf        msgBuf;
  bool           notSupport;
  SNode*         pRootNode;
  int16_t        placeholderNo;
  SArray*        pPlaceholderValues;
  int32_t        errCode;
  SParseMetaCache* pMetaCache;
} SAstCreateContext;

typedef enum EDatabaseOptionType {

@@ -74,7 +75,7 @@ typedef struct SAlterOption {

extern SToken nil_token;

void initAstCreateContext(SParseContext* pParseCxt, SAstCreateContext* pCxt);
int32_t initAstCreateContext(SParseContext* pParseCxt, SAstCreateContext* pCxt);

SNode* createRawExprNode(SAstCreateContext* pCxt, const SToken* pToken, SNode* pNode);
SNode* createRawExprNodeExt(SAstCreateContext* pCxt, const SToken* pStart, const SToken* pEnd, SNode* pNode);
@@ -20,6 +20,7 @@
extern "C" {
#endif

#include "catalog.h"
#include "os.h"
#include "query.h"

@@ -37,6 +38,13 @@ typedef struct SMsgBuf {
  char* buf;
} SMsgBuf;

typedef struct SParseMetaCache {
  SHashObj* pTableMeta;   // key is tbFName, element is STableMeta*
  SHashObj* pDbVgroup;    // key is dbFName, element is SArray<SVgroupInfo>*
  SHashObj* pTableVgroup; // key is tbFName, element is SVgroupInfo*
  SHashObj* pDbCfg;       // key is tbFName, element is SDbCfgInfo
} SParseMetaCache;

int32_t generateSyntaxErrMsg(SMsgBuf* pBuf, int32_t errCode, ...);
int32_t buildInvalidOperationMsg(SMsgBuf* pMsgBuf, const char* msg);
int32_t buildSyntaxErrMsg(SMsgBuf* pBuf, const char* additionalInfo, const char* sourceStr);

@@ -47,10 +55,20 @@ int32_t getNumOfColumns(const STableMeta* pTableMeta);
int32_t getNumOfTags(const STableMeta* pTableMeta);
STableComInfo getTableInfo(const STableMeta* pTableMeta);
STableMeta* tableMetaDup(const STableMeta* pTableMeta);
int parseJsontoTagData(const char* json, SKVRowBuilder* kvRowBuilder, SMsgBuf* errMsg, int16_t startColId);
int32_t parseJsontoTagData(const char* json, SKVRowBuilder* kvRowBuilder, SMsgBuf* errMsg, int16_t startColId);

int32_t trimString(const char* src, int32_t len, char* dst, int32_t dlen);

int32_t buildCatalogReq(const SParseMetaCache* pMetaCache, SCatalogReq* pCatalogReq);
int32_t putMetaDataToCache(const SCatalogReq* pCatalogReq, const SMetaData* pMetaData, SParseMetaCache* pMetaCache);
int32_t reserveTableMetaInCache(int32_t acctId, const char* pDb, const char* pTable, SParseMetaCache* pMetaCache);
int32_t getTableMetaFromCache(SParseMetaCache* pMetaCache, const SName* pName, STableMeta** pMeta);
int32_t getDBVgInfoFromCache(SParseMetaCache* pMetaCache, const char* pDbFName, SArray** pVgInfo);
int32_t getTableHashVgroupFromCache(SParseMetaCache* pMetaCache, const SName* pName, SVgroupInfo* pVgroup);
int32_t getDBVgVersionFromCache(SParseMetaCache* pMetaCache, const char* pDbFName, int32_t* pVersion, int64_t* pDbId,
                                int32_t* pTableNum);
int32_t getDBCfgFromCache(SParseMetaCache* pMetaCache, const char* pDbFName, SDbCfgInfo* pInfo);

#ifdef __cplusplus
}
#endif
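A hedged sketch of how SParseMetaCache is meant to flow between the two parse phases, using only the functions declared above (acctId, name and metaData stand in for the caller's values; error handling omitted):

  SParseMetaCache metaCache = {0};
  reserveTableMetaInCache(acctId, "db1", "tb1", &metaCache);   // syntax phase records what it will need

  SCatalogReq catalogReq = {0};
  buildCatalogReq(&metaCache, &catalogReq);                    // one batched request for the catalog
  // ... caller resolves catalogReq into an SMetaData named metaData ...
  putMetaDataToCache(&catalogReq, &metaData, &metaCache);

  STableMeta *pMeta = NULL;
  getTableMetaFromCache(&metaCache, &name, &pMeta);            // translate phase reads from the cache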
@@ -38,7 +38,8 @@

SToken nil_token = {.type = TK_NK_NIL, .n = 0, .z = NULL};

void initAstCreateContext(SParseContext* pParseCxt, SAstCreateContext* pCxt) {
int32_t initAstCreateContext(SParseContext* pParseCxt, SAstCreateContext* pCxt) {
  memset(pCxt, 0, sizeof(SAstCreateContext));
  pCxt->pQueryCxt = pParseCxt;
  pCxt->msgBuf.buf = pParseCxt->pMsg;
  pCxt->msgBuf.len = pParseCxt->msgLen;

@@ -47,6 +48,13 @@ void initAstCreateContext(SParseContext* pParseCxt, SAstCreateContext* pCxt) {
  pCxt->placeholderNo = 0;
  pCxt->pPlaceholderValues = NULL;
  pCxt->errCode = TSDB_CODE_SUCCESS;
  if (pParseCxt->async) {
    pCxt->pMetaCache = taosMemoryCalloc(1, sizeof(SParseMetaCache));
    if (NULL == pCxt->pMetaCache) {
      return TSDB_CODE_OUT_OF_MEMORY;
    }
  }
  return TSDB_CODE_SUCCESS;
}

static void copyStringFormStringToken(SToken* pToken, char* pBuf, int32_t len) {

@@ -464,6 +472,13 @@ SNode* createRealTableNode(SAstCreateContext* pCxt, SToken* pDbName, SToken* pTa
    strncpy(realTable->table.tableAlias, pTableName->z, pTableName->n);
  }
  strncpy(realTable->table.tableName, pTableName->z, pTableName->n);
  if (NULL != pCxt->pMetaCache) {
    if (TSDB_CODE_SUCCESS != reserveTableMetaInCache(pCxt->pQueryCxt->acctId, realTable->table.dbName,
                                                     realTable->table.tableName, pCxt->pMetaCache)) {
      nodesDestroyNode(realTable);
      CHECK_OUT_OF_MEM(NULL);
    }
  }
  return (SNode*)realTable;
}

@@ -82,6 +82,7 @@ abort_parse:
    (*pQuery)->pRoot = cxt.pRootNode;
    (*pQuery)->placeholderNum = cxt.placeholderNo;
    TSWAP((*pQuery)->pPlaceholderValues, cxt.pPlaceholderValues);
    TSWAP((*pQuery)->pMetaCache, cxt.pMetaCache);
  }
  taosArrayDestroy(cxt.pPlaceholderValues);
  return cxt.errCode;
@@ -40,6 +40,7 @@ typedef struct STranslateContext {
  SHashObj*        pDbs;
  SHashObj*        pTables;
  SExplainOptions* pExplainOpt;
  SParseMetaCache* pMetaCache;
} STranslateContext;

typedef struct SFullDatabaseName {

@@ -102,12 +103,17 @@ static int32_t collectUseTable(const SName* pName, SHashObj* pDbs) {

static int32_t getTableMetaImpl(STranslateContext* pCxt, const SName* pName, STableMeta** pMeta) {
  SParseContext* pParCxt = pCxt->pParseCxt;
  int32_t code = collectUseDatabase(pName, pCxt->pDbs);
  if (TSDB_CODE_SUCCESS == code) {
    code = collectUseTable(pName, pCxt->pTables);
  }
  if (TSDB_CODE_SUCCESS == code) {
    code = catalogGetTableMeta(pParCxt->pCatalog, pParCxt->pTransporter, &pParCxt->mgmtEpSet, pName, pMeta);
  int32_t code = TSDB_CODE_SUCCESS;
  if (pParCxt->async) {
    code = getTableMetaFromCache(pCxt->pMetaCache, pName, pMeta);
  } else {
    code = collectUseDatabase(pName, pCxt->pDbs);
    if (TSDB_CODE_SUCCESS == code) {
      code = collectUseTable(pName, pCxt->pTables);
    }
    if (TSDB_CODE_SUCCESS == code) {
      code = catalogGetTableMeta(pParCxt->pCatalog, pParCxt->pTransporter, &pParCxt->mgmtEpSet, pName, pMeta);
    }
  }
  if (TSDB_CODE_SUCCESS != code) {
    parserError("catalogGetTableMeta error, code:%s, dbName:%s, tbName:%s", tstrerror(code), pName->dbname,

@@ -126,8 +132,13 @@ static int32_t refreshGetTableMeta(STranslateContext* pCxt, const char* pDbName,
  SParseContext* pParCxt = pCxt->pParseCxt;
  SName name;
  toName(pCxt->pParseCxt->acctId, pDbName, pTableName, &name);
  int32_t code =
      catalogRefreshGetTableMeta(pParCxt->pCatalog, pParCxt->pTransporter, &pParCxt->mgmtEpSet, &name, pMeta, false);
  int32_t code = TSDB_CODE_SUCCESS;
  if (pParCxt->async) {
    code = getTableMetaFromCache(pCxt->pMetaCache, &name, pMeta);
  } else {
    code =
        catalogRefreshGetTableMeta(pParCxt->pCatalog, pParCxt->pTransporter, &pParCxt->mgmtEpSet, &name, pMeta, false);
  }
  if (TSDB_CODE_SUCCESS != code) {
    parserError("catalogRefreshGetTableMeta error, code:%s, dbName:%s, tbName:%s", tstrerror(code), pDbName,
                pTableName);
@@ -135,29 +146,18 @@ static int32_t refreshGetTableMeta(STranslateContext* pCxt, const char* pDbName,
  return code;
}

static int32_t getTableDistVgInfo(STranslateContext* pCxt, const SName* pName, SArray** pVgInfo) {
  SParseContext* pParCxt = pCxt->pParseCxt;
  int32_t code = collectUseDatabase(pName, pCxt->pDbs);
  if (TSDB_CODE_SUCCESS == code) {
    code = collectUseTable(pName, pCxt->pTables);
  }
  if (TSDB_CODE_SUCCESS == code) {
    code = catalogGetTableDistVgInfo(pParCxt->pCatalog, pParCxt->pTransporter, &pParCxt->mgmtEpSet, pName, pVgInfo);
  }
  if (TSDB_CODE_SUCCESS != code) {
    parserError("catalogGetTableDistVgInfo error, code:%s, dbName:%s, tbName:%s", tstrerror(code), pName->dbname,
                pName->tname);
  }
  return code;
}

static int32_t getDBVgInfoImpl(STranslateContext* pCxt, const SName* pName, SArray** pVgInfo) {
  SParseContext* pParCxt = pCxt->pParseCxt;
  char fullDbName[TSDB_DB_FNAME_LEN];
  tNameGetFullDbName(pName, fullDbName);
  int32_t code = collectUseDatabaseImpl(fullDbName, pCxt->pDbs);
  if (TSDB_CODE_SUCCESS == code) {
    code = catalogGetDBVgInfo(pParCxt->pCatalog, pParCxt->pTransporter, &pParCxt->mgmtEpSet, fullDbName, pVgInfo);
  int32_t code = TSDB_CODE_SUCCESS;
  if (pParCxt->async) {
    code = getDBVgInfoFromCache(pCxt->pMetaCache, fullDbName, pVgInfo);
  } else {
    code = collectUseDatabaseImpl(fullDbName, pCxt->pDbs);
    if (TSDB_CODE_SUCCESS == code) {
      code = catalogGetDBVgInfo(pParCxt->pCatalog, pParCxt->pTransporter, &pParCxt->mgmtEpSet, fullDbName, pVgInfo);
    }
  }
  if (TSDB_CODE_SUCCESS != code) {
    parserError("catalogGetDBVgInfo error, code:%s, dbFName:%s", tstrerror(code), fullDbName);

@@ -175,12 +175,17 @@ static int32_t getDBVgInfo(STranslateContext* pCxt, const char* pDbName, SArray*

static int32_t getTableHashVgroupImpl(STranslateContext* pCxt, const SName* pName, SVgroupInfo* pInfo) {
  SParseContext* pParCxt = pCxt->pParseCxt;
  int32_t code = collectUseDatabase(pName, pCxt->pDbs);
  if (TSDB_CODE_SUCCESS == code) {
    code = collectUseTable(pName, pCxt->pTables);
  }
  if (TSDB_CODE_SUCCESS == code) {
    code = catalogGetTableHashVgroup(pParCxt->pCatalog, pParCxt->pTransporter, &pParCxt->mgmtEpSet, pName, pInfo);
  int32_t code = TSDB_CODE_SUCCESS;
  if (pParCxt->async) {
    code = getTableHashVgroupFromCache(pCxt->pMetaCache, pName, pInfo);
  } else {
    code = collectUseDatabase(pName, pCxt->pDbs);
    if (TSDB_CODE_SUCCESS == code) {
      code = collectUseTable(pName, pCxt->pTables);
    }
    if (TSDB_CODE_SUCCESS == code) {
      code = catalogGetTableHashVgroup(pParCxt->pCatalog, pParCxt->pTransporter, &pParCxt->mgmtEpSet, pName, pInfo);
    }
  }
  if (TSDB_CODE_SUCCESS != code) {
    parserError("catalogGetTableHashVgroup error, code:%s, dbName:%s, tbName:%s", tstrerror(code), pName->dbname,

@@ -198,9 +203,14 @@ static int32_t getTableHashVgroup(STranslateContext* pCxt, const char* pDbName,
static int32_t getDBVgVersion(STranslateContext* pCxt, const char* pDbFName, int32_t* pVersion, int64_t* pDbId,
                              int32_t* pTableNum) {
  SParseContext* pParCxt = pCxt->pParseCxt;
  int32_t code = collectUseDatabaseImpl(pDbFName, pCxt->pDbs);
  if (TSDB_CODE_SUCCESS == code) {
    code = catalogGetDBVgVersion(pParCxt->pCatalog, pDbFName, pVersion, pDbId, pTableNum);
  int32_t code = TSDB_CODE_SUCCESS;
  if (pParCxt->async) {
    code = getDBVgVersionFromCache(pCxt->pMetaCache, pDbFName, pVersion, pDbId, pTableNum);
  } else {
    code = collectUseDatabaseImpl(pDbFName, pCxt->pDbs);
    if (TSDB_CODE_SUCCESS == code) {
      code = catalogGetDBVgVersion(pParCxt->pCatalog, pDbFName, pVersion, pDbId, pTableNum);
    }
  }
  if (TSDB_CODE_SUCCESS != code) {
    parserError("catalogGetDBVgVersion error, code:%s, dbFName:%s", tstrerror(code), pDbFName);

@@ -214,9 +224,14 @@ static int32_t getDBCfg(STranslateContext* pCxt, const char* pDbName, SDbCfgInfo
  tNameSetDbName(&name, pCxt->pParseCxt->acctId, pDbName, strlen(pDbName));
  char dbFname[TSDB_DB_FNAME_LEN] = {0};
  tNameGetFullDbName(&name, dbFname);
  int32_t code = collectUseDatabaseImpl(dbFname, pCxt->pDbs);
  if (TSDB_CODE_SUCCESS == code) {
    code = catalogGetDBCfg(pParCxt->pCatalog, pParCxt->pTransporter, &pParCxt->mgmtEpSet, dbFname, pInfo);
  int32_t code = TSDB_CODE_SUCCESS;
  if (pParCxt->async) {
    code = getDBCfgFromCache(pCxt->pMetaCache, dbFname, pInfo);
  } else {
    code = collectUseDatabaseImpl(dbFname, pCxt->pDbs);
    if (TSDB_CODE_SUCCESS == code) {
      code = catalogGetDBCfg(pParCxt->pCatalog, pParCxt->pTransporter, &pParCxt->mgmtEpSet, dbFname, pInfo);
    }
  }
  if (TSDB_CODE_SUCCESS != code) {
    parserError("catalogGetDBCfg error, code:%s, dbFName:%s", tstrerror(code), dbFname);
@@ -224,7 +239,7 @@ static int32_t getDBCfg(STranslateContext* pCxt, const char* pDbName, SDbCfgInfo
  return code;
}

static int32_t initTranslateContext(SParseContext* pParseCxt, STranslateContext* pCxt) {
static int32_t initTranslateContext(SParseContext* pParseCxt, SParseMetaCache* pMetaCache, STranslateContext* pCxt) {
  pCxt->pParseCxt = pParseCxt;
  pCxt->errCode = TSDB_CODE_SUCCESS;
  pCxt->msgBuf.buf = pParseCxt->pMsg;

@@ -232,6 +247,7 @@ static int32_t initTranslateContext(SParseContext* pParseCxt, STranslateContext*
  pCxt->pNsLevel = taosArrayInit(TARRAY_MIN_SIZE, POINTER_BYTES);
  pCxt->currLevel = 0;
  pCxt->currClause = 0;
  pCxt->pMetaCache = pMetaCache;
  pCxt->pDbs = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK);
  pCxt->pTables = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK);
  if (NULL == pCxt->pNsLevel || NULL == pCxt->pDbs || NULL == pCxt->pTables) {

@@ -1225,7 +1241,7 @@ static int32_t setTableVgroupList(STranslateContext* pCxt, SName* pName, SRealTa
  int32_t code = TSDB_CODE_SUCCESS;
  if (TSDB_SUPER_TABLE == pRealTable->pMeta->tableType) {
    SArray* vgroupList = NULL;
    code = getTableDistVgInfo(pCxt, pName, &vgroupList);
    code = getDBVgInfoImpl(pCxt, pName, &vgroupList);
    if (TSDB_CODE_SUCCESS == code) {
      code = toVgroupsInfo(vgroupList, &pRealTable->pVgroupList);
    }

@@ -2797,7 +2813,7 @@ static int32_t buildRollupAst(STranslateContext* pCxt, SCreateTableStmt* pStmt,
  for (int32_t i = 1; i < num; ++i) {
    SRetention* pRetension = taosArrayGet(dbCfg.pRetensions, i);
    STranslateContext cxt = {0};
    initTranslateContext(pCxt->pParseCxt, &cxt);
    initTranslateContext(pCxt->pParseCxt, pCxt->pMetaCache, &cxt);
    code = getRollupAst(&cxt, pStmt, pRetension, dbCfg.precision, 1 == i ? &pReq->pAst1 : &pReq->pAst2,
                        1 == i ? &pReq->ast1Len : &pReq->ast2Len);
    destroyTranslateContext(&cxt);

@@ -4893,7 +4909,7 @@ static int32_t setQuery(STranslateContext* pCxt, SQuery* pQuery) {
int32_t translate(SParseContext* pParseCxt, SQuery* pQuery) {
  STranslateContext cxt = {0};

  int32_t code = initTranslateContext(pParseCxt, &cxt);
  int32_t code = initTranslateContext(pParseCxt, pQuery->pMetaCache, &cxt);
  if (TSDB_CODE_SUCCESS == code) {
    code = fmFuncMgtInit();
  }
@ -328,11 +328,11 @@ static bool isValidateTag(char* input) {
return true;
}

int parseJsontoTagData(const char* json, SKVRowBuilder* kvRowBuilder, SMsgBuf* pMsgBuf, int16_t startColId) {
int32_t parseJsontoTagData(const char* json, SKVRowBuilder* kvRowBuilder, SMsgBuf* pMsgBuf, int16_t startColId) {
// set json NULL data
uint8_t jsonNULL = TSDB_DATA_TYPE_NULL;
int jsonIndex = startColId + 1;
if (!json || strtrim((char*)json) == 0 ||strcasecmp(json, TSDB_DATA_NULL_STR_L) == 0) {
int32_t jsonIndex = startColId + 1;
if (!json || strtrim((char*)json) == 0 || strcasecmp(json, TSDB_DATA_NULL_STR_L) == 0) {
tdAddColToKVRow(kvRowBuilder, jsonIndex, &jsonNULL, CHAR_BYTES);
return TSDB_CODE_SUCCESS;
}
@ -343,15 +343,15 @@ int parseJsontoTagData(const char* json, SKVRowBuilder* kvRowBuilder, SMsgBuf* p
return buildSyntaxErrMsg(pMsgBuf, "json parse error", json);
}

int size = cJSON_GetArraySize(root);
int32_t size = cJSON_GetArraySize(root);
if (!cJSON_IsObject(root)) {
return buildSyntaxErrMsg(pMsgBuf, "json error invalide value", json);
}

int retCode = 0;
int32_t retCode = 0;
char* tagKV = NULL;
SHashObj* keyHash = taosHashInit(8, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, false);
for (int i = 0; i < size; i++) {
for (int32_t i = 0; i < size; i++) {
cJSON* item = cJSON_GetArrayItem(root, i);
if (!item) {
qError("json inner error:%d", i);
@ -365,9 +365,9 @@ int parseJsontoTagData(const char* json, SKVRowBuilder* kvRowBuilder, SMsgBuf* p
goto end;
}
size_t keyLen = strlen(jsonKey);
if(keyLen > TSDB_MAX_JSON_KEY_LEN){
if (keyLen > TSDB_MAX_JSON_KEY_LEN) {
qError("json key too long error");
retCode = buildSyntaxErrMsg(pMsgBuf, "json key too long, more than 256", jsonKey);
retCode = buildSyntaxErrMsg(pMsgBuf, "json key too long, more than 256", jsonKey);
goto end;
}
if (keyLen == 0 || taosHashGet(keyHash, jsonKey, keyLen) != NULL) {
@ -447,4 +447,186 @@ end:
taosHashCleanup(keyHash);
cJSON_Delete(root);
return retCode;
}
}
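A minimal usage sketch of parseJsontoTagData, not taken from this commit. It assumes the SKVRowBuilder helpers tdInitKVRowBuilder / tdGetKVRowFromBuilder / tdDestroyKVRowBuilder from tdataformat.h and an already prepared SMsgBuf; the startColId value is illustrative only.

static int32_t buildJsonTagRow(const char* json, SMsgBuf* pMsgBuf, SKVRow* ppRow) {
  SKVRowBuilder kvRowBuilder = {0};
  if (tdInitKVRowBuilder(&kvRowBuilder) < 0) {    // assumed helper from tdataformat.h
    return TSDB_CODE_OUT_OF_MEMORY;
  }
  // startColId: id of the column that precedes the JSON tag column (illustrative value)
  int32_t code = parseJsontoTagData(json, &kvRowBuilder, pMsgBuf, 1);
  if (TSDB_CODE_SUCCESS == code) {
    *ppRow = tdGetKVRowFromBuilder(&kvRowBuilder);  // caller owns the returned row
  }
  tdDestroyKVRowBuilder(&kvRowBuilder);
  return code;
}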
static int32_t buildTableReq(SHashObj* pTablesHash, SArray** pTables) {
if (NULL != pTablesHash) {
*pTables = taosArrayInit(taosHashGetSize(pTablesHash), sizeof(SName));
if (NULL == *pTables) {
return TSDB_CODE_OUT_OF_MEMORY;
}
void* p = taosHashIterate(pTablesHash, NULL);
while (NULL != p) {
size_t len = 0;
char* pKey = taosHashGetKey(p, &len);
char fullName[TSDB_TABLE_FNAME_LEN] = {0};
strncpy(fullName, pKey, len);
SName name = {0};
tNameFromString(&name, fullName, T_NAME_ACCT | T_NAME_DB | T_NAME_TABLE);
taosArrayPush(*pTables, &name);
p = taosHashIterate(pTablesHash, p);
}
}
return TSDB_CODE_SUCCESS;
}

static int32_t buildDbReq(SHashObj* pDbsHash, SArray** pDbs) {
if (NULL != pDbsHash) {
*pDbs = taosArrayInit(taosHashGetSize(pDbsHash), TSDB_DB_FNAME_LEN);
if (NULL == *pDbs) {
return TSDB_CODE_OUT_OF_MEMORY;
}
void* p = taosHashIterate(pDbsHash, NULL);
while (NULL != p) {
size_t len = 0;
char* pKey = taosHashGetKey(p, &len);
char fullName[TSDB_DB_FNAME_LEN] = {0};
strncpy(fullName, pKey, len);
taosArrayPush(*pDbs, fullName);
p = taosHashIterate(pDbsHash, p);
}
}
return TSDB_CODE_SUCCESS;
}

static int32_t buildTableMetaReq(SHashObj* pTableMetaHash, SArray** pTableMeta) {
return buildTableReq(pTableMetaHash, pTableMeta);
}

static int32_t buildDbVgroupReq(SHashObj* pDbVgroupHash, SArray** pDbVgroup) {
return buildDbReq(pDbVgroupHash, pDbVgroup);
}

static int32_t buildTableVgroupReq(SHashObj* pTableVgroupHash, SArray** pTableVgroup) {
return buildTableReq(pTableVgroupHash, pTableVgroup);
}

static int32_t buildDbCfgReq(SHashObj* pDbCfgHash, SArray** pDbCfg) { return buildDbReq(pDbCfgHash, pDbCfg); }

int32_t buildCatalogReq(const SParseMetaCache* pMetaCache, SCatalogReq* pCatalogReq) {
int32_t code = buildTableMetaReq(pMetaCache->pTableMeta, &pCatalogReq->pTableMeta);
if (TSDB_CODE_SUCCESS == code) {
code = buildDbVgroupReq(pMetaCache->pDbVgroup, &pCatalogReq->pDbVgroup);
}
if (TSDB_CODE_SUCCESS == code) {
code = buildTableVgroupReq(pMetaCache->pTableVgroup, &pCatalogReq->pTableHash);
}
if (TSDB_CODE_SUCCESS == code) {
code = buildDbCfgReq(pMetaCache->pDbVgroup, &pCatalogReq->pDbCfg);
}
return code;
}
static int32_t putTableMetaToCache(const SArray* pTableMetaReq, const SArray* pTableMetaData, SHashObj* pTableMeta) {
int32_t ntables = taosArrayGetSize(pTableMetaReq);
for (int32_t i = 0; i < ntables; ++i) {
char fullName[TSDB_TABLE_FNAME_LEN];
tNameExtractFullName(taosArrayGet(pTableMetaReq, i), fullName);
if (TSDB_CODE_SUCCESS !=
taosHashPut(pTableMeta, fullName, strlen(fullName), taosArrayGet(pTableMetaData, i), POINTER_BYTES)) {
return TSDB_CODE_OUT_OF_MEMORY;
}
}
return TSDB_CODE_SUCCESS;
}

static int32_t putDbVgroupToCache(const SArray* pDbVgroupReq, const SArray* pDbVgroupData, SHashObj* pDbVgroup) {
int32_t nvgs = taosArrayGetSize(pDbVgroupReq);
for (int32_t i = 0; i < nvgs; ++i) {
char* pDbFName = taosArrayGet(pDbVgroupReq, i);
if (TSDB_CODE_SUCCESS !=
taosHashPut(pDbVgroup, pDbFName, strlen(pDbFName), taosArrayGet(pDbVgroupData, i), POINTER_BYTES)) {
return TSDB_CODE_OUT_OF_MEMORY;
}
}
return TSDB_CODE_SUCCESS;
}

static int32_t putTableVgroupToCache(const SArray* pTableVgroupReq, const SArray* pTableVgroupData,
SHashObj* pTableVgroup) {
int32_t ntables = taosArrayGetSize(pTableVgroupReq);
for (int32_t i = 0; i < ntables; ++i) {
char fullName[TSDB_TABLE_FNAME_LEN];
tNameExtractFullName(taosArrayGet(pTableVgroupReq, i), fullName);
SVgroupInfo* pInfo = taosArrayGet(pTableVgroupData, i);
if (TSDB_CODE_SUCCESS != taosHashPut(pTableVgroup, fullName, strlen(fullName), &pInfo, POINTER_BYTES)) {
return TSDB_CODE_OUT_OF_MEMORY;
}
}
return TSDB_CODE_SUCCESS;
}

static int32_t putDbCfgToCache(const SArray* pDbCfgReq, const SArray* pDbCfgData, SHashObj* pDbCfg) {
int32_t nvgs = taosArrayGetSize(pDbCfgReq);
for (int32_t i = 0; i < nvgs; ++i) {
char* pDbFName = taosArrayGet(pDbCfgReq, i);
SDbCfgInfo* pInfo = taosArrayGet(pDbCfgData, i);
if (TSDB_CODE_SUCCESS != taosHashPut(pDbCfg, pDbFName, strlen(pDbFName), &pInfo, POINTER_BYTES)) {
return TSDB_CODE_OUT_OF_MEMORY;
}
}
return TSDB_CODE_SUCCESS;
}

int32_t putMetaDataToCache(const SCatalogReq* pCatalogReq, const SMetaData* pMetaData, SParseMetaCache* pMetaCache) {
int32_t code = putTableMetaToCache(pCatalogReq->pTableMeta, pMetaData->pTableMeta, pMetaCache->pTableMeta);
if (TSDB_CODE_SUCCESS == code) {
code = putDbVgroupToCache(pCatalogReq->pDbVgroup, pMetaData->pDbVgroup, pMetaCache->pDbVgroup);
}
if (TSDB_CODE_SUCCESS == code) {
code = putTableVgroupToCache(pCatalogReq->pTableHash, pMetaData->pTableHash, pMetaCache->pTableVgroup);
}
if (TSDB_CODE_SUCCESS == code) {
code = putDbCfgToCache(pCatalogReq->pDbCfg, pMetaData->pDbCfg, pMetaCache->pDbCfg);
}
return code;
}
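buildCatalogReq and putMetaDataToCache are meant to be used as a pair around the caller's metadata fetch, as the parser test's async path does; a rough sketch of that round trip, where fetchAllMeta stands in for whatever (typically asynchronous) catalog lookup the caller uses:

static int32_t resolveMetaForQuery(SParseMetaCache* pMetaCache) {
  SCatalogReq req = {0};
  SMetaData   data = {0};
  int32_t     code = buildCatalogReq(pMetaCache, &req);   // cache keys -> request arrays
  if (TSDB_CODE_SUCCESS == code) {
    code = fetchAllMeta(&req, &data);                      // placeholder for the real catalog fetch
  }
  if (TSDB_CODE_SUCCESS == code) {
    code = putMetaDataToCache(&req, &data, pMetaCache);    // answers back into the parse-time cache
  }
  return code;
}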
int32_t reserveTableMetaInCache(int32_t acctId, const char* pDb, const char* pTable, SParseMetaCache* pMetaCache) {
if (NULL == pMetaCache->pTableMeta) {
pMetaCache->pTableMeta = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK);
if (NULL == pMetaCache->pTableMeta) {
return TSDB_CODE_OUT_OF_MEMORY;
}
}
char fullName[TSDB_TABLE_FNAME_LEN];
int32_t len = snprintf(fullName, sizeof(fullName), "%d.%s.%s", acctId, pDb, pTable);
return taosHashPut(pMetaCache->pTableMeta, fullName, len, &len, POINTER_BYTES);
}
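The reservation key is the table's full name in acctId.db.table form, the same string tNameExtractFullName produces for the later lookup; for example (sketch only):

// Reserving meta for table "t1" in database "test" under account id 1 stores the
// placeholder under the key "1.test.t1" (the snprintf format above), and
// getTableMetaFromCache() later resolves it with the same full-name string.
SParseMetaCache cache = {0};
int32_t code = reserveTableMetaInCache(1, "test", "t1", &cache);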
int32_t getTableMetaFromCache(SParseMetaCache* pMetaCache, const SName* pName, STableMeta** pMeta) {
char fullName[TSDB_TABLE_FNAME_LEN];
tNameExtractFullName(pName, fullName);
*pMeta = taosHashGet(pMetaCache->pTableMeta, fullName, strlen(fullName));
return NULL == *pMeta ? TSDB_CODE_PAR_INTERNAL_ERROR : TSDB_CODE_SUCCESS;
}

int32_t getDBVgInfoFromCache(SParseMetaCache* pMetaCache, const char* pDbFName, SArray** pVgInfo) {
*pVgInfo = taosHashGet(pMetaCache->pDbVgroup, pDbFName, strlen(pDbFName));
return NULL == *pVgInfo ? TSDB_CODE_PAR_INTERNAL_ERROR : TSDB_CODE_SUCCESS;
}

int32_t getTableHashVgroupFromCache(SParseMetaCache* pMetaCache, const SName* pName, SVgroupInfo* pVgroup) {
char fullName[TSDB_TABLE_FNAME_LEN];
tNameExtractFullName(pName, fullName);
SVgroupInfo* pInfo = taosHashGet(pMetaCache->pTableVgroup, fullName, strlen(fullName));
if (NULL == pInfo) {
return TSDB_CODE_PAR_INTERNAL_ERROR;
}
memcpy(pVgroup, pInfo, sizeof(SVgroupInfo));
return TSDB_CODE_SUCCESS;
}

int32_t getDBVgVersionFromCache(SParseMetaCache* pMetaCache, const char* pDbFName, int32_t* pVersion, int64_t* pDbId,
int32_t* pTableNum) {
return TSDB_CODE_PAR_INTERNAL_ERROR;
}

int32_t getDBCfgFromCache(SParseMetaCache* pMetaCache, const char* pDbFName, SDbCfgInfo* pInfo) {
SDbCfgInfo* pDbCfg = taosHashGet(pMetaCache->pDbCfg, pDbFName, strlen(pDbFName));
if (NULL == pDbCfg) {
return TSDB_CODE_PAR_INTERNAL_ERROR;
}
memcpy(pInfo, pDbCfg, sizeof(SDbCfgInfo));
return TSDB_CODE_SUCCESS;
}
@ -34,22 +34,27 @@ bool qIsInsertSql(const char* pStr, size_t length) {
} while (1);
}

static int32_t parseSqlIntoAst(SParseContext* pCxt, SQuery** pQuery) {
int32_t code = parse(pCxt, pQuery);
if (TSDB_CODE_SUCCESS == code) {
code = authenticate(pCxt, *pQuery);
}
static int32_t semanticAnalysis(SParseContext* pCxt, SQuery* pQuery) {
int32_t code = authenticate(pCxt, pQuery);

if (TSDB_CODE_SUCCESS == code && (*pQuery)->placeholderNum > 0) {
TSWAP((*pQuery)->pPrepareRoot, (*pQuery)->pRoot);
if (TSDB_CODE_SUCCESS == code && pQuery->placeholderNum > 0) {
TSWAP(pQuery->pPrepareRoot, pQuery->pRoot);
return TSDB_CODE_SUCCESS;
}

if (TSDB_CODE_SUCCESS == code) {
code = translate(pCxt, *pQuery);
code = translate(pCxt, pQuery);
}
if (TSDB_CODE_SUCCESS == code) {
code = calculateConstant(pCxt, *pQuery);
code = calculateConstant(pCxt, pQuery);
}
return code;
}

static int32_t parseSqlIntoAst(SParseContext* pCxt, SQuery** pQuery) {
int32_t code = parse(pCxt, pQuery);
if (TSDB_CODE_SUCCESS == code) {
code = semanticAnalysis(pCxt, *pQuery);
}
return code;
}

@ -178,6 +183,29 @@ int32_t qParseSql(SParseContext* pCxt, SQuery** pQuery) {
return code;
}
int32_t qSyntaxParseSql(SParseContext* pCxt, SQuery** pQuery, struct SCatalogReq* pCatalogReq) {
int32_t code = TSDB_CODE_SUCCESS;
if (qIsInsertSql(pCxt->pSql, pCxt->sqlLen)) {
// todo insert sql
} else {
code = parse(pCxt, pQuery);
}
if (TSDB_CODE_SUCCESS == code) {
code = buildCatalogReq((*pQuery)->pMetaCache, pCatalogReq);
}
terrno = code;
return code;
}

int32_t qSemanticAnalysisSql(SParseContext* pCxt, const struct SCatalogReq* pCatalogReq,
const struct SMetaData* pMetaData, SQuery* pQuery) {
int32_t code = putMetaDataToCache(pCatalogReq, pMetaData, pQuery->pMetaCache);
if (NULL == pQuery->pRoot) {
// todo insert sql
}
return semanticAnalysis(pCxt, pQuery);
}
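These two entry points split parsing into a syntax pass and a deferred semantic pass. A rough sketch of the intended calling sequence (pCxt->async is assumed to be set, and getMetaData is a placeholder for the caller's catalog fetch, which is asynchronous in practice):

static int32_t parseSqlAsync(SParseContext* pCxt, SQuery** ppQuery) {
  SCatalogReq catalogReq = {0};
  SMetaData   metaData = {0};
  int32_t     code = qSyntaxParseSql(pCxt, ppQuery, &catalogReq);  // phase 1: AST + metadata request
  if (TSDB_CODE_SUCCESS == code) {
    code = getMetaData(&catalogReq, &metaData);                    // placeholder: fetch the requested metadata
  }
  if (TSDB_CODE_SUCCESS == code) {
    code = qSemanticAnalysisSql(pCxt, &catalogReq, &metaData, *ppQuery);  // phase 2: translate + constant folding
  }
  return code;
}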
void qDestroyQuery(SQuery* pQueryNode) { nodesDestroyNode(pQueryNode); }

int32_t qExtractResultSchema(const SNode* pRoot, int32_t* numOfCols, SSchema** pSchema) {
@ -163,17 +163,17 @@ int32_t __catalogGetHandle(const char* clusterId, struct SCatalog** catalogHandl
|
|||
|
||||
int32_t __catalogGetTableMeta(struct SCatalog* pCatalog, void* pRpc, const SEpSet* pMgmtEps, const SName* pTableName,
|
||||
STableMeta** pTableMeta) {
|
||||
return mockCatalogService->catalogGetTableMeta(pTableName, pTableMeta);
|
||||
return g_mockCatalogService->catalogGetTableMeta(pTableName, pTableMeta);
|
||||
}
|
||||
|
||||
int32_t __catalogGetTableHashVgroup(struct SCatalog* pCatalog, void* pRpc, const SEpSet* pMgmtEps,
|
||||
const SName* pTableName, SVgroupInfo* vgInfo) {
|
||||
return mockCatalogService->catalogGetTableHashVgroup(pTableName, vgInfo);
|
||||
return g_mockCatalogService->catalogGetTableHashVgroup(pTableName, vgInfo);
|
||||
}
|
||||
|
||||
int32_t __catalogGetTableDistVgInfo(SCatalog* pCtg, void* pRpc, const SEpSet* pMgmtEps, const SName* pTableName,
|
||||
SArray** pVgList) {
|
||||
return mockCatalogService->catalogGetTableDistVgInfo(pTableName, pVgList);
|
||||
return g_mockCatalogService->catalogGetTableDistVgInfo(pTableName, pVgList);
|
||||
}
|
||||
|
||||
int32_t __catalogGetDBVgVersion(SCatalog* pCtg, const char* dbFName, int32_t* version, int64_t* dbId,
|
||||
|
@ -197,7 +197,7 @@ int32_t __catalogChkAuth(SCatalog* pCtg, void* pRpc, const SEpSet* pMgmtEps, con
|
|||
}
|
||||
|
||||
void initMetaDataEnv() {
|
||||
mockCatalogService.reset(new MockCatalogService());
|
||||
g_mockCatalogService.reset(new MockCatalogService());
|
||||
|
||||
static Stub stub;
|
||||
stub.set(catalogGetHandle, __catalogGetHandle);
|
||||
|
@ -252,11 +252,11 @@ void initMetaDataEnv() {
|
|||
}
|
||||
|
||||
void generateMetaData() {
|
||||
generateInformationSchema(mockCatalogService.get());
|
||||
generatePerformanceSchema(mockCatalogService.get());
|
||||
generateTestT1(mockCatalogService.get());
|
||||
generateTestST1(mockCatalogService.get());
|
||||
mockCatalogService->showTables();
|
||||
generateInformationSchema(g_mockCatalogService.get());
|
||||
generatePerformanceSchema(g_mockCatalogService.get());
|
||||
generateTestT1(g_mockCatalogService.get());
|
||||
generateTestST1(g_mockCatalogService.get());
|
||||
g_mockCatalogService->showTables();
|
||||
}
|
||||
|
||||
void destroyMetaDataEnv() { mockCatalogService.reset(); }
|
||||
void destroyMetaDataEnv() { g_mockCatalogService.reset(); }
|
||||
|
|
|
@ -23,7 +23,7 @@
|
|||
#include "tname.h"
|
||||
#include "ttypes.h"
|
||||
|
||||
std::unique_ptr<MockCatalogService> mockCatalogService;
|
||||
std::unique_ptr<MockCatalogService> g_mockCatalogService;
|
||||
|
||||
class TableBuilder : public ITableBuilder {
|
||||
public:
|
||||
|
@ -120,6 +120,14 @@ class MockCatalogServiceImpl {
|
|||
return copyTableVgroup(db, tNameGetTableName(pTableName), vgList);
|
||||
}
|
||||
|
||||
int32_t catalogGetAllMeta(const SCatalogReq* pCatalogReq, SMetaData* pMetaData) const {
|
||||
int32_t code = getAllTableMeta(pCatalogReq->pTableMeta, &pMetaData->pTableMeta);
|
||||
if (TSDB_CODE_SUCCESS == code) {
|
||||
code = getAllTableVgroup(pCatalogReq->pTableHash, &pMetaData->pTableHash);
|
||||
}
|
||||
return code;
|
||||
}
|
||||
|
||||
TableBuilder& createTableBuilder(const std::string& db, const std::string& tbname, int8_t tableType,
|
||||
int32_t numOfColumns, int32_t numOfTags) {
|
||||
builder_ = TableBuilder::createTableBuilder(tableType, numOfColumns, numOfTags);
|
||||
|
@ -300,6 +308,42 @@ class MockCatalogServiceImpl {
|
|||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
int32_t getAllTableMeta(SArray* pTableMetaReq, SArray** pTableMetaData) const {
|
||||
int32_t code = TSDB_CODE_SUCCESS;
|
||||
if (NULL != pTableMetaReq) {
|
||||
int32_t ntables = taosArrayGetSize(pTableMetaReq);
|
||||
*pTableMetaData = taosArrayInit(ntables, POINTER_BYTES);
|
||||
for (int32_t i = 0; i < ntables; ++i) {
|
||||
STableMeta* pMeta = NULL;
|
||||
code = catalogGetTableMeta((const SName*)taosArrayGet(pTableMetaReq, i), &pMeta);
|
||||
if (TSDB_CODE_SUCCESS == code) {
|
||||
taosArrayPush(*pTableMetaData, &pMeta);
|
||||
} else {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
return code;
|
||||
}
|
||||
|
||||
int32_t getAllTableVgroup(SArray* pTableVgroupReq, SArray** pTableVgroupData) const {
|
||||
int32_t code = TSDB_CODE_SUCCESS;
|
||||
if (NULL != pTableVgroupReq) {
|
||||
int32_t ntables = taosArrayGetSize(pTableVgroupReq);
|
||||
*pTableVgroupData = taosArrayInit(ntables, POINTER_BYTES);
|
||||
for (int32_t i = 0; i < ntables; ++i) {
|
||||
SVgroupInfo* pVgInfo = (SVgroupInfo*)taosMemoryCalloc(1, sizeof(SVgroupInfo));
|
||||
code = catalogGetTableHashVgroup((const SName*)taosArrayGet(pTableVgroupReq, i), pVgInfo);
|
||||
if (TSDB_CODE_SUCCESS == code) {
|
||||
taosArrayPush(*pTableVgroupData, &pVgInfo);
|
||||
} else {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
return code;
|
||||
}
|
||||
|
||||
uint64_t id_;
|
||||
std::unique_ptr<TableBuilder> builder_;
|
||||
DbMetaCache meta_;
|
||||
|
@ -337,3 +381,7 @@ int32_t MockCatalogService::catalogGetTableHashVgroup(const SName* pTableName, S
|
|||
int32_t MockCatalogService::catalogGetTableDistVgInfo(const SName* pTableName, SArray** pVgList) const {
|
||||
return impl_->catalogGetTableDistVgInfo(pTableName, pVgList);
|
||||
}
|
||||
|
||||
int32_t MockCatalogService::catalogGetAllMeta(const SCatalogReq* pCatalogReq, SMetaData* pMetaData) const {
|
||||
return impl_->catalogGetAllMeta(pCatalogReq, pMetaData);
|
||||
}
|
||||
|
|
|
@ -61,11 +61,12 @@ class MockCatalogService {
|
|||
int32_t catalogGetTableMeta(const SName* pTableName, STableMeta** pTableMeta) const;
|
||||
int32_t catalogGetTableHashVgroup(const SName* pTableName, SVgroupInfo* vgInfo) const;
|
||||
int32_t catalogGetTableDistVgInfo(const SName* pTableName, SArray** pVgList) const;
|
||||
int32_t catalogGetAllMeta(const SCatalogReq* pCatalogReq, SMetaData* pMetaData) const;
|
||||
|
||||
private:
|
||||
std::unique_ptr<MockCatalogServiceImpl> impl_;
|
||||
};
|
||||
|
||||
extern std::unique_ptr<MockCatalogService> mockCatalogService;
|
||||
extern std::unique_ptr<MockCatalogService> g_mockCatalogService;
|
||||
|
||||
#endif // MOCK_CATALOG_SERVICE_H
|
||||
|
|
|
@ -52,11 +52,15 @@ class ParserEnv : public testing::Environment {
|
|||
static void parseArg(int argc, char* argv[]) {
|
||||
int opt = 0;
|
||||
const char* optstring = "";
|
||||
static struct option long_options[] = {{"dump", no_argument, NULL, 'd'}, {0, 0, 0, 0}};
|
||||
static struct option long_options[] = {
|
||||
{"dump", no_argument, NULL, 'd'}, {"async", no_argument, NULL, 'a'}, {0, 0, 0, 0}};
|
||||
while ((opt = getopt_long(argc, argv, optstring, long_options, NULL)) != -1) {
|
||||
switch (opt) {
|
||||
case 'd':
|
||||
g_isDump = true;
|
||||
g_dump = true;
|
||||
break;
|
||||
case 'a':
|
||||
g_testAsyncApis = true;
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
|
|
|
@ -17,7 +17,10 @@
|
|||
|
||||
#include <algorithm>
|
||||
#include <array>
|
||||
#include <thread>
|
||||
|
||||
#include "catalog.h"
|
||||
#include "mockCatalogService.h"
|
||||
#include "parInt.h"
|
||||
|
||||
using namespace std;
|
||||
|
@ -41,7 +44,8 @@ namespace ParserTest {
|
|||
} \
|
||||
} while (0);
|
||||
|
||||
bool g_isDump = false;
|
||||
bool g_dump = false;
|
||||
bool g_testAsyncApis = false;
|
||||
|
||||
struct TerminateFlag : public exception {
|
||||
const char* what() const throw() { return "success and terminate"; }
|
||||
|
@ -69,7 +73,60 @@ class ParserTestBaseImpl {
|
|||
|
||||
doCalculateConstant(&cxt, pQuery);
|
||||
|
||||
if (g_isDump) {
|
||||
if (g_dump) {
|
||||
dump();
|
||||
}
|
||||
} catch (const TerminateFlag& e) {
|
||||
// success and terminate
|
||||
return;
|
||||
} catch (...) {
|
||||
dump();
|
||||
throw;
|
||||
}
|
||||
|
||||
if (g_testAsyncApis) {
|
||||
runAsync(sql, expect, checkStage);
|
||||
}
|
||||
}
|
||||
|
||||
void runAsync(const string& sql, int32_t expect, ParserStage checkStage) {
|
||||
reset(expect, checkStage);
|
||||
try {
|
||||
SParseContext cxt = {0};
|
||||
setParseContext(sql, &cxt, true);
|
||||
|
||||
SQuery* pQuery = nullptr;
|
||||
doParse(&cxt, &pQuery);
|
||||
|
||||
SCatalogReq catalogReq = {0};
|
||||
doBuildCatalogReq(pQuery->pMetaCache, &catalogReq);
|
||||
|
||||
string err;
|
||||
thread t1([&]() {
|
||||
try {
|
||||
SMetaData metaData = {0};
|
||||
doGetAllMeta(&catalogReq, &metaData);
|
||||
|
||||
doPutMetaDataToCache(&catalogReq, &metaData, pQuery->pMetaCache);
|
||||
|
||||
doTranslate(&cxt, pQuery);
|
||||
|
||||
doCalculateConstant(&cxt, pQuery);
|
||||
} catch (const TerminateFlag& e) {
|
||||
// success and terminate
|
||||
} catch (const runtime_error& e) {
|
||||
err = e.what();
|
||||
} catch (...) {
|
||||
err = "unknown error";
|
||||
}
|
||||
});
|
||||
|
||||
t1.join();
|
||||
if (!err.empty()) {
|
||||
throw runtime_error(err);
|
||||
}
|
||||
|
||||
if (g_dump) {
|
||||
dump();
|
||||
}
|
||||
} catch (const TerminateFlag& e) {
|
||||
|
@ -144,7 +201,7 @@ class ParserTestBaseImpl {
|
|||
cout << res_.calcConstAst_ << endl;
|
||||
}
|
||||
|
||||
void setParseContext(const string& sql, SParseContext* pCxt) {
|
||||
void setParseContext(const string& sql, SParseContext* pCxt, bool async = false) {
|
||||
stmtEnv_.sql_ = sql;
|
||||
transform(stmtEnv_.sql_.begin(), stmtEnv_.sql_.end(), stmtEnv_.sql_.begin(), ::tolower);
|
||||
|
||||
|
@ -154,6 +211,7 @@ class ParserTestBaseImpl {
|
|||
pCxt->sqlLen = stmtEnv_.sql_.length();
|
||||
pCxt->pMsg = stmtEnv_.msgBuf_.data();
|
||||
pCxt->msgLen = stmtEnv_.msgBuf_.max_size();
|
||||
pCxt->async = async;
|
||||
}
|
||||
|
||||
void doParse(SParseContext* pCxt, SQuery** pQuery) {
|
||||
|
@ -162,6 +220,18 @@ class ParserTestBaseImpl {
|
|||
res_.parsedAst_ = toString((*pQuery)->pRoot);
|
||||
}
|
||||
|
||||
void doBuildCatalogReq(const SParseMetaCache* pMetaCache, SCatalogReq* pCatalogReq) {
|
||||
DO_WITH_THROW(buildCatalogReq, pMetaCache, pCatalogReq);
|
||||
}
|
||||
|
||||
void doGetAllMeta(const SCatalogReq* pCatalogReq, SMetaData* pMetaData) {
|
||||
DO_WITH_THROW(g_mockCatalogService->catalogGetAllMeta, pCatalogReq, pMetaData);
|
||||
}
|
||||
|
||||
void doPutMetaDataToCache(const SCatalogReq* pCatalogReq, const SMetaData* pMetaData, SParseMetaCache* pMetaCache) {
|
||||
DO_WITH_THROW(putMetaDataToCache, pCatalogReq, pMetaData, pMetaCache);
|
||||
}
|
||||
|
||||
void doTranslate(SParseContext* pCxt, SQuery* pQuery) {
|
||||
DO_WITH_THROW(translate, pCxt, pQuery);
|
||||
checkQuery(pQuery, PARSER_STAGE_TRANSLATE);
|
||||
|
@ -200,6 +270,10 @@ void ParserTestBase::run(const std::string& sql, int32_t expect, ParserStage che
|
|||
return impl_->run(sql, expect, checkStage);
|
||||
}
|
||||
|
||||
void ParserTestBase::runAsync(const std::string& sql, int32_t expect, ParserStage checkStage) {
|
||||
return impl_->runAsync(sql, expect, checkStage);
|
||||
}
|
||||
|
||||
void ParserTestBase::checkDdl(const SQuery* pQuery, ParserStage stage) { return; }
|
||||
|
||||
} // namespace ParserTest
|
||||
|
|
|
@ -36,6 +36,7 @@ class ParserTestBase : public testing::Test {
|
|||
|
||||
void useDb(const std::string& acctId, const std::string& db);
|
||||
void run(const std::string& sql, int32_t expect = TSDB_CODE_SUCCESS, ParserStage checkStage = PARSER_STAGE_ALL);
|
||||
void runAsync(const std::string& sql, int32_t expect = TSDB_CODE_SUCCESS, ParserStage checkStage = PARSER_STAGE_ALL);
|
||||
|
||||
virtual void checkDdl(const SQuery* pQuery, ParserStage stage);
|
||||
|
||||
|
@ -63,7 +64,8 @@ class ParserDdlTest : public ParserTestBase {
|
|||
std::function<void(const SQuery*, ParserStage)> checkDdl_;
|
||||
};
|
||||
|
||||
extern bool g_isDump;
|
||||
extern bool g_dump;
|
||||
extern bool g_testAsyncApis;
|
||||
|
||||
} // namespace ParserTest
|
||||
|
||||
|
|
|
@ -26,7 +26,7 @@ extern "C" {
|
|||
#include "ttimer.h"
|
||||
#include "tref.h"
|
||||
#include "plannodes.h"
|
||||
|
||||
#include "executor.h"
|
||||
#include "trpc.h"
|
||||
|
||||
#define QW_DEFAULT_SCHEDULER_NUMBER 10000
|
||||
|
@ -76,6 +76,8 @@ typedef struct SQWDebug {
|
|||
bool dumpEnable;
|
||||
} SQWDebug;
|
||||
|
||||
extern SQWDebug gQWDebug;
|
||||
|
||||
typedef struct SQWMsg {
|
||||
void *node;
|
||||
int32_t code;
|
||||
|
@ -303,9 +305,27 @@ typedef struct SQWorkerMgmt {
|
|||
extern SQWorkerMgmt gQwMgmt;
|
||||
|
||||
static FORCE_INLINE SQWorker *qwAcquire(int64_t refId) { return (SQWorker *)taosAcquireRef(atomic_load_32(&gQwMgmt.qwRef), refId); }
|
||||
|
||||
static FORCE_INLINE int32_t qwRelease(int64_t refId) { return taosReleaseRef(gQwMgmt.qwRef, refId); }
|
||||
|
||||
char *qwPhaseStr(int32_t phase);
|
||||
char *qwBufStatusStr(int32_t bufStatus);
|
||||
int32_t qwAcquireAddScheduler(SQWorker *mgmt, uint64_t sId, int32_t rwType, SQWSchStatus **sch);
|
||||
void qwReleaseScheduler(int32_t rwType, SQWorker *mgmt);
|
||||
int32_t qwAddTaskStatus(QW_FPARAMS_DEF, int32_t status);
|
||||
int32_t qwAcquireTaskCtx(QW_FPARAMS_DEF, SQWTaskCtx **ctx);
|
||||
int32_t qwGetTaskCtx(QW_FPARAMS_DEF, SQWTaskCtx **ctx);
|
||||
int32_t qwAddAcquireTaskCtx(QW_FPARAMS_DEF, SQWTaskCtx **ctx);
|
||||
void qwReleaseTaskCtx(SQWorker *mgmt, void *ctx);
|
||||
int32_t qwKillTaskHandle(QW_FPARAMS_DEF, SQWTaskCtx *ctx);
|
||||
int32_t qwUpdateTaskStatus(QW_FPARAMS_DEF, int8_t status);
|
||||
int32_t qwDropTask(QW_FPARAMS_DEF);
|
||||
void qwSaveTbVersionInfo(qTaskInfo_t pTaskInfo, SQWTaskCtx *ctx);
|
||||
int32_t qwOpenRef(void);
|
||||
void qwSetHbParam(int64_t refId, SQWHbParam **pParam);
|
||||
|
||||
void qwDbgDumpMgmtInfo(SQWorker *mgmt);
|
||||
int32_t qwDbgValidateStatus(QW_FPARAMS_DEF, int8_t oriStatus, int8_t newStatus, bool *ignore);
|
||||
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
|
@ -20,7 +20,7 @@
|
|||
extern "C" {
|
||||
#endif
|
||||
|
||||
#include "qworkerInt.h"
|
||||
#include "qwInt.h"
|
||||
#include "dataSinkMgt.h"
|
||||
|
||||
int32_t qwProcessQuery(QW_FPARAMS_DEF, SQWMsg *qwMsg, int8_t taskType, int8_t explain);
|
||||
|
@ -36,12 +36,10 @@ int32_t qwBuildAndSendFetchRsp(SRpcHandleInfo *pConn, SRetrieveTableRsp *pRsp, i
|
|||
int32_t code);
|
||||
void qwBuildFetchRsp(void *msg, SOutputData *input, int32_t len, bool qComplete);
|
||||
int32_t qwBuildAndSendCQueryMsg(QW_FPARAMS_DEF, SRpcHandleInfo *pConn);
|
||||
int32_t qwBuildAndSendReadyRsp(SRpcHandleInfo *pConn, int32_t code, STbVerInfo* tbInfo);
|
||||
int32_t qwBuildAndSendQueryRsp(SRpcHandleInfo *pConn, int32_t code);
|
||||
int32_t qwBuildAndSendQueryRsp(SRpcHandleInfo *pConn, int32_t code, STbVerInfo* tbInfo);
|
||||
int32_t qwBuildAndSendExplainRsp(SRpcHandleInfo *pConn, SExplainExecInfo *execInfo, int32_t num);
|
||||
void qwFreeFetchRsp(void *msg);
|
||||
int32_t qwMallocFetchRsp(int32_t length, SRetrieveTableRsp **rsp);
|
||||
int32_t qwGetSchTasksStatus(SQWorker *mgmt, uint64_t sId, SSchedulerStatusRsp **rsp);
|
||||
int32_t qwBuildAndSendHbRsp(SRpcHandleInfo *pConn, SSchedulerHbRsp *rsp, int32_t code);
|
||||
int32_t qwRegisterQueryBrokenLinkArg(QW_FPARAMS_DEF, SRpcHandleInfo *pConn);
|
||||
int32_t qwRegisterHbBrokenLinkArg(SQWorker *mgmt, uint64_t sId, SRpcHandleInfo *pConn);
|
|
@ -0,0 +1,128 @@
|
|||
#include "qworker.h"
|
||||
#include "dataSinkMgt.h"
|
||||
#include "executor.h"
|
||||
#include "planner.h"
|
||||
#include "query.h"
|
||||
#include "qwInt.h"
|
||||
#include "qwMsg.h"
|
||||
#include "tcommon.h"
|
||||
#include "tmsg.h"
|
||||
#include "tname.h"
|
||||
|
||||
SQWDebug gQWDebug = {.statusEnable = true, .dumpEnable = true};
|
||||
|
||||
int32_t qwDbgValidateStatus(QW_FPARAMS_DEF, int8_t oriStatus, int8_t newStatus, bool *ignore) {
|
||||
if (!gQWDebug.statusEnable) {
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
int32_t code = 0;
|
||||
|
||||
if (oriStatus == newStatus) {
|
||||
if (newStatus == JOB_TASK_STATUS_EXECUTING || newStatus == JOB_TASK_STATUS_FAILED) {
|
||||
*ignore = true;
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
QW_ERR_JRET(TSDB_CODE_QRY_APP_ERROR);
|
||||
}
|
||||
|
||||
switch (oriStatus) {
|
||||
case JOB_TASK_STATUS_NULL:
|
||||
if (newStatus != JOB_TASK_STATUS_EXECUTING && newStatus != JOB_TASK_STATUS_FAILED &&
|
||||
newStatus != JOB_TASK_STATUS_NOT_START) {
|
||||
QW_ERR_JRET(TSDB_CODE_QRY_APP_ERROR);
|
||||
}
|
||||
|
||||
break;
|
||||
case JOB_TASK_STATUS_NOT_START:
|
||||
if (newStatus != JOB_TASK_STATUS_CANCELLED) {
|
||||
QW_ERR_JRET(TSDB_CODE_QRY_APP_ERROR);
|
||||
}
|
||||
|
||||
break;
|
||||
case JOB_TASK_STATUS_EXECUTING:
|
||||
if (newStatus != JOB_TASK_STATUS_PARTIAL_SUCCEED && newStatus != JOB_TASK_STATUS_SUCCEED &&
|
||||
newStatus != JOB_TASK_STATUS_FAILED && newStatus != JOB_TASK_STATUS_CANCELLING &&
|
||||
newStatus != JOB_TASK_STATUS_CANCELLED && newStatus != JOB_TASK_STATUS_DROPPING) {
|
||||
QW_ERR_JRET(TSDB_CODE_QRY_APP_ERROR);
|
||||
}
|
||||
|
||||
break;
|
||||
case JOB_TASK_STATUS_PARTIAL_SUCCEED:
|
||||
if (newStatus != JOB_TASK_STATUS_EXECUTING && newStatus != JOB_TASK_STATUS_SUCCEED &&
|
||||
newStatus != JOB_TASK_STATUS_CANCELLED && newStatus != JOB_TASK_STATUS_FAILED &&
|
||||
newStatus != JOB_TASK_STATUS_DROPPING) {
|
||||
QW_ERR_JRET(TSDB_CODE_QRY_APP_ERROR);
|
||||
}
|
||||
|
||||
break;
|
||||
case JOB_TASK_STATUS_SUCCEED:
|
||||
if (newStatus != JOB_TASK_STATUS_CANCELLED && newStatus != JOB_TASK_STATUS_DROPPING &&
|
||||
newStatus != JOB_TASK_STATUS_FAILED) {
|
||||
QW_ERR_JRET(TSDB_CODE_QRY_APP_ERROR);
|
||||
}
|
||||
|
||||
break;
|
||||
case JOB_TASK_STATUS_FAILED:
|
||||
if (newStatus != JOB_TASK_STATUS_CANCELLED && newStatus != JOB_TASK_STATUS_DROPPING) {
|
||||
QW_ERR_JRET(TSDB_CODE_QRY_APP_ERROR);
|
||||
}
|
||||
break;
|
||||
|
||||
case JOB_TASK_STATUS_CANCELLING:
|
||||
if (newStatus != JOB_TASK_STATUS_CANCELLED) {
|
||||
QW_ERR_JRET(TSDB_CODE_QRY_APP_ERROR);
|
||||
}
|
||||
|
||||
break;
|
||||
case JOB_TASK_STATUS_CANCELLED:
|
||||
case JOB_TASK_STATUS_DROPPING:
|
||||
if (newStatus != JOB_TASK_STATUS_FAILED && newStatus != JOB_TASK_STATUS_PARTIAL_SUCCEED) {
|
||||
QW_ERR_JRET(TSDB_CODE_QRY_APP_ERROR);
|
||||
}
|
||||
break;
|
||||
|
||||
default:
|
||||
QW_TASK_ELOG("invalid task origStatus:%s", jobTaskStatusStr(oriStatus));
|
||||
return TSDB_CODE_QRY_APP_ERROR;
|
||||
}
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
|
||||
_return:
|
||||
|
||||
QW_TASK_ELOG("invalid task status update from %s to %s", jobTaskStatusStr(oriStatus), jobTaskStatusStr(newStatus));
|
||||
QW_RET(code);
|
||||
}
|
||||
|
||||
void qwDbgDumpSchInfo(SQWSchStatus *sch, int32_t i) {}
|
||||
|
||||
void qwDbgDumpMgmtInfo(SQWorker *mgmt) {
|
||||
if (!gQWDebug.dumpEnable) {
|
||||
return;
|
||||
}
|
||||
|
||||
QW_LOCK(QW_READ, &mgmt->schLock);
|
||||
|
||||
/*QW_DUMP("total remain schduler num:%d", taosHashGetSize(mgmt->schHash));*/
|
||||
|
||||
void *key = NULL;
|
||||
size_t keyLen = 0;
|
||||
int32_t i = 0;
|
||||
SQWSchStatus *sch = NULL;
|
||||
|
||||
void *pIter = taosHashIterate(mgmt->schHash, NULL);
|
||||
while (pIter) {
|
||||
sch = (SQWSchStatus *)pIter;
|
||||
qwDbgDumpSchInfo(sch, i);
|
||||
++i;
|
||||
pIter = taosHashIterate(mgmt->schHash, pIter);
|
||||
}
|
||||
|
||||
QW_UNLOCK(QW_READ, &mgmt->schLock);
|
||||
|
||||
/*QW_DUMP("total remain ctx num:%d", taosHashGetSize(mgmt->ctxHash));*/
|
||||
}
|
||||
|
||||
|
|
@ -1,10 +1,10 @@
|
|||
#include "qworkerMsg.h"
|
||||
#include "qwMsg.h"
|
||||
#include "dataSinkMgt.h"
|
||||
#include "executor.h"
|
||||
#include "planner.h"
|
||||
#include "query.h"
|
||||
#include "qworker.h"
|
||||
#include "qworkerInt.h"
|
||||
#include "qwInt.h"
|
||||
#include "tcommon.h"
|
||||
#include "tmsg.h"
|
||||
#include "tname.h"
|
||||
|
@ -43,28 +43,8 @@ void qwFreeFetchRsp(void *msg) {
|
|||
}
|
||||
}
|
||||
|
||||
int32_t qwBuildAndSendQueryRsp(SRpcHandleInfo *pConn, int32_t code) {
|
||||
SQueryTableRsp rsp = {.code = code};
|
||||
|
||||
int32_t contLen = tSerializeSQueryTableRsp(NULL, 0, &rsp);
|
||||
void * msg = rpcMallocCont(contLen);
|
||||
tSerializeSQueryTableRsp(msg, contLen, &rsp);
|
||||
|
||||
SRpcMsg rpcRsp = {
|
||||
.msgType = TDMT_VND_QUERY_RSP,
|
||||
.pCont = msg,
|
||||
.contLen = contLen,
|
||||
.code = code,
|
||||
.info = *pConn,
|
||||
};
|
||||
|
||||
tmsgSendRsp(&rpcRsp);
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
int32_t qwBuildAndSendReadyRsp(SRpcHandleInfo *pConn, int32_t code, STbVerInfo* tbInfo) {
|
||||
SResReadyRsp *pRsp = (SResReadyRsp *)rpcMallocCont(sizeof(SResReadyRsp));
|
||||
int32_t qwBuildAndSendQueryRsp(SRpcHandleInfo *pConn, int32_t code, STbVerInfo* tbInfo) {
|
||||
SQueryTableRsp *pRsp = (SQueryTableRsp *)rpcMallocCont(sizeof(SQueryTableRsp));
|
||||
pRsp->code = code;
|
||||
if (tbInfo) {
|
||||
strcpy(pRsp->tbFName, tbInfo->tbFName);
|
||||
|
@ -73,13 +53,12 @@ int32_t qwBuildAndSendReadyRsp(SRpcHandleInfo *pConn, int32_t code, STbVerInfo*
|
|||
}
|
||||
|
||||
SRpcMsg rpcRsp = {
|
||||
.msgType = TDMT_VND_RES_READY_RSP,
|
||||
.msgType = TDMT_VND_QUERY_RSP,
|
||||
.pCont = pRsp,
|
||||
.contLen = sizeof(*pRsp),
|
||||
.code = code,
|
||||
.info = *pConn,
|
||||
};
|
||||
rpcRsp.info.ahandle = NULL;
|
||||
|
||||
tmsgSendRsp(&rpcRsp);
|
||||
|
||||
|
@ -177,76 +156,6 @@ int32_t qwBuildAndSendDropRsp(SRpcHandleInfo *pConn, int32_t code) {
|
|||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
int32_t qwBuildAndSendShowRsp(SRpcMsg *pMsg, int32_t code) {
|
||||
int32_t numOfCols = 6;
|
||||
SVShowTablesRsp showRsp = {0};
|
||||
|
||||
// showRsp.showId = 1;
|
||||
showRsp.tableMeta.pSchemas = taosMemoryCalloc(numOfCols, sizeof(SSchema));
|
||||
if (showRsp.tableMeta.pSchemas == NULL) {
|
||||
terrno = TSDB_CODE_OUT_OF_MEMORY;
|
||||
return -1;
|
||||
}
|
||||
|
||||
col_id_t cols = 0;
|
||||
SSchema *pSchema = showRsp.tableMeta.pSchemas;
|
||||
|
||||
const SSchema *s = tGetTbnameColumnSchema();
|
||||
*pSchema = createSchema(s->type, s->bytes, ++cols, "name");
|
||||
pSchema++;
|
||||
|
||||
int32_t type = TSDB_DATA_TYPE_TIMESTAMP;
|
||||
*pSchema = createSchema(type, tDataTypes[type].bytes, ++cols, "created");
|
||||
pSchema++;
|
||||
|
||||
type = TSDB_DATA_TYPE_SMALLINT;
|
||||
*pSchema = createSchema(type, tDataTypes[type].bytes, ++cols, "columns");
|
||||
pSchema++;
|
||||
|
||||
*pSchema = createSchema(s->type, s->bytes, ++cols, "stable");
|
||||
pSchema++;
|
||||
|
||||
type = TSDB_DATA_TYPE_BIGINT;
|
||||
*pSchema = createSchema(type, tDataTypes[type].bytes, ++cols, "uid");
|
||||
pSchema++;
|
||||
|
||||
type = TSDB_DATA_TYPE_INT;
|
||||
*pSchema = createSchema(type, tDataTypes[type].bytes, ++cols, "vgId");
|
||||
|
||||
assert(cols == numOfCols);
|
||||
showRsp.tableMeta.numOfColumns = cols;
|
||||
|
||||
int32_t bufLen = tSerializeSShowRsp(NULL, 0, &showRsp);
|
||||
void * pBuf = rpcMallocCont(bufLen);
|
||||
tSerializeSShowRsp(pBuf, bufLen, &showRsp);
|
||||
|
||||
SRpcMsg rpcMsg = {
|
||||
.info = pMsg->info,
|
||||
.pCont = pBuf,
|
||||
.contLen = bufLen,
|
||||
.code = code,
|
||||
};
|
||||
|
||||
tmsgSendRsp(&rpcMsg);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
int32_t qwBuildAndSendShowFetchRsp(SRpcMsg *pMsg, SVShowTablesFetchReq *pFetchReq) {
|
||||
SVShowTablesFetchRsp *pRsp = (SVShowTablesFetchRsp *)rpcMallocCont(sizeof(SVShowTablesFetchRsp));
|
||||
int32_t handle = htonl(pFetchReq->id);
|
||||
|
||||
pRsp->numOfRows = 0;
|
||||
SRpcMsg rpcMsg = {
|
||||
.info = pMsg->info,
|
||||
.pCont = pRsp,
|
||||
.contLen = sizeof(*pRsp),
|
||||
.code = 0,
|
||||
};
|
||||
|
||||
tmsgSendRsp(&rpcMsg);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
int32_t qwBuildAndSendCQueryMsg(QW_FPARAMS_DEF, SRpcHandleInfo *pConn) {
|
||||
SQueryContinueReq *req = (SQueryContinueReq *)rpcMallocCont(sizeof(SQueryContinueReq));
|
||||
if (NULL == req) {
|
||||
|
@ -407,65 +316,6 @@ int32_t qWorkerProcessCQueryMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) {
|
|||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
int32_t qWorkerProcessReadyMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) {
|
||||
if (NULL == node || NULL == qWorkerMgmt || NULL == pMsg) {
|
||||
return TSDB_CODE_QRY_INVALID_INPUT;
|
||||
}
|
||||
|
||||
SQWorker * mgmt = (SQWorker *)qWorkerMgmt;
|
||||
SResReadyReq *msg = pMsg->pCont;
|
||||
if (NULL == msg || pMsg->contLen < sizeof(*msg)) {
|
||||
QW_ELOG("invalid task ready msg, msg:%p, msgLen:%d", msg, pMsg->contLen);
|
||||
QW_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT);
|
||||
}
|
||||
|
||||
msg->sId = be64toh(msg->sId);
|
||||
msg->queryId = be64toh(msg->queryId);
|
||||
msg->taskId = be64toh(msg->taskId);
|
||||
|
||||
uint64_t sId = msg->sId;
|
||||
uint64_t qId = msg->queryId;
|
||||
uint64_t tId = msg->taskId;
|
||||
int64_t rId = 0;
|
||||
|
||||
SQWMsg qwMsg = {.node = node, .msg = NULL, .msgLen = 0, .connInfo = pMsg->info};
|
||||
|
||||
QW_SCH_TASK_DLOG("processReady start, node:%p, handle:%p", node, pMsg->info.handle);
|
||||
|
||||
QW_ERR_RET(qwProcessReady(QW_FPARAMS(), &qwMsg));
|
||||
|
||||
QW_SCH_TASK_DLOG("processReady end, node:%p", node);
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
int32_t qWorkerProcessStatusMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) {
|
||||
if (NULL == node || NULL == qWorkerMgmt || NULL == pMsg) {
|
||||
return TSDB_CODE_QRY_INVALID_INPUT;
|
||||
}
|
||||
|
||||
int32_t code = 0;
|
||||
SSchTasksStatusReq *msg = pMsg->pCont;
|
||||
if (NULL == msg || pMsg->contLen < sizeof(*msg)) {
|
||||
qError("invalid task status msg");
|
||||
QW_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT);
|
||||
}
|
||||
|
||||
SQWorker *mgmt = (SQWorker *)qWorkerMgmt;
|
||||
msg->sId = htobe64(msg->sId);
|
||||
uint64_t sId = msg->sId;
|
||||
|
||||
SSchedulerStatusRsp *sStatus = NULL;
|
||||
|
||||
// QW_ERR_JRET(qwGetSchTasksStatus(qWorkerMgmt, msg->sId, &sStatus));
|
||||
|
||||
_return:
|
||||
|
||||
// QW_ERR_RET(qwBuildAndSendStatusRsp(pMsg, sStatus));
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
int32_t qWorkerProcessFetchMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) {
|
||||
if (NULL == node || NULL == qWorkerMgmt || NULL == pMsg) {
|
||||
return TSDB_CODE_QRY_INVALID_INPUT;
|
||||
|
@ -613,22 +463,3 @@ int32_t qWorkerProcessHbMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) {
|
|||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
int32_t qWorkerProcessShowMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) {
|
||||
if (NULL == node || NULL == qWorkerMgmt || NULL == pMsg) {
|
||||
return TSDB_CODE_QRY_INVALID_INPUT;
|
||||
}
|
||||
|
||||
int32_t code = 0;
|
||||
SVShowTablesReq *pReq = pMsg->pCont;
|
||||
QW_RET(qwBuildAndSendShowRsp(pMsg, code));
|
||||
}
|
||||
|
||||
int32_t qWorkerProcessShowFetchMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg) {
|
||||
if (NULL == node || NULL == qWorkerMgmt || NULL == pMsg) {
|
||||
return TSDB_CODE_QRY_INVALID_INPUT;
|
||||
}
|
||||
|
||||
SVShowTablesFetchReq *pFetchReq = pMsg->pCont;
|
||||
QW_RET(qwBuildAndSendShowFetchRsp(pMsg, pFetchReq));
|
||||
}
|
|
@ -0,0 +1,502 @@
|
|||
#include "qworker.h"
|
||||
#include "dataSinkMgt.h"
|
||||
#include "executor.h"
|
||||
#include "planner.h"
|
||||
#include "query.h"
|
||||
#include "qwInt.h"
|
||||
#include "qwMsg.h"
|
||||
#include "tcommon.h"
|
||||
#include "tmsg.h"
|
||||
#include "tname.h"
|
||||
|
||||
char *qwPhaseStr(int32_t phase) {
|
||||
switch (phase) {
|
||||
case QW_PHASE_PRE_QUERY:
|
||||
return "PRE_QUERY";
|
||||
case QW_PHASE_POST_QUERY:
|
||||
return "POST_QUERY";
|
||||
case QW_PHASE_PRE_FETCH:
|
||||
return "PRE_FETCH";
|
||||
case QW_PHASE_POST_FETCH:
|
||||
return "POST_FETCH";
|
||||
case QW_PHASE_PRE_CQUERY:
|
||||
return "PRE_CQUERY";
|
||||
case QW_PHASE_POST_CQUERY:
|
||||
return "POST_CQUERY";
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
return "UNKNOWN";
|
||||
}
|
||||
|
||||
char *qwBufStatusStr(int32_t bufStatus) {
|
||||
switch (bufStatus) {
|
||||
case DS_BUF_LOW:
|
||||
return "LOW";
|
||||
case DS_BUF_FULL:
|
||||
return "FULL";
|
||||
case DS_BUF_EMPTY:
|
||||
return "EMPTY";
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
return "UNKNOWN";
|
||||
}
|
||||
|
||||
int32_t qwSetTaskStatus(QW_FPARAMS_DEF, SQWTaskStatus *task, int8_t status) {
|
||||
int32_t code = 0;
|
||||
int8_t origStatus = 0;
|
||||
bool ignore = false;
|
||||
|
||||
while (true) {
|
||||
origStatus = atomic_load_8(&task->status);
|
||||
|
||||
QW_ERR_RET(qwDbgValidateStatus(QW_FPARAMS(), origStatus, status, &ignore));
|
||||
if (ignore) {
|
||||
break;
|
||||
}
|
||||
|
||||
if (origStatus != atomic_val_compare_exchange_8(&task->status, origStatus, status)) {
|
||||
continue;
|
||||
}
|
||||
|
||||
QW_TASK_DLOG("task status updated from %s to %s", jobTaskStatusStr(origStatus), jobTaskStatusStr(status));
|
||||
|
||||
break;
|
||||
}
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
int32_t qwAddSchedulerImpl(SQWorker *mgmt, uint64_t sId, int32_t rwType) {
|
||||
SQWSchStatus newSch = {0};
|
||||
newSch.tasksHash =
|
||||
taosHashInit(mgmt->cfg.maxSchTaskNum, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK);
|
||||
if (NULL == newSch.tasksHash) {
|
||||
QW_SCH_ELOG("taosHashInit %d failed", mgmt->cfg.maxSchTaskNum);
|
||||
QW_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY);
|
||||
}
|
||||
|
||||
QW_LOCK(QW_WRITE, &mgmt->schLock);
|
||||
int32_t code = taosHashPut(mgmt->schHash, &sId, sizeof(sId), &newSch, sizeof(newSch));
|
||||
if (0 != code) {
|
||||
if (!HASH_NODE_EXIST(code)) {
|
||||
QW_UNLOCK(QW_WRITE, &mgmt->schLock);
|
||||
|
||||
QW_SCH_ELOG("taosHashPut new sch to scheduleHash failed, errno:%d", errno);
|
||||
taosHashCleanup(newSch.tasksHash);
|
||||
QW_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY);
|
||||
}
|
||||
|
||||
taosHashCleanup(newSch.tasksHash);
|
||||
}
|
||||
QW_UNLOCK(QW_WRITE, &mgmt->schLock);
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
int32_t qwAcquireSchedulerImpl(SQWorker *mgmt, uint64_t sId, int32_t rwType, SQWSchStatus **sch, int32_t nOpt) {
|
||||
while (true) {
|
||||
QW_LOCK(rwType, &mgmt->schLock);
|
||||
*sch = taosHashGet(mgmt->schHash, &sId, sizeof(sId));
|
||||
if (NULL == (*sch)) {
|
||||
QW_UNLOCK(rwType, &mgmt->schLock);
|
||||
|
||||
if (QW_NOT_EXIST_ADD == nOpt) {
|
||||
QW_ERR_RET(qwAddSchedulerImpl(mgmt, sId, rwType));
|
||||
|
||||
nOpt = QW_NOT_EXIST_RET_ERR;
|
||||
|
||||
continue;
|
||||
} else if (QW_NOT_EXIST_RET_ERR == nOpt) {
|
||||
QW_RET(TSDB_CODE_QRY_SCH_NOT_EXIST);
|
||||
} else {
|
||||
QW_SCH_ELOG("unknown notExistOpt:%d", nOpt);
|
||||
QW_ERR_RET(TSDB_CODE_QRY_APP_ERROR);
|
||||
}
|
||||
}
|
||||
|
||||
break;
|
||||
}
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
int32_t qwAcquireAddScheduler(SQWorker *mgmt, uint64_t sId, int32_t rwType, SQWSchStatus **sch) {
|
||||
return qwAcquireSchedulerImpl(mgmt, sId, rwType, sch, QW_NOT_EXIST_ADD);
|
||||
}
|
||||
|
||||
int32_t qwAcquireScheduler(SQWorker *mgmt, uint64_t sId, int32_t rwType, SQWSchStatus **sch) {
|
||||
return qwAcquireSchedulerImpl(mgmt, sId, rwType, sch, QW_NOT_EXIST_RET_ERR);
|
||||
}
|
||||
|
||||
void qwReleaseScheduler(int32_t rwType, SQWorker *mgmt) { QW_UNLOCK(rwType, &mgmt->schLock); }
|
||||
|
||||
int32_t qwAcquireTaskStatus(QW_FPARAMS_DEF, int32_t rwType, SQWSchStatus *sch, SQWTaskStatus **task) {
|
||||
char id[sizeof(qId) + sizeof(tId)] = {0};
|
||||
QW_SET_QTID(id, qId, tId);
|
||||
|
||||
QW_LOCK(rwType, &sch->tasksLock);
|
||||
*task = taosHashGet(sch->tasksHash, id, sizeof(id));
|
||||
if (NULL == (*task)) {
|
||||
QW_UNLOCK(rwType, &sch->tasksLock);
|
||||
QW_ERR_RET(TSDB_CODE_QRY_TASK_NOT_EXIST);
|
||||
}
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
int32_t qwAddTaskStatusImpl(QW_FPARAMS_DEF, SQWSchStatus *sch, int32_t rwType, int32_t status, SQWTaskStatus **task) {
|
||||
int32_t code = 0;
|
||||
|
||||
char id[sizeof(qId) + sizeof(tId)] = {0};
|
||||
QW_SET_QTID(id, qId, tId);
|
||||
|
||||
SQWTaskStatus ntask = {0};
|
||||
ntask.status = status;
|
||||
ntask.refId = rId;
|
||||
|
||||
QW_LOCK(QW_WRITE, &sch->tasksLock);
|
||||
code = taosHashPut(sch->tasksHash, id, sizeof(id), &ntask, sizeof(ntask));
|
||||
if (0 != code) {
|
||||
QW_UNLOCK(QW_WRITE, &sch->tasksLock);
|
||||
if (HASH_NODE_EXIST(code)) {
|
||||
if (rwType && task) {
|
||||
QW_RET(qwAcquireTaskStatus(QW_FPARAMS(), rwType, sch, task));
|
||||
} else {
|
||||
QW_TASK_ELOG("task status already exist, newStatus:%s", jobTaskStatusStr(status));
|
||||
QW_ERR_RET(TSDB_CODE_QRY_TASK_ALREADY_EXIST);
|
||||
}
|
||||
} else {
|
||||
QW_TASK_ELOG("taosHashPut to tasksHash failed, error:%x - %s", code, tstrerror(code));
|
||||
QW_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY);
|
||||
}
|
||||
}
|
||||
QW_UNLOCK(QW_WRITE, &sch->tasksLock);
|
||||
|
||||
QW_TASK_DLOG("task status added, newStatus:%s", jobTaskStatusStr(status));
|
||||
|
||||
if (rwType && task) {
|
||||
QW_ERR_RET(qwAcquireTaskStatus(QW_FPARAMS(), rwType, sch, task));
|
||||
}
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
int32_t qwAddTaskStatus(QW_FPARAMS_DEF, int32_t status) {
|
||||
SQWSchStatus *tsch = NULL;
|
||||
int32_t code = 0;
|
||||
QW_ERR_RET(qwAcquireAddScheduler(mgmt, sId, QW_READ, &tsch));
|
||||
|
||||
QW_ERR_JRET(qwAddTaskStatusImpl(QW_FPARAMS(), tsch, 0, status, NULL));
|
||||
|
||||
_return:
|
||||
|
||||
qwReleaseScheduler(QW_READ, mgmt);
|
||||
|
||||
QW_RET(code);
|
||||
}
|
||||
|
||||
int32_t qwAddAcquireTaskStatus(QW_FPARAMS_DEF, int32_t rwType, SQWSchStatus *sch, int32_t status,
|
||||
SQWTaskStatus **task) {
|
||||
return qwAddTaskStatusImpl(QW_FPARAMS(), sch, rwType, status, task);
|
||||
}
|
||||
|
||||
void qwReleaseTaskStatus(int32_t rwType, SQWSchStatus *sch) { QW_UNLOCK(rwType, &sch->tasksLock); }
|
||||
|
||||
int32_t qwAcquireTaskCtx(QW_FPARAMS_DEF, SQWTaskCtx **ctx) {
|
||||
char id[sizeof(qId) + sizeof(tId)] = {0};
|
||||
QW_SET_QTID(id, qId, tId);
|
||||
|
||||
*ctx = taosHashAcquire(mgmt->ctxHash, id, sizeof(id));
|
||||
if (NULL == (*ctx)) {
|
||||
QW_TASK_DLOG_E("task ctx not exist, may be dropped");
|
||||
QW_ERR_RET(TSDB_CODE_QRY_TASK_CTX_NOT_EXIST);
|
||||
}
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
int32_t qwGetTaskCtx(QW_FPARAMS_DEF, SQWTaskCtx **ctx) {
|
||||
char id[sizeof(qId) + sizeof(tId)] = {0};
|
||||
QW_SET_QTID(id, qId, tId);
|
||||
|
||||
*ctx = taosHashGet(mgmt->ctxHash, id, sizeof(id));
|
||||
if (NULL == (*ctx)) {
|
||||
QW_TASK_DLOG_E("task ctx not exist, may be dropped");
|
||||
QW_ERR_RET(TSDB_CODE_QRY_TASK_CTX_NOT_EXIST);
|
||||
}
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
int32_t qwAddTaskCtxImpl(QW_FPARAMS_DEF, bool acquire, SQWTaskCtx **ctx) {
|
||||
char id[sizeof(qId) + sizeof(tId)] = {0};
|
||||
QW_SET_QTID(id, qId, tId);
|
||||
|
||||
SQWTaskCtx nctx = {0};
|
||||
|
||||
int32_t code = taosHashPut(mgmt->ctxHash, id, sizeof(id), &nctx, sizeof(SQWTaskCtx));
|
||||
if (0 != code) {
|
||||
if (HASH_NODE_EXIST(code)) {
|
||||
if (acquire && ctx) {
|
||||
QW_RET(qwAcquireTaskCtx(QW_FPARAMS(), ctx));
|
||||
} else if (ctx) {
|
||||
QW_RET(qwGetTaskCtx(QW_FPARAMS(), ctx));
|
||||
} else {
|
||||
QW_TASK_ELOG_E("task ctx already exist");
|
||||
QW_ERR_RET(TSDB_CODE_QRY_TASK_ALREADY_EXIST);
|
||||
}
|
||||
} else {
|
||||
QW_TASK_ELOG("taosHashPut to ctxHash failed, error:%x", code);
|
||||
QW_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY);
|
||||
}
|
||||
}
|
||||
|
||||
if (acquire && ctx) {
|
||||
QW_RET(qwAcquireTaskCtx(QW_FPARAMS(), ctx));
|
||||
} else if (ctx) {
|
||||
QW_RET(qwGetTaskCtx(QW_FPARAMS(), ctx));
|
||||
}
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
int32_t qwAddTaskCtx(QW_FPARAMS_DEF) { QW_RET(qwAddTaskCtxImpl(QW_FPARAMS(), false, NULL)); }
|
||||
|
||||
int32_t qwAddAcquireTaskCtx(QW_FPARAMS_DEF, SQWTaskCtx **ctx) { return qwAddTaskCtxImpl(QW_FPARAMS(), true, ctx); }
|
||||
|
||||
void qwReleaseTaskCtx(SQWorker *mgmt, void *ctx) { taosHashRelease(mgmt->ctxHash, ctx); }
|
||||
|
||||
void qwFreeTaskHandle(QW_FPARAMS_DEF, qTaskInfo_t *taskHandle) {
|
||||
// Note: free/kill may in RC
|
||||
qTaskInfo_t otaskHandle = atomic_load_ptr(taskHandle);
|
||||
if (otaskHandle && atomic_val_compare_exchange_ptr(taskHandle, otaskHandle, NULL)) {
|
||||
qDestroyTask(otaskHandle);
|
||||
}
|
||||
}
|
||||
|
||||
int32_t qwKillTaskHandle(QW_FPARAMS_DEF, SQWTaskCtx *ctx) {
|
||||
int32_t code = 0;
|
||||
// Note: free/kill may in RC
|
||||
qTaskInfo_t taskHandle = atomic_load_ptr(&ctx->taskHandle);
|
||||
if (taskHandle && atomic_val_compare_exchange_ptr(&ctx->taskHandle, taskHandle, NULL)) {
|
||||
code = qAsyncKillTask(taskHandle);
|
||||
atomic_store_ptr(&ctx->taskHandle, taskHandle);
|
||||
}
|
||||
|
||||
QW_RET(code);
|
||||
}
|
||||
|
||||
void qwFreeTask(QW_FPARAMS_DEF, SQWTaskCtx *ctx) {
|
||||
tmsgReleaseHandle(&ctx->ctrlConnInfo, TAOS_CONN_SERVER);
|
||||
ctx->ctrlConnInfo.handle = NULL;
|
||||
ctx->ctrlConnInfo.refId = -1;
|
||||
|
||||
// NO need to release dataConnInfo
|
||||
|
||||
qwFreeTaskHandle(QW_FPARAMS(), &ctx->taskHandle);
|
||||
|
||||
if (ctx->sinkHandle) {
|
||||
dsDestroyDataSinker(ctx->sinkHandle);
|
||||
ctx->sinkHandle = NULL;
|
||||
}
|
||||
|
||||
if (ctx->plan) {
|
||||
nodesDestroyNode(ctx->plan);
|
||||
ctx->plan = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
int32_t qwDropTaskCtx(QW_FPARAMS_DEF) {
|
||||
char id[sizeof(qId) + sizeof(tId)] = {0};
|
||||
QW_SET_QTID(id, qId, tId);
|
||||
SQWTaskCtx octx;
|
||||
|
||||
SQWTaskCtx *ctx = taosHashGet(mgmt->ctxHash, id, sizeof(id));
|
||||
if (NULL == ctx) {
|
||||
QW_ERR_RET(TSDB_CODE_QRY_TASK_CTX_NOT_EXIST);
|
||||
}
|
||||
|
||||
octx = *ctx;
|
||||
|
||||
atomic_store_ptr(&ctx->taskHandle, NULL);
|
||||
atomic_store_ptr(&ctx->sinkHandle, NULL);
|
||||
atomic_store_ptr(&ctx->plan, NULL);
|
||||
|
||||
QW_SET_EVENT_PROCESSED(ctx, QW_EVENT_DROP);
|
||||
|
||||
if (taosHashRemove(mgmt->ctxHash, id, sizeof(id))) {
|
||||
QW_TASK_ELOG_E("taosHashRemove from ctx hash failed");
|
||||
QW_ERR_RET(TSDB_CODE_QRY_TASK_CTX_NOT_EXIST);
|
||||
}
|
||||
|
||||
qwFreeTask(QW_FPARAMS(), &octx);
|
||||
|
||||
QW_TASK_DLOG_E("task ctx dropped");
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
int32_t qwDropTaskStatus(QW_FPARAMS_DEF) {
|
||||
SQWSchStatus *sch = NULL;
|
||||
SQWTaskStatus *task = NULL;
|
||||
int32_t code = 0;
|
||||
|
||||
char id[sizeof(qId) + sizeof(tId)] = {0};
|
||||
QW_SET_QTID(id, qId, tId);
|
||||
|
||||
if (qwAcquireScheduler(mgmt, sId, QW_WRITE, &sch)) {
|
||||
QW_TASK_WLOG_E("scheduler does not exist");
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
if (qwAcquireTaskStatus(QW_FPARAMS(), QW_WRITE, sch, &task)) {
|
||||
qwReleaseScheduler(QW_WRITE, mgmt);
|
||||
|
||||
QW_TASK_WLOG_E("task does not exist");
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
if (taosHashRemove(sch->tasksHash, id, sizeof(id))) {
|
||||
QW_TASK_ELOG_E("taosHashRemove task from hash failed");
|
||||
QW_ERR_JRET(TSDB_CODE_QRY_APP_ERROR);
|
||||
}
|
||||
|
||||
QW_TASK_DLOG_E("task status dropped");
|
||||
|
||||
_return:
|
||||
|
||||
if (task) {
|
||||
qwReleaseTaskStatus(QW_WRITE, sch);
|
||||
}
|
||||
qwReleaseScheduler(QW_WRITE, mgmt);
|
||||
|
||||
QW_RET(code);
|
||||
}
|
||||
|
||||
int32_t qwUpdateTaskStatus(QW_FPARAMS_DEF, int8_t status) {
|
||||
SQWSchStatus *sch = NULL;
|
||||
SQWTaskStatus *task = NULL;
|
||||
int32_t code = 0;
|
||||
|
||||
QW_ERR_RET(qwAcquireScheduler(mgmt, sId, QW_READ, &sch));
|
||||
QW_ERR_JRET(qwAcquireTaskStatus(QW_FPARAMS(), QW_READ, sch, &task));
|
||||
|
||||
QW_ERR_JRET(qwSetTaskStatus(QW_FPARAMS(), task, status));
|
||||
|
||||
_return:
|
||||
|
||||
if (task) {
|
||||
qwReleaseTaskStatus(QW_READ, sch);
|
||||
}
|
||||
qwReleaseScheduler(QW_READ, mgmt);
|
||||
|
||||
QW_RET(code);
|
||||
}
|
||||
|
||||
int32_t qwDropTask(QW_FPARAMS_DEF) {
|
||||
QW_ERR_RET(qwDropTaskStatus(QW_FPARAMS()));
|
||||
QW_ERR_RET(qwDropTaskCtx(QW_FPARAMS()));
|
||||
|
||||
QW_TASK_DLOG_E("task is dropped");
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
|
||||
void qwSetHbParam(int64_t refId, SQWHbParam **pParam) {
|
||||
int32_t paramIdx = 0;
|
||||
int32_t newParamIdx = 0;
|
||||
|
||||
while (true) {
|
||||
paramIdx = atomic_load_32(&gQwMgmt.paramIdx);
|
||||
if (paramIdx == tListLen(gQwMgmt.param)) {
|
||||
newParamIdx = 0;
|
||||
} else {
|
||||
newParamIdx = paramIdx + 1;
|
||||
}
|
||||
|
||||
if (paramIdx == atomic_val_compare_exchange_32(&gQwMgmt.paramIdx, paramIdx, newParamIdx)) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
gQwMgmt.param[paramIdx].qwrId = gQwMgmt.qwRef;
|
||||
gQwMgmt.param[paramIdx].refId = refId;
|
||||
|
||||
*pParam = &gQwMgmt.param[paramIdx];
|
||||
}
|
||||
|
||||
|
||||
void qwSaveTbVersionInfo(qTaskInfo_t pTaskInfo, SQWTaskCtx *ctx) {
|
||||
char dbFName[TSDB_DB_FNAME_LEN];
|
||||
char tbName[TSDB_TABLE_NAME_LEN];
|
||||
|
||||
qGetQueriedTableSchemaVersion(pTaskInfo, dbFName, tbName, &ctx->tbInfo.sversion, &ctx->tbInfo.tversion);
|
||||
|
||||
if (dbFName[0] && tbName[0]) {
|
||||
sprintf(ctx->tbInfo.tbFName, "%s.%s", dbFName, tbName);
|
||||
} else {
|
||||
ctx->tbInfo.tbFName[0] = 0;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void qwCloseRef(void) {
|
||||
taosWLockLatch(&gQwMgmt.lock);
|
||||
if (atomic_load_32(&gQwMgmt.qwNum) <= 0 && gQwMgmt.qwRef >= 0) {
|
||||
taosCloseRef(gQwMgmt.qwRef);
|
||||
gQwMgmt.qwRef = -1;
|
||||
}
|
||||
taosWUnLockLatch(&gQwMgmt.lock);
|
||||
}
|
||||
|
||||
|
||||
void qwDestroySchStatus(SQWSchStatus *pStatus) { taosHashCleanup(pStatus->tasksHash); }
|
||||
|
||||
void qwDestroyImpl(void *pMgmt) {
|
||||
SQWorker *mgmt = (SQWorker *)pMgmt;
|
||||
|
||||
taosTmrStopA(&mgmt->hbTimer);
|
||||
taosTmrCleanUp(mgmt->timer);
|
||||
|
||||
// TODO STOP ALL QUERY
|
||||
|
||||
// TODO FREE ALL
|
||||
|
||||
taosHashCleanup(mgmt->ctxHash);
|
||||
|
||||
void *pIter = taosHashIterate(mgmt->schHash, NULL);
|
||||
while (pIter) {
|
||||
SQWSchStatus *sch = (SQWSchStatus *)pIter;
|
||||
qwDestroySchStatus(sch);
|
||||
pIter = taosHashIterate(mgmt->schHash, pIter);
|
||||
}
|
||||
taosHashCleanup(mgmt->schHash);
|
||||
|
||||
taosMemoryFree(mgmt);
|
||||
|
||||
atomic_sub_fetch_32(&gQwMgmt.qwNum, 1);
|
||||
|
||||
qwCloseRef();
|
||||
}
|
||||
|
||||
int32_t qwOpenRef(void) {
|
||||
taosWLockLatch(&gQwMgmt.lock);
|
||||
if (gQwMgmt.qwRef < 0) {
|
||||
gQwMgmt.qwRef = taosOpenRef(100, qwDestroyImpl);
|
||||
if (gQwMgmt.qwRef < 0) {
|
||||
taosWUnLockLatch(&gQwMgmt.lock);
|
||||
qError("init qworker ref failed");
|
||||
QW_RET(TSDB_CODE_QRY_OUT_OF_MEMORY);
|
||||
}
|
||||
}
|
||||
taosWUnLockLatch(&gQwMgmt.lock);
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
|
@ -3,528 +3,43 @@
#include "executor.h"
#include "planner.h"
#include "query.h"
#include "qworkerInt.h"
#include "qworkerMsg.h"
#include "qwInt.h"
#include "qwMsg.h"
#include "tcommon.h"
#include "tmsg.h"
#include "tname.h"

SQWDebug     gQWDebug = {.statusEnable = true, .dumpEnable = true};
SQWorkerMgmt gQwMgmt = {
    .lock = 0,
    .qwRef = -1,
    .qwNum = 0,
};

int32_t qwDbgValidateStatus(QW_FPARAMS_DEF, int8_t oriStatus, int8_t newStatus, bool *ignore) {
  if (!gQWDebug.statusEnable) {
    return TSDB_CODE_SUCCESS;
  }

  int32_t code = 0;

  if (oriStatus == newStatus) {
    if (newStatus == JOB_TASK_STATUS_EXECUTING || newStatus == JOB_TASK_STATUS_FAILED) {
      *ignore = true;
      return TSDB_CODE_SUCCESS;
    }

    QW_ERR_JRET(TSDB_CODE_QRY_APP_ERROR);
  }

  switch (oriStatus) {
    case JOB_TASK_STATUS_NULL:
      if (newStatus != JOB_TASK_STATUS_EXECUTING && newStatus != JOB_TASK_STATUS_FAILED &&
          newStatus != JOB_TASK_STATUS_NOT_START) {
        QW_ERR_JRET(TSDB_CODE_QRY_APP_ERROR);
      }

      break;
    case JOB_TASK_STATUS_NOT_START:
      if (newStatus != JOB_TASK_STATUS_CANCELLED) {
        QW_ERR_JRET(TSDB_CODE_QRY_APP_ERROR);
      }

      break;
    case JOB_TASK_STATUS_EXECUTING:
      if (newStatus != JOB_TASK_STATUS_PARTIAL_SUCCEED && newStatus != JOB_TASK_STATUS_SUCCEED &&
          newStatus != JOB_TASK_STATUS_FAILED && newStatus != JOB_TASK_STATUS_CANCELLING &&
          newStatus != JOB_TASK_STATUS_CANCELLED && newStatus != JOB_TASK_STATUS_DROPPING) {
        QW_ERR_JRET(TSDB_CODE_QRY_APP_ERROR);
      }

      break;
    case JOB_TASK_STATUS_PARTIAL_SUCCEED:
      if (newStatus != JOB_TASK_STATUS_EXECUTING && newStatus != JOB_TASK_STATUS_SUCCEED &&
          newStatus != JOB_TASK_STATUS_CANCELLED && newStatus != JOB_TASK_STATUS_FAILED &&
          newStatus != JOB_TASK_STATUS_DROPPING) {
        QW_ERR_JRET(TSDB_CODE_QRY_APP_ERROR);
      }

      break;
    case JOB_TASK_STATUS_SUCCEED:
      if (newStatus != JOB_TASK_STATUS_CANCELLED && newStatus != JOB_TASK_STATUS_DROPPING &&
          newStatus != JOB_TASK_STATUS_FAILED) {
        QW_ERR_JRET(TSDB_CODE_QRY_APP_ERROR);
      }

      break;
    case JOB_TASK_STATUS_FAILED:
      if (newStatus != JOB_TASK_STATUS_CANCELLED && newStatus != JOB_TASK_STATUS_DROPPING) {
        QW_ERR_JRET(TSDB_CODE_QRY_APP_ERROR);
      }
      break;

    case JOB_TASK_STATUS_CANCELLING:
      if (newStatus != JOB_TASK_STATUS_CANCELLED) {
        QW_ERR_JRET(TSDB_CODE_QRY_APP_ERROR);
      }

      break;
    case JOB_TASK_STATUS_CANCELLED:
    case JOB_TASK_STATUS_DROPPING:
      if (newStatus != JOB_TASK_STATUS_FAILED && newStatus != JOB_TASK_STATUS_PARTIAL_SUCCEED) {
        QW_ERR_JRET(TSDB_CODE_QRY_APP_ERROR);
      }
      break;

    default:
      QW_TASK_ELOG("invalid task origStatus:%s", jobTaskStatusStr(oriStatus));
      return TSDB_CODE_QRY_APP_ERROR;
  }

  return TSDB_CODE_SUCCESS;

_return:

  QW_TASK_ELOG("invalid task status update from %s to %s", jobTaskStatusStr(oriStatus), jobTaskStatusStr(newStatus));
  QW_RET(code);
}

void qwDbgDumpSchInfo(SQWSchStatus *sch, int32_t i) {}

void qwDbgDumpMgmtInfo(SQWorker *mgmt) {
  if (!gQWDebug.dumpEnable) {
    return;
  }

  QW_LOCK(QW_READ, &mgmt->schLock);

  /*QW_DUMP("total remain schduler num:%d", taosHashGetSize(mgmt->schHash));*/

  void         *key = NULL;
  size_t        keyLen = 0;
  int32_t       i = 0;
  SQWSchStatus *sch = NULL;

  void *pIter = taosHashIterate(mgmt->schHash, NULL);
  while (pIter) {
    sch = (SQWSchStatus *)pIter;
    qwDbgDumpSchInfo(sch, i);
    ++i;
    pIter = taosHashIterate(mgmt->schHash, pIter);
  }

  QW_UNLOCK(QW_READ, &mgmt->schLock);

  /*QW_DUMP("total remain ctx num:%d", taosHashGetSize(mgmt->ctxHash));*/
}

char *qwPhaseStr(int32_t phase) {
  switch (phase) {
    case QW_PHASE_PRE_QUERY:
      return "PRE_QUERY";
    case QW_PHASE_POST_QUERY:
      return "POST_QUERY";
    case QW_PHASE_PRE_FETCH:
      return "PRE_FETCH";
    case QW_PHASE_POST_FETCH:
      return "POST_FETCH";
    case QW_PHASE_PRE_CQUERY:
      return "PRE_CQUERY";
    case QW_PHASE_POST_CQUERY:
      return "POST_CQUERY";
    default:
      break;
  }

  return "UNKNOWN";
}

char *qwBufStatusStr(int32_t bufStatus) {
  switch (bufStatus) {
    case DS_BUF_LOW:
      return "LOW";
    case DS_BUF_FULL:
      return "FULL";
    case DS_BUF_EMPTY:
      return "EMPTY";
    default:
      break;
  }

  return "UNKNOWN";
}

int32_t qwSetTaskStatus(QW_FPARAMS_DEF, SQWTaskStatus *task, int8_t status) {
  int32_t code = 0;
  int8_t  origStatus = 0;
  bool    ignore = false;

  while (true) {
    origStatus = atomic_load_8(&task->status);

    QW_ERR_RET(qwDbgValidateStatus(QW_FPARAMS(), origStatus, status, &ignore));
    if (ignore) {
      break;
    }

    if (origStatus != atomic_val_compare_exchange_8(&task->status, origStatus, status)) {
      continue;
    }

    QW_TASK_DLOG("task status updated from %s to %s", jobTaskStatusStr(origStatus), jobTaskStatusStr(status));

    break;
  }

  return TSDB_CODE_SUCCESS;
}

int32_t qwAddSchedulerImpl(SQWorker *mgmt, uint64_t sId, int32_t rwType) {
  SQWSchStatus newSch = {0};
  newSch.tasksHash =
      taosHashInit(mgmt->cfg.maxSchTaskNum, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK);
  if (NULL == newSch.tasksHash) {
    QW_SCH_ELOG("taosHashInit %d failed", mgmt->cfg.maxSchTaskNum);
    QW_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY);
  }

  QW_LOCK(QW_WRITE, &mgmt->schLock);
  int32_t code = taosHashPut(mgmt->schHash, &sId, sizeof(sId), &newSch, sizeof(newSch));
  if (0 != code) {
    if (!HASH_NODE_EXIST(code)) {
      QW_UNLOCK(QW_WRITE, &mgmt->schLock);

      QW_SCH_ELOG("taosHashPut new sch to scheduleHash failed, errno:%d", errno);
      taosHashCleanup(newSch.tasksHash);
      QW_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY);
    }

    taosHashCleanup(newSch.tasksHash);
  }
  QW_UNLOCK(QW_WRITE, &mgmt->schLock);

  return TSDB_CODE_SUCCESS;
}

int32_t qwAcquireSchedulerImpl(SQWorker *mgmt, uint64_t sId, int32_t rwType, SQWSchStatus **sch, int32_t nOpt) {
  while (true) {
    QW_LOCK(rwType, &mgmt->schLock);
    *sch = taosHashGet(mgmt->schHash, &sId, sizeof(sId));
    if (NULL == (*sch)) {
      QW_UNLOCK(rwType, &mgmt->schLock);

      if (QW_NOT_EXIST_ADD == nOpt) {
        QW_ERR_RET(qwAddSchedulerImpl(mgmt, sId, rwType));

        nOpt = QW_NOT_EXIST_RET_ERR;

        continue;
      } else if (QW_NOT_EXIST_RET_ERR == nOpt) {
        QW_RET(TSDB_CODE_QRY_SCH_NOT_EXIST);
      } else {
        QW_SCH_ELOG("unknown notExistOpt:%d", nOpt);
        QW_ERR_RET(TSDB_CODE_QRY_APP_ERROR);
      }
    }

    break;
  }

  return TSDB_CODE_SUCCESS;
}

int32_t qwAcquireAddScheduler(SQWorker *mgmt, uint64_t sId, int32_t rwType, SQWSchStatus **sch) {
  return qwAcquireSchedulerImpl(mgmt, sId, rwType, sch, QW_NOT_EXIST_ADD);
}

int32_t qwAcquireScheduler(SQWorker *mgmt, uint64_t sId, int32_t rwType, SQWSchStatus **sch) {
  return qwAcquireSchedulerImpl(mgmt, sId, rwType, sch, QW_NOT_EXIST_RET_ERR);
}

void qwReleaseScheduler(int32_t rwType, SQWorker *mgmt) { QW_UNLOCK(rwType, &mgmt->schLock); }

int32_t qwAcquireTaskStatus(QW_FPARAMS_DEF, int32_t rwType, SQWSchStatus *sch, SQWTaskStatus **task) {
  char id[sizeof(qId) + sizeof(tId)] = {0};
  QW_SET_QTID(id, qId, tId);

  QW_LOCK(rwType, &sch->tasksLock);
  *task = taosHashGet(sch->tasksHash, id, sizeof(id));
  if (NULL == (*task)) {
    QW_UNLOCK(rwType, &sch->tasksLock);
    QW_ERR_RET(TSDB_CODE_QRY_TASK_NOT_EXIST);
  }

  return TSDB_CODE_SUCCESS;
}

int32_t qwAddTaskStatusImpl(QW_FPARAMS_DEF, SQWSchStatus *sch, int32_t rwType, int32_t status, SQWTaskStatus **task) {
  int32_t code = 0;

  char id[sizeof(qId) + sizeof(tId)] = {0};
  QW_SET_QTID(id, qId, tId);

  SQWTaskStatus ntask = {0};
  ntask.status = status;
  ntask.refId = rId;

  QW_LOCK(QW_WRITE, &sch->tasksLock);
  code = taosHashPut(sch->tasksHash, id, sizeof(id), &ntask, sizeof(ntask));
  if (0 != code) {
    QW_UNLOCK(QW_WRITE, &sch->tasksLock);
    if (HASH_NODE_EXIST(code)) {
      if (rwType && task) {
        QW_RET(qwAcquireTaskStatus(QW_FPARAMS(), rwType, sch, task));
      } else {
        QW_TASK_ELOG("task status already exist, newStatus:%s", jobTaskStatusStr(status));
        QW_ERR_RET(TSDB_CODE_QRY_TASK_ALREADY_EXIST);
      }
    } else {
      QW_TASK_ELOG("taosHashPut to tasksHash failed, error:%x - %s", code, tstrerror(code));
      QW_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY);
    }
  }
  QW_UNLOCK(QW_WRITE, &sch->tasksLock);

  QW_TASK_DLOG("task status added, newStatus:%s", jobTaskStatusStr(status));

  if (rwType && task) {
    QW_ERR_RET(qwAcquireTaskStatus(QW_FPARAMS(), rwType, sch, task));
  }

  return TSDB_CODE_SUCCESS;
}

int32_t qwAddTaskStatus(QW_FPARAMS_DEF, int32_t status) {
  SQWSchStatus *tsch = NULL;
  int32_t       code = 0;
  QW_ERR_RET(qwAcquireAddScheduler(mgmt, sId, QW_READ, &tsch));

  QW_ERR_JRET(qwAddTaskStatusImpl(QW_FPARAMS(), tsch, 0, status, NULL));

_return:

  qwReleaseScheduler(QW_READ, mgmt);

  QW_RET(code);
}

int32_t qwAddAcquireTaskStatus(QW_FPARAMS_DEF, int32_t rwType, SQWSchStatus *sch, int32_t status,
                               SQWTaskStatus **task) {
  return qwAddTaskStatusImpl(QW_FPARAMS(), sch, rwType, status, task);
}

void qwReleaseTaskStatus(int32_t rwType, SQWSchStatus *sch) { QW_UNLOCK(rwType, &sch->tasksLock); }

int32_t qwAcquireTaskCtx(QW_FPARAMS_DEF, SQWTaskCtx **ctx) {
  char id[sizeof(qId) + sizeof(tId)] = {0};
  QW_SET_QTID(id, qId, tId);

  *ctx = taosHashAcquire(mgmt->ctxHash, id, sizeof(id));
  if (NULL == (*ctx)) {
    QW_TASK_DLOG_E("task ctx not exist, may be dropped");
    QW_ERR_RET(TSDB_CODE_QRY_TASK_CTX_NOT_EXIST);
  }

  return TSDB_CODE_SUCCESS;
}

int32_t qwGetTaskCtx(QW_FPARAMS_DEF, SQWTaskCtx **ctx) {
  char id[sizeof(qId) + sizeof(tId)] = {0};
  QW_SET_QTID(id, qId, tId);

  *ctx = taosHashGet(mgmt->ctxHash, id, sizeof(id));
  if (NULL == (*ctx)) {
    QW_TASK_DLOG_E("task ctx not exist, may be dropped");
    QW_ERR_RET(TSDB_CODE_QRY_TASK_CTX_NOT_EXIST);
  }

  return TSDB_CODE_SUCCESS;
}

int32_t qwAddTaskCtxImpl(QW_FPARAMS_DEF, bool acquire, SQWTaskCtx **ctx) {
  char id[sizeof(qId) + sizeof(tId)] = {0};
  QW_SET_QTID(id, qId, tId);

  SQWTaskCtx nctx = {0};

  int32_t code = taosHashPut(mgmt->ctxHash, id, sizeof(id), &nctx, sizeof(SQWTaskCtx));
  if (0 != code) {
    if (HASH_NODE_EXIST(code)) {
      if (acquire && ctx) {
        QW_RET(qwAcquireTaskCtx(QW_FPARAMS(), ctx));
      } else if (ctx) {
        QW_RET(qwGetTaskCtx(QW_FPARAMS(), ctx));
      } else {
        QW_TASK_ELOG_E("task ctx already exist");
        QW_ERR_RET(TSDB_CODE_QRY_TASK_ALREADY_EXIST);
      }
    } else {
      QW_TASK_ELOG("taosHashPut to ctxHash failed, error:%x", code);
      QW_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY);
    }
  }

  if (acquire && ctx) {
    QW_RET(qwAcquireTaskCtx(QW_FPARAMS(), ctx));
  } else if (ctx) {
    QW_RET(qwGetTaskCtx(QW_FPARAMS(), ctx));
  }

  return TSDB_CODE_SUCCESS;
}

int32_t qwAddTaskCtx(QW_FPARAMS_DEF) { QW_RET(qwAddTaskCtxImpl(QW_FPARAMS(), false, NULL)); }

int32_t qwAddAcquireTaskCtx(QW_FPARAMS_DEF, SQWTaskCtx **ctx) { return qwAddTaskCtxImpl(QW_FPARAMS(), true, ctx); }

void qwReleaseTaskCtx(SQWorker *mgmt, void *ctx) { taosHashRelease(mgmt->ctxHash, ctx); }

void qwFreeTaskHandle(QW_FPARAMS_DEF, qTaskInfo_t *taskHandle) {
  // Note: free/kill may in RC
  qTaskInfo_t otaskHandle = atomic_load_ptr(taskHandle);
  if (otaskHandle && atomic_val_compare_exchange_ptr(taskHandle, otaskHandle, NULL)) {
    qDestroyTask(otaskHandle);
  }
}

int32_t qwKillTaskHandle(QW_FPARAMS_DEF, SQWTaskCtx *ctx) {
  int32_t code = 0;
  // Note: free/kill may in RC
  qTaskInfo_t taskHandle = atomic_load_ptr(&ctx->taskHandle);
  if (taskHandle && atomic_val_compare_exchange_ptr(&ctx->taskHandle, taskHandle, NULL)) {
    code = qAsyncKillTask(taskHandle);
    atomic_store_ptr(&ctx->taskHandle, taskHandle);
  }

  QW_RET(code);
}

void qwFreeTask(QW_FPARAMS_DEF, SQWTaskCtx *ctx) {
  tmsgReleaseHandle(&ctx->ctrlConnInfo, TAOS_CONN_SERVER);
  ctx->ctrlConnInfo.handle = NULL;
  ctx->ctrlConnInfo.refId = -1;

  // NO need to release dataConnInfo

  qwFreeTaskHandle(QW_FPARAMS(), &ctx->taskHandle);

  if (ctx->sinkHandle) {
    dsDestroyDataSinker(ctx->sinkHandle);
    ctx->sinkHandle = NULL;
  }

  if (ctx->plan) {
    nodesDestroyNode(ctx->plan);
    ctx->plan = NULL;
  }
}

int32_t qwDropTaskCtx(QW_FPARAMS_DEF) {
  char id[sizeof(qId) + sizeof(tId)] = {0};
  QW_SET_QTID(id, qId, tId);
  SQWTaskCtx octx;

  SQWTaskCtx *ctx = taosHashGet(mgmt->ctxHash, id, sizeof(id));
  if (NULL == ctx) {
    QW_ERR_RET(TSDB_CODE_QRY_TASK_CTX_NOT_EXIST);
  }

  octx = *ctx;

  atomic_store_ptr(&ctx->taskHandle, NULL);
  atomic_store_ptr(&ctx->sinkHandle, NULL);
  atomic_store_ptr(&ctx->plan, NULL);

  QW_SET_EVENT_PROCESSED(ctx, QW_EVENT_DROP);

  if (taosHashRemove(mgmt->ctxHash, id, sizeof(id))) {
    QW_TASK_ELOG_E("taosHashRemove from ctx hash failed");
    QW_ERR_RET(TSDB_CODE_QRY_TASK_CTX_NOT_EXIST);
  }

  qwFreeTask(QW_FPARAMS(), &octx);

  QW_TASK_DLOG_E("task ctx dropped");

  return TSDB_CODE_SUCCESS;
}

int32_t qwDropTaskStatus(QW_FPARAMS_DEF) {
  SQWSchStatus  *sch = NULL;
  SQWTaskStatus *task = NULL;
  int32_t        code = 0;

  char id[sizeof(qId) + sizeof(tId)] = {0};
  QW_SET_QTID(id, qId, tId);

  if (qwAcquireScheduler(mgmt, sId, QW_WRITE, &sch)) {
    QW_TASK_WLOG_E("scheduler does not exist");
    return TSDB_CODE_SUCCESS;
  }

  if (qwAcquireTaskStatus(QW_FPARAMS(), QW_WRITE, sch, &task)) {
    qwReleaseScheduler(QW_WRITE, mgmt);

    QW_TASK_WLOG_E("task does not exist");
    return TSDB_CODE_SUCCESS;
  }

  if (taosHashRemove(sch->tasksHash, id, sizeof(id))) {
    QW_TASK_ELOG_E("taosHashRemove task from hash failed");
    QW_ERR_JRET(TSDB_CODE_QRY_APP_ERROR);
  }

  QW_TASK_DLOG_E("task status dropped");

_return:

  if (task) {
    qwReleaseTaskStatus(QW_WRITE, sch);
  }
  qwReleaseScheduler(QW_WRITE, mgmt);

  QW_RET(code);
}

int32_t qwUpdateTaskStatus(QW_FPARAMS_DEF, int8_t status) {
  SQWSchStatus  *sch = NULL;
  SQWTaskStatus *task = NULL;
  int32_t        code = 0;

  QW_ERR_RET(qwAcquireScheduler(mgmt, sId, QW_READ, &sch));
  QW_ERR_JRET(qwAcquireTaskStatus(QW_FPARAMS(), QW_READ, sch, &task));

  QW_ERR_JRET(qwSetTaskStatus(QW_FPARAMS(), task, status));

_return:

  if (task) {
    qwReleaseTaskStatus(QW_READ, sch);
  }
  qwReleaseScheduler(QW_READ, mgmt);

  QW_RET(code);
}

int32_t qwDropTask(QW_FPARAMS_DEF) {
  QW_ERR_RET(qwDropTaskStatus(QW_FPARAMS()));
  QW_ERR_RET(qwDropTaskCtx(QW_FPARAMS()));

  QW_TASK_DLOG_E("task is dropped");

  return TSDB_CODE_SUCCESS;
  QW_RET(TSDB_CODE_SUCCESS);
}

int32_t qwHandleTaskComplete(QW_FPARAMS_DEF, SQWTaskCtx *ctx) {

@ -722,23 +237,9 @@ int32_t qwGetResFromSink(QW_FPARAMS_DEF, SQWTaskCtx *ctx, int32_t *dataLen, void
|
|||
}
|
||||
|
||||
|
||||
void qwSaveTbVersionInfo(qTaskInfo_t pTaskInfo, SQWTaskCtx *ctx) {
|
||||
char dbFName[TSDB_DB_FNAME_LEN];
|
||||
char tbName[TSDB_TABLE_NAME_LEN];
|
||||
|
||||
qGetQueriedTableSchemaVersion(pTaskInfo, dbFName, tbName, &ctx->tbInfo.sversion, &ctx->tbInfo.tversion);
|
||||
|
||||
if (dbFName[0] && tbName[0]) {
|
||||
sprintf(ctx->tbInfo.tbFName, "%s.%s", dbFName, tbName);
|
||||
} else {
|
||||
ctx->tbInfo.tbFName[0] = 0;
|
||||
}
|
||||
}
|
||||
|
||||
int32_t qwHandlePrePhaseEvents(QW_FPARAMS_DEF, int8_t phase, SQWPhaseInput *input, SQWPhaseOutput *output) {
|
||||
int32_t code = 0;
|
||||
SQWTaskCtx *ctx = NULL;
|
||||
SRpcHandleInfo *dropConnection = NULL;
|
||||
SRpcHandleInfo *cancelConnection = NULL;
|
||||
|
||||
QW_TASK_DLOG("start to handle event at phase %s", qwPhaseStr(phase));
|
||||
|
@ -771,12 +272,10 @@ int32_t qwHandlePrePhaseEvents(QW_FPARAMS_DEF, int8_t phase, SQWPhaseInput *inpu
|
|||
}
|
||||
|
||||
if (QW_IS_EVENT_RECEIVED(ctx, QW_EVENT_DROP)) {
|
||||
dropConnection = &ctx->ctrlConnInfo;
|
||||
QW_ERR_JRET(qwDropTask(QW_FPARAMS()));
|
||||
dropConnection = NULL;
|
||||
|
||||
qwBuildAndSendDropRsp(&ctx->ctrlConnInfo, code);
|
||||
QW_TASK_DLOG("drop rsp send, handle:%p, code:%x - %s", ctx->ctrlConnInfo.handle, code, tstrerror(code));
|
||||
//qwBuildAndSendDropRsp(&ctx->ctrlConnInfo, code);
|
||||
//QW_TASK_DLOG("drop rsp send, handle:%p, code:%x - %s", ctx->ctrlConnInfo.handle, code, tstrerror(code));
|
||||
|
||||
QW_ERR_JRET(TSDB_CODE_QRY_TASK_DROPPED);
|
||||
break;
|
||||
|
@ -809,12 +308,10 @@ int32_t qwHandlePrePhaseEvents(QW_FPARAMS_DEF, int8_t phase, SQWPhaseInput *inpu
|
|||
}
|
||||
|
||||
if (QW_IS_EVENT_RECEIVED(ctx, QW_EVENT_DROP)) {
|
||||
dropConnection = &ctx->ctrlConnInfo;
|
||||
QW_ERR_JRET(qwDropTask(QW_FPARAMS()));
|
||||
dropConnection = NULL;
|
||||
|
||||
qwBuildAndSendDropRsp(&ctx->ctrlConnInfo, code);
|
||||
QW_TASK_DLOG("drop rsp send, handle:%p, code:%x - %s", ctx->ctrlConnInfo.handle, code, tstrerror(code));
|
||||
//qwBuildAndSendDropRsp(&ctx->ctrlConnInfo, code);
|
||||
//QW_TASK_DLOG("drop rsp send, handle:%p, code:%x - %s", ctx->ctrlConnInfo.handle, code, tstrerror(code));
|
||||
|
||||
QW_ERR_JRET(TSDB_CODE_QRY_TASK_DROPPED);
|
||||
}
|
||||
|
@ -839,11 +336,6 @@ _return:
|
|||
qwReleaseTaskCtx(mgmt, ctx);
|
||||
}
|
||||
|
||||
if (dropConnection) {
|
||||
qwBuildAndSendDropRsp(dropConnection, code);
|
||||
QW_TASK_DLOG("drop rsp send, handle:%p, code:%x - %s", dropConnection->handle, code, tstrerror(code));
|
||||
}
|
||||
|
||||
if (cancelConnection) {
|
||||
qwBuildAndSendCancelRsp(cancelConnection, code);
|
||||
QW_TASK_DLOG("cancel rsp send, handle:%p, code:%x - %s", cancelConnection->handle, code, tstrerror(code));
|
||||
|
@ -862,7 +354,7 @@ int32_t qwHandlePostPhaseEvents(QW_FPARAMS_DEF, int8_t phase, SQWPhaseInput *inp
|
|||
int32_t code = 0;
|
||||
SQWTaskCtx *ctx = NULL;
|
||||
SRpcHandleInfo connInfo = {0};
|
||||
SRpcHandleInfo *readyConnection = NULL;
|
||||
SRpcHandleInfo *rspConnection = NULL;
|
||||
|
||||
QW_TASK_DLOG("start to handle event at phase %s", qwPhaseStr(phase));
|
||||
|
||||
|
@ -883,7 +375,7 @@ int32_t qwHandlePostPhaseEvents(QW_FPARAMS_DEF, int8_t phase, SQWPhaseInput *inp
|
|||
}
|
||||
#else
|
||||
connInfo = ctx->ctrlConnInfo;
|
||||
readyConnection = &connInfo;
|
||||
rspConnection = &connInfo;
|
||||
|
||||
QW_SET_EVENT_PROCESSED(ctx, QW_EVENT_READY);
|
||||
#endif
|
||||
|
@ -895,8 +387,8 @@ int32_t qwHandlePostPhaseEvents(QW_FPARAMS_DEF, int8_t phase, SQWPhaseInput *inp
|
|||
QW_ERR_JRET(TSDB_CODE_QRY_APP_ERROR);
|
||||
}
|
||||
|
||||
qwBuildAndSendDropRsp(&ctx->ctrlConnInfo, code);
|
||||
QW_TASK_DLOG("drop rsp send, handle:%p, code:%x - %s", ctx->ctrlConnInfo.handle, code, tstrerror(code));
|
||||
//qwBuildAndSendDropRsp(&ctx->ctrlConnInfo, code);
|
||||
//QW_TASK_DLOG("drop rsp send, handle:%p, code:%x - %s", ctx->ctrlConnInfo.handle, code, tstrerror(code));
|
||||
|
||||
QW_ERR_JRET(qwDropTask(QW_FPARAMS()));
|
||||
QW_ERR_JRET(TSDB_CODE_QRY_TASK_DROPPED);
|
||||
|
@ -916,9 +408,9 @@ _return:
|
|||
qwUpdateTaskStatus(QW_FPARAMS(), JOB_TASK_STATUS_PARTIAL_SUCCEED);
|
||||
}
|
||||
|
||||
if (readyConnection) {
|
||||
qwBuildAndSendReadyRsp(readyConnection, code, ctx ? &ctx->tbInfo : NULL);
|
||||
QW_TASK_DLOG("ready msg rsped, handle:%p, code:%x - %s", readyConnection->handle, code, tstrerror(code));
|
||||
if (rspConnection) {
|
||||
qwBuildAndSendQueryRsp(rspConnection, code, ctx ? &ctx->tbInfo : NULL);
|
||||
QW_TASK_DLOG("ready msg rsped, handle:%p, code:%x - %s", rspConnection->handle, code, tstrerror(code));
|
||||
}
|
||||
|
||||
if (ctx) {
|
||||
|
@ -1009,69 +501,6 @@ _return:
|
|||
QW_RET(TSDB_CODE_SUCCESS);
|
||||
}
|
||||
|
||||
int32_t qwProcessReady(QW_FPARAMS_DEF, SQWMsg *qwMsg) {
|
||||
int32_t code = 0;
|
||||
SQWTaskCtx *ctx = NULL;
|
||||
int8_t phase = 0;
|
||||
bool needRsp = true;
|
||||
|
||||
QW_ERR_JRET(qwAcquireTaskCtx(QW_FPARAMS(), &ctx));
|
||||
|
||||
QW_LOCK(QW_WRITE, &ctx->lock);
|
||||
|
||||
if (QW_IS_EVENT_PROCESSED(ctx, QW_EVENT_DROP) || QW_IS_EVENT_RECEIVED(ctx, QW_EVENT_DROP)) {
|
||||
QW_TASK_WLOG_E("task is dropping or already dropped");
|
||||
QW_ERR_JRET(TSDB_CODE_QRY_TASK_DROPPED);
|
||||
}
|
||||
|
||||
if (ctx->phase == QW_PHASE_PRE_QUERY) {
|
||||
ctx->ctrlConnInfo = qwMsg->connInfo;
|
||||
QW_SET_EVENT_RECEIVED(ctx, QW_EVENT_READY);
|
||||
needRsp = false;
|
||||
QW_TASK_DLOG_E("ready msg will not rsp now");
|
||||
goto _return;
|
||||
}
|
||||
|
||||
QW_SET_EVENT_PROCESSED(ctx, QW_EVENT_READY);
|
||||
|
||||
if (atomic_load_8((int8_t *)&ctx->queryEnd) || atomic_load_8((int8_t *)&ctx->queryFetched)) {
|
||||
QW_TASK_ELOG("got ready msg at wrong status, queryEnd:%d, queryFetched:%d", atomic_load_8((int8_t *)&ctx->queryEnd),
|
||||
atomic_load_8((int8_t *)&ctx->queryFetched));
|
||||
QW_ERR_JRET(TSDB_CODE_QW_MSG_ERROR);
|
||||
}
|
||||
|
||||
if (ctx->phase == QW_PHASE_POST_QUERY) {
|
||||
code = ctx->rspCode;
|
||||
goto _return;
|
||||
}
|
||||
|
||||
QW_TASK_ELOG("invalid phase when got ready msg, phase:%s", qwPhaseStr(ctx->phase));
|
||||
|
||||
QW_ERR_JRET(TSDB_CODE_QRY_TASK_STATUS_ERROR);
|
||||
|
||||
_return:
|
||||
|
||||
if (code && ctx) {
|
||||
QW_UPDATE_RSP_CODE(ctx, code);
|
||||
}
|
||||
|
||||
if (code) {
|
||||
qwUpdateTaskStatus(QW_FPARAMS(), JOB_TASK_STATUS_FAILED);
|
||||
}
|
||||
|
||||
if (ctx) {
|
||||
QW_UNLOCK(QW_WRITE, &ctx->lock);
|
||||
qwReleaseTaskCtx(mgmt, ctx);
|
||||
}
|
||||
|
||||
if (needRsp) {
|
||||
qwBuildAndSendReadyRsp(&qwMsg->connInfo, code, NULL);
|
||||
QW_TASK_DLOG("ready msg rsped, handle:%p, code:%x - %s", qwMsg->connInfo.handle, code, tstrerror(code));
|
||||
}
|
||||
|
||||
QW_RET(TSDB_CODE_SUCCESS);
|
||||
}
|
||||
|
||||
int32_t qwProcessCQuery(QW_FPARAMS_DEF, SQWMsg *qwMsg) {
|
||||
SQWTaskCtx *ctx = NULL;
|
||||
int32_t code = 0;
|
||||
|
@ -1245,11 +674,6 @@ int32_t qwProcessDrop(QW_FPARAMS_DEF, SQWMsg *qwMsg) {
|
|||
QW_ERR_JRET(qwKillTaskHandle(QW_FPARAMS(), ctx));
|
||||
qwUpdateTaskStatus(QW_FPARAMS(), JOB_TASK_STATUS_DROPPING);
|
||||
} else if (ctx->phase > 0) {
|
||||
if (0 == qwMsg->code) {
|
||||
qwBuildAndSendDropRsp(&qwMsg->connInfo, code);
|
||||
QW_TASK_DLOG("drop rsp send, handle:%p, code:%x - %s", qwMsg->connInfo.handle, code, tstrerror(code));
|
||||
}
|
||||
|
||||
QW_ERR_JRET(qwDropTask(QW_FPARAMS()));
|
||||
rsped = true;
|
||||
} else {
|
||||
|
@ -1280,37 +704,6 @@ _return:
|
|||
qwReleaseTaskCtx(mgmt, ctx);
|
||||
}
|
||||
|
||||
if ((TSDB_CODE_SUCCESS != code) && (0 == qwMsg->code)) {
|
||||
qwBuildAndSendDropRsp(&qwMsg->connInfo, code);
|
||||
QW_TASK_DLOG("drop rsp send, handle:%p, code:%x - %s", qwMsg->connInfo.handle, code, tstrerror(code));
|
||||
}
|
||||
|
||||
QW_RET(TSDB_CODE_SUCCESS);
|
||||
}
|
||||
|
||||
int32_t qwProcessHbLinkBroken(SQWorker *mgmt, SQWMsg *qwMsg, SSchedulerHbReq *req) {
|
||||
int32_t code = 0;
|
||||
SSchedulerHbRsp rsp = {0};
|
||||
SQWSchStatus *sch = NULL;
|
||||
|
||||
QW_ERR_RET(qwAcquireAddScheduler(mgmt, req->sId, QW_READ, &sch));
|
||||
|
||||
QW_LOCK(QW_WRITE, &sch->hbConnLock);
|
||||
|
||||
if (qwMsg->connInfo.handle == sch->hbConnInfo.handle) {
|
||||
tmsgReleaseHandle(&sch->hbConnInfo, TAOS_CONN_SERVER);
|
||||
sch->hbConnInfo.handle = NULL;
|
||||
sch->hbConnInfo.ahandle = NULL;
|
||||
|
||||
QW_DLOG("release hb handle due to connection broken, handle:%p", qwMsg->connInfo.handle);
|
||||
} else {
|
||||
QW_DLOG("ignore hb connection broken, handle:%p, currentHandle:%p", qwMsg->connInfo.handle, sch->hbConnInfo.handle);
|
||||
}
|
||||
|
||||
QW_UNLOCK(QW_WRITE, &sch->hbConnLock);
|
||||
|
||||
qwReleaseScheduler(QW_READ, mgmt);
|
||||
|
||||
QW_RET(TSDB_CODE_SUCCESS);
|
||||
}
|
||||
|
||||
|
@ -1441,81 +834,6 @@ _return:
|
|||
qwRelease(refId);
|
||||
}
|
||||
|
||||
void qwCloseRef(void) {
|
||||
taosWLockLatch(&gQwMgmt.lock);
|
||||
if (atomic_load_32(&gQwMgmt.qwNum) <= 0 && gQwMgmt.qwRef >= 0) {
|
||||
taosCloseRef(gQwMgmt.qwRef);
|
||||
gQwMgmt.qwRef = -1;
|
||||
}
|
||||
taosWUnLockLatch(&gQwMgmt.lock);
|
||||
}
|
||||
|
||||
void qwDestroySchStatus(SQWSchStatus *pStatus) { taosHashCleanup(pStatus->tasksHash); }
|
||||
|
||||
void qwDestroyImpl(void *pMgmt) {
|
||||
SQWorker *mgmt = (SQWorker *)pMgmt;
|
||||
|
||||
taosTmrStopA(&mgmt->hbTimer);
|
||||
taosTmrCleanUp(mgmt->timer);
|
||||
|
||||
// TODO STOP ALL QUERY
|
||||
|
||||
// TODO FREE ALL
|
||||
|
||||
taosHashCleanup(mgmt->ctxHash);
|
||||
|
||||
void *pIter = taosHashIterate(mgmt->schHash, NULL);
|
||||
while (pIter) {
|
||||
SQWSchStatus *sch = (SQWSchStatus *)pIter;
|
||||
qwDestroySchStatus(sch);
|
||||
pIter = taosHashIterate(mgmt->schHash, pIter);
|
||||
}
|
||||
taosHashCleanup(mgmt->schHash);
|
||||
|
||||
taosMemoryFree(mgmt);
|
||||
|
||||
atomic_sub_fetch_32(&gQwMgmt.qwNum, 1);
|
||||
|
||||
qwCloseRef();
|
||||
}
|
||||
|
||||
int32_t qwOpenRef(void) {
|
||||
taosWLockLatch(&gQwMgmt.lock);
|
||||
if (gQwMgmt.qwRef < 0) {
|
||||
gQwMgmt.qwRef = taosOpenRef(100, qwDestroyImpl);
|
||||
if (gQwMgmt.qwRef < 0) {
|
||||
taosWUnLockLatch(&gQwMgmt.lock);
|
||||
qError("init qworker ref failed");
|
||||
QW_RET(TSDB_CODE_QRY_OUT_OF_MEMORY);
|
||||
}
|
||||
}
|
||||
taosWUnLockLatch(&gQwMgmt.lock);
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
void qwSetHbParam(int64_t refId, SQWHbParam **pParam) {
|
||||
int32_t paramIdx = 0;
|
||||
int32_t newParamIdx = 0;
|
||||
|
||||
while (true) {
|
||||
paramIdx = atomic_load_32(&gQwMgmt.paramIdx);
|
||||
if (paramIdx == tListLen(gQwMgmt.param)) {
|
||||
newParamIdx = 0;
|
||||
} else {
|
||||
newParamIdx = paramIdx + 1;
|
||||
}
|
||||
|
||||
if (paramIdx == atomic_val_compare_exchange_32(&gQwMgmt.paramIdx, paramIdx, newParamIdx)) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
gQwMgmt.param[paramIdx].qwrId = gQwMgmt.qwRef;
|
||||
gQwMgmt.param[paramIdx].refId = refId;
|
||||
|
||||
*pParam = &gQwMgmt.param[paramIdx];
|
||||
}
|
||||
|
||||
int32_t qWorkerInit(int8_t nodeType, int32_t nodeId, SQWorkerCfg *cfg, void **qWorkerMgmt, const SMsgCb *pMsgCb) {
|
||||
if (NULL == qWorkerMgmt || pMsgCb->mgmt == NULL) {
|
||||
|
@ -1632,146 +950,4 @@ void qWorkerDestroy(void **qWorkerMgmt) {
|
|||
}
|
||||
}
|
||||
|
||||
int32_t qwGetSchTasksStatus(SQWorker *mgmt, uint64_t sId, SSchedulerStatusRsp **rsp) {
|
||||
/*
|
||||
SQWSchStatus *sch = NULL;
|
||||
int32_t taskNum = 0;
|
||||
|
||||
QW_ERR_RET(qwAcquireScheduler(mgmt, sId, QW_READ, &sch));
|
||||
|
||||
sch->lastAccessTs = taosGetTimestampSec();
|
||||
|
||||
QW_LOCK(QW_READ, &sch->tasksLock);
|
||||
|
||||
taskNum = taosHashGetSize(sch->tasksHash);
|
||||
|
||||
int32_t size = sizeof(SSchedulerStatusRsp) + sizeof((*rsp)->status[0]) * taskNum;
|
||||
*rsp = taosMemoryCalloc(1, size);
|
||||
if (NULL == *rsp) {
|
||||
QW_SCH_ELOG("calloc %d failed", size);
|
||||
QW_UNLOCK(QW_READ, &sch->tasksLock);
|
||||
qwReleaseScheduler(QW_READ, mgmt);
|
||||
|
||||
return TSDB_CODE_QRY_OUT_OF_MEMORY;
|
||||
}
|
||||
|
||||
void *key = NULL;
|
||||
size_t keyLen = 0;
|
||||
int32_t i = 0;
|
||||
|
||||
void *pIter = taosHashIterate(sch->tasksHash, NULL);
|
||||
while (pIter) {
|
||||
SQWTaskStatus *taskStatus = (SQWTaskStatus *)pIter;
|
||||
taosHashGetKey(pIter, &key, &keyLen);
|
||||
|
||||
QW_GET_QTID(key, (*rsp)->status[i].queryId, (*rsp)->status[i].taskId);
|
||||
(*rsp)->status[i].status = taskStatus->status;
|
||||
|
||||
++i;
|
||||
pIter = taosHashIterate(sch->tasksHash, pIter);
|
||||
}
|
||||
|
||||
QW_UNLOCK(QW_READ, &sch->tasksLock);
|
||||
qwReleaseScheduler(QW_READ, mgmt);
|
||||
|
||||
(*rsp)->num = taskNum;
|
||||
*/
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
int32_t qwUpdateSchLastAccess(SQWorker *mgmt, uint64_t sId, uint64_t qId, uint64_t tId) {
|
||||
SQWSchStatus *sch = NULL;
|
||||
|
||||
/*
|
||||
QW_ERR_RET(qwAcquireScheduler(QW_READ, mgmt, sId, &sch));
|
||||
|
||||
sch->lastAccessTs = taosGetTimestampSec();
|
||||
|
||||
qwReleaseScheduler(QW_READ, mgmt);
|
||||
*/
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
int32_t qwGetTaskStatus(SQWorker *mgmt, uint64_t sId, uint64_t qId, uint64_t tId, int8_t *taskStatus) {
|
||||
SQWSchStatus *sch = NULL;
|
||||
SQWTaskStatus *task = NULL;
|
||||
int32_t code = 0;
|
||||
|
||||
/*
|
||||
if (qwAcquireScheduler(QW_READ, mgmt, sId, &sch)) {
|
||||
*taskStatus = JOB_TASK_STATUS_NULL;
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
if (qwAcquireTask(mgmt, QW_READ, sch, queryId, taskId, &task)) {
|
||||
qwReleaseScheduler(QW_READ, mgmt);
|
||||
|
||||
*taskStatus = JOB_TASK_STATUS_NULL;
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
*taskStatus = task->status;
|
||||
|
||||
qwReleaseTask(QW_READ, sch);
|
||||
qwReleaseScheduler(QW_READ, mgmt);
|
||||
*/
|
||||
|
||||
QW_RET(code);
|
||||
}
|
||||
|
||||
int32_t qwCancelTask(SQWorker *mgmt, uint64_t sId, uint64_t qId, uint64_t tId) {
|
||||
SQWSchStatus *sch = NULL;
|
||||
SQWTaskStatus *task = NULL;
|
||||
int32_t code = 0;
|
||||
|
||||
/*
|
||||
QW_ERR_RET(qwAcquireAddScheduler(QW_READ, mgmt, sId, &sch));
|
||||
|
||||
QW_ERR_JRET(qwAcquireAddTask(mgmt, QW_READ, sch, qId, tId, JOB_TASK_STATUS_NOT_START, &task));
|
||||
|
||||
|
||||
QW_LOCK(QW_WRITE, &task->lock);
|
||||
|
||||
task->cancel = true;
|
||||
|
||||
int8_t oriStatus = task->status;
|
||||
int8_t newStatus = 0;
|
||||
|
||||
if (task->status == JOB_TASK_STATUS_CANCELLED || task->status == JOB_TASK_STATUS_NOT_START || task->status ==
|
||||
JOB_TASK_STATUS_CANCELLING || task->status == JOB_TASK_STATUS_DROPPING) { QW_UNLOCK(QW_WRITE, &task->lock);
|
||||
|
||||
qwReleaseTask(QW_READ, sch);
|
||||
qwReleaseScheduler(QW_READ, mgmt);
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
} else if (task->status == JOB_TASK_STATUS_FAILED || task->status == JOB_TASK_STATUS_SUCCEED || task->status ==
|
||||
JOB_TASK_STATUS_PARTIAL_SUCCEED) { QW_ERR_JRET(qwUpdateTaskStatus(QW_FPARAMS(), JOB_TASK_STATUS_CANCELLED)); } else {
|
||||
QW_ERR_JRET(qwUpdateTaskStatus(QW_FPARAMS(), JOB_TASK_STATUS_CANCELLING));
|
||||
}
|
||||
|
||||
QW_UNLOCK(QW_WRITE, &task->lock);
|
||||
|
||||
qwReleaseTask(QW_READ, sch);
|
||||
qwReleaseScheduler(QW_READ, mgmt);
|
||||
|
||||
if (oriStatus == JOB_TASK_STATUS_EXECUTING) {
|
||||
//TODO call executer to cancel subquery async
|
||||
}
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
|
||||
_return:
|
||||
|
||||
if (task) {
|
||||
QW_UNLOCK(QW_WRITE, &task->lock);
|
||||
|
||||
qwReleaseTask(QW_READ, sch);
|
||||
}
|
||||
|
||||
if (sch) {
|
||||
qwReleaseScheduler(QW_READ, mgmt);
|
||||
}
|
||||
*/
|
||||
|
||||
QW_RET(code);
|
||||
}
|
||||
|
|
|
@ -127,15 +127,6 @@ void qwtBuildQueryReqMsg(SRpcMsg *queryRpc) {
|
|||
queryRpc->contLen = sizeof(SSubQueryMsg) + 100;
|
||||
}
|
||||
|
||||
void qwtBuildReadyReqMsg(SResReadyReq *readyMsg, SRpcMsg *readyRpc) {
|
||||
readyMsg->sId = htobe64(1);
|
||||
readyMsg->queryId = htobe64(atomic_load_64(&qwtTestQueryId));
|
||||
readyMsg->taskId = htobe64(1);
|
||||
readyRpc->msgType = TDMT_VND_RES_READY;
|
||||
readyRpc->pCont = readyMsg;
|
||||
readyRpc->contLen = sizeof(SResReadyReq);
|
||||
}
|
||||
|
||||
void qwtBuildFetchReqMsg(SResFetchReq *fetchMsg, SRpcMsg *fetchRpc) {
|
||||
fetchMsg->sId = htobe64(1);
|
||||
fetchMsg->queryId = htobe64(atomic_load_64(&qwtTestQueryId));
|
||||
|
@ -154,13 +145,6 @@ void qwtBuildDropReqMsg(STaskDropReq *dropMsg, SRpcMsg *dropRpc) {
|
|||
dropRpc->contLen = sizeof(STaskDropReq);
|
||||
}
|
||||
|
||||
void qwtBuildStatusReqMsg(SSchTasksStatusReq *statusMsg, SRpcMsg *statusRpc) {
|
||||
statusMsg->sId = htobe64(1);
|
||||
statusRpc->pCont = statusMsg;
|
||||
statusRpc->contLen = sizeof(SSchTasksStatusReq);
|
||||
statusRpc->msgType = TDMT_VND_TASKS_STATUS;
|
||||
}
|
||||
|
||||
int32_t qwtStringToPlan(const char* str, SSubplan** subplan) {
|
||||
*subplan = (SSubplan *)0x1;
|
||||
return 0;
|
||||
|
@ -222,10 +206,7 @@ void qwtRpcSendResponse(const SRpcMsg *pRsp) {
|
|||
case TDMT_VND_QUERY_RSP: {
|
||||
SQueryTableRsp *rsp = (SQueryTableRsp *)pRsp->pCont;
|
||||
|
||||
if (0 == pRsp->code) {
|
||||
qwtBuildReadyReqMsg(&qwtreadyMsg, &qwtreadyRpc);
|
||||
qwtPutReqToFetchQueue((void *)0x1, &qwtreadyRpc);
|
||||
} else {
|
||||
if (pRsp->code) {
|
||||
qwtBuildDropReqMsg(&qwtdropMsg, &qwtdropRpc);
|
||||
qwtPutReqToFetchQueue((void *)0x1, &qwtdropRpc);
|
||||
}
|
||||
|
@ -233,19 +214,6 @@ void qwtRpcSendResponse(const SRpcMsg *pRsp) {
|
|||
rpcFreeCont(rsp);
|
||||
break;
|
||||
}
|
||||
case TDMT_VND_RES_READY_RSP: {
|
||||
SResReadyRsp *rsp = (SResReadyRsp *)pRsp->pCont;
|
||||
|
||||
if (0 == pRsp->code) {
|
||||
qwtBuildFetchReqMsg(&qwtfetchMsg, &qwtfetchRpc);
|
||||
qwtPutReqToFetchQueue((void *)0x1, &qwtfetchRpc);
|
||||
} else {
|
||||
qwtBuildDropReqMsg(&qwtdropMsg, &qwtdropRpc);
|
||||
qwtPutReqToFetchQueue((void *)0x1, &qwtdropRpc);
|
||||
}
|
||||
rpcFreeCont(rsp);
|
||||
break;
|
||||
}
|
||||
case TDMT_VND_FETCH_RSP: {
|
||||
SRetrieveTableRsp *rsp = (SRetrieveTableRsp *)pRsp->pCont;
|
||||
|
||||
|
@ -679,28 +647,6 @@ void *queryThread(void *param) {
|
|||
return NULL;
|
||||
}
|
||||
|
||||
void *readyThread(void *param) {
|
||||
SRpcMsg readyRpc = {0};
|
||||
int32_t code = 0;
|
||||
uint32_t n = 0;
|
||||
void *mockPointer = (void *)0x1;
|
||||
void *mgmt = param;
|
||||
SResReadyReq readyMsg = {0};
|
||||
|
||||
while (!qwtTestStop) {
|
||||
qwtBuildReadyReqMsg(&readyMsg, &readyRpc);
|
||||
code = qWorkerProcessReadyMsg(mockPointer, mgmt, &readyRpc);
|
||||
if (qwtTestEnableSleep) {
|
||||
taosUsleep(taosRand()%5);
|
||||
}
|
||||
if (++n % qwtTestPrintNum == 0) {
|
||||
printf("ready:%d\n", n);
|
||||
}
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
void *fetchThread(void *param) {
|
||||
SRpcMsg fetchRpc = {0};
|
||||
int32_t code = 0;
|
||||
|
@ -745,29 +691,6 @@ void *dropThread(void *param) {
|
|||
return NULL;
|
||||
}
|
||||
|
||||
void *statusThread(void *param) {
|
||||
SRpcMsg statusRpc = {0};
|
||||
int32_t code = 0;
|
||||
uint32_t n = 0;
|
||||
void *mockPointer = (void *)0x1;
|
||||
void *mgmt = param;
|
||||
SSchTasksStatusReq statusMsg = {0};
|
||||
|
||||
while (!qwtTestStop) {
|
||||
qwtBuildStatusReqMsg(&statusMsg, &statusRpc);
|
||||
code = qWorkerProcessStatusMsg(mockPointer, mgmt, &statusRpc);
|
||||
if (qwtTestEnableSleep) {
|
||||
taosUsleep(taosRand()%5);
|
||||
}
|
||||
if (++n % qwtTestPrintNum == 0) {
|
||||
printf("status:%d\n", n);
|
||||
}
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
|
||||
void *qwtclientThread(void *param) {
|
||||
int32_t code = 0;
|
||||
uint32_t n = 0;
|
||||
|
@ -894,12 +817,6 @@ void *fetchQueueThread(void *param) {
|
|||
case TDMT_VND_FETCH:
|
||||
qWorkerProcessFetchMsg(mockPointer, mgmt, fetchRpc);
|
||||
break;
|
||||
case TDMT_VND_RES_READY:
|
||||
qWorkerProcessReadyMsg(mockPointer, mgmt, fetchRpc);
|
||||
break;
|
||||
case TDMT_VND_TASKS_STATUS:
|
||||
qWorkerProcessStatusMsg(mockPointer, mgmt, fetchRpc);
|
||||
break;
|
||||
case TDMT_VND_CANCEL_TASK:
|
||||
qWorkerProcessCancelMsg(mockPointer, mgmt, fetchRpc);
|
||||
break;
|
||||
|
@ -934,15 +851,12 @@ TEST(seqTest, normalCase) {
|
|||
int32_t code = 0;
|
||||
void *mockPointer = (void *)0x1;
|
||||
SRpcMsg queryRpc = {0};
|
||||
SRpcMsg readyRpc = {0};
|
||||
SRpcMsg fetchRpc = {0};
|
||||
SRpcMsg dropRpc = {0};
|
||||
SRpcMsg statusRpc = {0};
|
||||
|
||||
qwtInitLogFile();
|
||||
|
||||
qwtBuildQueryReqMsg(&queryRpc);
|
||||
qwtBuildReadyReqMsg(&qwtreadyMsg, &readyRpc);
|
||||
qwtBuildFetchReqMsg(&qwtfetchMsg, &fetchRpc);
|
||||
qwtBuildDropReqMsg(&qwtdropMsg, &dropRpc);
|
||||
|
||||
|
@ -976,10 +890,6 @@ TEST(seqTest, normalCase) {
|
|||
code = qWorkerProcessDropMsg(mockPointer, mgmt, &dropRpc);
|
||||
ASSERT_EQ(code, 0);
|
||||
|
||||
qwtBuildStatusReqMsg(&qwtstatusMsg, &statusRpc);
|
||||
code = qWorkerProcessStatusMsg(mockPointer, mgmt, &statusRpc);
|
||||
ASSERT_EQ(code, 0);
|
||||
|
||||
qWorkerDestroy(&mgmt);
|
||||
}
|
||||
|
||||
|
@ -989,13 +899,11 @@ TEST(seqTest, cancelFirst) {
|
|||
void *mockPointer = (void *)0x1;
|
||||
SRpcMsg queryRpc = {0};
|
||||
SRpcMsg dropRpc = {0};
|
||||
SRpcMsg statusRpc = {0};
|
||||
|
||||
qwtInitLogFile();
|
||||
|
||||
qwtBuildQueryReqMsg(&queryRpc);
|
||||
qwtBuildDropReqMsg(&qwtdropMsg, &dropRpc);
|
||||
qwtBuildStatusReqMsg(&qwtstatusMsg, &statusRpc);
|
||||
|
||||
stubSetStringToPlan();
|
||||
stubSetRpcSendResponse();
|
||||
|
@ -1006,24 +914,12 @@ TEST(seqTest, cancelFirst) {
|
|||
code = qWorkerInit(NODE_TYPE_VNODE, 1, NULL, &mgmt, &msgCb);
|
||||
ASSERT_EQ(code, 0);
|
||||
|
||||
qwtBuildStatusReqMsg(&qwtstatusMsg, &statusRpc);
|
||||
code = qWorkerProcessStatusMsg(mockPointer, mgmt, &statusRpc);
|
||||
ASSERT_EQ(code, 0);
|
||||
|
||||
code = qWorkerProcessDropMsg(mockPointer, mgmt, &dropRpc);
|
||||
ASSERT_EQ(code, 0);
|
||||
|
||||
qwtBuildStatusReqMsg(&qwtstatusMsg, &statusRpc);
|
||||
code = qWorkerProcessStatusMsg(mockPointer, mgmt, &statusRpc);
|
||||
ASSERT_EQ(code, 0);
|
||||
|
||||
code = qWorkerProcessQueryMsg(mockPointer, mgmt, &queryRpc);
|
||||
ASSERT_TRUE(0 != code);
|
||||
|
||||
qwtBuildStatusReqMsg(&qwtstatusMsg, &statusRpc);
|
||||
code = qWorkerProcessStatusMsg(mockPointer, mgmt, &statusRpc);
|
||||
ASSERT_EQ(code, 0);
|
||||
|
||||
qWorkerDestroy(&mgmt);
|
||||
}
|
||||
|
||||
|
@ -1087,9 +983,6 @@ TEST(seqTest, randCase) {
|
|||
}
|
||||
} else if (r >= maxr * 4/5 && r < maxr-1) {
|
||||
printf("Status,%d\n", t++);
|
||||
qwtBuildStatusReqMsg(&statusMsg, &statusRpc);
|
||||
code = qWorkerProcessStatusMsg(mockPointer, mgmt, &statusRpc);
|
||||
ASSERT_EQ(code, 0);
|
||||
if (qwtTestEnableSleep) {
|
||||
taosUsleep(1);
|
||||
}
|
||||
|
@ -1137,7 +1030,6 @@ TEST(seqTest, multithreadRand) {
|
|||
//taosThreadCreate(&(t2), &thattr, readyThread, NULL);
|
||||
taosThreadCreate(&(t3), &thattr, fetchThread, NULL);
|
||||
taosThreadCreate(&(t4), &thattr, dropThread, NULL);
|
||||
taosThreadCreate(&(t5), &thattr, statusThread, NULL);
|
||||
taosThreadCreate(&(t6), &thattr, fetchQueueThread, mgmt);
|
||||
|
||||
while (true) {
|
||||
|
|
|
@ -8,7 +8,7 @@ AUX_SOURCE_DIRECTORY(${CMAKE_CURRENT_SOURCE_DIR} SOURCE_LIST)
|
|||
ADD_EXECUTABLE(scalarTest ${SOURCE_LIST})
|
||||
TARGET_LINK_LIBRARIES(
|
||||
scalarTest
|
||||
PUBLIC os util common gtest qcom function nodes scalar parser
|
||||
PUBLIC os util common gtest qcom function nodes scalar parser catalog transport
|
||||
)
|
||||
|
||||
TARGET_INCLUDE_DIRECTORIES(
|
||||
|
|
|
@ -297,7 +297,7 @@ void schFreeRpcCtx(SRpcCtx *pCtx);
|
|||
int32_t schGetCallbackFp(int32_t msgType, __async_send_cb_fn_t *fp);
|
||||
bool schJobNeedToStop(SSchJob *pJob, int8_t *pStatus);
|
||||
int32_t schProcessOnTaskSuccess(SSchJob *pJob, SSchTask *pTask);
|
||||
int32_t schSaveJobQueryRes(SSchJob *pJob, SResReadyRsp *rsp);
|
||||
int32_t schSaveJobQueryRes(SSchJob *pJob, SQueryTableRsp *rsp);
|
||||
int32_t schProcessOnExplainDone(SSchJob *pJob, SSchTask *pTask, SRetrieveTableRsp *pRsp);
|
||||
void schProcessOnDataFetched(SSchJob *job);
|
||||
int32_t schGetTaskFromTaskList(SHashObj *pTaskList, uint64_t taskId, SSchTask **pTask);
|
||||
|
|
|
@ -1067,7 +1067,7 @@ int32_t schProcessOnExplainDone(SSchJob *pJob, SSchTask *pTask, SRetrieveTableRs
|
|||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
int32_t schSaveJobQueryRes(SSchJob *pJob, SResReadyRsp *rsp) {
|
||||
int32_t schSaveJobQueryRes(SSchJob *pJob, SQueryTableRsp *rsp) {
|
||||
if (rsp->tbFName[0]) {
|
||||
if (NULL == pJob->queryRes) {
|
||||
pJob->queryRes = taosArrayInit(pJob->taskNum, sizeof(STbVerInfo));
|
||||
|
|
|
@ -31,7 +31,7 @@ int32_t schValidateReceivedMsgType(SSchJob *pJob, SSchTask *pTask, int32_t msgTy
|
|||
case TDMT_VND_EXPLAIN_RSP:
|
||||
return TSDB_CODE_SUCCESS;
|
||||
case TDMT_VND_QUERY_RSP: // query_rsp may be processed later than ready_rsp
|
||||
if (lastMsgType != reqMsgType && -1 != lastMsgType && TDMT_VND_FETCH != lastMsgType) {
|
||||
if (lastMsgType != reqMsgType && -1 != lastMsgType) {
|
||||
SCH_TASK_DLOG("rsp msg type mis-match, last sent msgType:%s, rspType:%s", TMSG_INFO(lastMsgType),
|
||||
TMSG_INFO(msgType));
|
||||
}
|
||||
|
@ -41,22 +41,6 @@ int32_t schValidateReceivedMsgType(SSchJob *pJob, SSchTask *pTask, int32_t msgTy
|
|||
TMSG_INFO(msgType));
|
||||
}
|
||||
|
||||
SCH_SET_TASK_LASTMSG_TYPE(pTask, -1);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
case TDMT_VND_RES_READY_RSP:
|
||||
reqMsgType = TDMT_VND_QUERY;
|
||||
if (lastMsgType != reqMsgType && -1 != lastMsgType) {
|
||||
SCH_TASK_ELOG("rsp msg type mis-match, last sent msgType:%s, rspType:%s",
|
||||
(lastMsgType > 0 ? TMSG_INFO(lastMsgType) : "null"), TMSG_INFO(msgType));
|
||||
SCH_ERR_RET(TSDB_CODE_SCH_STATUS_ERROR);
|
||||
}
|
||||
|
||||
if (taskStatus != JOB_TASK_STATUS_EXECUTING && taskStatus != JOB_TASK_STATUS_PARTIAL_SUCCEED) {
|
||||
SCH_TASK_ELOG("rsp msg conflicted with task status, status:%s, rspType:%s", jobTaskStatusStr(taskStatus),
|
||||
TMSG_INFO(msgType));
|
||||
SCH_ERR_RET(TSDB_CODE_SCH_STATUS_ERROR);
|
||||
}
|
||||
|
||||
SCH_SET_TASK_LASTMSG_TYPE(pTask, -1);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
case TDMT_VND_FETCH_RSP:
|
||||
|
@ -231,24 +215,7 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t msgType, ch
|
|||
break;
|
||||
}
|
||||
case TDMT_VND_QUERY_RSP: {
|
||||
SQueryTableRsp rsp = {0};
|
||||
if (msg) {
|
||||
SCH_ERR_JRET(tDeserializeSQueryTableRsp(msg, msgSize, &rsp));
|
||||
SCH_ERR_JRET(rsp.code);
|
||||
}
|
||||
|
||||
SCH_ERR_JRET(rspCode);
|
||||
|
||||
if (NULL == msg) {
|
||||
SCH_ERR_JRET(TSDB_CODE_QRY_INVALID_INPUT);
|
||||
}
|
||||
|
||||
// SCH_ERR_JRET(schBuildAndSendMsg(pJob, pTask, NULL, TDMT_VND_RES_READY));
|
||||
|
||||
break;
|
||||
}
|
||||
case TDMT_VND_RES_READY_RSP: {
|
||||
SResReadyRsp *rsp = (SResReadyRsp *)msg;
|
||||
SQueryTableRsp *rsp = (SQueryTableRsp *)msg;
|
||||
|
||||
SCH_ERR_JRET(rspCode);
|
||||
if (NULL == msg) {
|
||||
|
@ -429,10 +396,6 @@ int32_t schHandleFetchCallback(void *param, const SDataBuf *pMsg, int32_t code)
|
|||
return schHandleCallback(param, pMsg, TDMT_VND_FETCH_RSP, code);
|
||||
}
|
||||
|
||||
int32_t schHandleReadyCallback(void *param, const SDataBuf *pMsg, int32_t code) {
|
||||
return schHandleCallback(param, pMsg, TDMT_VND_RES_READY_RSP, code);
|
||||
}
|
||||
|
||||
int32_t schHandleExplainCallback(void *param, const SDataBuf *pMsg, int32_t code) {
|
||||
return schHandleCallback(param, pMsg, TDMT_VND_EXPLAIN_RSP, code);
|
||||
}
|
||||
|
@ -518,9 +481,6 @@ int32_t schGetCallbackFp(int32_t msgType, __async_send_cb_fn_t *fp) {
|
|||
case TDMT_VND_QUERY:
|
||||
*fp = schHandleQueryCallback;
|
||||
break;
|
||||
case TDMT_VND_RES_READY:
|
||||
*fp = schHandleReadyCallback;
|
||||
break;
|
||||
case TDMT_VND_EXPLAIN:
|
||||
*fp = schHandleExplainCallback;
|
||||
break;
|
||||
|
@ -933,7 +893,6 @@ _return:
|
|||
|
||||
int32_t schMakeQueryRpcCtx(SSchJob *pJob, SSchTask *pTask, SRpcCtx *pCtx) {
|
||||
int32_t code = 0;
|
||||
SMsgSendInfo *pReadyMsgSendInfo = NULL;
|
||||
SMsgSendInfo *pExplainMsgSendInfo = NULL;
|
||||
|
||||
pCtx->args = taosHashInit(1, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), false, HASH_ENTRY_LOCK);
|
||||
|
@ -942,18 +901,10 @@ int32_t schMakeQueryRpcCtx(SSchJob *pJob, SSchTask *pTask, SRpcCtx *pCtx) {
|
|||
SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY);
|
||||
}
|
||||
|
||||
SCH_ERR_JRET(schGenerateCallBackInfo(pJob, pTask, TDMT_VND_RES_READY, &pReadyMsgSendInfo));
|
||||
SCH_ERR_JRET(schGenerateCallBackInfo(pJob, pTask, TDMT_VND_EXPLAIN, &pExplainMsgSendInfo));
|
||||
|
||||
int32_t msgType = TDMT_VND_RES_READY_RSP;
|
||||
SRpcCtxVal ctxVal = {.val = pReadyMsgSendInfo, .clone = schCloneSMsgSendInfo};
|
||||
if (taosHashPut(pCtx->args, &msgType, sizeof(msgType), &ctxVal, sizeof(ctxVal))) {
|
||||
SCH_TASK_ELOG("taosHashPut msg %d to rpcCtx failed", msgType);
|
||||
SCH_ERR_JRET(TSDB_CODE_QRY_OUT_OF_MEMORY);
|
||||
}
|
||||
|
||||
msgType = TDMT_VND_EXPLAIN_RSP;
|
||||
ctxVal.val = pExplainMsgSendInfo;
|
||||
int32_t msgType = TDMT_VND_EXPLAIN_RSP;
|
||||
SRpcCtxVal ctxVal = {.val = pExplainMsgSendInfo, .clone = schCloneSMsgSendInfo};
|
||||
if (taosHashPut(pCtx->args, &msgType, sizeof(msgType), &ctxVal, sizeof(ctxVal))) {
|
||||
SCH_TASK_ELOG("taosHashPut msg %d to rpcCtx failed", msgType);
|
||||
SCH_ERR_JRET(TSDB_CODE_QRY_OUT_OF_MEMORY);
|
||||
|
@ -968,11 +919,6 @@ _return:
|
|||
|
||||
taosHashCleanup(pCtx->args);
|
||||
|
||||
if (pReadyMsgSendInfo) {
|
||||
taosMemoryFreeClear(pReadyMsgSendInfo->param);
|
||||
taosMemoryFreeClear(pReadyMsgSendInfo);
|
||||
}
|
||||
|
||||
if (pExplainMsgSendInfo) {
|
||||
taosMemoryFreeClear(pExplainMsgSendInfo->param);
|
||||
taosMemoryFreeClear(pExplainMsgSendInfo);
|
||||
|
@ -1128,24 +1074,6 @@ int32_t schBuildAndSendMsg(SSchJob *pJob, SSchTask *pTask, SQueryNodeAddr *addr,
|
|||
persistHandle = true;
|
||||
break;
|
||||
}
|
||||
|
||||
case TDMT_VND_RES_READY: {
|
||||
msgSize = sizeof(SResReadyReq);
|
||||
msg = taosMemoryCalloc(1, msgSize);
|
||||
if (NULL == msg) {
|
||||
SCH_TASK_ELOG("calloc %d failed", msgSize);
|
||||
SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY);
|
||||
}
|
||||
|
||||
SResReadyReq *pMsg = msg;
|
||||
|
||||
pMsg->header.vgId = htonl(addr->nodeId);
|
||||
|
||||
pMsg->sId = htobe64(schMgmt.sId);
|
||||
pMsg->queryId = htobe64(pJob->queryId);
|
||||
pMsg->taskId = htobe64(pTask->taskId);
|
||||
break;
|
||||
}
|
||||
case TDMT_VND_FETCH: {
|
||||
msgSize = sizeof(SResFetchReq);
|
||||
msg = taosMemoryCalloc(1, msgSize);
|
||||
|
|
|
@ -542,27 +542,6 @@ void* schtRunJobThread(void *aa) {
|
|||
pIter = taosHashIterate(execTasks, pIter);
|
||||
}
|
||||
|
||||
|
||||
param = (SSchTaskCallbackParam *)taosMemoryCalloc(1, sizeof(*param));
|
||||
param->refId = queryJobRefId;
|
||||
param->queryId = pJob->queryId;
|
||||
|
||||
pIter = taosHashIterate(execTasks, NULL);
|
||||
while (pIter) {
|
||||
SSchTask *task = (SSchTask *)pIter;
|
||||
|
||||
param->taskId = task->taskId;
|
||||
SResReadyRsp rsp = {0};
|
||||
dataBuf.pData = &rsp;
|
||||
dataBuf.len = sizeof(rsp);
|
||||
|
||||
code = schHandleCallback(param, &dataBuf, TDMT_VND_RES_READY_RSP, 0);
|
||||
assert(code == 0 || code);
|
||||
|
||||
pIter = taosHashIterate(execTasks, pIter);
|
||||
}
|
||||
|
||||
|
||||
param = (SSchTaskCallbackParam *)taosMemoryCalloc(1, sizeof(*param));
|
||||
param->refId = queryJobRefId;
|
||||
param->queryId = pJob->queryId;
|
||||
|
@ -583,25 +562,6 @@ void* schtRunJobThread(void *aa) {
|
|||
}
|
||||
|
||||
|
||||
param = (SSchTaskCallbackParam *)taosMemoryCalloc(1, sizeof(*param));
|
||||
param->refId = queryJobRefId;
|
||||
param->queryId = pJob->queryId;
|
||||
|
||||
pIter = taosHashIterate(execTasks, NULL);
|
||||
while (pIter) {
|
||||
SSchTask *task = (SSchTask *)pIter;
|
||||
|
||||
param->taskId = task->taskId - 1;
|
||||
SResReadyRsp rsp = {0};
|
||||
dataBuf.pData = &rsp;
|
||||
dataBuf.len = sizeof(rsp);
|
||||
|
||||
code = schHandleCallback(param, &dataBuf, TDMT_VND_RES_READY_RSP, 0);
|
||||
assert(code == 0 || code);
|
||||
|
||||
pIter = taosHashIterate(execTasks, pIter);
|
||||
}
|
||||
|
||||
while (true) {
|
||||
if (queryDone) {
|
||||
break;
|
||||
|
@ -701,17 +661,6 @@ TEST(queryTest, normalCase) {
|
|||
pIter = taosHashIterate(pJob->execTasks, pIter);
|
||||
}
|
||||
|
||||
pIter = taosHashIterate(pJob->execTasks, NULL);
|
||||
while (pIter) {
|
||||
SSchTask *task = *(SSchTask **)pIter;
|
||||
|
||||
SResReadyRsp rsp = {0};
|
||||
code = schHandleResponseMsg(pJob, task, TDMT_VND_RES_READY_RSP, (char *)&rsp, sizeof(rsp), 0);
|
||||
printf("code:%d", code);
|
||||
ASSERT_EQ(code, 0);
|
||||
pIter = taosHashIterate(pJob->execTasks, pIter);
|
||||
}
|
||||
|
||||
pIter = taosHashIterate(pJob->execTasks, NULL);
|
||||
while (pIter) {
|
||||
SSchTask *task = *(SSchTask **)pIter;
|
||||
|
@ -723,17 +672,6 @@ TEST(queryTest, normalCase) {
|
|||
pIter = taosHashIterate(pJob->execTasks, pIter);
|
||||
}
|
||||
|
||||
pIter = taosHashIterate(pJob->execTasks, NULL);
|
||||
while (pIter) {
|
||||
SSchTask *task = *(SSchTask **)pIter;
|
||||
|
||||
SResReadyRsp rsp = {0};
|
||||
code = schHandleResponseMsg(pJob, task, TDMT_VND_RES_READY_RSP, (char *)&rsp, sizeof(rsp), 0);
|
||||
ASSERT_EQ(code, 0);
|
||||
|
||||
pIter = taosHashIterate(pJob->execTasks, pIter);
|
||||
}
|
||||
|
||||
while (true) {
|
||||
if (queryDone) {
|
||||
break;
|
||||
|
@ -804,19 +742,8 @@ TEST(queryTest, readyFirstCase) {
|
|||
|
||||
|
||||
SSchJob *pJob = schAcquireJob(job);
|
||||
|
||||
void *pIter = taosHashIterate(pJob->execTasks, NULL);
|
||||
while (pIter) {
|
||||
SSchTask *task = *(SSchTask **)pIter;
|
||||
|
||||
SResReadyRsp rsp = {0};
|
||||
code = schHandleResponseMsg(pJob, task, TDMT_VND_RES_READY_RSP, (char *)&rsp, sizeof(rsp), 0);
|
||||
printf("code:%d", code);
|
||||
ASSERT_EQ(code, 0);
|
||||
pIter = taosHashIterate(pJob->execTasks, pIter);
|
||||
}
|
||||
|
||||
pIter = taosHashIterate(pJob->execTasks, NULL);
|
||||
void *pIter = taosHashIterate(pJob->execTasks, NULL);
|
||||
while (pIter) {
|
||||
SSchTask *task = *(SSchTask **)pIter;
|
||||
|
||||
|
@ -827,17 +754,6 @@ TEST(queryTest, readyFirstCase) {
|
|||
pIter = taosHashIterate(pJob->execTasks, pIter);
|
||||
}
|
||||
|
||||
pIter = taosHashIterate(pJob->execTasks, NULL);
|
||||
while (pIter) {
|
||||
SSchTask *task = *(SSchTask **)pIter;
|
||||
|
||||
SResReadyRsp rsp = {0};
|
||||
code = schHandleResponseMsg(pJob, task, TDMT_VND_RES_READY_RSP, (char *)&rsp, sizeof(rsp), 0);
|
||||
ASSERT_EQ(code, 0);
|
||||
|
||||
pIter = taosHashIterate(pJob->execTasks, pIter);
|
||||
}
|
||||
|
||||
pIter = taosHashIterate(pJob->execTasks, NULL);
|
||||
while (pIter) {
|
||||
SSchTask *task = *(SSchTask **)pIter;
|
||||
|
@ -942,10 +858,6 @@ TEST(queryTest, flowCtrlCase) {
|
|||
SQueryTableRsp rsp = {0};
|
||||
code = schHandleResponseMsg(pJob, task, TDMT_VND_QUERY_RSP, (char *)&rsp, sizeof(rsp), 0);
|
||||
|
||||
ASSERT_EQ(code, 0);
|
||||
} else if (task->lastMsgType == TDMT_VND_RES_READY) {
|
||||
SResReadyRsp rsp = {0};
|
||||
code = schHandleResponseMsg(pJob, task, TDMT_VND_RES_READY_RSP, (char *)&rsp, sizeof(rsp), 0);
|
||||
ASSERT_EQ(code, 0);
|
||||
} else {
|
||||
qDone = true;
|
||||
|
|
|
@ -60,7 +60,9 @@ void syncIndexMgrSetIndex(SSyncIndexMgr *pSyncIndexMgr, const SRaftId *pRaftId,
|
|||
return;
|
||||
}
|
||||
}
|
||||
assert(0);
|
||||
|
||||
// maybe config change
|
||||
// assert(0);
|
||||
}
|
||||
|
||||
SyncIndex syncIndexMgrGetIndex(SSyncIndexMgr *pSyncIndexMgr, const SRaftId *pRaftId) {
|
||||
|
|
|
@ -981,6 +981,7 @@ char* syncNode2SimpleStr(const SSyncNode* pSyncNode) {
|
|||
}
|
||||
|
||||
void syncNodeUpdateConfig(SSyncNode* pSyncNode, SSyncCfg* newConfig, bool* isDrop) {
|
||||
SSyncCfg oldConfig = pSyncNode->pRaftCfg->cfg;
|
||||
pSyncNode->pRaftCfg->cfg = *newConfig;
|
||||
int32_t ret = 0;
|
||||
|
||||
|
@ -1014,6 +1015,15 @@ void syncNodeUpdateConfig(SSyncNode* pSyncNode, SSyncCfg* newConfig, bool* isDro
|
|||
|
||||
// isDrop
|
||||
*isDrop = true;
|
||||
bool IamInOld, IamInNew;
|
||||
for (int i = 0; i < oldConfig.replicaNum; ++i) {
|
||||
if (strcmp((oldConfig.nodeInfo)[i].nodeFqdn, pSyncNode->myNodeInfo.nodeFqdn) == 0 &&
|
||||
(oldConfig.nodeInfo)[i].nodePort == pSyncNode->myNodeInfo.nodePort) {
|
||||
*isDrop = false;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
for (int i = 0; i < newConfig->replicaNum; ++i) {
|
||||
if (strcmp((newConfig->nodeInfo)[i].nodeFqdn, pSyncNode->myNodeInfo.nodeFqdn) == 0 &&
|
||||
(newConfig->nodeInfo)[i].nodePort == pSyncNode->myNodeInfo.nodePort) {
|
||||
|
|
|
@ -112,7 +112,8 @@ typedef struct SCvtAddr {
|
|||
} SCvtAddr;
|
||||
|
||||
typedef struct {
|
||||
SEpSet epSet; // ip list provided by app
|
||||
SEpSet epSet; // ip list provided by app
|
||||
SEpSet origEpSet;
|
||||
void* ahandle; // handle provided by app
|
||||
tmsg_t msgType; // message type
|
||||
int8_t connType; // connection type cli/srv
|
||||
|
@ -217,6 +218,22 @@ SAsyncPool* transCreateAsyncPool(uv_loop_t* loop, int sz, void* arg, AsyncCB cb)
|
|||
void transDestroyAsyncPool(SAsyncPool* pool);
|
||||
int transSendAsync(SAsyncPool* pool, queue* mq);
|
||||
|
||||
#define TRANS_DESTROY_ASYNC_POOL_MSG(pool, msgType, freeFunc)  \
  do {                                                         \
    for (int i = 0; i < pool->nAsync; i++) {                   \
      uv_async_t* async = &(pool->asyncs[i]);                  \
      SAsyncItem* item = async->data;                          \
      while (!QUEUE_IS_EMPTY(&item->qmsg)) {                   \
        tTrace("destroy msg in async pool ");                  \
        queue* h = QUEUE_HEAD(&item->qmsg);                    \
        QUEUE_REMOVE(h);                                       \
        msgType* msg = QUEUE_DATA(h, msgType, q);              \
        if (msg != NULL) {                                     \
          freeFunc(msg);                                       \
        }                                                      \
      }                                                        \
    }                                                          \
  } while (0)

int transInitBuffer(SConnBuffer* buf);
|
||||
int transClearBuffer(SConnBuffer* buf);
|
||||
int transDestroyBuffer(SConnBuffer* buf);
|
||||
|
@ -328,6 +345,7 @@ void transDQDestroy(SDelayQueue* queue);
|
|||
int transDQSched(SDelayQueue* queue, void (*func)(void* arg), void* arg, uint64_t timeoutMs);
|
||||
|
||||
void transPrintEpSet(SEpSet* pEpSet);
|
||||
bool transEpSetIsEqual(SEpSet* a, SEpSet* b);
|
||||
/*
|
||||
* init global func
|
||||
*/
|
||||
|
|
|
@ -892,6 +892,7 @@ static void destroyThrdObj(SCliThrdObj* pThrd) {
|
|||
taosThreadJoin(pThrd->thread, NULL);
|
||||
CLI_RELEASE_UV(pThrd->loop);
|
||||
taosThreadMutexDestroy(&pThrd->msgMtx);
|
||||
TRANS_DESTROY_ASYNC_POOL_MSG(pThrd->asyncPool, SCliMsg, destroyCmsg);
|
||||
transDestroyAsyncPool(pThrd->asyncPool);
|
||||
|
||||
transDQDestroy(pThrd->delayQueue);
|
||||
|
@ -946,6 +947,9 @@ int cliAppCb(SCliConn* pConn, STransMsg* pResp, SCliMsg* pMsg) {
|
|||
SEpSet* pEpSet = &pCtx->epSet;
|
||||
transPrintEpSet(pEpSet);
|
||||
|
||||
if (pCtx->retryCount == 0) {
|
||||
pCtx->origEpSet = pCtx->epSet;
|
||||
}
|
||||
/*
|
||||
* upper layer handle retry if code equal TSDB_CODE_RPC_NETWORK_UNAVAIL
|
||||
*/
|
||||
|
@ -971,9 +975,9 @@ int cliAppCb(SCliConn* pConn, STransMsg* pResp, SCliMsg* pMsg) {
|
|||
if (pResp->contLen == 0) {
|
||||
pEpSet->inUse = (++pEpSet->inUse) % pEpSet->numOfEps;
|
||||
} else {
|
||||
SMEpSet emsg = {0};
|
||||
tDeserializeSMEpSet(pResp->pCont, pResp->contLen, &emsg);
|
||||
pCtx->epSet = emsg.epSet;
|
||||
SEpSet epSet = {0};
|
||||
tDeserializeSEpSet(pResp->pCont, pResp->contLen, &epSet);
|
||||
pCtx->epSet = epSet;
|
||||
}
|
||||
addConnToPool(pThrd->pool, pConn);
|
||||
tTrace("use remote epset, current in use: %d, retry count:%d, try limit: %d", pEpSet->inUse, pCtx->retryCount + 1,
|
||||
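The change above removes the SMEpSet wrapper: when a response carries a payload, the body is now decoded directly into an SEpSet and becomes the target for the retry. A minimal sketch of the pattern, assuming tDeserializeSEpSet returns 0 on success (the call site shown here ignores the return value):

SEpSet redirect = {0};
if (pResp->contLen > 0 && tDeserializeSEpSet(pResp->pCont, pResp->contLen, &redirect) == 0) {
  pCtx->epSet = redirect;  // retry against the epset suggested by the server
} else {
  pEpSet->inUse = (pEpSet->inUse + 1) % pEpSet->numOfEps;  // no payload: rotate locally
}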
|
@ -998,7 +1002,7 @@ int cliAppCb(SCliConn* pConn, STransMsg* pResp, SCliMsg* pMsg) {
|
|||
pCtx->pRsp = NULL;
|
||||
} else {
|
||||
tTrace("%s cli conn %p handle resp", pTransInst->label, pConn);
|
||||
if (pResp->code != 0) {
|
||||
if (pResp->code != 0 || pCtx->retryCount == 0 || transEpSetIsEqual(&pCtx->epSet, &pCtx->origEpSet)) {
|
||||
pTransInst->cfp(pTransInst->parent, pResp, NULL);
|
||||
} else {
|
||||
pTransInst->cfp(pTransInst->parent, pResp, pEpSet);
|
||||
|
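The callback change above only hands an epset back to the upper layer when a retry actually landed on an epset different from the one the request started with; otherwise it passes NULL so the caller keeps its current view. A condensed sketch of that gating, following the condition in the hunk:

bool keepCurrent = (pResp->code != 0) || (pCtx->retryCount == 0) ||
                   transEpSetIsEqual(&pCtx->epSet, &pCtx->origEpSet);
pTransInst->cfp(pTransInst->parent, pResp, keepCurrent ? NULL : &pCtx->epSet);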
|
|
@ -190,6 +190,7 @@ SAsyncPool* transCreateAsyncPool(uv_loop_t* loop, int sz, void* arg, AsyncCB cb)
|
|||
}
|
||||
return pool;
|
||||
}
|
||||
|
||||
void transDestroyAsyncPool(SAsyncPool* pool) {
|
||||
for (int i = 0; i < pool->nAsync; i++) {
|
||||
uv_async_t* async = &(pool->asyncs[i]);
|
||||
|
@ -452,10 +453,21 @@ void transPrintEpSet(SEpSet* pEpSet) {
|
|||
tTrace("NULL epset");
|
||||
return;
|
||||
}
|
||||
tTrace("epset begin: inUse: %d", pEpSet->inUse);
|
||||
tTrace("epset begin inUse: %d", pEpSet->inUse);
|
||||
for (int i = 0; i < pEpSet->numOfEps; i++) {
|
||||
tTrace("ip: %s, port: %d", pEpSet->eps[i].fqdn, pEpSet->eps[i].port);
|
||||
}
|
||||
tTrace("epset end");
|
||||
}
|
||||
bool transEpSetIsEqual(SEpSet* a, SEpSet* b) {
|
||||
if (a->numOfEps != b->numOfEps || a->inUse != b->inUse) {
|
||||
return false;
|
||||
}
|
||||
for (int i = 0; i < a->numOfEps; i++) {
|
||||
if (strncmp(a->eps[i].fqdn, b->eps[i].fqdn, TSDB_FQDN_LEN) != 0 || a->eps[i].port != b->eps[i].port) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
#endif
|
||||
|
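transEpSetIsEqual treats two epsets as equal only when numOfEps, inUse and every fqdn/port pair match, so epsets that list the same endpoints but track a different inUse index compare as different. A small illustration (the hostnames are made up):

SEpSet a = {0};
a.numOfEps = 2;
a.inUse = 0;
strcpy(a.eps[0].fqdn, "node1.example"); a.eps[0].port = 6030;
strcpy(a.eps[1].fqdn, "node2.example"); a.eps[1].port = 6030;

SEpSet b = a;
b.inUse = 1;  // same endpoints, different inUse
// transEpSetIsEqual(&a, &b) == false, because inUse participates in the comparison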
|
|
@ -1036,6 +1036,7 @@ void destroyWorkThrd(SWorkThrdObj* pThrd) {
|
|||
}
|
||||
taosThreadJoin(pThrd->thread, NULL);
|
||||
SRV_RELEASE_UV(pThrd->loop);
|
||||
TRANS_DESTROY_ASYNC_POOL_MSG(pThrd->asyncPool, SSrvMsg, destroySmsg);
|
||||
transDestroyAsyncPool(pThrd->asyncPool);
|
||||
taosMemoryFree(pThrd->loop);
|
||||
taosMemoryFree(pThrd);
|
||||
|
|
|
@ -447,9 +447,10 @@ TAOS_DEFINE_ERROR(TSDB_CODE_QW_MSG_ERROR, "Invalid msg order")
|
|||
// parser
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_PAR_TABLE_NOT_EXIST, "Table does not exist")
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_PAR_PERMISSION_DENIED, "Permission denied")
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INTERNAL_ERROR, "Parser internal error")
|
||||
|
||||
//planner
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_PLAN_INTERNAL_ERROR, "planner internal error")
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_PLAN_INTERNAL_ERROR, "Planner internal error")
|
||||
|
||||
//udf
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_UDF_STOPPING, "udf is stopping")
|
||||
|
|
|
@ -1,9 +1,6 @@
|
|||
system sh/stop_dnodes.sh
|
||||
|
||||
system sh/deploy.sh -n dnode1 -i 1
|
||||
system sh/cfg.sh -n dnode1 -c wallevel -v 2
|
||||
system sh/exec.sh -n dnode1 -s start
|
||||
sleep 2000
|
||||
sql connect
|
||||
|
||||
$db = csaa_db
|
||||
|
|
|
@ -1,10 +1,6 @@
|
|||
system sh/stop_dnodes.sh
|
||||
|
||||
system sh/deploy.sh -n dnode1 -i 1
|
||||
system sh/cfg.sh -n dnode1 -c walLevel -v 2
|
||||
system sh/exec.sh -n dnode1 -s start
|
||||
|
||||
sleep 2000
|
||||
sql connect
|
||||
|
||||
print ======== step1
|
||||
|
|
|
@ -1,13 +1,8 @@
|
|||
system sh/stop_dnodes.sh
|
||||
|
||||
system sh/deploy.sh -n dnode1 -i 1
|
||||
system sh/cfg.sh -n dnode1 -c wallevel -v 2
|
||||
system sh/cfg.sh -n dnode1 -c numOfMnodes -v 1
|
||||
system sh/cfg.sh -n dnode1 -c mnodeEqualVnodeNum -v 4
|
||||
|
||||
print ========= start dnode1 as master
|
||||
system sh/exec.sh -n dnode1 -s start
|
||||
sleep 2000
|
||||
sql connect
|
||||
|
||||
print ======== step1
|
||||
|
|
|
@ -1,10 +1,6 @@
|
|||
system sh/stop_dnodes.sh
|
||||
|
||||
system sh/deploy.sh -n dnode1 -i 1
|
||||
system sh/cfg.sh -n dnode1 -c wallevel -v 2
|
||||
system sh/exec.sh -n dnode1 -s start
|
||||
|
||||
sleep 2000
|
||||
sql connect
|
||||
|
||||
print ======== step1
|
||||
|
|
|
@ -1,10 +1,6 @@
|
|||
system sh/stop_dnodes.sh
|
||||
|
||||
system sh/deploy.sh -n dnode1 -i 1
|
||||
system sh/cfg.sh -n dnode1 -c wallevel -v 2
|
||||
system sh/exec.sh -n dnode1 -s start
|
||||
|
||||
sleep 2000
|
||||
sql connect
|
||||
|
||||
print ======== step1
|
||||
|
|
|
@ -1,10 +1,6 @@
|
|||
system sh/stop_dnodes.sh
|
||||
|
||||
system sh/deploy.sh -n dnode1 -i 1
|
||||
system sh/cfg.sh -n dnode1 -c walLevel -v 2
|
||||
system sh/exec.sh -n dnode1 -s start
|
||||
|
||||
sleep 2000
|
||||
sql connect
|
||||
|
||||
print ======== step1
|
||||
|
|
|
@ -1,10 +1,6 @@
|
|||
system sh/stop_dnodes.sh
|
||||
|
||||
system sh/deploy.sh -n dnode1 -i 1
|
||||
system sh/cfg.sh -n dnode1 -c walLevel -v 2
|
||||
system sh/exec.sh -n dnode1 -s start
|
||||
|
||||
sleep 2000
|
||||
sql connect
|
||||
|
||||
print ======== step1
|
||||
|
|
|
@ -1,7 +0,0 @@
|
|||
run general/alter/cached_schema_after_alter.sim
|
||||
run general/alter/count.sim
|
||||
run general/alter/import.sim
|
||||
run general/alter/insert1.sim
|
||||
run general/alter/insert2.sim
|
||||
run general/alter/metrics.sim
|
||||
run general/alter/table.sim
|
|
@ -55,8 +55,9 @@
|
|||
./test.sh -f tsim/bnode/basic1.sim
|
||||
|
||||
# ---- mnode
|
||||
#./test.sh -f tsim/mnode/basic1.sim
|
||||
./test.sh -f tsim/mnode/basic1.sim
|
||||
./test.sh -f tsim/mnode/basic2.sim
|
||||
./test.sh -f tsim/mnode/basic3.sim
|
||||
|
||||
# ---- show
|
||||
./test.sh -f tsim/show/basic.sim
|
||||
|
@ -88,18 +89,21 @@
|
|||
./test.sh -f tsim/tmq/topic.sim
|
||||
|
||||
# --- stable
|
||||
./test.sh -f tsim/stable/alter1.sim
|
||||
./test.sh -f tsim/stable/disk.sim
|
||||
./test.sh -f tsim/stable/dnode3.sim
|
||||
./test.sh -f tsim/stable/metrics.sim
|
||||
./test.sh -f tsim/stable/refcount.sim
|
||||
#./test.sh -f tsim/stable/show.sim
|
||||
./test.sh -f tsim/stable/show.sim
|
||||
./test.sh -f tsim/stable/values.sim
|
||||
./test.sh -f tsim/stable/vnode3.sim
|
||||
./test.sh -f tsim/stable/column_add.sim
|
||||
./test.sh -f tsim/stable/column_drop.sim
|
||||
#./test.sh -f tsim/stable/column_modify.sim
|
||||
|
||||
./test.sh -f tsim/stable/column_modify.sim
|
||||
./test.sh -f tsim/stable/tag_add.sim
|
||||
./test.sh -f tsim/stable/tag_drop.sim
|
||||
./test.sh -f tsim/stable/tag_modify.sim
|
||||
./test.sh -f tsim/stable/tag_rename.sim
|
||||
./test.sh -f tsim/stable/alter_comment.sim
|
||||
|
||||
# --- for multi process mode
|
||||
./test.sh -f tsim/user/basic1.sim -m
|
||||
|
|
|
@ -88,7 +88,7 @@ sql show mnodes
|
|||
print $data(1)[0] $data(1)[1] $data(1)[2]
|
||||
print $data(2)[0] $data(2)[1] $data(2)[2]
|
||||
|
||||
if $rows != 2 then
|
||||
if $rows != 1 then
|
||||
return -1
|
||||
endi
|
||||
if $data(1)[0] != 1 then
|
||||
|
@ -97,16 +97,16 @@ endi
|
|||
if $data(1)[2] != LEADER then
|
||||
return -1
|
||||
endi
|
||||
if $data(2)[0] != NULL then
|
||||
if $data(2)[0] != null then
|
||||
goto step2
|
||||
endi
|
||||
if $data(2)[2] != NULL then
|
||||
if $data(2)[2] != null then
|
||||
goto step2
|
||||
endi
|
||||
|
||||
sleep 2000
|
||||
|
||||
print =============== create drop mnodes
|
||||
print =============== create mnodes
|
||||
sql create mnode on dnode 2
|
||||
sql show mnodes
|
||||
if $rows != 2 then
|
||||
|
|
|
@ -119,9 +119,16 @@ if $data(2)[4] != ready then
|
|||
endi
|
||||
|
||||
print =============== insert data
|
||||
#sql create table db.stb (ts timestamp, c1 int, c2 binary(4)) tags(t1 int, t2 float, t3 binary(16)) comment "abd"
|
||||
#sql create table db.ctb using db.stb tags(101, 102, "103")
|
||||
#sql insert into db.ctb values(now, 1, "2")
|
||||
sql create table db.stb (ts timestamp, c1 int, c2 binary(4)) tags(t1 int, t2 float, t3 binary(16)) comment "abd"
|
||||
sql create table db.ctb using db.stb tags(101, 102, "103")
|
||||
sql insert into db.ctb values(now, 1, "2")
|
||||
|
||||
sql select * from db.ctb
|
||||
print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
|
||||
|
||||
if $rows != 1 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
system sh/exec.sh -n dnode1 -s stop
|
||||
system sh/exec.sh -n dnode2 -s stop
|
|
@ -15,7 +15,7 @@ $x = 0
|
|||
step1:
|
||||
$x = $x + 1
|
||||
sleep 1000
|
||||
if $x == 20 then
|
||||
if $x == 50 then
|
||||
return -1
|
||||
endi
|
||||
sql show dnodes -x step1
|
||||
|
@ -37,7 +37,7 @@ $x = 0
|
|||
step2:
|
||||
$x = $x + 1
|
||||
sleep 1000
|
||||
if $x == 20 then
|
||||
if $x == 50 then
|
||||
return -1
|
||||
endi
|
||||
sql show mnodes -x step2
|
||||
|
@ -68,7 +68,7 @@ $x = 0
|
|||
step4:
|
||||
$x = $x + 1
|
||||
sleep 1000
|
||||
if $x == 20 then
|
||||
if $x == 50 then
|
||||
return -1
|
||||
endi
|
||||
sql show mnodes -x step4
|
||||
|
@ -98,7 +98,7 @@ $x = 0
|
|||
step5:
|
||||
$x = $x + 1
|
||||
sleep 1000
|
||||
if $x == 20 then
|
||||
if $x == 50 then
|
||||
return -1
|
||||
endi
|
||||
sql show mnodes -x step5
|
||||
|
@ -119,7 +119,7 @@ $x = 0
|
|||
step6:
|
||||
$x = $x + 1
|
||||
sleep 1000
|
||||
if $x == 20 then
|
||||
if $x == 50 then
|
||||
return -1
|
||||
endi
|
||||
sql show mnodes -x step6
|
||||
|
|
|
@ -166,4 +166,5 @@ if $data[0][6] != abcde then
|
|||
return -1
|
||||
endi
|
||||
|
||||
return
|
||||
system sh/exec.sh -n dnode1 -s stop -x SIGINT
|
|
@ -1,13 +1,8 @@
|
|||
system sh/stop_dnodes.sh
|
||||
|
||||
system sh/deploy.sh -n dnode1 -i 1
|
||||
system sh/cfg.sh -n dnode1 -c wallevel -v 2
|
||||
system sh/cfg.sh -n dnode1 -c numOfMnodes -v 1
|
||||
system sh/cfg.sh -n dnode1 -c mnodeEqualVnodeNum -v 4
|
||||
|
||||
print ========= start dnode1 as master
|
||||
system sh/exec.sh -n dnode1 -s start
|
||||
sleep 2000
|
||||
sql connect
|
||||
|
||||
print ======== step1
|
||||
|
@ -141,10 +136,11 @@ endi
|
|||
|
||||
print ============= step10
|
||||
system sh/exec.sh -n dnode1 -s stop -x SIGINT
|
||||
sleep 3000
|
||||
system sh/exec.sh -n dnode1 -s start
|
||||
sleep 3000
|
||||
sql connect
|
||||
|
||||
sql select count(a), count(b), count(c), count(d), count(e), count(f), count(g), count(h) from d1.tb;
|
||||
sql select count(a), count(b), count(c), count(d), count(e), count(f), count(g), count(h) from d1.tb;
|
||||
sql select count(a), count(b), count(c), count(d), count(e), count(f), count(g), count(h) from tb
|
||||
if $data00 != 24 then
|
||||
return -1
|
|
@ -47,7 +47,7 @@ endi
|
|||
|
||||
print ========== step2 describe
|
||||
sql describe db.ctb
|
||||
if $rows != 7 then
|
||||
if $rows != 6 then
|
||||
return -1
|
||||
endi
|
||||
if $data[0][0] != ts then
|
||||
|
@ -75,4 +75,32 @@ if $data[5][0] != t3 then
|
|||
return -1
|
||||
endi
|
||||
|
||||
system sh/exec.sh -n dnode1 -s stop -x SIGINT
|
||||
system sh/exec.sh -n dnode1 -s start
|
||||
|
||||
sql connect
|
||||
sql select * from db.ctb
|
||||
|
||||
if $rows != 2 then
|
||||
return -1
|
||||
endi
|
||||
#if $data[0][1] != 1 then
|
||||
# return -1
|
||||
#endi
|
||||
#if $data[0][2] != 1234 then
|
||||
# return -1
|
||||
#endi
|
||||
#if $data[0][3] != 101 then
|
||||
# return -1
|
||||
#endi
|
||||
#if $data[1][1] != 1 then
|
||||
# return -1
|
||||
#endi
|
||||
#if $data[1][2] != 12345 then
|
||||
# return -1
|
||||
#endi
|
||||
#if $data[1][3] != 101 then
|
||||
# return -1
|
||||
#endi
|
||||
|
||||
system sh/exec.sh -n dnode1 -s stop -x SIGINT
|
|
@ -1,17 +1,9 @@
|
|||
system sh/stop_dnodes.sh
|
||||
|
||||
|
||||
system sh/deploy.sh -n dnode1 -i 1
|
||||
system sh/cfg.sh -n dnode1 -c walLevel -v 1
|
||||
system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
|
||||
system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 4
|
||||
system sh/exec.sh -n dnode1 -s start
|
||||
|
||||
sleep 2000
|
||||
sql connect
|
||||
|
||||
print ======================== dnode1 start
|
||||
|
||||
$dbPrefix = d_db
|
||||
$tbPrefix = d_tb
|
||||
$mtPrefix = d_mt
|
||||
|
@ -59,9 +51,8 @@ endi
|
|||
|
||||
sleep 1000
|
||||
system sh/exec.sh -n dnode1 -s stop -x SIGINT
|
||||
sleep 3000
|
||||
sleep 1000
|
||||
system sh/exec.sh -n dnode1 -s start
|
||||
sleep 6000
|
||||
|
||||
sql use $db
|
||||
sql show vgroups
|
||||
|
|
|
@ -1,19 +1,9 @@
|
|||
system sh/stop_dnodes.sh
|
||||
|
||||
system sh/deploy.sh -n dnode1 -i 1
|
||||
system sh/deploy.sh -n dnode2 -i 2
|
||||
system sh/deploy.sh -n dnode3 -i 3
|
||||
system sh/deploy.sh -n dnode4 -i 4
|
||||
system sh/cfg.sh -n dnode1 -c walLevel -v 1
|
||||
system sh/cfg.sh -n dnode2 -c walLevel -v 1
|
||||
system sh/cfg.sh -n dnode3 -c walLevel -v 1
|
||||
system sh/cfg.sh -n dnode4 -c walLevel -v 1
|
||||
# system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
|
||||
# system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4
|
||||
# system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4
|
||||
# system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 4
|
||||
system sh/exec.sh -n dnode1 -s start
|
||||
|
||||
sql connect
|
||||
|
||||
sql create dnode $hostname PORT 7200
|
||||
|
|
|
@ -1,10 +1,6 @@
|
|||
system sh/stop_dnodes.sh
|
||||
system sh/deploy.sh -n dnode1 -i 1
|
||||
system sh/cfg.sh -n dnode1 -c walLevel -v 1
|
||||
system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
|
||||
system sh/exec.sh -n dnode1 -s start
|
||||
|
||||
sleep 1000
|
||||
sql connect
|
||||
|
||||
$dbPrefix = m_me_db
|
||||
|
|
|
@ -1,11 +1,6 @@
|
|||
system sh/stop_dnodes.sh
|
||||
|
||||
system sh/deploy.sh -n dnode1 -i 1
|
||||
system sh/cfg.sh -n dnode1 -c walLevel -v 1
|
||||
system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
|
||||
system sh/exec.sh -n dnode1 -s start
|
||||
|
||||
sleep 2000
|
||||
sql connect
|
||||
|
||||
print =============== step1
|
||||
|
|
|
@ -1,14 +1,9 @@
|
|||
system sh/stop_dnodes.sh
|
||||
|
||||
system sh/deploy.sh -n dnode1 -i 1
|
||||
system sh/cfg.sh -n dnode1 -c walLevel -v 1
|
||||
system sh/exec.sh -n dnode1 -s start
|
||||
|
||||
sleep 2000
|
||||
sql connect
|
||||
|
||||
print ======================== create stable
|
||||
|
||||
sql create database d1
|
||||
sql use d1
|
||||
|
||||
|
|
|
@ -0,0 +1,193 @@
|
|||
system sh/stop_dnodes.sh
|
||||
system sh/deploy.sh -n dnode1 -i 1
|
||||
system sh/exec.sh -n dnode1 -s start
|
||||
sql connect
|
||||
|
||||
print ========== prepare stb and ctb
|
||||
sql create database db vgroups 1
|
||||
sql create table db.stb (ts timestamp, c1 int, c2 binary(4)) tags(t1 int, t2 binary(16)) comment "abd"
|
||||
sql create table db.ctb using db.stb tags(101, "102")
|
||||
sql insert into db.ctb values(now, 1, "2")
|
||||
|
||||
sql show db.stables
|
||||
if $rows != 1 then
|
||||
return -1
|
||||
endi
|
||||
if $data[0][0] != stb then
|
||||
return -1
|
||||
endi
|
||||
if $data[0][1] != db then
|
||||
return -1
|
||||
endi
|
||||
if $data[0][3] != 3 then
|
||||
return -1
|
||||
endi
|
||||
if $data[0][4] != 2 then
|
||||
return -1
|
||||
endi
|
||||
if $data[0][6] != abd then
|
||||
return -1
|
||||
endi
|
||||
|
||||
sql show db.tables
|
||||
if $rows != 1 then
|
||||
return -1
|
||||
endi
|
||||
if $data[0][0] != ctb then
|
||||
return -1
|
||||
endi
|
||||
if $data[0][1] != db then
|
||||
return -1
|
||||
endi
|
||||
if $data[0][3] != 3 then
|
||||
return -1
|
||||
endi
|
||||
if $data[0][4] != stb then
|
||||
return -1
|
||||
endi
|
||||
if $data[0][6] != 2 then
|
||||
return -1
|
||||
endi
|
||||
if $data[0][9] != CHILD_TABLE then
|
||||
return -1
|
||||
endi
|
||||
|
||||
sql select * from db.stb
|
||||
if $rows != 1 then
|
||||
return -1
|
||||
endi
|
||||
if $data[0][1] != 1 then
|
||||
return -1
|
||||
endi
|
||||
if $data[0][2] != 2 then
|
||||
return -1
|
||||
endi
|
||||
if $data[0][3] != 101 then
|
||||
return -1
|
||||
endi
|
||||
if $data[0][4] != 102 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
sql_error alter table db.stb add tag ts int
|
||||
sql_error alter table db.stb add tag t1 int
|
||||
sql_error alter table db.stb add tag t2 int
|
||||
sql_error alter table db.stb add tag c1 int
|
||||
sql_error alter table db.stb add tag c2 int
|
||||
|
||||
print ========== step1 add tag t3
|
||||
sql alter table db.stb add tag t3 int
|
||||
|
||||
sql show db.stables
|
||||
if $data[0][3] != 3 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
sql show db.tables
|
||||
if $data[0][3] != 3 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
sql describe db.ctb
|
||||
if $rows != 6 then
|
||||
return -1
|
||||
endi
|
||||
if $data[5][0] != t3 then
|
||||
return -1
|
||||
endi
|
||||
if $data[5][1] != INT then
|
||||
return -1
|
||||
endi
|
||||
if $data[5][2] != 4 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
sql select * from db.stb
|
||||
print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
|
||||
if $rows != 1 then
|
||||
return -1
|
||||
endi
|
||||
if $data[0][1] != 1 then
|
||||
return -1
|
||||
endi
|
||||
if $data[0][2] != 2 then
|
||||
return -1
|
||||
endi
|
||||
if $data[0][3] != 101 then
|
||||
return -1
|
||||
endi
|
||||
if $data[0][4] != 102 then
|
||||
return -1
|
||||
endi
|
||||
if $data[0][5] != NULL then
|
||||
return -1
|
||||
endi
|
||||
|
||||
print ========== step2 add tag t4
|
||||
sql alter table db.stb add tag t4 bigint
|
||||
sql select * from db.stb
|
||||
sql select * from db.stb
|
||||
print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
|
||||
print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6]
|
||||
print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6]
|
||||
|
||||
if $rows != 1 then
|
||||
return -1
|
||||
endi
|
||||
if $data[0][1] != 1 then
|
||||
return -1
|
||||
endi
|
||||
if $data[0][2] != 2 then
|
||||
return -1
|
||||
endi
|
||||
if $data[0][3] != 101 then
|
||||
return -1
|
||||
endi
|
||||
if $data[0][4] != 102 then
|
||||
return -1
|
||||
endi
|
||||
if $data[0][5] != NULL then
|
||||
return -1
|
||||
endi
|
||||
if $data[0][6] != NULL then
|
||||
return -1
|
||||
endi
|
||||
|
||||
sql_error create table db.ctb2 using db.stb tags(101, "102")
|
||||
sql create table db.ctb2 using db.stb tags(101, "102", 103, 104)
|
||||
sql insert into db.ctb2 values(now, 1, "2")
|
||||
|
||||
sql select * from db.stb where tbname = 'ctb2';
|
||||
print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
|
||||
print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6]
|
||||
print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6]
|
||||
|
||||
if $rows != 1 then
|
||||
return -1
|
||||
endi
|
||||
if $data[0][1] != 1 then
|
||||
return -1
|
||||
endi
|
||||
if $data[0][2] != 2 then
|
||||
return -1
|
||||
endi
|
||||
if $data[0][3] != 101 then
|
||||
return -1
|
||||
endi
|
||||
if $data[0][4] != 102 then
|
||||
return -1
|
||||
endi
|
||||
if $data[0][5] != 103 then
|
||||
return -1
|
||||
endi
|
||||
if $data[0][6] != 104 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
print ========== step3 describe
|
||||
sql describe db.ctb
|
||||
if $rows != 7 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
system sh/exec.sh -n dnode1 -s stop -x SIGINT
|
|
@ -0,0 +1,337 @@
|
|||
system sh/stop_dnodes.sh
|
||||
system sh/deploy.sh -n dnode1 -i 1
|
||||
system sh/exec.sh -n dnode1 -s start
|
||||
sql connect
|
||||
|
||||
print ========== prepare stb and ctb
|
||||
sql create database db vgroups 1
|
||||
sql create table db.stb (ts timestamp, c1 int, c2 binary(4)) tags(t1 int, t2 binary(16)) comment "abd"
|
||||
sql create table db.ctb using db.stb tags(101, "102")
|
||||
sql insert into db.ctb values(now, 1, "2")
|
||||
|
||||
sql show db.stables
|
||||
if $rows != 1 then
|
||||
return -1
|
||||
endi
|
||||
if $data[0][0] != stb then
|
||||
return -1
|
||||
endi
|
||||
if $data[0][1] != db then
|
||||
return -1
|
||||
endi
|
||||
if $data[0][3] != 3 then
|
||||
return -1
|
||||
endi
|
||||
if $data[0][4] != 2 then
|
||||
return -1
|
||||
endi
|
||||
if $data[0][6] != abd then
|
||||
return -1
|
||||
endi
|
||||
|
||||
sql show db.tables
|
||||
if $rows != 1 then
|
||||
return -1
|
||||
endi
|
||||
if $data[0][0] != ctb then
|
||||
return -1
|
||||
endi
|
||||
if $data[0][1] != db then
|
||||
return -1
|
||||
endi
|
||||
if $data[0][3] != 3 then
|
||||
return -1
|
||||
endi
|
||||
if $data[0][4] != stb then
|
||||
return -1
|
||||
endi
|
||||
if $data[0][6] != 2 then
|
||||
return -1
|
||||
endi
|
||||
if $data[0][9] != CHILD_TABLE then
|
||||
return -1
|
||||
endi
|
||||
|
||||
sql select * from db.stb
|
||||
if $rows != 1 then
|
||||
return -1
|
||||
endi
|
||||
if $data[0][1] != 1 then
|
||||
return -1
|
||||
endi
|
||||
if $data[0][2] != 2 then
|
||||
return -1
|
||||
endi
|
||||
if $data[0][3] != 101 then
|
||||
return -1
|
||||
endi
|
||||
if $data[0][4] != 102 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
sql_error alter table db.stb drop tag ts int
|
||||
sql_error alter table db.stb drop tag t3 int
|
||||
sql_error alter table db.stb drop tag t4 int
|
||||
sql_error alter table db.stb drop tag c1 int
|
||||
sql_error alter table db.stb drop tag c2 int
|
||||
|
||||
print ========== step1 drop tag t2
|
||||
sql alter table db.stb drop tag t2
|
||||
|
||||
sql show db.stables
|
||||
if $data[0][4] != 1 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
sql describe db.ctb
|
||||
if $rows != 4 then
|
||||
return -1
|
||||
endi
|
||||
if $data[4][0] != null then
|
||||
return -1
|
||||
endi
|
||||
|
||||
sql select * from db.stb
|
||||
print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
|
||||
if $rows != 1 then
|
||||
return -1
|
||||
endi
|
||||
if $data[0][1] != 1 then
|
||||
return -1
|
||||
endi
|
||||
if $data[0][2] != 2 then
|
||||
return -1
|
||||
endi
|
||||
if $data[0][3] != 101 then
|
||||
return -1
|
||||
endi
|
||||
if $data[0][4] != null then
|
||||
return -1
|
||||
endi
|
||||
|
||||
print ========== step2 add tag t3
|
||||
sql alter table db.stb add tag t3 int
|
||||
|
||||
sql show db.stables
|
||||
if $data[0][4] != 2 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
sql describe db.ctb
|
||||
if $rows != 5 then
|
||||
return -1
|
||||
endi
|
||||
if $data[4][0] != t3 then
|
||||
return -1
|
||||
endi
|
||||
if $data[4][1] != INT then
|
||||
return -1
|
||||
endi
|
||||
if $data[4][2] != 4 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
sql select * from db.stb
|
||||
print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
|
||||
if $rows != 1 then
|
||||
return -1
|
||||
endi
|
||||
if $data[0][1] != 1 then
|
||||
return -1
|
||||
endi
|
||||
if $data[0][2] != 2 then
|
||||
return -1
|
||||
endi
|
||||
if $data[0][3] != 101 then
|
||||
return -1
|
||||
endi
|
||||
if $data[0][4] != NULL then
|
||||
return -1
|
||||
endi
|
||||
|
||||
print ========== step3 add tag t4
|
||||
sql alter table db.stb add tag t4 bigint
|
||||
sql select * from db.stb
|
||||
sql select * from db.stb
|
||||
print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
|
||||
print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6]
|
||||
print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6]
|
||||
|
||||
if $rows != 1 then
|
||||
return -1
|
||||
endi
|
||||
if $data[0][1] != 1 then
|
||||
return -1
|
||||
endi
|
||||
if $data[0][2] != 2 then
|
||||
return -1
|
||||
endi
|
||||
if $data[0][3] != 101 then
|
||||
return -1
|
||||
endi
|
||||
if $data[0][4] != NULL then
|
||||
return -1
|
||||
endi
|
||||
if $data[0][5] != NULL then
|
||||
return -1
|
||||
endi
|
||||
if $data[0][6] != null then
|
||||
return -1
|
||||
endi
|
||||
|
||||
sql_error create table db.ctb2 using db.stb tags(101, "102")
|
||||
sql create table db.ctb2 using db.stb tags(201, 202, 203)
|
||||
sql insert into db.ctb2 values(now, 1, "2")
|
||||
|
||||
sql select * from db.stb where tbname = 'ctb2';
|
||||
print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
|
||||
print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6]
|
||||
print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6]
|
||||
|
||||
if $rows != 1 then
|
||||
return -1
|
||||
endi
|
||||
if $data[0][1] != 1 then
|
||||
return -1
|
||||
endi
|
||||
if $data[0][2] != 2 then
|
||||
return -1
|
||||
endi
|
||||
if $data[0][3] != 201 then
|
||||
return -1
|
||||
endi
|
||||
if $data[0][4] != 202 then
|
||||
return -1
|
||||
endi
|
||||
if $data[0][5] != 203 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
print ========== step4 describe
|
||||
sql describe db.ctb
|
||||
if $rows != 6 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
print ========== step5 add tag2
|
||||
sql alter table db.stb add tag t2 bigint
|
||||
sql select * from db.stb where tbname = 'ctb2';
|
||||
sql select * from db.stb where tbname = 'ctb2';
|
||||
print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
|
||||
print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6]
|
||||
print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6]
|
||||
|
||||
if $rows != 1 then
|
||||
return -1
|
||||
endi
|
||||
if $data[0][1] != 1 then
|
||||
return -1
|
||||
endi
|
||||
if $data[0][2] != 2 then
|
||||
return -1
|
||||
endi
|
||||
if $data[0][3] != 201 then
|
||||
return -1
|
||||
endi
|
||||
if $data[0][4] != 202 then
|
||||
return -1
|
||||
endi
|
||||
if $data[0][5] != 203 then
|
||||
return -1
|
||||
endi
|
||||
if $data[0][6] != NULL then
|
||||
return -1
|
||||
endi
|
||||
|
||||
sql_error create table db.ctb2 using db.stb tags(101, "102")
|
||||
sql_error create table db.ctb2 using db.stb tags(201, 202, 203)
|
||||
sql create table db.ctb3 using db.stb tags(301, 302, 303, 304)
|
||||
sql insert into db.ctb3 values(now, 1, "2")
|
||||
|
||||
sql select * from db.stb where tbname = 'ctb3';
|
||||
print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
|
||||
print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6]
|
||||
print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6]
|
||||
|
||||
if $rows != 1 then
|
||||
return -1
|
||||
endi
|
||||
if $data[0][1] != 1 then
|
||||
return -1
|
||||
endi
|
||||
if $data[0][2] != 2 then
|
||||
return -1
|
||||
endi
|
||||
if $data[0][3] != 301 then
|
||||
return -1
|
||||
endi
|
||||
if $data[0][4] != 302 then
|
||||
return -1
|
||||
endi
|
||||
if $data[0][5] != 303 then
|
||||
return -1
|
||||
endi
|
||||
if $data[0][6] != 304 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
print ========== step6 describe
|
||||
sql describe db.ctb
|
||||
if $rows != 7 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
if $data[3][0] != t1 then
|
||||
return -1
|
||||
endi
|
||||
if $data[4][0] != t3 then
|
||||
return -1
|
||||
endi
|
||||
if $data[5][0] != t4 then
|
||||
return -1
|
||||
endi
|
||||
if $data[6][0] != t2 then
|
||||
return -1
|
||||
endi
|
||||
if $data[6][1] != BIGINT then
|
||||
return -1
|
||||
endi
|
||||
|
||||
print ========== step7 drop tag t1
|
||||
sql alter table db.stb drop tag t1
|
||||
|
||||
sql show db.stables
|
||||
if $data[0][4] != 3 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
sql describe db.ctb
|
||||
if $rows != 6 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
sql select * from db.stb where tbname = 'ctb3';
|
||||
print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
|
||||
print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6]
|
||||
print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6]
|
||||
|
||||
if $rows != 1 then
|
||||
return -1
|
||||
endi
|
||||
if $data[0][1] != 1 then
|
||||
return -1
|
||||
endi
|
||||
if $data[0][2] != 2 then
|
||||
return -1
|
||||
endi
|
||||
if $data[0][3] != 302 then
|
||||
return -1
|
||||
endi
|
||||
if $data[0][4] != 303 then
|
||||
return -1
|
||||
endi
|
||||
if $data[0][5] != 304 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
system sh/exec.sh -n dnode1 -s stop -x SIGINT
|
|
@ -0,0 +1,123 @@
|
|||
system sh/stop_dnodes.sh
|
||||
system sh/deploy.sh -n dnode1 -i 1
|
||||
system sh/exec.sh -n dnode1 -s start
|
||||
sql connect
|
||||
|
||||
print ========== prepare stb and ctb
|
||||
sql create database db vgroups 1
|
||||
sql create table db.stb (ts timestamp, c1 int, c2 binary(4)) tags(t1 int, t2 binary(4)) comment "abd"
|
||||
|
||||
sql_error alter table db.stb MODIFY tag c2 binary(3)
|
||||
sql_error alter table db.stb MODIFY tag c2 int
|
||||
sql_error alter table db.stb MODIFY tag c1 int
|
||||
sql_error alter table db.stb MODIFY tag ts int
|
||||
sql_error alter table db.stb MODIFY tag t2 binary(3)
|
||||
sql_error alter table db.stb MODIFY tag t2 int
|
||||
sql_error alter table db.stb MODIFY tag t1 int
|
||||
sql create table db.ctb using db.stb tags(101, "12345")
|
||||
sql insert into db.ctb values(now, 1, "1234")
|
||||
|
||||
sql select * from db.stb
|
||||
print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
|
||||
|
||||
if $rows != 1 then
|
||||
return -1
|
||||
endi
|
||||
if $data[0][1] != 1 then
|
||||
return -1
|
||||
endi
|
||||
if $data[0][2] != 1234 then
|
||||
return -1
|
||||
endi
|
||||
if $data[0][3] != 101 then
|
||||
return -1
|
||||
endi
|
||||
if $data[0][4] != 1234 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
print ========== step1 modify tag
|
||||
sql alter table db.stb MODIFY tag t2 binary(5)
|
||||
sql select * from db.stb
|
||||
|
||||
sql create table db.ctb2 using db.stb tags(101, "12345")
|
||||
sql insert into db.ctb2 values(now, 1, "1234")
|
||||
|
||||
sql select * from db.stb where tbname = 'ctb2';
|
||||
print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
|
||||
print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6]
|
||||
|
||||
if $rows != 1 then
|
||||
return -1
|
||||
endi
|
||||
if $data[0][1] != 1 then
|
||||
return -1
|
||||
endi
|
||||
if $data[0][2] != 1234 then
|
||||
return -1
|
||||
endi
|
||||
if $data[0][3] != 101 then
|
||||
return -1
|
||||
endi
|
||||
if $data[0][4] != 12345 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
print ========== step2 describe
|
||||
sql describe db.ctb2
|
||||
if $rows != 5 then
|
||||
return -1
|
||||
endi
|
||||
if $data[0][0] != ts then
|
||||
return -1
|
||||
endi
|
||||
if $data[1][0] != c1 then
|
||||
return -1
|
||||
endi
|
||||
if $data[2][0] != c2 then
|
||||
return -1
|
||||
endi
|
||||
if $data[3][0] != t1 then
|
||||
return -1
|
||||
endi
|
||||
if $data[4][0] != t2 then
|
||||
return -1
|
||||
endi
|
||||
if $data[4][1] != VARCHAR then
|
||||
return -1
|
||||
endi
|
||||
if $data[4][2] != 5 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
system sh/exec.sh -n dnode1 -s stop -x SIGINT
|
||||
system sh/exec.sh -n dnode1 -s start
|
||||
|
||||
sql connect
|
||||
sql describe db.ctb2
|
||||
if $rows != 5 then
|
||||
return -1
|
||||
endi
|
||||
if $data[0][0] != ts then
|
||||
return -1
|
||||
endi
|
||||
if $data[1][0] != c1 then
|
||||
return -1
|
||||
endi
|
||||
if $data[2][0] != c2 then
|
||||
return -1
|
||||
endi
|
||||
if $data[3][0] != t1 then
|
||||
return -1
|
||||
endi
|
||||
if $data[4][0] != t2 then
|
||||
return -1
|
||||
endi
|
||||
if $data[4][1] != VARCHAR then
|
||||
return -1
|
||||
endi
|
||||
if $data[4][2] != 5 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
system sh/exec.sh -n dnode1 -s stop -x SIGINT
|
|
@ -0,0 +1,120 @@
|
|||
system sh/stop_dnodes.sh
|
||||
system sh/deploy.sh -n dnode1 -i 1
|
||||
system sh/exec.sh -n dnode1 -s start
|
||||
sql connect
|
||||
|
||||
print ========== prepare stb and ctb
|
||||
sql create database db vgroups 1
|
||||
sql create table db.stb (ts timestamp, c1 int, c2 binary(4)) tags(t1 int, t2 binary(4)) comment "abd"
|
||||
|
||||
sql_error alter table db.stb rename tag c2 c3
|
||||
sql_error alter table db.stb rename tag c2 c3
|
||||
sql_error alter table db.stb rename tag c1 c3
|
||||
sql_error alter table db.stb rename tag ts c3
|
||||
sql_error alter table db.stb rename tag t2 t1
|
||||
sql_error alter table db.stb rename tag t2 t2
|
||||
sql_error alter table db.stb rename tag t1 t2
|
||||
sql create table db.ctb using db.stb tags(101, "12345")
|
||||
sql insert into db.ctb values(now, 1, "1234")
|
||||
|
||||
sql select * from db.stb
|
||||
print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
|
||||
|
||||
if $rows != 1 then
|
||||
return -1
|
||||
endi
|
||||
if $data[0][1] != 1 then
|
||||
return -1
|
||||
endi
|
||||
if $data[0][2] != 1234 then
|
||||
return -1
|
||||
endi
|
||||
if $data[0][3] != 101 then
|
||||
return -1
|
||||
endi
|
||||
if $data[0][4] != 1234 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
print ========== step1 rename tag
|
||||
sql alter table db.stb rename tag t1 t3
|
||||
sql select * from db.stb
|
||||
sql select * from db.stb
|
||||
|
||||
print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
|
||||
print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6]
|
||||
|
||||
if $rows != 1 then
|
||||
return -1
|
||||
endi
|
||||
if $data[0][1] != 1 then
|
||||
return -1
|
||||
endi
|
||||
if $data[0][2] != 1234 then
|
||||
return -1
|
||||
endi
|
||||
if $data[0][3] != 101 then
|
||||
return -1
|
||||
endi
|
||||
if $data[0][4] != 1234 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
print ========== step2 describe
|
||||
sql describe db.ctb
|
||||
if $rows != 5 then
|
||||
return -1
|
||||
endi
|
||||
if $data[0][0] != ts then
|
||||
return -1
|
||||
endi
|
||||
if $data[1][0] != c1 then
|
||||
return -1
|
||||
endi
|
||||
if $data[2][0] != c2 then
|
||||
return -1
|
||||
endi
|
||||
if $data[3][0] != t3 then
|
||||
return -1
|
||||
endi
|
||||
if $data[4][0] != t2 then
|
||||
return -1
|
||||
endi
|
||||
if $data[4][1] != VARCHAR then
|
||||
return -1
|
||||
endi
|
||||
if $data[4][2] != 4 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
system sh/exec.sh -n dnode1 -s stop -x SIGINT
|
||||
system sh/exec.sh -n dnode1 -s start
|
||||
|
||||
sql connect
|
||||
sql describe db.ctb
|
||||
if $rows != 5 then
|
||||
return -1
|
||||
endi
|
||||
if $data[0][0] != ts then
|
||||
return -1
|
||||
endi
|
||||
if $data[1][0] != c1 then
|
||||
return -1
|
||||
endi
|
||||
if $data[2][0] != c2 then
|
||||
return -1
|
||||
endi
|
||||
if $data[3][0] != t3 then
|
||||
return -1
|
||||
endi
|
||||
if $data[4][0] != t2 then
|
||||
return -1
|
||||
endi
|
||||
if $data[4][1] != VARCHAR then
|
||||
return -1
|
||||
endi
|
||||
if $data[4][2] != 4 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
system sh/exec.sh -n dnode1 -s stop -x SIGINT
|
|
@ -1,16 +1,9 @@
|
|||
system sh/stop_dnodes.sh
|
||||
|
||||
|
||||
system sh/deploy.sh -n dnode1 -i 1
|
||||
system sh/cfg.sh -n dnode1 -c walLevel -v 1
|
||||
system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
|
||||
system sh/exec.sh -n dnode1 -s start
|
||||
|
||||
sleep 2000
|
||||
sql connect
|
||||
|
||||
print ======================== dnode1 start
|
||||
|
||||
sql create database vdb0
|
||||
sql create table vdb0.mt (ts timestamp, tbcol int) TAGS(tgcol int)
|
||||
|
||||
|
|
|
@ -1,16 +1,9 @@
|
|||
system sh/stop_dnodes.sh
|
||||
|
||||
system sh/deploy.sh -n dnode1 -i 1
|
||||
system sh/cfg.sh -n dnode1 -c walLevel -v 1
|
||||
system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
|
||||
system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 4
|
||||
system sh/exec.sh -n dnode1 -s start
|
||||
|
||||
sleep 2000
|
||||
sql connect
|
||||
|
||||
print ======================== dnode1 start
|
||||
|
||||
$dbPrefix = v3_db
|
||||
$tbPrefix = v3_tb
|
||||
$mtPrefix = v3_mt
|
||||
|
|
|
@ -71,7 +71,7 @@ print ====> start to check if there are ERRORS in vagrind log file for each dnod
|
|||
# -n : dnode[x] be check
|
||||
system_content sh/checkValgrind.sh -n dnode1
|
||||
print cmd return result----> [ $system_content ]
|
||||
if $system_content <= 1 then
|
||||
if $system_content <= 3 then
|
||||
return 0
|
||||
endi
|
||||
|
||||
|
|
|
@ -175,16 +175,17 @@ class TDTestCase:
|
|||
|
||||
tdLog.printNoPrefix("==========step10:invalid query type")
|
||||
|
||||
tdSql.query("select * from supt where location between 'beijing' and 'shanghai'")
|
||||
tdSql.checkRows(23)
|
||||
# non-zero values are all parsed as 1, so "between <negative value> and 0" is parsed as "between 1 and 0"
|
||||
tdSql.query("select * from supt where isused between 0 and 1")
|
||||
tdSql.checkRows(23)
|
||||
tdSql.query("select * from supt where isused between -1 and 0")
|
||||
tdSql.checkRows(0)
|
||||
tdSql.error("select * from supt where isused between false and true")
|
||||
tdSql.query("select * from supt where family between '拖拉机' and '自行车'")
|
||||
tdSql.checkRows(23)
|
||||
# TODO tag is not finished
|
||||
# tdSql.query("select * from supt where location between 'beijing' and 'shanghai'")
|
||||
# tdSql.checkRows(23)
|
||||
# # non-zero values are all parsed as 1, so "between <negative value> and 0" is parsed as "between 1 and 0"
|
||||
# tdSql.query("select * from supt where isused between 0 and 1")
|
||||
# tdSql.checkRows(23)
|
||||
# tdSql.query("select * from supt where isused between -1 and 0")
|
||||
# tdSql.checkRows(0)
|
||||
# tdSql.error("select * from supt where isused between false and true")
|
||||
# tdSql.query("select * from supt where family between '拖拉机' and '自行车'")
|
||||
# tdSql.checkRows(23)
|
||||
|
||||
tdLog.printNoPrefix("==========step11:query HEX/OCT/BIN type")
|
||||
|
||||
|
|
|
@ -36,19 +36,19 @@ class TDTestCase:
|
|||
concat_condition.extend(
|
||||
(
|
||||
char_col,
|
||||
f"upper( {char_col} )",
|
||||
# f"upper( {char_col} )",
|
||||
)
|
||||
)
|
||||
concat_condition.extend( f"cast( {num_col} as binary(16) ) " for num_col in NUM_COL)
|
||||
concat_condition.extend( f"cast( {char_col} + {num_col} as binary(16) ) " for num_col in NUM_COL )
|
||||
concat_condition.extend( f"cast( {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL )
|
||||
concat_condition.extend( f"cast( {char_col} + {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL )
|
||||
# concat_condition.extend( f"cast( {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL )
|
||||
# concat_condition.extend( f"cast( {char_col} + {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL )
|
||||
concat_condition.extend( f"cast( {ts_col} as binary(16) )" for ts_col in TS_TYPE_COL )
|
||||
# concat_condition.extend( f"cast( {char_col} + {ts_col} as binary(16) )" for ts_col in TS_TYPE_COL )
|
||||
concat_condition.extend( f"cast( {char_col} + {char_col_2} as binary(16) ) " for char_col_2 in CHAR_COL )
|
||||
|
||||
for num_col in NUM_COL:
|
||||
concat_condition.extend( f"cast( {num_col} + {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL )
|
||||
# concat_condition.extend( f"cast( {num_col} + {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL )
|
||||
concat_condition.extend( f"cast( {num_col} + {ts_col} as binary(16) )" for ts_col in TS_TYPE_COL if num_col is not FLOAT_COL and num_col is not DOUBLE_COL)
|
||||
|
||||
concat_condition.extend( f"cast( {bool_col} + {ts_col} as binary(16) )" for bool_col in BOOLEAN_COL for ts_col in TS_TYPE_COL )
|
||||
|
@ -96,7 +96,6 @@ class TDTestCase:
|
|||
|
||||
[ tdSql.query(f"select concat( {','.join( condition ) }) from {tbname} {where_condition} {group} ") for group in groups ]
|
||||
|
||||
|
||||
def __concat_err_check(self,tbname):
|
||||
sqls = []
|
||||
|
||||
|
@ -139,7 +138,11 @@ class TDTestCase:
|
|||
|
||||
def __test_current(self): # sourcery skip: use-itertools-product
|
||||
tdLog.printNoPrefix("==========current sql condition check , must return query ok==========")
|
||||
tbname = ["ct1", "ct2", "ct4", "t1", "stb1"]
|
||||
tbname = [
|
||||
"ct1",
|
||||
"ct2",
|
||||
"ct4",
|
||||
]
|
||||
for tb in tbname:
|
||||
for i in range(2,8):
|
||||
self.__concat_check(tb,i)
|
||||
|
@ -147,7 +150,10 @@ class TDTestCase:
|
|||
|
||||
def __test_error(self):
|
||||
tdLog.printNoPrefix("==========err sql condition check , must return error==========")
|
||||
tbname = ["ct1", "ct2", "ct4", "t1", "stb1"]
|
||||
tbname = [
|
||||
"t1",
|
||||
"stb1",
|
||||
]
|
||||
|
||||
for tb in tbname:
|
||||
for errsql in self.__concat_err_check(tb):
|
||||
|
|
|
@ -0,0 +1,293 @@
|
|||
from util.log import *
|
||||
from util.sql import *
|
||||
from util.cases import *
|
||||
from util.dnodes import *
|
||||
|
||||
|
||||
PRIMARY_COL = "ts"
|
||||
|
||||
INT_COL = "c1"
|
||||
BINT_COL = "c2"
|
||||
SINT_COL = "c3"
|
||||
TINT_COL = "c4"
|
||||
FLOAT_COL = "c5"
|
||||
DOUBLE_COL = "c6"
|
||||
BOOL_COL = "c7"
|
||||
|
||||
BINARY_COL = "c8"
|
||||
NCHAR_COL = "c9"
|
||||
TS_COL = "c10"
|
||||
|
||||
NUM_COL = [ INT_COL, BINT_COL, SINT_COL, TINT_COL, FLOAT_COL, DOUBLE_COL, ]
|
||||
CHAR_COL = [ BINARY_COL, NCHAR_COL, ]
|
||||
BOOLEAN_COL = [ BOOL_COL, ]
|
||||
TS_TYPE_COL = [ TS_COL, ]
|
||||
|
||||
|
||||
class TDTestCase:
|
||||
|
||||
def init(self, conn, logSql):
|
||||
tdLog.debug(f"start to excute {__file__}")
|
||||
tdSql.init(conn.cursor())
|
||||
|
||||
def __concat_condition(self): # sourcery skip: extract-method
|
||||
concat_condition = []
|
||||
for char_col in CHAR_COL:
|
||||
concat_condition.extend(
|
||||
(
|
||||
char_col,
|
||||
# f"upper( {char_col} )",
|
||||
)
|
||||
)
|
||||
concat_condition.extend( f"cast( {num_col} as binary(16) ) " for num_col in NUM_COL)
|
||||
concat_condition.extend( f"cast( {char_col} + {num_col} as binary(16) ) " for num_col in NUM_COL )
|
||||
# concat_condition.extend( f"cast( {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL )
|
||||
# concat_condition.extend( f"cast( {char_col} + {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL )
|
||||
concat_condition.extend( f"cast( {ts_col} as binary(16) )" for ts_col in TS_TYPE_COL )
|
||||
# concat_condition.extend( f"cast( {char_col} + {ts_col} as binary(16) )" for ts_col in TS_TYPE_COL )
|
||||
concat_condition.extend( f"cast( {char_col} + {char_col_2} as binary(16) ) " for char_col_2 in CHAR_COL )
|
||||
|
||||
for num_col in NUM_COL:
|
||||
# concat_condition.extend( f"cast( {num_col} + {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL )
|
||||
concat_condition.extend( f"cast( {num_col} + {ts_col} as binary(16) )" for ts_col in TS_TYPE_COL if num_col is not FLOAT_COL and num_col is not DOUBLE_COL)
|
||||
|
||||
concat_condition.extend( f"cast( {bool_col} + {ts_col} as binary(16) )" for bool_col in BOOLEAN_COL for ts_col in TS_TYPE_COL )
|
||||
|
||||
concat_condition.append('''"test1234!@#$%^&*():'><?/.,][}{"''')
|
||||
|
||||
return concat_condition
|
||||
|
||||
def __where_condition(self, col):
|
||||
# return f" where count({col}) > 0 "
|
||||
return ""
|
||||
|
||||
def __concat_num(self, concat_lists, num):
|
||||
return [ concat_lists[i] for i in range(num) ]
|
||||
|
||||
|
||||
def __group_condition(self, col, having = ""):
|
||||
return f" group by {col} having {having}" if having else f" group by {col} "
|
||||
|
||||
def __concat_check(self, tbname, num):
|
||||
concat_condition = self.__concat_condition()
|
||||
for i in range(len(concat_condition) - num + 1 ):
|
||||
condition = self.__concat_num(concat_condition[i:], num)
|
||||
concat_filter = f"concat( {','.join( condition ) }) "
|
||||
where_condition = self.__where_condition(condition[0])
|
||||
# group_having = self.__group_condition(condition[0], having=f"{condition[0]} is not null " )
|
||||
concat_group_having = self.__group_condition(concat_filter, having=f"{concat_filter} is not null " )
|
||||
# group_no_having= self.__group_condition(condition[0] )
|
||||
concat_group_no_having= self.__group_condition(concat_filter)
|
||||
groups = ["", concat_group_having, concat_group_no_having]
|
||||
|
||||
if num > 8 or num < 2 :
|
||||
[tdSql.error(f"select concat( {','.join( condition ) }) from {tbname} {where_condition} {group} ") for group in groups ]
|
||||
break
|
||||
|
||||
tdSql.query(f"select {','.join(condition)} from {tbname} ")
|
||||
rows = tdSql.queryRows
|
||||
concat_data = []
|
||||
for m in range(rows):
|
||||
concat_data.append("".join(tdSql.queryResult[m])) if tdSql.getData(m, 0) else concat_data.append(None)
|
||||
tdSql.query(f"select concat( {','.join( condition ) }) from {tbname} ")
|
||||
tdSql.checkRows(rows)
|
||||
for j in range(tdSql.queryRows):
|
||||
assert tdSql.getData(j, 0) in concat_data
|
||||
|
||||
[ tdSql.query(f"select concat( {','.join( condition ) }) from {tbname} {where_condition} {group} ") for group in groups ]
|
||||
|
||||
|
||||
def __concat_err_check(self,tbname):
|
||||
sqls = []
|
||||
|
||||
for char_col in CHAR_COL:
|
||||
sqls.extend(
|
||||
(
|
||||
f"select concat( {char_col} ) from {tbname} ",
|
||||
f"select concat(ceil( {char_col} )) from {tbname} ",
|
||||
f"select {char_col} from {tbname} group by concat( {char_col} ) ",
|
||||
)
|
||||
)
|
||||
|
||||
sqls.extend( f"select concat( {char_col} , {num_col} ) from {tbname} " for num_col in NUM_COL )
|
||||
sqls.extend( f"select concat( {char_col} , {ts_col} ) from {tbname} " for ts_col in TS_TYPE_COL )
|
||||
sqls.extend( f"select concat( {char_col} , {bool_col} ) from {tbname} " for bool_col in BOOLEAN_COL )
|
||||
|
||||
sqls.extend( f"select concat( {ts_col}, {bool_col} ) from {tbname} " for ts_col in TS_TYPE_COL for bool_col in BOOLEAN_COL )
|
||||
sqls.extend( f"select concat( {num_col} , {ts_col} ) from {tbname} " for num_col in NUM_COL for ts_col in TS_TYPE_COL)
|
||||
sqls.extend( f"select concat( {num_col} , {bool_col} ) from {tbname} " for num_col in NUM_COL for bool_col in BOOLEAN_COL)
|
||||
sqls.extend( f"select concat( {num_col} , {num_col} ) from {tbname} " for num_col in NUM_COL for num_col in NUM_COL)
|
||||
sqls.extend( f"select concat( {ts_col}, {ts_col} ) from {tbname} " for ts_col in TS_TYPE_COL for ts_col in TS_TYPE_COL )
|
||||
sqls.extend( f"select concat( {bool_col}, {bool_col} ) from {tbname} " for bool_col in BOOLEAN_COL for bool_col in BOOLEAN_COL )
|
||||
|
||||
sqls.extend( f"select concat( {char_col} + {char_col_2} ) from {tbname} " for char_col in CHAR_COL for char_col_2 in CHAR_COL )
|
||||
sqls.extend( f"select concat({char_col}, 11) from {tbname} " for char_col in CHAR_COL )
|
||||
sqls.extend( f"select concat({num_col}, '1') from {tbname} " for num_col in NUM_COL )
|
||||
sqls.extend( f"select concat({ts_col}, '1') from {tbname} " for ts_col in TS_TYPE_COL )
|
||||
sqls.extend( f"select concat({bool_col}, '1') from {tbname} " for bool_col in BOOLEAN_COL )
|
||||
sqls.extend( f"select concat({char_col},'1') from {tbname} interval(2d) sliding(1d)" for char_col in CHAR_COL )
|
||||
sqls.extend(
|
||||
(
|
||||
f"select concat() from {tbname} ",
|
||||
f"select concat(*) from {tbname} ",
|
||||
f"select concat(ccccccc) from {tbname} ",
|
||||
f"select concat(111) from {tbname} ",
|
||||
)
|
||||
)
|
||||
|
||||
return sqls
|
||||
|
||||
def __test_current(self): # sourcery skip: use-itertools-product
|
||||
tdLog.printNoPrefix("==========current sql condition check , must return query ok==========")
|
||||
tbname = [
|
||||
"t1",
|
||||
"stb1",
|
||||
]
|
||||
for tb in tbname:
|
||||
for i in range(2,8):
|
||||
self.__concat_check(tb,i)
|
||||
tdLog.printNoPrefix(f"==========current sql condition check in {tb}, col num: {i} over==========")
|
||||
|
||||
def __test_error(self):
|
||||
tdLog.printNoPrefix("==========err sql condition check , must return error==========")
|
||||
tbname = [
|
||||
"ct1",
|
||||
"ct4",
|
||||
]
|
||||
|
||||
for tb in tbname:
|
||||
for errsql in self.__concat_err_check(tb):
|
||||
tdSql.error(sql=errsql)
|
||||
self.__concat_check(tb,1)
|
||||
self.__concat_check(tb,9)
|
||||
tdLog.printNoPrefix(f"==========err sql condition check in {tb} over==========")
|
||||
|
||||
|
||||
def all_test(self):
|
||||
self.__test_current()
|
||||
self.__test_error()
|
||||
|
||||
|
||||
def __create_tb(self):
|
||||
tdSql.prepare()
|
||||
|
||||
tdLog.printNoPrefix("==========step1:create table")
|
||||
create_stb_sql = f'''create table stb1(
|
||||
ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
|
||||
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
|
||||
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
|
||||
) tags (t1 int)
|
||||
'''
|
||||
create_ntb_sql = f'''create table t1(
|
||||
ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
|
||||
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
|
||||
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
|
||||
)
|
||||
'''
|
||||
tdSql.execute(create_stb_sql)
|
||||
tdSql.execute(create_ntb_sql)
|
||||
|
||||
for i in range(4):
|
||||
tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
|
||||
|
||||
def __insert_data(self, rows):
|
||||
now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000)
|
||||
for i in range(rows):
|
||||
tdSql.execute(
|
||||
f"insert into ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
|
||||
)
|
||||
tdSql.execute(
|
||||
f"insert into ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
|
||||
)
|
||||
tdSql.execute(
|
||||
f"insert into ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
|
||||
)
|
||||
tdSql.execute(
|
||||
f'''insert into ct1 values
|
||||
( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar_测试_0', { now_time + 8 } )
|
||||
( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar_测试_9', { now_time + 9 } )
|
||||
'''
|
||||
)
|
||||
|
||||
tdSql.execute(
|
||||
f'''insert into ct4 values
|
||||
( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
|
||||
( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
|
||||
( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
|
||||
(
|
||||
{ now_time + 5184000000}, {pow(2,31)-pow(2,15)}, {pow(2,63)-pow(2,30)}, 32767, 127,
|
||||
{ 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000}
|
||||
)
|
||||
(
|
||||
{ now_time + 2592000000 }, {pow(2,31)-pow(2,16)}, {pow(2,63)-pow(2,31)}, 32766, 126,
|
||||
{ 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000}
|
||||
)
|
||||
'''
|
||||
)
|
||||
|
||||
tdSql.execute(
|
||||
f'''insert into ct2 values
|
||||
( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
|
||||
( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
|
||||
( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
|
||||
(
|
||||
{ now_time + 5184000000 }, { -1 * pow(2,31) + pow(2,15) }, { -1 * pow(2,63) + pow(2,30) }, -32766, -126,
|
||||
{ -1 * 3.2 * pow(10,38) }, { -1.2 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 }
|
||||
)
|
||||
(
|
||||
{ now_time + 2592000000 }, { -1 * pow(2,31) + pow(2,16) }, { -1 * pow(2,63) + pow(2,31) }, -32767, -127,
|
||||
{ - 3.3 * pow(10,38) }, { -1.3 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 }
|
||||
)
|
||||
'''
|
||||
)
|
||||
|
||||
for i in range(rows):
|
||||
insert_data = f'''insert into t1 values
|
||||
( { now_time - i * 3600000 }, {i}, {i * 11111}, { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2},
|
||||
"binary_{i}", "nchar_测试_{i}", { now_time - 1000 * i } )
|
||||
'''
|
||||
tdSql.execute(insert_data)
|
||||
tdSql.execute(
|
||||
f'''insert into t1 values
|
||||
( { now_time + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
|
||||
( { now_time - (( rows // 2 ) * 60 + 30) * 60000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
|
||||
( { now_time - rows * 3600000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
|
||||
( { now_time + 7200000 }, { pow(2,31) - pow(2,15) }, { pow(2,63) - pow(2,30) }, 32767, 127,
|
||||
{ 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 },
|
||||
"binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 }
|
||||
)
|
||||
(
|
||||
{ now_time + 3600000 } , { pow(2,31) - pow(2,16) }, { pow(2,63) - pow(2,31) }, 32766, 126,
|
||||
{ 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 },
|
||||
"binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 }
|
||||
)
|
||||
'''
|
||||
)
|
||||
|
||||
def run(self):
|
||||
tdSql.prepare()
|
||||
|
||||
tdLog.printNoPrefix("==========step1:create table")
|
||||
self.__create_tb()
|
||||
|
||||
tdLog.printNoPrefix("==========step2:insert data")
|
||||
self.rows = 10
|
||||
self.__insert_data(self.rows)
|
||||
|
||||
tdLog.printNoPrefix("==========step3:all check")
|
||||
self.all_test()
|
||||
|
||||
tdDnodes.stop(1)
|
||||
tdDnodes.start(1)
|
||||
|
||||
tdSql.execute("use db")
|
||||
|
||||
tdLog.printNoPrefix("==========step4:after wal, all check again ")
|
||||
self.all_test()
|
||||
|
||||
def stop(self):
|
||||
tdSql.close()
|
||||
tdLog.success(f"{__file__} successfully executed")
|
||||
|
||||
tdCases.addLinux(__file__, TDTestCase())
|
||||
tdCases.addWindows(__file__, TDTestCase())
|
|
@ -36,22 +36,22 @@ class TDTestCase:
|
|||
concat_ws_condition.extend(
|
||||
(
|
||||
char_col,
|
||||
f"upper( {char_col} )",
|
||||
# f"upper( {char_col} )",
|
||||
)
|
||||
)
|
||||
concat_ws_condition.extend( f"cast( {num_col} as binary(16) ) " for num_col in NUM_COL)
|
||||
concat_ws_condition.extend( f"cast( {char_col} + {num_col} as binary(16) ) " for num_col in NUM_COL )
|
||||
concat_ws_condition.extend( f"cast( {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL )
|
||||
concat_ws_condition.extend( f"cast( {char_col} + {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL )
|
||||
# concat_ws_condition.extend( f"cast( {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL )
|
||||
# concat_ws_condition.extend( f"cast( {char_col} + {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL )
|
||||
concat_ws_condition.extend( f"cast( {ts_col} as binary(16) )" for ts_col in TS_TYPE_COL )
|
||||
# concat_ws_condition.extend( f"cast( {char_col} + {ts_col} as binary(16) )" for ts_col in TS_TYPE_COL )
|
||||
concat_ws_condition.extend( f"cast( {char_col} + {char_col_2} as binary(16) ) " for char_col_2 in CHAR_COL )
|
||||
|
||||
for num_col in NUM_COL:
|
||||
concat_ws_condition.extend( f"cast( {num_col} + {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL )
|
||||
# concat_ws_condition.extend( f"cast( {num_col} + {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL )
|
||||
concat_ws_condition.extend( f"cast( {num_col} + {ts_col} as binary(16) )" for ts_col in TS_TYPE_COL if num_col is not FLOAT_COL and num_col is not DOUBLE_COL)
|
||||
|
||||
concat_ws_condition.extend( f"cast( {bool_col} + {ts_col} as binary(16) )" for bool_col in BOOLEAN_COL for ts_col in TS_TYPE_COL )
|
||||
# concat_ws_condition.extend( f"cast( {bool_col} + {ts_col} as binary(16) )" for bool_col in BOOLEAN_COL for ts_col in TS_TYPE_COL )
|
||||
|
||||
concat_ws_condition.append('''"test1234!@#$%^&*():'><?/.,][}{"''')
|
||||
|
||||
|
@ -139,7 +139,10 @@ class TDTestCase:
|
|||
|
||||
def __test_current(self): # sourcery skip: use-itertools-product
|
||||
tdLog.printNoPrefix("==========current sql condition check , must return query ok==========")
|
||||
tbname = ["ct1", "ct2", "ct4", "t1", "stb1"]
|
||||
tbname = [
|
||||
"t1",
|
||||
"stb1"
|
||||
]
|
||||
for tb in tbname:
|
||||
for i in range(2,8):
|
||||
self.__concat_ws_check(tb,i)
|
||||
|
@ -147,7 +150,11 @@ class TDTestCase:
|
|||
|
||||
def __test_error(self):
|
||||
tdLog.printNoPrefix("==========err sql condition check , must return error==========")
|
||||
tbname = ["ct1", "ct2", "ct4", "t1", "stb1"]
|
||||
tbname = [
|
||||
"ct1",
|
||||
"ct2",
|
||||
"ct4",
|
||||
]
|
||||
|
||||
for tb in tbname:
|
||||
for errsql in self.__concat_ws_err_check(tb):
|
||||
|
|
|
@ -0,0 +1,294 @@
|
|||
from util.log import *
|
||||
from util.sql import *
|
||||
from util.cases import *
|
||||
from util.dnodes import *
|
||||
|
||||
|
||||
PRIMARY_COL = "ts"
|
||||
|
||||
INT_COL = "c1"
|
||||
BINT_COL = "c2"
|
||||
SINT_COL = "c3"
|
||||
TINT_COL = "c4"
|
||||
FLOAT_COL = "c5"
|
||||
DOUBLE_COL = "c6"
|
||||
BOOL_COL = "c7"
|
||||
|
||||
BINARY_COL = "c8"
|
||||
NCHAR_COL = "c9"
|
||||
TS_COL = "c10"
|
||||
|
||||
NUM_COL = [ INT_COL, BINT_COL, SINT_COL, TINT_COL, FLOAT_COL, DOUBLE_COL, ]
|
||||
CHAR_COL = [ BINARY_COL, NCHAR_COL, ]
|
||||
BOOLEAN_COL = [ BOOL_COL, ]
|
||||
TS_TYPE_COL = [ TS_COL, ]
|
||||
|
||||
|
||||
class TDTestCase:
|
||||
|
||||
def init(self, conn, logSql):
|
||||
tdLog.debug(f"start to excute {__file__}")
|
||||
tdSql.init(conn.cursor())
|
||||
|
||||
def __concat_ws_condition(self): # sourcery skip: extract-method
|
||||
concat_ws_condition = []
|
||||
for char_col in CHAR_COL:
|
||||
concat_ws_condition.extend(
|
||||
(
|
||||
char_col,
|
||||
# f"upper( {char_col} )",
|
||||
)
|
||||
)
|
||||
concat_ws_condition.extend( f"cast( {num_col} as binary(16) ) " for num_col in NUM_COL)
|
||||
concat_ws_condition.extend( f"cast( {char_col} + {num_col} as binary(16) ) " for num_col in NUM_COL )
|
||||
# concat_ws_condition.extend( f"cast( {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL )
|
||||
# concat_ws_condition.extend( f"cast( {char_col} + {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL )
|
||||
concat_ws_condition.extend( f"cast( {ts_col} as binary(16) )" for ts_col in TS_TYPE_COL )
|
||||
# concat_ws_condition.extend( f"cast( {char_col} + {ts_col} as binary(16) )" for ts_col in TS_TYPE_COL )
|
||||
concat_ws_condition.extend( f"cast( {char_col} + {char_col_2} as binary(16) ) " for char_col_2 in CHAR_COL )
|
||||
|
||||
for num_col in NUM_COL:
|
||||
# concat_ws_condition.extend( f"cast( {num_col} + {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL )
|
||||
concat_ws_condition.extend( f"cast( {num_col} + {ts_col} as binary(16) )" for ts_col in TS_TYPE_COL if num_col is not FLOAT_COL and num_col is not DOUBLE_COL)
|
||||
|
||||
# concat_ws_condition.extend( f"cast( {bool_col} + {ts_col} as binary(16) )" for bool_col in BOOLEAN_COL for ts_col in TS_TYPE_COL )
|
||||
|
||||
concat_ws_condition.append('''"test1234!@#$%^&*():'><?/.,][}{"''')
|
||||
|
||||
return concat_ws_condition
|
||||
|
||||
def __where_condition(self, col):
|
||||
# return f" where count({col}) > 0 "
|
||||
return ""
|
||||
|
||||
def __concat_ws_num(self, concat_ws_lists, num):
|
||||
return [ concat_ws_lists[i] for i in range(num) ]
|
||||
|
||||
|
||||
def __group_condition(self, col, having = ""):
|
||||
return f" group by {col} having {having}" if having else f" group by {col} "
|
||||
|
||||
def __concat_ws_check(self, tbname, num):
|
||||
concat_ws_condition = self.__concat_ws_condition()
|
||||
for i in range(len(concat_ws_condition) - num + 1 ):
|
||||
condition = self.__concat_ws_num(concat_ws_condition[i:], num)
|
||||
concat_ws_filter = f"concat_ws('_', {','.join( condition ) }) "
|
||||
where_condition = self.__where_condition(condition[0])
|
||||
# group_having = self.__group_condition(condition[0], having=f"{condition[0]} is not null " )
|
||||
concat_ws_group_having = self.__group_condition(concat_ws_filter, having=f"{concat_ws_filter} is not null " )
|
||||
# group_no_having= self.__group_condition(condition[0] )
|
||||
concat_ws_group_no_having= self.__group_condition(concat_ws_filter)
|
||||
groups = ["", concat_ws_group_having, concat_ws_group_no_having]
|
||||
|
||||
if num > 8 or num < 2 :
|
||||
[tdSql.error(f"select concat_ws('_', {','.join( condition ) }) from {tbname} {where_condition} {group} ") for group in groups ]
|
||||
break
|
||||
|
||||
tdSql.query(f"select {','.join(condition)} from {tbname} ")
|
||||
rows = tdSql.queryRows
|
||||
concat_ws_data = []
|
||||
for m in range(rows):
|
||||
concat_ws_data.append("_".join(tdSql.queryResult[m])) if tdSql.getData(m, 0) else concat_ws_data.append(None)
|
||||
tdSql.query(f"select concat_ws('_', {','.join( condition ) }) from {tbname} ")
|
||||
tdSql.checkRows(rows)
|
||||
for j in range(tdSql.queryRows):
|
||||
assert tdSql.getData(j, 0) in concat_ws_data
|
||||
|
||||
[ tdSql.query(f"select concat_ws('_', {','.join( condition ) }) from {tbname} {where_condition} {group} ") for group in groups ]
|
||||
|
||||
|
||||
def __concat_ws_err_check(self,tbname):
|
||||
sqls = []
|
||||
|
||||
for char_col in CHAR_COL:
|
||||
sqls.extend(
|
||||
(
|
||||
f"select concat_ws('_', {char_col} ) from {tbname} ",
|
||||
f"select concat_ws('_', ceil( {char_col} )) from {tbname} ",
|
||||
f"select {char_col} from {tbname} group by concat_ws('_', {char_col} ) ",
|
||||
)
|
||||
)
|
||||
|
||||
sqls.extend( f"select concat_ws('_', {char_col} , {num_col} ) from {tbname} " for num_col in NUM_COL )
|
||||
sqls.extend( f"select concat_ws('_', {char_col} , {ts_col} ) from {tbname} " for ts_col in TS_TYPE_COL )
|
||||
sqls.extend( f"select concat_ws('_', {char_col} , {bool_col} ) from {tbname} " for bool_col in BOOLEAN_COL )
|
||||
|
||||
sqls.extend( f"select concat_ws('_', {ts_col}, {bool_col} ) from {tbname} " for ts_col in TS_TYPE_COL for bool_col in BOOLEAN_COL )
|
||||
sqls.extend( f"select concat_ws('_', {num_col} , {ts_col} ) from {tbname} " for num_col in NUM_COL for ts_col in TS_TYPE_COL)
|
||||
sqls.extend( f"select concat_ws('_', {num_col} , {bool_col} ) from {tbname} " for num_col in NUM_COL for bool_col in BOOLEAN_COL)
|
||||
sqls.extend( f"select concat_ws('_', {num_col} , {num_col} ) from {tbname} " for num_col in NUM_COL for num_col in NUM_COL)
|
||||
sqls.extend( f"select concat_ws('_', {ts_col}, {ts_col} ) from {tbname} " for ts_col in TS_TYPE_COL for ts_col in TS_TYPE_COL )
|
||||
sqls.extend( f"select concat_ws('_', {bool_col}, {bool_col} ) from {tbname} " for bool_col in BOOLEAN_COL for bool_col in BOOLEAN_COL )
|
||||
|
||||
sqls.extend( f"select concat_ws('_', {char_col} + {char_col_2} ) from {tbname} " for char_col in CHAR_COL for char_col_2 in CHAR_COL )
|
||||
sqls.extend( f"select concat_ws('_', {char_col}, 11) from {tbname} " for char_col in CHAR_COL )
|
||||
sqls.extend( f"select concat_ws('_', {num_col}, '1') from {tbname} " for num_col in NUM_COL )
|
||||
sqls.extend( f"select concat_ws('_', {ts_col}, '1') from {tbname} " for ts_col in TS_TYPE_COL )
|
||||
sqls.extend( f"select concat_ws('_', {bool_col}, '1') from {tbname} " for bool_col in BOOLEAN_COL )
|
||||
sqls.extend( f"select concat_ws('_', {char_col},'1') from {tbname} interval(2d) sliding(1d)" for char_col in CHAR_COL )
|
||||
sqls.extend(
|
||||
(
|
||||
f"select concat_ws('_', ) from {tbname} ",
|
||||
f"select concat_ws('_', *) from {tbname} ",
|
||||
f"select concat_ws('_', ccccccc) from {tbname} ",
|
||||
f"select concat_ws('_', 111) from {tbname} ",
|
||||
)
|
||||
)
|
||||
|
||||
return sqls
|
||||
|
||||
def __test_current(self): # sourcery skip: use-itertools-product
|
||||
tdLog.printNoPrefix("==========current sql condition check , must return query ok==========")
|
||||
tbname = [
|
||||
"ct1",
|
||||
"ct2",
|
||||
"ct4",
|
||||
]
|
||||
for tb in tbname:
|
||||
for i in range(2,8):
|
||||
self.__concat_ws_check(tb,i)
|
||||
tdLog.printNoPrefix(f"==========current sql condition check in {tb}, col num: {i} over==========")
|
||||
|
||||
def __test_error(self):
|
||||
tdLog.printNoPrefix("==========err sql condition check , must return error==========")
|
||||
tbname = [
|
||||
"t1",
|
||||
"stb1"
|
||||
]
|
||||
|
||||
for tb in tbname:
|
||||
for errsql in self.__concat_ws_err_check(tb):
|
||||
tdSql.error(sql=errsql)
|
||||
self.__concat_ws_check(tb,1)
|
||||
self.__concat_ws_check(tb,9)
|
||||
tdLog.printNoPrefix(f"==========err sql condition check in {tb} over==========")
|
||||
|
||||
|
||||
def all_test(self):
|
||||
self.__test_current()
|
||||
self.__test_error()
|
||||
|
||||
|
||||
def __create_tb(self):
|
||||
tdSql.prepare()
|
||||
|
||||
tdLog.printNoPrefix("==========step1:create table")
|
||||
create_stb_sql = f'''create table stb1(
|
||||
ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
|
||||
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
|
||||
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
|
||||
) tags (t1 int)
|
||||
'''
|
||||
create_ntb_sql = f'''create table t1(
|
||||
ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
|
||||
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
|
||||
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
|
||||
)
|
||||
'''
|
||||
tdSql.execute(create_stb_sql)
|
||||
tdSql.execute(create_ntb_sql)
|
||||
|
||||
for i in range(4):
|
||||
tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
|
||||
|
||||
def __insert_data(self, rows):
|
||||
now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000)
|
||||
for i in range(rows):
|
||||
tdSql.execute(
|
||||
f"insert into ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
|
||||
)
|
||||
tdSql.execute(
|
||||
f"insert into ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
|
||||
)
|
||||
tdSql.execute(
|
||||
f"insert into ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
|
||||
)
|
||||
tdSql.execute(
|
||||
f'''insert into ct1 values
|
||||
( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar_测试_0', { now_time + 8 } )
|
||||
( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar_测试_9', { now_time + 9 } )
|
||||
'''
|
||||
)
|
||||
|
||||
tdSql.execute(
|
||||
f'''insert into ct4 values
|
||||
( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
|
||||
( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
|
||||
( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
|
||||
(
|
||||
{ now_time + 5184000000}, {pow(2,31)-pow(2,15)}, {pow(2,63)-pow(2,30)}, 32767, 127,
|
||||
{ 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000}
|
||||
)
|
||||
(
|
||||
{ now_time + 2592000000 }, {pow(2,31)-pow(2,16)}, {pow(2,63)-pow(2,31)}, 32766, 126,
|
||||
{ 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000}
|
||||
)
|
||||
'''
|
||||
)
|
||||
|
||||
tdSql.execute(
|
||||
f'''insert into ct2 values
|
||||
( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
|
||||
( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
|
||||
( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
|
||||
(
|
||||
{ now_time + 5184000000 }, { -1 * pow(2,31) + pow(2,15) }, { -1 * pow(2,63) + pow(2,30) }, -32766, -126,
|
||||
{ -1 * 3.2 * pow(10,38) }, { -1.2 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 }
|
||||
)
|
||||
(
|
||||
{ now_time + 2592000000 }, { -1 * pow(2,31) + pow(2,16) }, { -1 * pow(2,63) + pow(2,31) }, -32767, -127,
|
||||
{ - 3.3 * pow(10,38) }, { -1.3 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 }
|
||||
)
|
||||
'''
|
||||
)
|
||||
|
||||
for i in range(rows):
|
||||
insert_data = f'''insert into t1 values
|
||||
( { now_time - i * 3600000 }, {i}, {i * 11111}, { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2},
|
||||
"binary_{i}", "nchar_测试_{i}", { now_time - 1000 * i } )
|
||||
'''
|
||||
tdSql.execute(insert_data)
|
||||
tdSql.execute(
|
||||
f'''insert into t1 values
|
||||
( { now_time + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
|
||||
( { now_time - (( rows // 2 ) * 60 + 30) * 60000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
|
||||
( { now_time - rows * 3600000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
|
||||
( { now_time + 7200000 }, { pow(2,31) - pow(2,15) }, { pow(2,63) - pow(2,30) }, 32767, 127,
|
||||
{ 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 },
|
||||
"binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 }
|
||||
)
|
||||
(
|
||||
{ now_time + 3600000 } , { pow(2,31) - pow(2,16) }, { pow(2,63) - pow(2,31) }, 32766, 126,
|
||||
{ 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 },
|
||||
"binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 }
|
||||
)
|
||||
'''
|
||||
)
|
||||
|
||||
def run(self):
|
||||
tdSql.prepare()
|
||||
|
||||
tdLog.printNoPrefix("==========step1:create table")
|
||||
self.__create_tb()
|
||||
|
||||
tdLog.printNoPrefix("==========step2:insert data")
|
||||
self.rows = 10
|
||||
self.__insert_data(self.rows)
|
||||
|
||||
tdLog.printNoPrefix("==========step3:all check")
|
||||
self.all_test()
|
||||
|
||||
tdDnodes.stop(1)
|
||||
tdDnodes.start(1)
|
||||
|
||||
tdSql.execute("use db")
|
||||
|
||||
tdLog.printNoPrefix("==========step4:after wal, all check again ")
|
||||
self.all_test()
|
||||
|
||||
def stop(self):
|
||||
tdSql.close()
|
||||
tdLog.success(f"{__file__} successfully executed")
|
||||
|
||||
tdCases.addLinux(__file__, TDTestCase())
|
||||
tdCases.addWindows(__file__, TDTestCase())
|
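For reference, here is a standalone sketch of the SQL that __concat_ws_check above generates: it slides a window of num expressions over the condition list and wraps each window in a single concat_ws('_', ...) call. This is pure Python and needs no TDengine connection; the column expressions in the example are only illustrations.

def concat_ws_sqls(conditions, num, tbname="t1"):
    # one query per window of `num` consecutive expressions, mirroring __concat_ws_check
    sqls = []
    for i in range(len(conditions) - num + 1):
        window = conditions[i:i + num]
        sqls.append(f"select concat_ws('_', {','.join(window)}) from {tbname}")
    return sqls

# Example: three expressions and num=2 give two sliding windows, hence two queries.
for sql in concat_ws_sqls(["c8", "c9", "cast( c1 as binary(16) )"], 2):
    print(sql)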
File diff suppressed because it is too large
|
@ -0,0 +1,361 @@
|
|||
import datetime
|
||||
|
||||
from util.log import *
|
||||
from util.sql import *
|
||||
from util.cases import *
|
||||
from util.dnodes import *
|
||||
|
||||
PRIMARY_COL = "ts"
|
||||
|
||||
INT_COL = "c1"
|
||||
BINT_COL = "c2"
|
||||
SINT_COL = "c3"
|
||||
TINT_COL = "c4"
|
||||
FLOAT_COL = "c5"
|
||||
DOUBLE_COL = "c6"
|
||||
BOOL_COL = "c7"
|
||||
|
||||
BINARY_COL = "c8"
|
||||
NCHAR_COL = "c9"
|
||||
TS_COL = "c10"
|
||||
|
||||
NUM_COL = [ INT_COL, BINT_COL, SINT_COL, TINT_COL, FLOAT_COL, DOUBLE_COL, ]
|
||||
CHAR_COL = [ BINARY_COL, NCHAR_COL, ]
|
||||
BOOLEAN_COL = [ BOOL_COL, ]
|
||||
TS_TYPE_COL = [ TS_COL, ]
|
||||
|
||||
ALL_COL = [ INT_COL, BINT_COL, SINT_COL, TINT_COL, FLOAT_COL, DOUBLE_COL, BOOL_COL, BINARY_COL, NCHAR_COL, TS_COL ]
|
||||
|
||||
class TDTestCase:
|
||||
|
||||
def init(self, conn, logSql):
|
||||
tdLog.debug(f"start to excute {__file__}")
|
||||
tdSql.init(conn.cursor())
|
||||
|
||||
def __query_condition(self,tbname):
|
||||
query_condition = [f"cast({col} as bigint)" for col in ALL_COL]
|
||||
for num_col in NUM_COL:
|
||||
query_condition.extend(
|
||||
(
|
||||
f"{tbname}.{num_col}",
|
||||
f"abs( {tbname}.{num_col} )",
|
||||
f"acos( {tbname}.{num_col} )",
|
||||
f"asin( {tbname}.{num_col} )",
|
||||
f"atan( {tbname}.{num_col} )",
|
||||
f"avg( {tbname}.{num_col} )",
|
||||
f"ceil( {tbname}.{num_col} )",
|
||||
f"cos( {tbname}.{num_col} )",
|
||||
f"count( {tbname}.{num_col} )",
|
||||
f"floor( {tbname}.{num_col} )",
|
||||
f"log( {tbname}.{num_col}, {tbname}.{num_col})",
|
||||
f"max( {tbname}.{num_col} )",
|
||||
f"min( {tbname}.{num_col} )",
|
||||
f"pow( {tbname}.{num_col}, 2)",
|
||||
f"round( {tbname}.{num_col} )",
|
||||
f"sum( {tbname}.{num_col} )",
|
||||
f"sin( {tbname}.{num_col} )",
|
||||
f"sqrt( {tbname}.{num_col} )",
|
||||
f"tan( {tbname}.{num_col} )",
|
||||
f"cast( {tbname}.{num_col} as timestamp)",
|
||||
)
|
||||
)
|
||||
query_condition.extend((f"{num_col} + {any_col}" for any_col in ALL_COL))
|
||||
for char_col in CHAR_COL:
|
||||
query_condition.extend(
|
||||
(
|
||||
f"count({tbname}.{char_col})",
|
||||
f"sum(cast({tbname}.{char_col}) as bigint)",
|
||||
f"max(cast({tbname}.{char_col}) as bigint)",
|
||||
f"min(cast({tbname}.{char_col}) as bigint)",
|
||||
f"avg(cast({tbname}.{char_col}) as bigint)",
|
||||
)
|
||||
)
|
||||
# query_condition.extend(
|
||||
# (
|
||||
# 1010,
|
||||
# )
|
||||
# )
|
||||
|
||||
return query_condition
|
||||
|
||||
def __join_condition(self, tb_list, filter=PRIMARY_COL, INNER=False):
|
||||
table_reference = tb_list[0]
|
||||
join_condition = table_reference
|
||||
join = "inner join" if INNER else "join"
|
||||
for i in range(len(tb_list[1:])):
|
||||
join_condition += f" {join} {tb_list[i+1]} on {table_reference}.{filter}={tb_list[i+1]}.{filter}"
|
||||
|
||||
return join_condition
|
||||
|
||||
def __where_condition(self, col=None, tbname=None, query_conditon=None):
|
||||
if query_conditon and isinstance(query_conditon, str):
|
||||
if query_conditon.startswith("count"):
|
||||
query_conditon = query_conditon[6:-1]
|
||||
elif query_conditon.startswith("max"):
|
||||
query_conditon = query_conditon[4:-1]
|
||||
elif query_conditon.startswith("sum"):
|
||||
query_conditon = query_conditon[4:-1]
|
||||
elif query_conditon.startswith("min"):
|
||||
query_conditon = query_conditon[4:-1]
|
||||
|
||||
if query_conditon:
|
||||
return f" where {query_conditon} is not null"
|
||||
if col in NUM_COL:
|
||||
return f" where abs( {tbname}.{col} ) >= 0"
|
||||
if col in CHAR_COL:
|
||||
return f" where lower( {tbname}.{col} ) like 'bina%' or lower( {tbname}.{col} ) like '_cha%' "
|
||||
if col in BOOLEAN_COL:
|
||||
return f" where {tbname}.{col} in (false, true) "
|
||||
if col in TS_TYPE_COL or col in PRIMARY_COL:
|
||||
return f" where cast( {tbname}.{col} as binary(16) ) is not null "
|
||||
|
||||
return ""
|
||||
|
||||
def __group_condition(self, col, having = None):
|
||||
if isinstance(col, str):
|
||||
if col.startswith("count"):
|
||||
col = col[6:-1]
|
||||
elif col.startswith("max"):
|
||||
col = col[4:-1]
|
||||
elif col.startswith("sum"):
|
||||
col = col[4:-1]
|
||||
elif col.startswith("min"):
|
||||
col = col[4:-1]
|
||||
return f" group by {col} having {having}" if having else f" group by {col} "
|
||||
|
||||
def __single_sql(self, select_clause, from_clause, where_condition="", group_condition=""):
|
||||
if isinstance(select_clause, str) and "on" not in from_clause and select_clause.split(".")[0] != from_clause.split(".")[0]:
|
||||
return
|
||||
return f"select hyperloglog({select_clause}) from {from_clause} {where_condition} {group_condition}"
|
||||
|
||||
@property
|
||||
def __tb_list(self):
|
||||
return [
|
||||
"ct1",
|
||||
"ct4",
|
||||
"t1",
|
||||
"ct2",
|
||||
"stb1",
|
||||
]
|
||||
|
||||
def sql_list(self):
|
||||
sqls = []
|
||||
__no_join_tblist = self.__tb_list
|
||||
for tb in __no_join_tblist:
|
||||
select_claus_list = self.__query_condition(tb)
|
||||
for select_claus in select_claus_list:
|
||||
group_claus = self.__group_condition(col=select_claus)
|
||||
where_claus = self.__where_condition(query_conditon=select_claus)
|
||||
having_claus = self.__group_condition(col=select_claus, having=f"{select_claus} is not null")
|
||||
sqls.extend(
|
||||
(
|
||||
self.__single_sql(select_claus, tb, where_claus, having_claus),
|
||||
self.__single_sql(select_claus, tb,),
|
||||
self.__single_sql(select_claus, tb, where_condition=where_claus),
|
||||
self.__single_sql(select_claus, tb, group_condition=group_claus),
|
||||
)
|
||||
)
|
||||
|
||||
# return filter(None, sqls)
|
||||
return list(filter(None, sqls))
|
||||
|
||||
def __get_type(self, col):
|
||||
if tdSql.cursor.istype(col, "BOOL"):
|
||||
return "BOOL"
|
||||
if tdSql.cursor.istype(col, "INT"):
|
||||
return "INT"
|
||||
if tdSql.cursor.istype(col, "BIGINT"):
|
||||
return "BIGINT"
|
||||
if tdSql.cursor.istype(col, "TINYINT"):
|
||||
return "TINYINT"
|
||||
if tdSql.cursor.istype(col, "SMALLINT"):
|
||||
return "SMALLINT"
|
||||
if tdSql.cursor.istype(col, "FLOAT"):
|
||||
return "FLOAT"
|
||||
if tdSql.cursor.istype(col, "DOUBLE"):
|
||||
return "DOUBLE"
|
||||
if tdSql.cursor.istype(col, "BINARY"):
|
||||
return "BINARY"
|
||||
if tdSql.cursor.istype(col, "NCHAR"):
|
||||
return "NCHAR"
|
||||
if tdSql.cursor.istype(col, "TIMESTAMP"):
|
||||
return "TIMESTAMP"
|
||||
if tdSql.cursor.istype(col, "JSON"):
|
||||
return "JSON"
|
||||
if tdSql.cursor.istype(col, "TINYINT UNSIGNED"):
|
||||
return "TINYINT UNSIGNED"
|
||||
if tdSql.cursor.istype(col, "SMALLINT UNSIGNED"):
|
||||
return "SMALLINT UNSIGNED"
|
||||
if tdSql.cursor.istype(col, "INT UNSIGNED"):
|
||||
return "INT UNSIGNED"
|
||||
if tdSql.cursor.istype(col, "BIGINT UNSIGNED"):
|
||||
return "BIGINT UNSIGNED"
|
||||
|
||||
def spread_check(self):
|
||||
sqls = self.sql_list()
|
||||
tdLog.printNoPrefix("===step 1: curent case, must return query OK")
|
||||
for i in range(len(sqls)):
|
||||
tdLog.info(f"sql: {sqls[i]}")
|
||||
tdSql.query(sqls[i])
|
||||
|
||||
def __test_current(self):
|
||||
tdSql.query("select hyperloglog(ts) from ct1")
|
||||
tdSql.checkRows(1)
|
||||
tdSql.query("select hyperloglog(c1) from ct2")
|
||||
tdSql.checkRows(1)
|
||||
tdSql.query("select hyperloglog(c1) from ct4 group by c1")
|
||||
tdSql.checkRows(self.rows + 3)
|
||||
tdSql.query("select hyperloglog(c1) from ct4 group by c7")
|
||||
tdSql.checkRows(3)
|
||||
tdSql.query("select hyperloglog(ct2.c1) from ct4 join ct2 on ct4.ts=ct2.ts")
|
||||
tdSql.checkRows(1)
|
||||
tdSql.checkData(0, 0, self.rows + 2)
|
||||
tdSql.query("select hyperloglog(c1), c1 from stb1 group by c1")
|
||||
for i in range(tdSql.queryRows):
|
||||
tdSql.checkData(i, 0, 1) if tdSql.queryResult[i][1] is not None else tdSql.checkData(i, 0, 0)
|
||||
|
||||
|
||||
|
||||
self.spread_check()
|
||||
|
||||
def __test_error(self):
|
||||
|
||||
tdLog.printNoPrefix("===step 0: err case, must return err")
|
||||
tdSql.error( "select hyperloglog() from ct1" )
|
||||
tdSql.error( "select hyperloglog(c1, c2) from ct2" )
|
||||
tdSql.error( "select hyperloglog(1) from ct2" )
|
||||
tdSql.error( f"select hyperloglog({NUM_COL[0]}, {NUM_COL[1]}) from ct4" )
|
||||
tdSql.error( ''' select hyperloglog(['c1 + c1', 'c1 + c2', 'c1 + c3', 'c1 + c4', 'c1 + c5', 'c1 + c6', 'c1 + c7', 'c1 + c8', 'c1 + c9', 'c1 + c10'])
|
||||
from ct1
|
||||
where ['c1 + c1', 'c1 + c2', 'c1 + c3', 'c1 + c4', 'c1 + c5', 'c1 + c6', 'c1 + c7', 'c1 + c8', 'c1 + c9', 'c1 + c10'] is not null
|
||||
group by ['c1 + c1', 'c1 + c2', 'c1 + c3', 'c1 + c4', 'c1 + c5', 'c1 + c6', 'c1 + c7', 'c1 + c8', 'c1 + c9', 'c1 + c10']
|
||||
having ['c1 + c1', 'c1 + c2', 'c1 + c3', 'c1 + c4', 'c1 + c5', 'c1 + c6', 'c1 + c7', 'c1 + c8', 'c1 + c9', 'c1 + c10'] is not null ''' )
|
||||
|
||||
def all_test(self):
|
||||
self.__test_error()
|
||||
self.__test_current()
|
||||
|
||||
def __create_tb(self):
|
||||
|
||||
tdLog.printNoPrefix("==========step1:create table")
|
||||
create_stb_sql = f'''create table stb1(
|
||||
ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
|
||||
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
|
||||
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
|
||||
) tags (t1 int)
|
||||
'''
|
||||
create_ntb_sql = f'''create table t1(
|
||||
ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
|
||||
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
|
||||
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
|
||||
)
|
||||
'''
|
||||
tdSql.execute(create_stb_sql)
|
||||
tdSql.execute(create_ntb_sql)
|
||||
|
||||
for i in range(4):
|
||||
tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
|
||||
|
||||
|
||||
def __insert_data(self, rows):
|
||||
now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000)
|
||||
for i in range(rows):
|
||||
tdSql.execute(
|
||||
f"insert into ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
|
||||
)
|
||||
tdSql.execute(
|
||||
f"insert into ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
|
||||
)
|
||||
tdSql.execute(
|
||||
f"insert into ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
|
||||
)
|
||||
tdSql.execute(
|
||||
f'''insert into ct1 values
|
||||
( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar_测试_0', { now_time + 8 } )
|
||||
( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar_测试_9', { now_time + 9 } )
|
||||
'''
|
||||
)
|
||||
|
||||
tdSql.execute(
|
||||
f'''insert into ct4 values
|
||||
( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
|
||||
( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
|
||||
( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
|
||||
(
|
||||
{ now_time + 5184000000}, {pow(2,31)-pow(2,15)}, {pow(2,63)-pow(2,30)}, 32767, 127,
|
||||
{ 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000}
|
||||
)
|
||||
(
|
||||
{ now_time + 2592000000 }, {pow(2,31)-pow(2,16)}, {pow(2,63)-pow(2,31)}, 32766, 126,
|
||||
{ 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000}
|
||||
)
|
||||
'''
|
||||
)
|
||||
|
||||
tdSql.execute(
|
||||
f'''insert into ct2 values
|
||||
( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
|
||||
( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
|
||||
( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
|
||||
(
|
||||
{ now_time + 5184000000 }, { -1 * pow(2,31) + pow(2,15) }, { -1 * pow(2,63) + pow(2,30) }, -32766, -126,
|
||||
{ -1 * 3.2 * pow(10,38) }, { -1.2 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 }
|
||||
)
|
||||
(
|
||||
{ now_time + 2592000000 }, { -1 * pow(2,31) + pow(2,16) }, { -1 * pow(2,63) + pow(2,31) }, -32767, -127,
|
||||
{ - 3.3 * pow(10,38) }, { -1.3 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 }
|
||||
)
|
||||
'''
|
||||
)
|
||||
|
||||
for i in range(rows):
|
||||
insert_data = f'''insert into t1 values
|
||||
( { now_time - i * 3600000 }, {i}, {i * 11111}, { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2},
|
||||
"binary_{i}", "nchar_测试_{i}", { now_time - 1000 * i } )
|
||||
'''
|
||||
tdSql.execute(insert_data)
|
||||
tdSql.execute(
|
||||
f'''insert into t1 values
|
||||
( { now_time + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
|
||||
( { now_time - (( rows // 2 ) * 60 + 30) * 60000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
|
||||
( { now_time - rows * 3600000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
|
||||
( { now_time + 7200000 }, { pow(2,31) - pow(2,15) }, { pow(2,63) - pow(2,30) }, 32767, 127,
|
||||
{ 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 },
|
||||
"binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 }
|
||||
)
|
||||
(
|
||||
{ now_time + 3600000 } , { pow(2,31) - pow(2,16) }, { pow(2,63) - pow(2,31) }, 32766, 126,
|
||||
{ 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 },
|
||||
"binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 }
|
||||
)
|
||||
'''
|
||||
)
|
||||
|
||||
|
||||
def run(self):
|
||||
tdSql.prepare()
|
||||
|
||||
tdLog.printNoPrefix("==========step1:create table")
|
||||
self.__create_tb()
|
||||
|
||||
tdLog.printNoPrefix("==========step2:insert data")
|
||||
self.rows = 10
|
||||
self.__insert_data(self.rows)
|
||||
|
||||
tdLog.printNoPrefix("==========step3:all check")
|
||||
self.all_test()
|
||||
|
||||
tdDnodes.stop(1)
|
||||
tdDnodes.start(1)
|
||||
|
||||
tdSql.execute("use db")
|
||||
|
||||
tdLog.printNoPrefix("==========step4:after wal, all check again ")
|
||||
self.all_test()
|
||||
|
||||
def stop(self):
|
||||
tdSql.close()
|
||||
tdLog.success(f"{__file__} successfully executed")
|
||||
|
||||
tdCases.addLinux(__file__, TDTestCase())
|
||||
tdCases.addWindows(__file__, TDTestCase())
|
|
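The __where_condition and __group_condition helpers in the file above peel an aggregate wrapper (count/max/sum/min) off the selected expression before reusing it in a WHERE or GROUP BY clause, since aggregates are not allowed there. A self-contained sketch of that slicing trick follows; the sample expressions are hypothetical.

def strip_aggregate(expr):
    # drop "count(" (6 chars) or "max("/"sum("/"min(" (4 chars) plus the closing ")"
    for prefix, width in (("count", 6), ("max", 4), ("sum", 4), ("min", 4)):
        if expr.startswith(prefix):
            return expr[width:-1]
    return expr

assert strip_aggregate("count( t1.c1 )") == " t1.c1 "
assert strip_aggregate("max( t1.c5 )") == " t1.c5 "
assert strip_aggregate("abs( t1.c5 )") == "abs( t1.c5 )"   # non-aggregates pass through unchanged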
@ -36,17 +36,14 @@ class TDTestCase:
|
|||
query_condition.extend(
|
||||
(
|
||||
f"{tbname}.{char_col}",
|
||||
f"upper( {tbname}.{char_col} )",
|
||||
# f"upper( {tbname}.{char_col} )",
|
||||
)
|
||||
)
|
||||
query_condition.extend( f"cast( {tbname}.{un_char_col} as binary(16) ) " for un_char_col in NUM_COL)
|
||||
query_condition.extend( f"cast( {tbname}.{char_col} + {tbname}.{char_col_2} as binary(32) ) " for char_col_2 in CHAR_COL )
|
||||
query_condition.extend( f"cast( {tbname}.{char_col} + {tbname}.{un_char_col} as binary(32) ) " for un_char_col in NUM_COL )
|
||||
for num_col in NUM_COL:
|
||||
query_condition.extend(
|
||||
(
|
||||
f"{tbname}.{num_col}",
|
||||
f"sin( {tbname}.{num_col} )"
|
||||
f"sin( {tbname}.{num_col} )",
|
||||
)
|
||||
)
|
||||
query_condition.extend( f"{tbname}.{num_col} + {tbname}.{num_col_1} " for num_col_1 in NUM_COL )
|
||||
|
@ -55,41 +52,115 @@ class TDTestCase:
|
|||
|
||||
return query_condition
|
||||
|
||||
def __join_condition(self, tb_list, filter=PRIMARY_COL):
|
||||
# sourcery skip: flip-comparison
|
||||
if 1 == len(tb_list):
|
||||
join_filter = f"{tb_list[0]}.{filter} = {tb_list[0]}.{filter} "
|
||||
elif 2 == len(tb_list):
|
||||
join_filter = f"{tb_list[0]}.{filter} = {tb_list[1]}.{filter} "
|
||||
else:
|
||||
join_filter = f"{tb_list[0]}.{filter} = {tb_list[1]}.{filter} "
|
||||
for i in range(1, len(tb_list)-1 ):
|
||||
join_filter += f"and {tb_list[i]}.{filter} = {tb_list[i+1]}.{filter}"
|
||||
def __join_condition(self, tb_list, filter=PRIMARY_COL, INNER=False):
|
||||
table_reference = tb_list[0]
|
||||
join_condition = table_reference
|
||||
join = "inner join" if INNER else "join"
|
||||
for i in range(len(tb_list[1:])):
|
||||
join_condition += f" {join} {tb_list[i+1]} on {table_reference}.{filter}={tb_list[i+1]}.{filter}"
|
||||
|
||||
return join_filter
|
||||
return join_condition
|
||||
|
||||
def __where_condition(self, col, tbname):
|
||||
def __where_condition(self, col=None, tbname=None, query_conditon=None):
|
||||
if query_conditon and isinstance(query_conditon, str):
|
||||
if query_conditon.startswith("count"):
|
||||
query_conditon = query_conditon[6:-1]
|
||||
elif query_conditon.startswith("max"):
|
||||
query_conditon = query_conditon[4:-1]
|
||||
elif query_conditon.startswith("sum"):
|
||||
query_conditon = query_conditon[4:-1]
|
||||
elif query_conditon.startswith("min"):
|
||||
query_conditon = query_conditon[4:-1]
|
||||
|
||||
if query_conditon:
|
||||
return f" where {query_conditon} is not null"
|
||||
if col in NUM_COL:
|
||||
return f" abs( {tbname}.{col} ) >= 0"
|
||||
elif col in CHAR_COL:
|
||||
return f" lower( {tbname}.{col} ) like 'bina%' or lower( {tbname}.{col} ) like '_cha%' "
|
||||
elif col in BOOLEAN_COL:
|
||||
return f" {tbname}.{col} in (false, true) "
|
||||
elif col in TS_TYPE_COL or col in PRIMARY_COL:
|
||||
return f" cast( {tbname}.{col} as binary(16) ) is not null "
|
||||
else:
|
||||
return ""
|
||||
return f" where abs( {tbname}.{col} ) >= 0"
|
||||
if col in CHAR_COL:
|
||||
return f" where lower( {tbname}.{col} ) like 'bina%' or lower( {tbname}.{col} ) like '_cha%' "
|
||||
if col in BOOLEAN_COL:
|
||||
return f" where {tbname}.{col} in (false, true) "
|
||||
if col in TS_TYPE_COL or col in PRIMARY_COL:
|
||||
return f" where cast( {tbname}.{col} as binary(16) ) is not null "
|
||||
|
||||
def __group_condition(self, tbname, col, having = ""):
|
||||
return ""
|
||||
|
||||
def __group_condition(self, col, having = None):
|
||||
if isinstance(col, str):
|
||||
if col.startswith("count"):
|
||||
col = col[6:-1]
|
||||
elif col.startswith("max"):
|
||||
col = col[4:-1]
|
||||
elif col.startswith("sum"):
|
||||
col = col[4:-1]
|
||||
elif col.startswith("min"):
|
||||
col = col[4:-1]
|
||||
return f" group by {col} having {having}" if having else f" group by {col} "
|
||||
|
||||
def __join_check(self, tblist, checkrows, join_flag=True):
|
||||
def __gen_sql(self, select_clause, from_clause, where_condition="", group_condition=""):
|
||||
if isinstance(select_clause, str) and "on" not in from_clause and select_clause.split(".")[0] != from_clause.split(".")[0]:
|
||||
return
|
||||
return f"select {select_clause} from {from_clause} {where_condition} {group_condition}"
|
||||
|
||||
@property
|
||||
def __join_tblist(self):
|
||||
return [
|
||||
# ["ct1", "ct2"],
|
||||
["ct1", "ct4"],
|
||||
["ct1", "t1"],
|
||||
# ["ct2", "ct4"],
|
||||
# ["ct2", "t1"],
|
||||
# ["ct4", "t1"],
|
||||
# ["ct1", "ct2", "ct4"],
|
||||
# ["ct1", "ct2", "t1"],
|
||||
# ["ct1", "ct4", "t1"],
|
||||
# ["ct2", "ct4", "t1"],
|
||||
# ["ct1", "ct2", "ct4", "t1"],
|
||||
]
|
||||
|
||||
@property
|
||||
def __sqls_list(self):
|
||||
sqls = []
|
||||
__join_tblist = self.__join_tblist
|
||||
for join_tblist in __join_tblist:
|
||||
for join_tb in join_tblist:
|
||||
select_claus_list = self.__query_condition(join_tb)
|
||||
for select_claus in select_claus_list:
|
||||
group_claus = self.__group_condition( col=select_claus)
|
||||
where_claus = self.__where_condition( query_conditon=select_claus )
|
||||
having_claus = self.__group_condition( col=select_claus, having=f"{select_claus} is not null" )
|
||||
sqls.extend(
|
||||
(
|
||||
# self.__gen_sql(select_claus, self.__join_condition(join_tblist), where_claus, group_claus),
|
||||
self.__gen_sql(select_claus, self.__join_condition(join_tblist), where_claus, having_claus),
|
||||
self.__gen_sql(select_claus, self.__join_condition(join_tblist), where_claus),
|
||||
# self.__gen_sql(select_claus, self.__join_condition(join_tblist), group_claus),
|
||||
self.__gen_sql(select_claus, self.__join_condition(join_tblist), having_claus),
|
||||
self.__gen_sql(select_claus, self.__join_condition(join_tblist)),
|
||||
# self.__gen_sql(select_claus, self.__join_condition(join_tblist, INNER=True), where_claus, group_claus),
|
||||
self.__gen_sql(select_claus, self.__join_condition(join_tblist, INNER=True), where_claus, having_claus),
|
||||
self.__gen_sql(select_claus, self.__join_condition(join_tblist, INNER=True), where_claus, ),
|
||||
self.__gen_sql(select_claus, self.__join_condition(join_tblist, INNER=True), having_claus ),
|
||||
# self.__gen_sql(select_claus, self.__join_condition(join_tblist, INNER=True), group_claus ),
|
||||
self.__gen_sql(select_claus, self.__join_condition(join_tblist, INNER=True) ),
|
||||
)
|
||||
)
|
||||
return list(filter(None, sqls))
|
||||
|
||||
def __join_check(self,):
|
||||
tdLog.printNoPrefix("==========current sql condition check , must return query ok==========")
|
||||
for i in range(len(self.__sqls_list)):
|
||||
tdSql.query(self.__sqls_list[i])
|
||||
# if i % 10 == 0 :
|
||||
# tdLog.success(f"{i} sql is already executed success !")
|
||||
|
||||
def __join_check_old(self, tblist, checkrows, join_flag=True):
|
||||
query_conditions = self.__query_condition(tblist[0])
|
||||
join_condition = self.__join_condition(tb_list=tblist) if join_flag else " "
|
||||
for condition in query_conditions:
|
||||
where_condition = self.__where_condition(col=condition, tbname=tblist[0])
|
||||
group_having = self.__group_condition(tbname=tblist[0], col=condition, having=f"{condition} is not null " )
|
||||
group_no_having= self.__group_condition(tbname=tblist[0], col=condition )
|
||||
group_having = self.__group_condition(col=condition, having=f"{condition} is not null " )
|
||||
group_no_having= self.__group_condition(col=condition )
|
||||
groups = ["", group_having, group_no_having]
|
||||
for group_condition in groups:
|
||||
if where_condition:
|
||||
|
@ -116,23 +187,6 @@ class TDTestCase:
|
|||
tdSql.query(sql=sql)
|
||||
# tdSql.checkRows(checkrows)
|
||||
|
||||
|
||||
def __test_current(self):
|
||||
# sourcery skip: extract-duplicate-method, inline-immediately-returned-variable
|
||||
tdLog.printNoPrefix("==========current sql condition check , must return query ok==========")
|
||||
tblist_1 = ["ct1", "ct2"]
|
||||
self.__join_check(tblist_1, 1)
|
||||
tdLog.printNoPrefix(f"==========current sql condition check in {tblist_1} over==========")
|
||||
tblist_2 = ["ct2", "ct4"]
|
||||
self.__join_check(tblist_2, self.rows)
|
||||
tdLog.printNoPrefix(f"==========current sql condition check in {tblist_2} over==========")
|
||||
tblist_3 = ["t1", "ct4"]
|
||||
self.__join_check(tblist_3, 1)
|
||||
tdLog.printNoPrefix(f"==========current sql condition check in {tblist_3} over==========")
|
||||
tblist_4 = ["t1", "ct1"]
|
||||
self.__join_check(tblist_4, 1)
|
||||
tdLog.printNoPrefix(f"==========current sql condition check in {tblist_4} over==========")
|
||||
|
||||
def __test_error(self):
|
||||
# sourcery skip: extract-duplicate-method, move-assign-in-block
|
||||
tdLog.printNoPrefix("==========err sql condition check , must return error==========")
|
||||
|
@ -141,17 +195,17 @@ class TDTestCase:
|
|||
err_list_3 = ["ct1","ct4", "t1"]
|
||||
err_list_4 = ["ct2","ct4", "t1"]
|
||||
err_list_5 = ["ct1", "ct2","ct4", "t1"]
|
||||
self.__join_check(err_list_1, -1)
|
||||
self.__join_check_old(err_list_1, -1)
|
||||
tdLog.printNoPrefix(f"==========err sql condition check in {err_list_1} over==========")
|
||||
self.__join_check(err_list_2, -1)
|
||||
self.__join_check_old(err_list_2, -1)
|
||||
tdLog.printNoPrefix(f"==========err sql condition check in {err_list_2} over==========")
|
||||
self.__join_check(err_list_3, -1)
|
||||
self.__join_check_old(err_list_3, -1)
|
||||
tdLog.printNoPrefix(f"==========err sql condition check in {err_list_3} over==========")
|
||||
self.__join_check(err_list_4, -1)
|
||||
self.__join_check_old(err_list_4, -1)
|
||||
tdLog.printNoPrefix(f"==========err sql condition check in {err_list_4} over==========")
|
||||
self.__join_check(err_list_5, -1)
|
||||
self.__join_check_old(err_list_5, -1)
|
||||
tdLog.printNoPrefix(f"==========err sql condition check in {err_list_5} over==========")
|
||||
self.__join_check(["ct2", "ct4"], -1, join_flag=False)
|
||||
self.__join_check_old(["ct2", "ct4"], -1, join_flag=False)
|
||||
tdLog.printNoPrefix("==========err sql condition check in has no join condition over==========")
|
||||
|
||||
tdSql.error( f"select c1, c2 from ct2, ct4 where ct2.{PRIMARY_COL}=ct4.{PRIMARY_COL}" )
|
||||
|
@ -172,7 +226,7 @@ class TDTestCase:
|
|||
|
||||
|
||||
def all_test(self):
|
||||
self.__test_current()
|
||||
self.__join_check()
|
||||
self.__test_error()
|
||||
|
||||
|
||||
|
|
|
@ -0,0 +1,357 @@
|
|||
import datetime
|
||||
|
||||
from util.log import *
|
||||
from util.sql import *
|
||||
from util.cases import *
|
||||
from util.dnodes import *
|
||||
|
||||
PRIMARY_COL = "ts"
|
||||
|
||||
INT_COL = "c1"
|
||||
BINT_COL = "c2"
|
||||
SINT_COL = "c3"
|
||||
TINT_COL = "c4"
|
||||
FLOAT_COL = "c5"
|
||||
DOUBLE_COL = "c6"
|
||||
BOOL_COL = "c7"
|
||||
|
||||
BINARY_COL = "c8"
|
||||
NCHAR_COL = "c9"
|
||||
TS_COL = "c10"
|
||||
|
||||
NUM_COL = [ INT_COL, BINT_COL, SINT_COL, TINT_COL, FLOAT_COL, DOUBLE_COL, ]
|
||||
CHAR_COL = [ BINARY_COL, NCHAR_COL, ]
|
||||
BOOLEAN_COL = [ BOOL_COL, ]
|
||||
TS_TYPE_COL = [ TS_COL, ]
|
||||
|
||||
class TDTestCase:
|
||||
|
||||
def init(self, conn, logSql):
|
||||
tdLog.debug(f"start to excute {__file__}")
|
||||
tdSql.init(conn.cursor(), True)
|
||||
|
||||
def __query_condition(self,tbname):
|
||||
query_condition = []
|
||||
for char_col in CHAR_COL:
|
||||
query_condition.extend(
|
||||
(
|
||||
f"{tbname}.{char_col}",
|
||||
# f"upper( {tbname}.{char_col} )",
|
||||
)
|
||||
)
|
||||
query_condition.extend( f"cast( {tbname}.{un_char_col} as binary(16) ) " for un_char_col in NUM_COL)
|
||||
for num_col in NUM_COL:
|
||||
query_condition.extend(
|
||||
(
|
||||
f"sin( {tbname}.{num_col} )",
|
||||
)
|
||||
)
|
||||
query_condition.extend( f"{tbname}.{num_col} + {tbname}.{num_col_1} " for num_col_1 in NUM_COL )
|
||||
|
||||
query_condition.append(''' "test1234!@#$%^&*():'><?/.,][}{" ''')
|
||||
|
||||
return query_condition
|
||||
|
||||
def __join_condition(self, tb_list, filter=PRIMARY_COL, INNER=False):
|
||||
table_reference = tb_list[0]
|
||||
join_condition = table_reference
|
||||
join = "inner join" if INNER else "join"
|
||||
for i in range(len(tb_list[1:])):
|
||||
join_condition += f" {join} {tb_list[i+1]} on {table_reference}.{filter}={tb_list[i+1]}.{filter}"
|
||||
|
||||
return join_condition
|
||||
|
||||
def __where_condition(self, col=None, tbname=None, query_conditon=None):
|
||||
if query_conditon and isinstance(query_conditon, str):
|
||||
if query_conditon.startswith("count"):
|
||||
query_conditon = query_conditon[6:-1]
|
||||
elif query_conditon.startswith("max"):
|
||||
query_conditon = query_conditon[4:-1]
|
||||
elif query_conditon.startswith("sum"):
|
||||
query_conditon = query_conditon[4:-1]
|
||||
elif query_conditon.startswith("min"):
|
||||
query_conditon = query_conditon[4:-1]
|
||||
|
||||
if query_conditon:
|
||||
return f" where {query_conditon} is not null"
|
||||
if col in NUM_COL:
|
||||
return f" where abs( {tbname}.{col} ) >= 0"
|
||||
if col in CHAR_COL:
|
||||
return f" where lower( {tbname}.{col} ) like 'bina%' or lower( {tbname}.{col} ) like '_cha%' "
|
||||
if col in BOOLEAN_COL:
|
||||
return f" where {tbname}.{col} in (false, true) "
|
||||
if col in TS_TYPE_COL or col in PRIMARY_COL:
|
||||
return f" where cast( {tbname}.{col} as binary(16) ) is not null "
|
||||
|
||||
return ""
|
||||
|
||||
def __group_condition(self, col, having = None):
|
||||
if isinstance(col, str):
|
||||
if col.startswith("count"):
|
||||
col = col[6:-1]
|
||||
elif col.startswith("max"):
|
||||
col = col[4:-1]
|
||||
elif col.startswith("sum"):
|
||||
col = col[4:-1]
|
||||
elif col.startswith("min"):
|
||||
col = col[4:-1]
|
||||
return f" group by {col} having {having}" if having else f" group by {col} "
|
||||
|
||||
def __gen_sql(self, select_clause, from_clause, where_condition="", group_condition=""):
|
||||
if isinstance(select_clause, str) and "on" not in from_clause and select_clause.split(".")[0] != from_clause.split(".")[0]:
|
||||
return
|
||||
return f"select {select_clause} from {from_clause} {where_condition} {group_condition}"
|
||||
|
||||
@property
|
||||
def __join_tblist(self):
|
||||
return [
|
||||
# ["ct1", "ct2"],
|
||||
# ["ct1", "ct4"],
|
||||
# ["ct1", "t1"],
|
||||
["ct2", "ct4"],
|
||||
# ["ct2", "t1"],
|
||||
["ct4", "t1"],
|
||||
# ["ct1", "ct2", "ct4"],
|
||||
# ["ct1", "ct2", "t1"],
|
||||
# ["ct1", "ct4", "t1"],
|
||||
# ["ct2", "ct4", "t1"],
|
||||
# ["ct1", "ct2", "ct4", "t1"],
|
||||
]
|
||||
|
||||
@property
|
||||
def __sqls_list(self):
|
||||
sqls = []
|
||||
__join_tblist = self.__join_tblist
|
||||
for join_tblist in __join_tblist:
|
||||
for join_tb in join_tblist:
|
||||
select_claus_list = self.__query_condition(join_tb)
|
||||
for select_claus in select_claus_list:
|
||||
group_claus = self.__group_condition( col=select_claus)
|
||||
where_claus = self.__where_condition( query_conditon=select_claus )
|
||||
having_claus = self.__group_condition( col=select_claus, having=f"{select_claus} is not null" )
|
||||
sqls.extend(
|
||||
(
|
||||
# self.__gen_sql(select_claus, self.__join_condition(join_tblist), where_claus, group_claus),
|
||||
self.__gen_sql(select_claus, self.__join_condition(join_tblist), where_claus, having_claus),
|
||||
self.__gen_sql(select_claus, self.__join_condition(join_tblist), where_claus),
|
||||
# self.__gen_sql(select_claus, self.__join_condition(join_tblist), group_claus),
|
||||
self.__gen_sql(select_claus, self.__join_condition(join_tblist), having_claus),
|
||||
self.__gen_sql(select_claus, self.__join_condition(join_tblist)),
|
||||
# self.__gen_sql(select_claus, self.__join_condition(join_tblist, INNER=True), where_claus, group_claus),
|
||||
self.__gen_sql(select_claus, self.__join_condition(join_tblist, INNER=True), where_claus, having_claus),
|
||||
self.__gen_sql(select_claus, self.__join_condition(join_tblist, INNER=True), where_claus, ),
|
||||
self.__gen_sql(select_claus, self.__join_condition(join_tblist, INNER=True), having_claus ),
|
||||
# self.__gen_sql(select_claus, self.__join_condition(join_tblist, INNER=True), group_claus ),
|
||||
self.__gen_sql(select_claus, self.__join_condition(join_tblist, INNER=True) ),
|
||||
)
|
||||
)
|
||||
return list(filter(None, sqls))
|
||||
|
||||
def __join_check(self,):
|
||||
tdLog.printNoPrefix("==========current sql condition check , must return query ok==========")
|
||||
for i in range(len(self.__sqls_list)):
|
||||
tdSql.query(self.__sqls_list[i])
|
||||
# if i % 10 == 0 :
|
||||
# tdLog.success(f"{i} sql is already executed success !")
|
||||
|
||||
def __join_check_old(self, tblist, checkrows, join_flag=True):
|
||||
query_conditions = self.__query_condition(tblist[0])
|
||||
join_condition = self.__join_condition(tb_list=tblist) if join_flag else " "
|
||||
for condition in query_conditions:
|
||||
where_condition = self.__where_condition(col=condition, tbname=tblist[0])
|
||||
group_having = self.__group_condition(col=condition, having=f"{condition} is not null " )
|
||||
group_no_having= self.__group_condition(col=condition )
|
||||
groups = ["", group_having, group_no_having]
|
||||
for group_condition in groups:
|
||||
if where_condition:
|
||||
sql = f" select {condition} from {tblist[0]},{tblist[1]} where {join_condition} and {where_condition} {group_condition} "
|
||||
else:
|
||||
sql = f" select {condition} from {tblist[0]},{tblist[1]} where {join_condition} {group_condition} "
|
||||
|
||||
if not join_flag :
|
||||
tdSql.error(sql=sql)
|
||||
break
|
||||
if len(tblist) == 2:
|
||||
if "ct1" in tblist or "t1" in tblist:
|
||||
self.__join_current(sql, checkrows)
|
||||
elif where_condition or "not null" in group_condition:
|
||||
self.__join_current(sql, checkrows + 2 )
|
||||
elif group_condition:
|
||||
self.__join_current(sql, checkrows + 3 )
|
||||
else:
|
||||
self.__join_current(sql, checkrows + 5 )
|
||||
if len(tblist) > 2 or len(tblist) < 1:
|
||||
tdSql.error(sql=sql)
|
||||
|
||||
def __join_current(self, sql, checkrows):
|
||||
tdSql.query(sql=sql)
|
||||
# tdSql.checkRows(checkrows)
|
||||
|
||||
def __test_error(self):
|
||||
# sourcery skip: extract-duplicate-method, move-assign-in-block
|
||||
tdLog.printNoPrefix("==========err sql condition check , must return error==========")
|
||||
err_list_1 = ["ct1","ct2", "ct4"]
|
||||
err_list_2 = ["ct1","ct2", "t1"]
|
||||
err_list_3 = ["ct1","ct4", "t1"]
|
||||
err_list_4 = ["ct2","ct4", "t1"]
|
||||
err_list_5 = ["ct1", "ct2","ct4", "t1"]
|
||||
self.__join_check_old(err_list_1, -1)
|
||||
tdLog.printNoPrefix(f"==========err sql condition check in {err_list_1} over==========")
|
||||
self.__join_check_old(err_list_2, -1)
|
||||
tdLog.printNoPrefix(f"==========err sql condition check in {err_list_2} over==========")
|
||||
self.__join_check_old(err_list_3, -1)
|
||||
tdLog.printNoPrefix(f"==========err sql condition check in {err_list_3} over==========")
|
||||
self.__join_check_old(err_list_4, -1)
|
||||
tdLog.printNoPrefix(f"==========err sql condition check in {err_list_4} over==========")
|
||||
self.__join_check_old(err_list_5, -1)
|
||||
tdLog.printNoPrefix(f"==========err sql condition check in {err_list_5} over==========")
|
||||
self.__join_check_old(["ct2", "ct4"], -1, join_flag=False)
|
||||
tdLog.printNoPrefix("==========err sql condition check in has no join condition over==========")
|
||||
|
||||
tdSql.error( f"select c1, c2 from ct2, ct4 where ct2.{PRIMARY_COL}=ct4.{PRIMARY_COL}" )
|
||||
tdSql.error( f"select ct2.c1, ct2.c2 from ct2, ct4 where ct2.{INT_COL}=ct4.{INT_COL}" )
|
||||
tdSql.error( f"select ct2.c1, ct2.c2 from ct2, ct4 where ct2.{TS_COL}=ct4.{TS_COL}" )
|
||||
tdSql.error( f"select ct2.c1, ct2.c2 from ct2, ct4 where ct2.{PRIMARY_COL}=ct4.{TS_COL}" )
|
||||
tdSql.error( f"select ct2.c1, ct1.c2 from ct2, ct4 where ct2.{PRIMARY_COL}=ct4.{PRIMARY_COL}" )
|
||||
tdSql.error( f"select ct2.c1, ct4.c2 from ct2, ct4 where ct2.{PRIMARY_COL}=ct4.{PRIMARY_COL} and c1 is not null " )
|
||||
tdSql.error( f"select ct2.c1, ct4.c2 from ct2, ct4 where ct2.{PRIMARY_COL}=ct4.{PRIMARY_COL} and ct1.c1 is not null " )
|
||||
|
||||
|
||||
tbname = ["ct1", "ct2", "ct4", "t1"]
|
||||
|
||||
# for tb in tbname:
|
||||
# for errsql in self.__join_err_check(tb):
|
||||
# tdSql.error(sql=errsql)
|
||||
# tdLog.printNoPrefix(f"==========err sql condition check in {tb} over==========")
|
||||
|
||||
|
||||
def all_test(self):
|
||||
self.__join_check()
|
||||
self.__test_error()
|
||||
|
||||
|
||||
def __create_tb(self):
|
||||
tdSql.prepare()
|
||||
|
||||
tdLog.printNoPrefix("==========step1:create table")
|
||||
create_stb_sql = f'''create table stb1(
|
||||
ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
|
||||
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
|
||||
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
|
||||
) tags (t1 int)
|
||||
'''
|
||||
create_ntb_sql = f'''create table t1(
|
||||
ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
|
||||
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
|
||||
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
|
||||
)
|
||||
'''
|
||||
tdSql.execute(create_stb_sql)
|
||||
tdSql.execute(create_ntb_sql)
|
||||
|
||||
for i in range(4):
|
||||
tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
|
||||
|
||||
|
||||
def __insert_data(self, rows):
|
||||
now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000)
|
||||
for i in range(rows):
|
||||
tdSql.execute(
|
||||
f"insert into ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
|
||||
)
|
||||
tdSql.execute(
|
||||
f"insert into ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
|
||||
)
|
||||
tdSql.execute(
|
||||
f"insert into ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
|
||||
)
|
||||
tdSql.execute(
|
||||
f'''insert into ct1 values
|
||||
( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar_测试_0', { now_time + 8 } )
|
||||
( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar_测试_9', { now_time + 9 } )
|
||||
'''
|
||||
)
|
||||
|
||||
tdSql.execute(
|
||||
f'''insert into ct4 values
|
||||
( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
|
||||
( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
|
||||
( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
|
||||
(
|
||||
{ now_time + 5184000000}, {pow(2,31)-pow(2,15)}, {pow(2,63)-pow(2,30)}, 32767, 127,
|
||||
{ 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000}
|
||||
)
|
||||
(
|
||||
{ now_time + 2592000000 }, {pow(2,31)-pow(2,16)}, {pow(2,63)-pow(2,31)}, 32766, 126,
|
||||
{ 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000}
|
||||
)
|
||||
'''
|
||||
)
|
||||
|
||||
tdSql.execute(
|
||||
f'''insert into ct2 values
|
||||
( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
|
||||
( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
|
||||
( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
|
||||
(
|
||||
{ now_time + 5184000000 }, { -1 * pow(2,31) + pow(2,15) }, { -1 * pow(2,63) + pow(2,30) }, -32766, -126,
|
||||
{ -1 * 3.2 * pow(10,38) }, { -1.2 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 }
|
||||
)
|
||||
(
|
||||
{ now_time + 2592000000 }, { -1 * pow(2,31) + pow(2,16) }, { -1 * pow(2,63) + pow(2,31) }, -32767, -127,
|
||||
{ - 3.3 * pow(10,38) }, { -1.3 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 }
|
||||
)
|
||||
'''
|
||||
)
|
||||
|
||||
for i in range(rows):
|
||||
insert_data = f'''insert into t1 values
|
||||
( { now_time - i * 3600000 }, {i}, {i * 11111}, { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2},
|
||||
"binary_{i}", "nchar_测试_{i}", { now_time - 1000 * i } )
|
||||
'''
|
||||
tdSql.execute(insert_data)
|
||||
tdSql.execute(
|
||||
f'''insert into t1 values
|
||||
( { now_time + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
|
||||
( { now_time - (( rows // 2 ) * 60 + 30) * 60000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
|
||||
( { now_time - rows * 3600000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
|
||||
( { now_time + 7200000 }, { pow(2,31) - pow(2,15) }, { pow(2,63) - pow(2,30) }, 32767, 127,
|
||||
{ 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 },
|
||||
"binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 }
|
||||
)
|
||||
(
|
||||
{ now_time + 3600000 } , { pow(2,31) - pow(2,16) }, { pow(2,63) - pow(2,31) }, 32766, 126,
|
||||
{ 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 },
|
||||
"binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 }
|
||||
)
|
||||
'''
|
||||
)
|
||||
|
||||
|
||||
def run(self):
|
||||
tdSql.prepare()
|
||||
|
||||
tdLog.printNoPrefix("==========step1:create table")
|
||||
self.__create_tb()
|
||||
|
||||
tdLog.printNoPrefix("==========step2:insert data")
|
||||
self.rows = 10
|
||||
self.__insert_data(self.rows)
|
||||
|
||||
tdLog.printNoPrefix("==========step3:all check")
|
||||
self.all_test()
|
||||
|
||||
tdDnodes.stop(1)
|
||||
tdDnodes.start(1)
|
||||
|
||||
tdSql.execute("use db")
|
||||
|
||||
tdLog.printNoPrefix("==========step4:after wal, all check again ")
|
||||
self.all_test()
|
||||
|
||||
def stop(self):
|
||||
tdSql.close()
|
||||
tdLog.success(f"{__file__} successfully executed")
|
||||
|
||||
tdCases.addLinux(__file__, TDTestCase())
|
||||
tdCases.addWindows(__file__, TDTestCase())
|
|
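A minimal sketch of what the __join_condition helper above produces: every table after the first is joined back to the first table's timestamp column, using either join or inner join. Pure Python, not part of the commit; the table names are the ones these cases create.

def join_condition(tb_list, filter="ts", inner=False):
    table_reference = tb_list[0]
    condition = table_reference
    join = "inner join" if inner else "join"
    for i in range(len(tb_list) - 1):
        condition += f" {join} {tb_list[i + 1]} on {table_reference}.{filter}={tb_list[i + 1]}.{filter}"
    return condition

print(join_condition(["ct1", "ct4"]))                    # ct1 join ct4 on ct1.ts=ct4.ts
print(join_condition(["ct2", "ct4", "t1"], inner=True))  # ct2 inner join ct4 ... inner join t1 ...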
@ -0,0 +1,358 @@
|
|||
import datetime
|
||||
|
||||
from util.log import *
|
||||
from util.sql import *
|
||||
from util.cases import *
|
||||
from util.dnodes import *
|
||||
|
||||
PRIMARY_COL = "ts"
|
||||
|
||||
INT_COL = "c1"
|
||||
BINT_COL = "c2"
|
||||
SINT_COL = "c3"
|
||||
TINT_COL = "c4"
|
||||
FLOAT_COL = "c5"
|
||||
DOUBLE_COL = "c6"
|
||||
BOOL_COL = "c7"
|
||||
|
||||
BINARY_COL = "c8"
|
||||
NCHAR_COL = "c9"
|
||||
TS_COL = "c10"
|
||||
|
||||
NUM_COL = [ INT_COL, BINT_COL, SINT_COL, TINT_COL, FLOAT_COL, DOUBLE_COL, ]
|
||||
CHAR_COL = [ BINARY_COL, NCHAR_COL, ]
|
||||
BOOLEAN_COL = [ BOOL_COL, ]
|
||||
TS_TYPE_COL = [ TS_COL, ]
|
||||
|
||||
ALL_COL = [ INT_COL, BINT_COL, SINT_COL, TINT_COL, FLOAT_COL, DOUBLE_COL, BOOL_COL, BINARY_COL, NCHAR_COL, TS_COL ]
|
||||
|
||||
class TDTestCase:
|
||||
|
||||
def init(self, conn, logSql):
|
||||
tdLog.debug(f"start to excute {__file__}")
|
||||
tdSql.init(conn.cursor())
|
||||
|
||||
    def __query_condition(self, tbname):
        query_condition = [f"cast({col} as bigint)" for col in ALL_COL]
        for num_col in NUM_COL:
            query_condition.extend(
                (
                    f"{tbname}.{num_col}",
                    f"abs( {tbname}.{num_col} )",
                    f"acos( {tbname}.{num_col} )",
                    f"asin( {tbname}.{num_col} )",
                    f"atan( {tbname}.{num_col} )",
                    f"avg( {tbname}.{num_col} )",
                    f"ceil( {tbname}.{num_col} )",
                    f"cos( {tbname}.{num_col} )",
                    f"count( {tbname}.{num_col} )",
                    f"floor( {tbname}.{num_col} )",
                    f"log( {tbname}.{num_col}, {tbname}.{num_col})",
                    f"max( {tbname}.{num_col} )",
                    f"min( {tbname}.{num_col} )",
                    f"pow( {tbname}.{num_col}, 2)",
                    f"round( {tbname}.{num_col} )",
                    f"sum( {tbname}.{num_col} )",
                    f"sin( {tbname}.{num_col} )",
                    f"sqrt( {tbname}.{num_col} )",
                    f"tan( {tbname}.{num_col} )",
                    f"cast( {tbname}.{num_col} as timestamp)",
                )
            )
            query_condition.extend(f"{num_col} + {any_col}" for any_col in ALL_COL)
        for char_col in CHAR_COL:
            query_condition.extend(
                (
                    f"count({tbname}.{char_col})",
                    f"sum(cast({tbname}.{char_col} as bigint))",
                    f"max(cast({tbname}.{char_col} as bigint))",
                    f"min(cast({tbname}.{char_col} as bigint))",
                    f"avg(cast({tbname}.{char_col} as bigint))",
                )
            )
        query_condition.extend(
            (
                1010,
            )
        )

        return query_condition

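    # Chain tb_list into a (inner) join clause, joining every table to the first one
    # on the given filter column (the primary timestamp column by default).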
    def __join_condition(self, tb_list, filter=PRIMARY_COL, INNER=False):
        table_reference = tb_list[0]
        join_condition = table_reference
        join = "inner join" if INNER else "join"
        for i in range(len(tb_list[1:])):
            join_condition += f" {join} {tb_list[i+1]} on {table_reference}.{filter}={tb_list[i+1]}.{filter}"

        return join_condition

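    # Derive a where-clause for a generated expression: strip any aggregate wrapper
    # (count/max/sum/min) and require the bare expression to be not null; otherwise
    # fall back to a type-appropriate filter for the given column.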
    def __where_condition(self, col=None, tbname=None, query_condition=None):
        if query_condition and isinstance(query_condition, str):
            if query_condition.startswith("count"):
                query_condition = query_condition[6:-1]
            elif query_condition.startswith("max"):
                query_condition = query_condition[4:-1]
            elif query_condition.startswith("sum"):
                query_condition = query_condition[4:-1]
            elif query_condition.startswith("min"):
                query_condition = query_condition[4:-1]

        if query_condition:
            return f" where {query_condition} is not null"
        if col in NUM_COL:
            return f" where abs( {tbname}.{col} ) >= 0"
        if col in CHAR_COL:
            return f" where lower( {tbname}.{col} ) like 'bina%' or lower( {tbname}.{col} ) like '_cha%' "
        if col in BOOLEAN_COL:
            return f" where {tbname}.{col} in (false, true) "
        if col in TS_TYPE_COL or col == PRIMARY_COL:
            return f" where cast( {tbname}.{col} as binary(16) ) is not null "

        return ""

    def __group_condition(self, col, having=None):
        if isinstance(col, str):
            if col.startswith("count"):
                col = col[6:-1]
            elif col.startswith("max"):
                col = col[4:-1]
            elif col.startswith("sum"):
                col = col[4:-1]
            elif col.startswith("min"):
                col = col[4:-1]
        return f" group by {col} having {having}" if having else f" group by {col} "

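    # Assemble one spread() statement; return None (filtered out later) when the select
    # expression references a table other than the one in the from-clause.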
    def __single_sql(self, select_clause, from_clause, where_condition="", group_condition=""):
        if isinstance(select_clause, str) and "on" not in from_clause and select_clause.split(".")[0] != from_clause.split(".")[0]:
            return
        return f"select spread({select_clause}) from {from_clause} {where_condition} {group_condition}"

    @property
    def __tb_list(self):
        return [
            "ct1",
            "ct4",
            "t1",
            "ct2",
            "stb1",
        ]

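    # Build the full list of spread() statements: every generated expression per table,
    # each combined with where / group-by / having variants.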
    def sql_list(self):
        sqls = []
        __no_join_tblist = self.__tb_list
        for tb in __no_join_tblist:
            select_claus_list = self.__query_condition(tb)
            for select_claus in select_claus_list:
                group_claus = self.__group_condition(col=select_claus)
                where_claus = self.__where_condition(query_condition=select_claus)
                having_claus = self.__group_condition(col=select_claus, having=f"{select_claus} is not null")
                sqls.extend(
                    (
                        self.__single_sql(select_claus, tb, where_claus, having_claus),
                        self.__single_sql(select_claus, tb),
                        self.__single_sql(select_claus, tb, where_condition=where_claus),
                        self.__single_sql(select_claus, tb, group_condition=group_claus),
                    )
                )

        # drop the None entries produced by __single_sql for mismatched tables
        return list(filter(None, sqls))

    def __get_type(self, col):
        if tdSql.cursor.istype(col, "BOOL"):
            return "BOOL"
        if tdSql.cursor.istype(col, "INT"):
            return "INT"
        if tdSql.cursor.istype(col, "BIGINT"):
            return "BIGINT"
        if tdSql.cursor.istype(col, "TINYINT"):
            return "TINYINT"
        if tdSql.cursor.istype(col, "SMALLINT"):
            return "SMALLINT"
        if tdSql.cursor.istype(col, "FLOAT"):
            return "FLOAT"
        if tdSql.cursor.istype(col, "DOUBLE"):
            return "DOUBLE"
        if tdSql.cursor.istype(col, "BINARY"):
            return "BINARY"
        if tdSql.cursor.istype(col, "NCHAR"):
            return "NCHAR"
        if tdSql.cursor.istype(col, "TIMESTAMP"):
            return "TIMESTAMP"
        if tdSql.cursor.istype(col, "JSON"):
            return "JSON"
        if tdSql.cursor.istype(col, "TINYINT UNSIGNED"):
            return "TINYINT UNSIGNED"
        if tdSql.cursor.istype(col, "SMALLINT UNSIGNED"):
            return "SMALLINT UNSIGNED"
        if tdSql.cursor.istype(col, "INT UNSIGNED"):
            return "INT UNSIGNED"
        if tdSql.cursor.istype(col, "BIGINT UNSIGNED"):
            return "BIGINT UNSIGNED"

    def spread_check(self):
        sqls = self.sql_list()
        tdLog.printNoPrefix("===step 1: current case, must return query OK")
        for i in range(len(sqls)):
            tdLog.info(f"sql: {sqls[i]}")
            tdSql.query(sqls[i])

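    # Spot-check expected row counts: grouping ct4 by c1 is expected to yield rows + 3
    # groups (the loop values, the NULL rows, and the two boundary rows), grouping by the
    # bool column yields 3 groups (0/1/NULL), and the join aggregates down to one row.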
    def __test_current(self):
        tdSql.query("select spread(ts) from ct1")
        tdSql.checkRows(1)
        tdSql.query("select spread(c1) from ct2")
        tdSql.checkRows(1)
        tdSql.query("select spread(c1) from ct4 group by c1")
        tdSql.checkRows(self.rows + 3)
        tdSql.query("select spread(c1) from ct4 group by c7")
        tdSql.checkRows(3)
        tdSql.query("select spread(ct2.c1) from ct4 join ct2 on ct4.ts=ct2.ts")
        tdSql.checkRows(1)

        self.spread_check()

    def __test_error(self):

        tdLog.printNoPrefix("===step 0: err case, must return err")
        tdSql.error( "select spread() from ct1" )
        tdSql.error( "select spread(1, 2) from ct2" )
        tdSql.error( f"select spread({NUM_COL[0]}, {NUM_COL[1]}) from ct4" )
        tdSql.error( f"select spread({BOOLEAN_COL[0]}) from t1" )
        tdSql.error( f"select spread({CHAR_COL[0]}) from stb1" )

        # tdSql.error( ''' select spread(['c1 + c1', 'c1 + c2', 'c1 + c3', 'c1 + c4', 'c1 + c5', 'c1 + c6', 'c1 + c7', 'c1 + c8', 'c1 + c9', 'c1 + c10'])
        #     from ct1
        #     where ['c1 + c1', 'c1 + c2', 'c1 + c3', 'c1 + c4', 'c1 + c5', 'c1 + c6', 'c1 + c7', 'c1 + c8', 'c1 + c9', 'c1 + c10'] is not null
        #     group by ['c1 + c1', 'c1 + c2', 'c1 + c3', 'c1 + c4', 'c1 + c5', 'c1 + c6', 'c1 + c7', 'c1 + c8', 'c1 + c9', 'c1 + c10']
        #     having ['c1 + c1', 'c1 + c2', 'c1 + c3', 'c1 + c4', 'c1 + c5', 'c1 + c6', 'c1 + c7', 'c1 + c8', 'c1 + c9', 'c1 + c10'] is not null ''' )
        # tdSql.error( "select c1 from ct1 union select c1 from ct2 union select c1 from ct4 ")

    def all_test(self):
        self.__test_error()
        self.__test_current()

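    # Create the super table stb1 with four child tables ct1..ct4 plus a regular table t1,
    # all sharing the same column layout.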
    def __create_tb(self):

        tdLog.printNoPrefix("==========step1:create table")
        create_stb_sql = f'''create table stb1(
                ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
                {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
                {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
            ) tags (t1 int)
            '''
        create_ntb_sql = f'''create table t1(
                ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
                {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
                {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
            )
            '''
        tdSql.execute(create_stb_sql)
        tdSql.execute(create_ntb_sql)

        for i in range(4):
            tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')

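    # Populate ct1/ct2/ct4 and t1 with `rows` regular rows plus NULL-only rows and
    # boundary-value rows (INT/BIGINT/SMALLINT/TINYINT/FLOAT/DOUBLE limits) so that
    # spread() sees both ordinary and edge-case data.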
    def __insert_data(self, rows):
        now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000)
        for i in range(rows):
            tdSql.execute(
                f"insert into ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
            )
            tdSql.execute(
                f"insert into ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
            )
            tdSql.execute(
                f"insert into ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
            )
        tdSql.execute(
            f'''insert into ct1 values
            ( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar_测试_0', { now_time + 8 } )
            ( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar_测试_9', { now_time + 9 } )
            '''
        )

        tdSql.execute(
            f'''insert into ct4 values
            ( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            ( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            (
                { now_time + 5184000000 }, { pow(2,31) - pow(2,15) }, { pow(2,63) - pow(2,30) }, 32767, 127,
                { 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 }
            )
            (
                { now_time + 2592000000 }, { pow(2,31) - pow(2,16) }, { pow(2,63) - pow(2,31) }, 32766, 126,
                { 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 }
            )
            '''
        )

        tdSql.execute(
            f'''insert into ct2 values
            ( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            ( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            (
                { now_time + 5184000000 }, { -1 * pow(2,31) + pow(2,15) }, { -1 * pow(2,63) + pow(2,30) }, -32766, -126,
                { -1 * 3.2 * pow(10,38) }, { -1.2 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 }
            )
            (
                { now_time + 2592000000 }, { -1 * pow(2,31) + pow(2,16) }, { -1 * pow(2,63) + pow(2,31) }, -32767, -127,
                { -3.3 * pow(10,38) }, { -1.3 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 }
            )
            '''
        )

        for i in range(rows):
            insert_data = f'''insert into t1 values
                ( { now_time - i * 3600000 }, {i}, {i * 11111}, { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2},
                    "binary_{i}", "nchar_测试_{i}", { now_time - 1000 * i } )
                '''
            tdSql.execute(insert_data)
        tdSql.execute(
            f'''insert into t1 values
            ( { now_time + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            ( { now_time - (( rows // 2 ) * 60 + 30) * 60000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            ( { now_time - rows * 3600000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            ( { now_time + 7200000 }, { pow(2,31) - pow(2,15) }, { pow(2,63) - pow(2,30) }, 32767, 127,
                { 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 },
                "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 }
            )
            (
                { now_time + 3600000 }, { pow(2,31) - pow(2,16) }, { pow(2,63) - pow(2,31) }, 32766, 126,
                { 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 },
                "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 }
            )
            '''
        )

    def run(self):
        tdSql.prepare()

        tdLog.printNoPrefix("==========step1:create table")
        self.__create_tb()

        tdLog.printNoPrefix("==========step2:insert data")
        self.rows = 10
        self.__insert_data(self.rows)

        tdLog.printNoPrefix("==========step3:all check")
        self.all_test()

        tdDnodes.stop(1)
        tdDnodes.start(1)

        tdSql.execute("use db")

        tdLog.printNoPrefix("==========step4:after wal, all check again ")
        self.all_test()

    def stop(self):
        tdSql.close()
        tdLog.success(f"{__file__} successfully executed")

tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())