Merge branch '3.0' into feature/stream
commit 9744dd31fb
@@ -61,6 +61,7 @@ extern int32_t tsNumOfRpcThreads;
 extern int32_t tsNumOfCommitThreads;
 extern int32_t tsNumOfTaskQueueThreads;
 extern int32_t tsNumOfMnodeQueryThreads;
+extern int32_t tsNumOfMnodeFetchThreads;
 extern int32_t tsNumOfMnodeReadThreads;
 extern int32_t tsNumOfVnodeQueryThreads;
 extern int32_t tsNumOfVnodeFetchThreads;

@@ -106,6 +106,7 @@ typedef enum _mgmt_table {
   TSDB_MGMT_TABLE_CONNS,
   TSDB_MGMT_TABLE_QUERIES,
   TSDB_MGMT_TABLE_VNODES,
+  TSDB_MGMT_TABLE_APPS,
   TSDB_MGMT_TABLE_MAX,
 } EShowType;

@@ -1314,8 +1315,6 @@ int32_t tSerializeSSetStandbyReq(void* buf, int32_t bufLen, SSetStandbyReq* pReq
 int32_t tDeserializeSSetStandbyReq(void* buf, int32_t bufLen, SSetStandbyReq* pReq);
 
 typedef struct {
-  int32_t connId;   // todo remove
-  int32_t queryId;  // todo remove
+  char queryStrId[TSDB_QUERY_ID_LEN];
 } SKillQueryReq;

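The kill-query request now carries a single printable id instead of the connId/queryId pair. A minimal sketch of how such an id could be built and split back apart, assuming the client joins the hex connection id and query id with a colon (the mnode-side parsing in mndProcessKillQueryReq further down handles exactly this shape); buildQueryStrId is a hypothetical helper and the 26-byte length is an assumption mirroring the query_id column width:

```c
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define TSDB_QUERY_ID_LEN 26  // assumed width; matches the query_id schema column below

// Hypothetical helper: render the id the way the mnode expects to parse it.
static void buildQueryStrId(char *buf, size_t len, uint32_t connId, uint64_t queryId) {
  snprintf(buf, len, "%x:%" PRIx64, connId, queryId);
}

// Mirror of the parsing in mndProcessKillQueryReq: split at ':' and read hex.
static int parseQueryStrId(char *str, uint32_t *connId, uint64_t *queryId) {
  char *p = strchr(str, ':');
  if (p == NULL) return -1;
  *p = 0;
  *connId = (uint32_t)strtoul(str, NULL, 16);
  *queryId = strtoull(p + 1, NULL, 16);
  return 0;
}

int main(void) {
  char id[TSDB_QUERY_ID_LEN] = {0};
  buildQueryStrId(id, sizeof(id), 0x2345, 0x2345);

  uint32_t connId = 0;
  uint64_t queryId = 0;
  parseQueryStrId(id, &connId, &queryId);
  printf("connId=%x queryId=%" PRIx64 "\n", connId, queryId);  // "2345:2345" round-trips
  return 0;
}
```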
@@ -1323,7 +1322,7 @@ int32_t tSerializeSKillQueryReq(void* buf, int32_t bufLen, SKillQueryReq* pReq);
 int32_t tDeserializeSKillQueryReq(void* buf, int32_t bufLen, SKillQueryReq* pReq);
 
 typedef struct {
-  int32_t  connId;
+  uint32_t connId;
 } SKillConnReq;
 
 int32_t tSerializeSKillConnReq(void* buf, int32_t bufLen, SKillConnReq* pReq);

@@ -2010,7 +2009,6 @@ typedef struct {
   int64_t useconds;
   int64_t stime;  // timestamp precision ms
   int64_t reqRid;
-  int32_t pid;
   bool    stableQuery;
   char    fqdn[TSDB_FQDN_LEN];
   int32_t subPlanNum;

@@ -2019,8 +2017,6 @@ typedef struct {
 
 typedef struct {
   uint32_t connId;
-  int32_t  pid;
-  char     app[TSDB_APP_NAME_LEN];
   SArray*  queryDesc;  // SArray<SQueryDesc>
 } SQueryHbReqBasic;

@@ -2035,9 +2031,31 @@ typedef struct {
   SArray* pQnodeList;
 } SQueryHbRspBasic;
 
+typedef struct SAppClusterSummary {
+  uint64_t numOfInsertsReq;
+  uint64_t numOfInsertRows;
+  uint64_t insertElapsedTime;
+  uint64_t insertBytes;  // submit to tsdb since launched.
+
+  uint64_t fetchBytes;
+  uint64_t queryElapsedTime;
+  uint64_t numOfSlowQueries;
+  uint64_t totalRequests;
+  uint64_t currentRequests;  // the number of SRequestObj
+} SAppClusterSummary;
+
+typedef struct {
+  int64_t appId;
+  int32_t pid;
+  char    name[TSDB_APP_NAME_LEN];
+  int64_t startTime;
+  SAppClusterSummary summary;
+} SAppHbReq;
+
 typedef struct {
   SClientHbKey      connKey;
   int64_t           clusterId;
+  SAppHbReq         app;
   SQueryHbReqBasic* query;
   SHashObj*         info;  // hash<Skv.key, Skv>
 } SClientHbReq;

@@ -292,6 +292,8 @@ int32_t catalogUpdateVgEpSet(SCatalog* pCtg, const char* dbFName, int32_t vgId,
 
 int32_t ctgdLaunchAsyncCall(SCatalog* pCtg, SRequestConnInfo* pConn, uint64_t reqId, bool forceUpdate);
 
+int32_t catalogClearCache(void);
+
 /**
  * Destroy catalog and relase all resources
  */

@@ -33,8 +33,6 @@ typedef enum {
   JOB_TASK_STATUS_PARTIAL_SUCCEED,
   JOB_TASK_STATUS_SUCCEED,
   JOB_TASK_STATUS_FAILED,
-  JOB_TASK_STATUS_CANCELLING,
-  JOB_TASK_STATUS_CANCELLED,
   JOB_TASK_STATUS_DROPPING,
 } EJobTaskType;

@@ -73,13 +73,14 @@ typedef void (*schedulerExecCallback)(SQueryResult* pResult, void* param, int32_
 typedef void (*schedulerFetchCallback)(void* pResult, void* param, int32_t code);
 
 typedef struct SSchedulerReq {
-  SRequestConnInfo *pConn;
-  SArray *pNodeList;
-  SQueryPlan *pDag;
-  const char *sql;
-  int64_t startTs;
+  SRequestConnInfo     *pConn;
+  SArray               *pNodeList;
+  SQueryPlan           *pDag;
+  const char           *sql;
+  int64_t               startTs;
+  bool                 *reqKilled;
   schedulerExecCallback fp;
-  void* cbParam;
+  void*                 cbParam;
 } SSchedulerReq;

@@ -127,7 +128,7 @@ void schedulerStopQueryHb(void *pTrans);
  * Free the query job
  * @param pJob
  */
-void schedulerFreeJob(int64_t job);
+void schedulerFreeJob(int64_t job, int32_t errCode);
 
 void schedulerDestroy(void);

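Two related API changes meet here: SSchedulerReq gains a reqKilled flag the scheduler can poll, and schedulerFreeJob now takes the error code that caused the teardown. A calling-pattern sketch under those assumptions — it leans on the scheduler's own headers for the types, and runQuery is a hypothetical wrapper, not code from this commit:

```c
// Hypothetical wrapper around the new scheduler contract; SSchedulerReq,
// schedulerExecJob and schedulerFreeJob come from the scheduler headers above.
static int32_t runQuery(SRequestConnInfo *pConn, SQueryPlan *pDag, const char *sql,
                        bool *killed, int64_t *pJob, SQueryResult *pRes) {
  SSchedulerReq req = {
      .pConn = pConn,
      .pNodeList = NULL,
      .pDag = pDag,
      .sql = sql,
      .startTs = taosGetTimestampMs(),  // assumed available from the os layer
      .fp = NULL,                       // NULL selects the synchronous path
      .cbParam = NULL,
      .reqKilled = killed,              // scheduler polls this flag to abort early
  };

  int32_t code = schedulerExecJob(&req, pJob, pRes);

  if (code != TSDB_CODE_SUCCESS && *pJob != 0) {
    // Report why the job is going away: 0 for a normal teardown,
    // TSDB_CODE_TSC_QUERY_KILLED when the user stopped the query.
    schedulerFreeJob(*pJob, 0);
  }
  return code;
}
```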
@@ -127,6 +127,7 @@ int32_t* taosGetErrno();
 #define TSDB_CODE_TSC_STMT_API_ERROR      TAOS_DEF_ERROR_CODE(0, 0X0225)
 #define TSDB_CODE_TSC_STMT_TBNAME_ERROR   TAOS_DEF_ERROR_CODE(0, 0X0226)
 #define TSDB_CODE_TSC_STMT_CLAUSE_ERROR   TAOS_DEF_ERROR_CODE(0, 0X0227)
+#define TSDB_CODE_TSC_QUERY_KILLED        TAOS_DEF_ERROR_CODE(0, 0X0228)
 
 // mnode-common
 #define TSDB_CODE_MND_APP_ERROR           TAOS_DEF_ERROR_CODE(0, 0x0300)

@@ -571,6 +572,7 @@ int32_t* taosGetErrno();
 #define TSDB_CODE_SCH_INTERNAL_ERROR      TAOS_DEF_ERROR_CODE(0, 0x2502)
 #define TSDB_CODE_SCH_IGNORE_ERROR        TAOS_DEF_ERROR_CODE(0, 0x2503)
 #define TSDB_CODE_SCH_TIMEOUT_ERROR       TAOS_DEF_ERROR_CODE(0, 0x2504)
+#define TSDB_CODE_SCH_JOB_IS_DROPPING     TAOS_DEF_ERROR_CODE(0, 0x2505)
 #define TSDB_CODE_QW_MSG_ERROR            TAOS_DEF_ERROR_CODE(0, 0x2550)
 
 //parser

@@ -76,10 +76,12 @@ typedef int32_t (*FHbReqHandle)(SClientHbKey* connKey, void* param, SClientHbReq
 
 typedef struct {
   int8_t  inited;
+  int64_t appId;
   // ctl
   int8_t        threadStop;
   TdThread      thread;
   TdThreadMutex lock;  // used when app init and cleanup
+  SHashObj     *appSummary;
   SArray       *appHbMgrs;  // SArray<SAppHbMgr*> one for each cluster
   FHbReqHandle  reqHandle[CONN_TYPE__MAX];
   FHbRspHandle  rspHandle[CONN_TYPE__MAX];

@@ -92,33 +94,20 @@ typedef struct SQueryExecMetric {
   int64_t rsp;  // receive response from server, us
 } SQueryExecMetric;
 
-typedef struct SInstanceSummary {
-  uint64_t numOfInsertsReq;
-  uint64_t numOfInsertRows;
-  uint64_t insertElapsedTime;
-  uint64_t insertBytes;  // submit to tsdb since launched.
-
-  uint64_t fetchBytes;
-  uint64_t queryElapsedTime;
-  uint64_t numOfSlowQueries;
-  uint64_t totalRequests;
-  uint64_t currentRequests;  // the number of SRequestObj
-} SInstanceSummary;
-
 typedef struct SHeartBeatInfo {
   void* pTimer;  // timer, used to send request msg to mnode
 } SHeartBeatInfo;
 
 struct SAppInstInfo {
-  int64_t numOfConns;
-  SCorEpSet mgmtEp;
-  TdThreadMutex qnodeMutex;
-  SArray* pQnodeList;
-  SInstanceSummary summary;
-  SList* pConnList;  // STscObj linked list
-  uint64_t clusterId;
-  void* pTransporter;
-  SAppHbMgr* pAppHbMgr;
+  int64_t            numOfConns;
+  SCorEpSet          mgmtEp;
+  TdThreadMutex      qnodeMutex;
+  SArray*            pQnodeList;
+  SAppClusterSummary summary;
+  SList*             pConnList;  // STscObj linked list
+  uint64_t           clusterId;
+  void*              pTransporter;
+  SAppHbMgr*         pAppHbMgr;
 };
 
 typedef struct SAppInfo {

@@ -215,6 +204,7 @@ typedef struct SRequestObj {
   SRequestSendRecvBody body;
   bool                 stableQuery;
 
+  bool     killed;
   uint32_t prevCode;  // previous error code: todo refactor, add update flag for catalog
   uint32_t retry;
 } SRequestObj;

@@ -48,7 +48,7 @@ static void registerRequest(SRequestObj *pRequest) {
   int32_t num = atomic_add_fetch_32(&pTscObj->numOfReqs, 1);
 
   if (pTscObj->pAppInfo) {
-    SInstanceSummary *pSummary = &pTscObj->pAppInfo->summary;
+    SAppClusterSummary *pSummary = &pTscObj->pAppInfo->summary;
 
     int32_t total = atomic_add_fetch_64((int64_t *)&pSummary->totalRequests, 1);
     int32_t currentInst = atomic_add_fetch_64((int64_t *)&pSummary->currentRequests, 1);

@@ -62,7 +62,7 @@ static void deregisterRequest(SRequestObj *pRequest) {
   assert(pRequest != NULL);
 
   STscObj *pTscObj = pRequest->pTscObj;
-  SInstanceSummary *pActivity = &pTscObj->pAppInfo->summary;
+  SAppClusterSummary *pActivity = &pTscObj->pAppInfo->summary;
 
   int32_t currentInst = atomic_sub_fetch_64((int64_t *)&pActivity->currentRequests, 1);
   int32_t num = atomic_sub_fetch_32(&pTscObj->numOfReqs, 1);

@@ -229,7 +229,7 @@ static void doDestroyRequest(void *p) {
   taosHashRemove(pRequest->pTscObj->pRequests, &pRequest->self, sizeof(pRequest->self));
 
   if (pRequest->body.queryJob != 0) {
-    schedulerFreeJob(pRequest->body.queryJob);
+    schedulerFreeJob(pRequest->body.queryJob, 0);
   }
 
   taosMemoryFreeClear(pRequest->msgBuf);

@@ -164,6 +164,7 @@ static int32_t hbQueryHbRspHandle(SAppHbMgr *pAppHbMgr, SClientHbRsp *pRsp) {
     pTscObj->connId = pRsp->query->connId;
 
     if (pRsp->query->killRid) {
+      tscDebug("request rid %" PRIx64 " need to be killed now", pRsp->query->killRid);
       SRequestObj *pRequest = acquireRequest(pRsp->query->killRid);
       if (NULL == pRequest) {
         tscDebug("request 0x%" PRIx64 " not exist to kill", pRsp->query->killRid);

@@ -304,7 +305,7 @@ int32_t hbBuildQueryDesc(SQueryHbReqBasic *hbBasic, STscObj *pObj) {
   while (pIter != NULL) {
     int64_t *rid = pIter;
     SRequestObj *pRequest = acquireRequest(*rid);
-    if (NULL == pRequest) {
+    if (NULL == pRequest || pRequest->killed) {
       pIter = taosHashIterate(pObj->pRequests, pIter);
       continue;
     }

@@ -314,7 +315,6 @@ int32_t hbBuildQueryDesc(SQueryHbReqBasic *hbBasic, STscObj *pObj) {
     desc.queryId = pRequest->requestId;
     desc.useconds = now - pRequest->metric.start;
     desc.reqRid = pRequest->self;
-    desc.pid = hbBasic->pid;
     desc.stableQuery = pRequest->stableQuery;
     taosGetFqdn(desc.fqdn);
     desc.subPlanNum = pRequest->body.pDag ? pRequest->body.pDag->numOfSubplans : 0;

@@ -360,8 +360,6 @@ int32_t hbGetQueryBasicInfo(SClientHbKey *connKey, SClientHbReq *req) {
   }
 
   hbBasic->connId = pTscObj->connId;
-  hbBasic->pid = taosGetPId();
-  taosGetAppName(hbBasic->app, NULL);
 
   int32_t numOfQueries = pTscObj->pRequests ? taosHashGetSize(pTscObj->pRequests) : 0;
   if (numOfQueries <= 0) {

@@ -507,6 +505,21 @@ int32_t hbGetExpiredStbInfo(SClientHbKey *connKey, struct SCatalog *pCatalog, SC
   return TSDB_CODE_SUCCESS;
 }
 
+int32_t hbGetAppInfo(int64_t clusterId, SClientHbReq *req) {
+  SAppHbReq *pApp = taosHashGet(clientHbMgr.appSummary, &clusterId, sizeof(clusterId));
+  if (NULL != pApp) {
+    memcpy(&req->app, pApp, sizeof(*pApp));
+  } else {
+    memset(&req->app.summary, 0, sizeof(req->app.summary));
+    req->app.pid = taosGetPId();
+    req->app.appId = clientHbMgr.appId;
+    taosGetAppName(req->app.name, NULL);
+  }
+
+  return TSDB_CODE_SUCCESS;
+}
+
 int32_t hbQueryHbReqHandle(SClientHbKey *connKey, void *param, SClientHbReq *req) {
   int64_t         *clusterId = (int64_t *)param;
   struct SCatalog *pCatalog = NULL;

@@ -517,6 +530,8 @@ int32_t hbQueryHbReqHandle(SClientHbKey *connKey, void *param, SClientHbReq *req
     return code;
   }
 
+  hbGetAppInfo(*clusterId, req);
+
   hbGetQueryBasicInfo(connKey, req);
 
   code = hbGetExpiredUserInfo(connKey, pCatalog, req);

@@ -589,6 +604,50 @@ void hbThreadFuncUnexpectedStopped(void) {
   atomic_store_8(&clientHbMgr.threadStop, 2);
 }
 
+void hbMergeSummary(SAppClusterSummary *dst, SAppClusterSummary *src) {
+  dst->numOfInsertsReq += src->numOfInsertsReq;
+  dst->numOfInsertRows += src->numOfInsertRows;
+  dst->insertElapsedTime += src->insertElapsedTime;
+  dst->insertBytes += src->insertBytes;
+  dst->fetchBytes += src->fetchBytes;
+  dst->queryElapsedTime += src->queryElapsedTime;
+  dst->numOfSlowQueries += src->numOfSlowQueries;
+  dst->totalRequests += src->totalRequests;
+  dst->currentRequests += src->currentRequests;
+}
+
+int32_t hbGatherAppInfo(void) {
+  SAppHbReq req = {0};
+  int       sz = taosArrayGetSize(clientHbMgr.appHbMgrs);
+  if (sz > 0) {
+    req.pid = taosGetPId();
+    req.appId = clientHbMgr.appId;
+    taosGetAppName(req.name, NULL);
+  }
+
+  taosHashClear(clientHbMgr.appSummary);
+
+  for (int32_t i = 0; i < sz; ++i) {
+    SAppHbMgr *pAppHbMgr = taosArrayGetP(clientHbMgr.appHbMgrs, i);
+    uint64_t   clusterId = pAppHbMgr->pAppInstInfo->clusterId;
+    SAppHbReq *pApp = taosHashGet(clientHbMgr.appSummary, &clusterId, sizeof(clusterId));
+    if (NULL == pApp) {
+      memcpy(&req.summary, &pAppHbMgr->pAppInstInfo->summary, sizeof(req.summary));
+      req.startTime = pAppHbMgr->startTime;
+      taosHashPut(clientHbMgr.appSummary, &clusterId, sizeof(clusterId), &req, sizeof(req));
+    } else {
+      if (pAppHbMgr->startTime < pApp->startTime) {
+        pApp->startTime = pAppHbMgr->startTime;
+      }
+
+      hbMergeSummary(&pApp->summary, &pAppHbMgr->pAppInstInfo->summary);
+    }
+  }
+
+  return TSDB_CODE_SUCCESS;
+}
+
 static void *hbThreadFunc(void *param) {
   setThreadName("hb");
 #ifdef WINDOWS

@@ -605,6 +664,10 @@ static void *hbThreadFunc(void *param) {
     taosThreadMutexLock(&clientHbMgr.lock);
 
     int sz = taosArrayGetSize(clientHbMgr.appHbMgrs);
+    if (sz > 0) {
+      hbGatherAppInfo();
+    }
+
     for (int i = 0; i < sz; i++) {
       SAppHbMgr *pAppHbMgr = taosArrayGetP(clientHbMgr.appHbMgrs, i);

@@ -748,6 +811,10 @@ int hbMgrInit() {
   int8_t old = atomic_val_compare_exchange_8(&clientHbMgr.inited, 0, 1);
   if (old == 1) return 0;
 
+  clientHbMgr.appId = tGenIdPI64();
+  tscDebug("app %" PRIx64 " initialized", clientHbMgr.appId);
+
+  clientHbMgr.appSummary = taosHashInit(10, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK);
   clientHbMgr.appHbMgrs = taosArrayInit(0, sizeof(void *));
   taosThreadMutexInit(&clientHbMgr.lock, NULL);

@@ -418,7 +418,7 @@ int32_t scheduleAsyncQuery(SRequestObj* pRequest, SQueryPlan* pDag, SArray* pNod
   while (true) {
     if (code != TSDB_CODE_SUCCESS) {
       if (pRequest->body.queryJob != 0) {
-        schedulerFreeJob(pRequest->body.queryJob);
+        schedulerFreeJob(pRequest->body.queryJob, 0);
       }
 
       pRequest->code = code;

@@ -439,7 +439,7 @@ int32_t scheduleAsyncQuery(SRequestObj* pRequest, SQueryPlan* pDag, SArray* pNod
     pRequest->body.resInfo.numOfRows = res.numOfRows;
 
     if (pRequest->body.queryJob != 0) {
-      schedulerFreeJob(pRequest->body.queryJob);
+      schedulerFreeJob(pRequest->body.queryJob, 0);
     }
   }

@@ -461,14 +461,15 @@ int32_t scheduleQuery(SRequestObj* pRequest, SQueryPlan* pDag, SArray* pNodeList
       .sql = pRequest->sqlstr,
       .startTs = pRequest->metric.start,
       .fp = NULL,
-      .cbParam = NULL};
+      .cbParam = NULL,
+      .reqKilled = &pRequest->killed};
 
   int32_t code = schedulerExecJob(&req, &pRequest->body.queryJob, &res);
   pRequest->body.resInfo.execRes = res.res;
 
   if (code != TSDB_CODE_SUCCESS) {
     if (pRequest->body.queryJob != 0) {
-      schedulerFreeJob(pRequest->body.queryJob);
+      schedulerFreeJob(pRequest->body.queryJob, 0);
     }
 
     pRequest->code = code;

@@ -481,7 +482,7 @@ int32_t scheduleQuery(SRequestObj* pRequest, SQueryPlan* pDag, SArray* pNodeList
     pRequest->body.resInfo.numOfRows = res.numOfRows;
 
     if (pRequest->body.queryJob != 0) {
-      schedulerFreeJob(pRequest->body.queryJob);
+      schedulerFreeJob(pRequest->body.queryJob, 0);
     }
   }

@@ -608,6 +609,9 @@ void schedulerExecCb(SQueryResult* pResult, void* param, int32_t code) {
   SRequestObj* pRequest = (SRequestObj*)param;
   pRequest->code = code;
 
+  tscDebug("0x%" PRIx64 " enter scheduler exec cb, code:%d - %s, reqId:0x%" PRIx64,
+           pRequest->self, code, tstrerror(code), pRequest->requestId);
+
   STscObj* pTscObj = pRequest->pTscObj;
   if (code != TSDB_CODE_SUCCESS && NEED_CLIENT_HANDLE_ERROR(code)) {
     tscDebug("0x%" PRIx64 " client retry to handle the error, code:%d - %s, tryCount:%d, reqId:0x%" PRIx64,

@@ -738,7 +742,8 @@ void launchAsyncQuery(SRequestObj* pRequest, SQuery* pQuery) {
         .sql = pRequest->sqlstr,
         .startTs = pRequest->metric.start,
         .fp = schedulerExecCb,
-        .cbParam = pRequest};
+        .cbParam = pRequest,
+        .reqKilled = &pRequest->killed};
     code = schedulerAsyncExecJob(&req, &pRequest->body.queryJob);
   } else {
     tscError("0x%" PRIx64 " failed to create query plan, code:%s 0x%" PRIx64, pRequest->self, tstrerror(code),

@@ -246,13 +246,14 @@ TAOS_ROW taos_fetch_row(TAOS_RES *res) {
 
   if (TD_RES_QUERY(res)) {
     SRequestObj *pRequest = (SRequestObj *)res;
+    if (pRequest->type == TSDB_SQL_RETRIEVE_EMPTY_RESULT || pRequest->type == TSDB_SQL_INSERT ||
+        pRequest->code != TSDB_CODE_SUCCESS || taos_num_fields(res) == 0 || pRequest->killed) {
+      return NULL;
+    }
+
 #if SYNC_ON_TOP_OF_ASYNC
     return doAsyncFetchRows(pRequest, true, true);
 #else
-    if (pRequest->type == TSDB_SQL_RETRIEVE_EMPTY_RESULT || pRequest->type == TSDB_SQL_INSERT ||
-        pRequest->code != TSDB_CODE_SUCCESS || taos_num_fields(res) == 0) {
-      return NULL;
-    }
     return doFetchRows(pRequest, true, true);
 #endif

@@ -482,14 +483,20 @@ void taos_stop_query(TAOS_RES *res) {
   }
 
   SRequestObj *pRequest = (SRequestObj *)res;
+  pRequest->killed = true;
+
   int32_t numOfFields = taos_num_fields(pRequest);
 
   // It is not a query, no need to stop.
   if (numOfFields == 0) {
     tscDebug("request %" PRIx64 " no need to be killed since not query", pRequest->requestId);
     return;
   }
 
-  schedulerFreeJob(pRequest->body.queryJob);
+  if (pRequest->body.queryJob) {
+    schedulerFreeJob(pRequest->body.queryJob, TSDB_CODE_TSC_QUERY_KILLED);
+  }
 
   tscDebug("request %" PRIx64 " killed", pRequest->requestId);
 }
 
 bool taos_is_null(TAOS_RES *res, int32_t row, int32_t col) {

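With the killed flag in place, stopping a query from outside the fetch loop becomes well-defined: the flag short-circuits taos_fetch_row, and the scheduler job is freed with TSDB_CODE_TSC_QUERY_KILLED as the reason. A minimal client-side sketch using the public taos.h API — connection parameters and table name are placeholders:

```c
#include <taos.h>
#include <stdio.h>

int main(void) {
  TAOS *conn = taos_connect("localhost", "root", "taosdata", NULL, 0);
  if (conn == NULL) return 1;

  TAOS_RES *res = taos_query(conn, "select * from test.big_table");

  // From here (or from another thread holding `res`), abort the query:
  // later taos_fetch_row calls return NULL instead of delivering rows.
  taos_stop_query(res);

  TAOS_ROW row = taos_fetch_row(res);  // NULL: the request was killed
  printf("row after stop: %p\n", (void *)row);

  taos_free_result(res);
  taos_close(conn);
  return 0;
}
```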
@@ -830,6 +837,9 @@ static void fetchCallback(void *pResult, void *param, int32_t code) {
 
   SReqResultInfo *pResultInfo = &pRequest->body.resInfo;
 
+  tscDebug("0x%" PRIx64 " enter scheduler fetch cb, code:%d - %s, reqId:0x%" PRIx64,
+           pRequest->self, code, tstrerror(code), pRequest->requestId);
+
   pResultInfo->pData = pResult;
   pResultInfo->numOfRows = 0;

@@ -77,7 +77,7 @@ static const SSysDbTableSchema userDBSchema[] = {
     {.name = "ntables", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT},
     {.name = "replica", .bytes = 1, .type = TSDB_DATA_TYPE_TINYINT},
     {.name = "strict", .bytes = 9 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
-    {.name = "duration", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
+    {.name = "duration", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
    {.name = "keep", .bytes = 32 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
     {.name = "buffer", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
     {.name = "pagesize", .bytes = 4, .type = TSDB_DATA_TYPE_INT},

@@ -302,7 +302,7 @@ static const SSysDbTableSchema offsetSchema[] = {
 };
 
 static const SSysDbTableSchema querySchema[] = {
-    {.name = "query_id", .bytes = 26 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
+    {.name = "query_id", .bytes = TSDB_QUERY_ID_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
     {.name = "req_id", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT},
     {.name = "connId", .bytes = 4, .type = TSDB_DATA_TYPE_UINT},
     {.name = "app", .bytes = TSDB_APP_NAME_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},

@@ -317,6 +317,24 @@ static const SSysDbTableSchema querySchema[] = {
     {.name = "sql", .bytes = TSDB_SHOW_SQL_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
 };
 
+static const SSysDbTableSchema appSchema[] = {
+    {.name = "app_id", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT},
+    {.name = "ip", .bytes = TSDB_IPv4ADDR_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
+    {.name = "pid", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
+    {.name = "name", .bytes = TSDB_APP_NAME_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
+    {.name = "start_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
+    {.name = "insert_req", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT},
+    {.name = "insert_row", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT},
+    {.name = "insert_time", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT},
+    {.name = "insert_bytes", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT},
+    {.name = "fetch_bytes", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT},
+    {.name = "query_time", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT},
+    {.name = "show_query", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT},
+    {.name = "total_req", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT},
+    {.name = "current_req", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT},
+    {.name = "last_access", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
+};
+
 static const SSysTableMeta perfsMeta[] = {
     {TSDB_PERFS_TABLE_CONNECTIONS, connectionsSchema, tListLen(connectionsSchema)},
     {TSDB_PERFS_TABLE_QUERIES, querySchema, tListLen(querySchema)},

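The new appSchema backs a per-application monitoring table fed by the client heartbeat summaries above. A hedged sketch of reading it through the regular client API — the `performance_schema.apps` name is an assumption derived from TSDB_PERFS_TABLE_APPS, and the connection parameters are placeholders:

```c
#include <taos.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
  TAOS *conn = taos_connect("localhost", "root", "taosdata", NULL, 0);
  if (conn == NULL) return 1;

  // Assumed table name; TSDB_PERFS_TABLE_APPS defines the real one.
  TAOS_RES *res = taos_query(conn,
      "select app_id, pid, total_req, current_req from performance_schema.apps");
  if (taos_errno(res) == 0) {
    TAOS_ROW row;
    while ((row = taos_fetch_row(res)) != NULL) {
      // Column order follows the select list above.
      printf("app_id=%" PRIu64 " pid=%d total_req=%" PRIu64 "\n",
             *(uint64_t *)row[0], *(int32_t *)row[1], *(uint64_t *)row[2]);
    }
  }
  taos_free_result(res);
  taos_close(conn);
  return 0;
}
```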
@@ -327,6 +345,7 @@ static const SSysTableMeta perfsMeta[] = {
     {TSDB_PERFS_TABLE_TRANS, transSchema, tListLen(transSchema)},
     {TSDB_PERFS_TABLE_SMAS, smaSchema, tListLen(smaSchema)},
     {TSDB_PERFS_TABLE_STREAMS, streamSchema, tListLen(streamSchema)},
+    {TSDB_PERFS_TABLE_APPS, appSchema, tListLen(appSchema)}
 };
 
 void getInfosDbMeta(const SSysTableMeta** pInfosTableMeta, size_t* size) {

@@ -51,15 +51,16 @@ int32_t tsNumOfShmThreads = 1;
 int32_t tsNumOfRpcThreads = 1;
 int32_t tsNumOfCommitThreads = 2;
 int32_t tsNumOfTaskQueueThreads = 1;
-int32_t tsNumOfMnodeQueryThreads = 1;
+int32_t tsNumOfMnodeQueryThreads = 2;
+int32_t tsNumOfMnodeFetchThreads = 1;
 int32_t tsNumOfMnodeReadThreads = 1;
 int32_t tsNumOfVnodeQueryThreads = 2;
-int32_t tsNumOfVnodeFetchThreads = 2;
+int32_t tsNumOfVnodeFetchThreads = 1;
 int32_t tsNumOfVnodeWriteThreads = 2;
 int32_t tsNumOfVnodeSyncThreads = 2;
 int32_t tsNumOfVnodeMergeThreads = 2;
 int32_t tsNumOfQnodeQueryThreads = 2;
-int32_t tsNumOfQnodeFetchThreads = 2;
+int32_t tsNumOfQnodeFetchThreads = 1;
 int32_t tsNumOfSnodeSharedThreads = 2;
 int32_t tsNumOfSnodeUniqueThreads = 2;

@@ -417,8 +418,7 @@ static int32_t taosAddServerCfg(SConfig *pCfg) {
   tsNumOfVnodeQueryThreads = TMAX(tsNumOfVnodeQueryThreads, 1);
   if (cfgAddInt32(pCfg, "numOfVnodeQueryThreads", tsNumOfVnodeQueryThreads, 1, 1024, 0) != 0) return -1;
 
-  tsNumOfVnodeFetchThreads = tsNumOfCores / 2;
-  tsNumOfVnodeFetchThreads = TRANGE(tsNumOfVnodeFetchThreads, 2, 4);
+  tsNumOfVnodeFetchThreads = TRANGE(tsNumOfVnodeFetchThreads, 1, 1);
   if (cfgAddInt32(pCfg, "numOfVnodeFetchThreads", tsNumOfVnodeFetchThreads, 1, 1024, 0) != 0) return -1;
 
   tsNumOfVnodeWriteThreads = tsNumOfCores;

@@ -437,8 +437,7 @@ static int32_t taosAddServerCfg(SConfig *pCfg) {
   tsNumOfQnodeQueryThreads = TMAX(tsNumOfQnodeQueryThreads, 1);
   if (cfgAddInt32(pCfg, "numOfQnodeQueryThreads", tsNumOfQnodeQueryThreads, 1, 1024, 0) != 0) return -1;
 
-  tsNumOfQnodeFetchThreads = tsNumOfCores / 2;
-  tsNumOfQnodeFetchThreads = TRANGE(tsNumOfQnodeFetchThreads, 2, 4);
+  tsNumOfQnodeFetchThreads = TRANGE(tsNumOfQnodeFetchThreads, 1, 1);
   if (cfgAddInt32(pCfg, "numOfQnodeFetchThreads", tsNumOfQnodeFetchThreads, 1, 1024, 0) != 0) return -1;
 
   tsNumOfSnodeSharedThreads = tsNumOfCores / 4;

@@ -191,13 +191,25 @@ static int32_t tSerializeSClientHbReq(SEncoder *pEncoder, const SClientHbReq *pR
   if (tEncodeSClientHbKey(pEncoder, &pReq->connKey) < 0) return -1;
 
   if (pReq->connKey.connType == CONN_TYPE__QUERY) {
+    if (tEncodeI64(pEncoder, pReq->app.appId) < 0) return -1;
+    if (tEncodeI32(pEncoder, pReq->app.pid) < 0) return -1;
+    if (tEncodeCStr(pEncoder, pReq->app.name) < 0) return -1;
+    if (tEncodeI64(pEncoder, pReq->app.startTime) < 0) return -1;
+    if (tEncodeU64(pEncoder, pReq->app.summary.numOfInsertsReq) < 0) return -1;
+    if (tEncodeU64(pEncoder, pReq->app.summary.numOfInsertRows) < 0) return -1;
+    if (tEncodeU64(pEncoder, pReq->app.summary.insertElapsedTime) < 0) return -1;
+    if (tEncodeU64(pEncoder, pReq->app.summary.insertBytes) < 0) return -1;
+    if (tEncodeU64(pEncoder, pReq->app.summary.fetchBytes) < 0) return -1;
+    if (tEncodeU64(pEncoder, pReq->app.summary.queryElapsedTime) < 0) return -1;
+    if (tEncodeU64(pEncoder, pReq->app.summary.numOfSlowQueries) < 0) return -1;
+    if (tEncodeU64(pEncoder, pReq->app.summary.totalRequests) < 0) return -1;
+    if (tEncodeU64(pEncoder, pReq->app.summary.currentRequests) < 0) return -1;
+
     int32_t queryNum = 0;
     if (pReq->query) {
       queryNum = 1;
       if (tEncodeI32(pEncoder, queryNum) < 0) return -1;
       if (tEncodeU32(pEncoder, pReq->query->connId) < 0) return -1;
-      if (tEncodeI32(pEncoder, pReq->query->pid) < 0) return -1;
-      if (tEncodeCStr(pEncoder, pReq->query->app) < 0) return -1;
 
       int32_t num = taosArrayGetSize(pReq->query->queryDesc);
       if (tEncodeI32(pEncoder, num) < 0) return -1;

@@ -209,7 +221,6 @@ static int32_t tSerializeSClientHbReq(SEncoder *pEncoder, const SClientHbReq *pR
         if (tEncodeI64(pEncoder, desc->useconds) < 0) return -1;
         if (tEncodeI64(pEncoder, desc->stime) < 0) return -1;
         if (tEncodeI64(pEncoder, desc->reqRid) < 0) return -1;
-        if (tEncodeI32(pEncoder, desc->pid) < 0) return -1;
         if (tEncodeI8(pEncoder, desc->stableQuery) < 0) return -1;
         if (tEncodeCStr(pEncoder, desc->fqdn) < 0) return -1;
         if (tEncodeI32(pEncoder, desc->subPlanNum) < 0) return -1;

@@ -243,14 +254,26 @@ static int32_t tDeserializeSClientHbReq(SDecoder *pDecoder, SClientHbReq *pReq)
   if (tDecodeSClientHbKey(pDecoder, &pReq->connKey) < 0) return -1;
 
   if (pReq->connKey.connType == CONN_TYPE__QUERY) {
+    if (tDecodeI64(pDecoder, &pReq->app.appId) < 0) return -1;
+    if (tDecodeI32(pDecoder, &pReq->app.pid) < 0) return -1;
+    if (tDecodeCStrTo(pDecoder, pReq->app.name) < 0) return -1;
+    if (tDecodeI64(pDecoder, &pReq->app.startTime) < 0) return -1;
+    if (tDecodeU64(pDecoder, &pReq->app.summary.numOfInsertsReq) < 0) return -1;
+    if (tDecodeU64(pDecoder, &pReq->app.summary.numOfInsertRows) < 0) return -1;
+    if (tDecodeU64(pDecoder, &pReq->app.summary.insertElapsedTime) < 0) return -1;
+    if (tDecodeU64(pDecoder, &pReq->app.summary.insertBytes) < 0) return -1;
+    if (tDecodeU64(pDecoder, &pReq->app.summary.fetchBytes) < 0) return -1;
+    if (tDecodeU64(pDecoder, &pReq->app.summary.queryElapsedTime) < 0) return -1;
+    if (tDecodeU64(pDecoder, &pReq->app.summary.numOfSlowQueries) < 0) return -1;
+    if (tDecodeU64(pDecoder, &pReq->app.summary.totalRequests) < 0) return -1;
+    if (tDecodeU64(pDecoder, &pReq->app.summary.currentRequests) < 0) return -1;
+
     int32_t queryNum = 0;
     if (tDecodeI32(pDecoder, &queryNum) < 0) return -1;
     if (queryNum) {
       pReq->query = taosMemoryCalloc(1, sizeof(*pReq->query));
       if (NULL == pReq->query) return -1;
       if (tDecodeU32(pDecoder, &pReq->query->connId) < 0) return -1;
-      if (tDecodeI32(pDecoder, &pReq->query->pid) < 0) return -1;
-      if (tDecodeCStrTo(pDecoder, pReq->query->app) < 0) return -1;
 
       int32_t num = 0;
       if (tDecodeI32(pDecoder, &num) < 0) return -1;

@@ -265,7 +288,6 @@ static int32_t tDeserializeSClientHbReq(SDecoder *pDecoder, SClientHbReq *pReq)
         if (tDecodeI64(pDecoder, &desc.useconds) < 0) return -1;
         if (tDecodeI64(pDecoder, &desc.stime) < 0) return -1;
         if (tDecodeI64(pDecoder, &desc.reqRid) < 0) return -1;
-        if (tDecodeI32(pDecoder, &desc.pid) < 0) return -1;
         if (tDecodeI8(pDecoder, (int8_t*)&desc.stableQuery) < 0) return -1;
         if (tDecodeCStrTo(pDecoder, desc.fqdn) < 0) return -1;
         if (tDecodeI32(pDecoder, &desc.subPlanNum) < 0) return -1;

@@ -3410,7 +3432,7 @@ int32_t tSerializeSKillConnReq(void *buf, int32_t bufLen, SKillConnReq *pReq) {
   tEncoderInit(&encoder, buf, bufLen);
 
   if (tStartEncode(&encoder) < 0) return -1;
-  if (tEncodeI32(&encoder, pReq->connId) < 0) return -1;
+  if (tEncodeU32(&encoder, pReq->connId) < 0) return -1;
   tEndEncode(&encoder);
 
   int32_t tlen = encoder.pos;

@@ -3423,7 +3445,7 @@ int32_t tDeserializeSKillConnReq(void *buf, int32_t bufLen, SKillConnReq *pReq)
   tDecoderInit(&decoder, buf, bufLen);
 
   if (tStartDecode(&decoder) < 0) return -1;
-  if (tDecodeI32(&decoder, &pReq->connId) < 0) return -1;
+  if (tDecodeU32(&decoder, &pReq->connId) < 0) return -1;
   tEndDecode(&decoder);
 
   tDecoderClear(&decoder);

@@ -30,6 +30,7 @@ typedef struct SMnodeMgmt {
   const char   *path;
   const char   *name;
   SSingleWorker queryWorker;
+  SSingleWorker fetchWorker;
   SSingleWorker readWorker;
   SSingleWorker writeWorker;
   SSingleWorker syncWorker;

@@ -57,6 +58,7 @@ int32_t mmPutMsgToWriteQueue(SMnodeMgmt *pMgmt, SRpcMsg *pMsg);
 int32_t mmPutMsgToSyncQueue(SMnodeMgmt *pMgmt, SRpcMsg *pMsg);
 int32_t mmPutMsgToReadQueue(SMnodeMgmt *pMgmt, SRpcMsg *pMsg);
 int32_t mmPutMsgToQueryQueue(SMnodeMgmt *pMgmt, SRpcMsg *pMsg);
+int32_t mmPutMsgToFetchQueue(SMnodeMgmt *pMgmt, SRpcMsg *pMsg);
 int32_t mmPutMsgToMonitorQueue(SMnodeMgmt *pMgmt, SRpcMsg *pMsg);
 int32_t mmPutMsgToQueue(SMnodeMgmt *pMgmt, EQueueType qtype, SRpcMsg *pRpc);

@@ -122,6 +122,13 @@ int32_t mmPutMsgToQueryQueue(SMnodeMgmt *pMgmt, SRpcMsg *pMsg) {
   return mmPutMsgToWorker(pMgmt, &pMgmt->queryWorker, pMsg);
 }
 
+int32_t mmPutMsgToFetchQueue(SMnodeMgmt *pMgmt, SRpcMsg *pMsg) {
+  pMsg->info.node = pMgmt->pMnode;
+
+  return mmPutMsgToWorker(pMgmt, &pMgmt->fetchWorker, pMsg);
+}
+
 int32_t mmPutMsgToMonitorQueue(SMnodeMgmt *pMgmt, SRpcMsg *pMsg) {
   return mmPutMsgToWorker(pMgmt, &pMgmt->monitorWorker, pMsg);
 }

@@ -135,6 +142,9 @@ int32_t mmPutMsgToQueue(SMnodeMgmt *pMgmt, EQueueType qtype, SRpcMsg *pRpc) {
     case QUERY_QUEUE:
       pWorker = &pMgmt->queryWorker;
       break;
+    case FETCH_QUEUE:
+      pWorker = &pMgmt->fetchWorker;
+      break;
     case READ_QUEUE:
       pWorker = &pMgmt->readWorker;
       break;

@@ -167,6 +177,18 @@ int32_t mmStartWorker(SMnodeMgmt *pMgmt) {
     return -1;
   }
 
+  SSingleWorkerCfg fCfg = {
+      .min = tsNumOfMnodeFetchThreads,
+      .max = tsNumOfMnodeFetchThreads,
+      .name = "mnode-fetch",
+      .fp = (FItem)mmProcessRpcMsg,
+      .param = pMgmt,
+  };
+  if (tSingleWorkerInit(&pMgmt->fetchWorker, &fCfg) != 0) {
+    dError("failed to start mnode-fetch worker since %s", terrstr());
+    return -1;
+  }
+
   SSingleWorkerCfg rCfg = {
       .min = tsNumOfMnodeReadThreads,
       .max = tsNumOfMnodeReadThreads,

@@ -224,6 +246,7 @@ void mmStopWorker(SMnodeMgmt *pMgmt) {
 
   tSingleWorkerCleanup(&pMgmt->monitorWorker);
   tSingleWorkerCleanup(&pMgmt->queryWorker);
+  tSingleWorkerCleanup(&pMgmt->fetchWorker);
   tSingleWorkerCleanup(&pMgmt->readWorker);
   tSingleWorkerCleanup(&pMgmt->writeWorker);
   tSingleWorkerCleanup(&pMgmt->syncWorker);

@@ -67,7 +67,8 @@ typedef struct {
 } SShowMgmt;
 
 typedef struct {
-  SCacheObj *cache;
+  SCacheObj *connCache;
+  SCacheObj *appCache;
 } SProfileMgmt;
 
 typedef struct {

@@ -1391,11 +1391,13 @@ static void dumpDbInfoData(SSDataBlock *pBlock, SDbObj *pDb, SShowObj *pShow, in
   pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
   colDataAppend(pColInfo, rows, (const char *)strict, false);
 
-  pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
-  colDataAppend(pColInfo, rows, (const char *)&pDb->cfg.daysPerFile, false);
-
   char    tmp[128] = {0};
   int32_t len = 0;
+  len = sprintf(&tmp[VARSTR_HEADER_SIZE], "%dm", pDb->cfg.daysPerFile);
+  varDataSetLen(tmp, len);
+  pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
+  colDataAppend(pColInfo, rows, (const char *)tmp, false);
+
   if (pDb->cfg.daysToKeep0 > pDb->cfg.daysToKeep1 || pDb->cfg.daysToKeep0 > pDb->cfg.daysToKeep2) {
     len = sprintf(&tmp[VARSTR_HEADER_SIZE], "%dm,%dm,%dm", pDb->cfg.daysToKeep1, pDb->cfg.daysToKeep2,
                   pDb->cfg.daysToKeep0);

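The duration column switches from a raw int to a human-readable var-string such as "14400m", which pairs with the schema change above. TDengine var types carry a small length header in front of the payload, which is why the code writes at &tmp[VARSTR_HEADER_SIZE] and then back-fills the length. A standalone sketch of the same convention — the 2-byte header and the macro shapes are assumptions mirroring varDataSetLen/varDataLen in the tree:

```c
#include <stdint.h>
#include <stdio.h>

// Assumed layout: a uint16_t length prefix followed by the payload bytes.
#define VARSTR_HEADER_SIZE  sizeof(uint16_t)
#define varDataSetLen(v, l) (*(uint16_t *)(v) = (uint16_t)(l))
#define varDataVal(v)       ((char *)(v) + VARSTR_HEADER_SIZE)

int main(void) {
  char tmp[128] = {0};
  int  daysPerFile = 14400;  // minutes, i.e. 10 days

  // Format the payload after the header, then record its length in front.
  int len = sprintf(varDataVal(tmp), "%dm", daysPerFile);
  varDataSetLen(tmp, len);

  printf("len=%u payload=%s\n", *(uint16_t *)tmp, varDataVal(tmp));
  return 0;
}
```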
@@ -43,6 +43,16 @@ typedef struct {
   SArray *pQueries;  // SArray<SQueryDesc>
 } SConnObj;
 
+typedef struct {
+  int64_t  appId;
+  uint32_t ip;
+  int32_t  pid;
+  char     name[TSDB_APP_NAME_LEN];
+  int64_t  startTime;
+  SAppClusterSummary summary;
+  int64_t  lastAccessTimeMs;
+} SAppObj;
+
 static SConnObj *mndCreateConn(SMnode *pMnode, const char *user, int8_t connType, uint32_t ip, uint16_t port,
                                int32_t pid, const char *app, int64_t startTime);
 static void      mndFreeConn(SConnObj *pConn);

@@ -57,14 +67,24 @@ static int32_t mndProcessKillConnReq(SRpcMsg *pReq);
 static int32_t mndRetrieveConns(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBlock, int32_t rows);
 static int32_t mndRetrieveQueries(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBlock, int32_t rows);
 static void    mndCancelGetNextQuery(SMnode *pMnode, void *pIter);
+static void    mndFreeApp(SAppObj *pApp);
+static int32_t mndRetrieveApps(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBlock, int32_t rows);
+static void    mndCancelGetNextApp(SMnode *pMnode, void *pIter);
 
 int32_t mndInitProfile(SMnode *pMnode) {
   SProfileMgmt *pMgmt = &pMnode->profileMgmt;
 
   // in ms
-  int32_t connCheckTime = tsShellActivityTimer * 2 * 1000;
-  pMgmt->cache = taosCacheInit(TSDB_DATA_TYPE_INT, connCheckTime, true, (__cache_free_fn_t)mndFreeConn, "conn");
-  if (pMgmt->cache == NULL) {
+  int32_t checkTime = tsShellActivityTimer * 2 * 1000;
+  pMgmt->connCache = taosCacheInit(TSDB_DATA_TYPE_UINT, checkTime, true, (__cache_free_fn_t)mndFreeConn, "conn");
+  if (pMgmt->connCache == NULL) {
     terrno = TSDB_CODE_OUT_OF_MEMORY;
     mError("failed to alloc profile cache since %s", terrstr());
     return -1;
   }
 
+  pMgmt->appCache = taosCacheInit(TSDB_DATA_TYPE_BIGINT, checkTime, true, (__cache_free_fn_t)mndFreeApp, "app");
+  if (pMgmt->appCache == NULL) {
+    terrno = TSDB_CODE_OUT_OF_MEMORY;
+    mError("failed to alloc profile cache since %s", terrstr());
+    return -1;

@@ -79,15 +99,22 @@ int32_t mndInitProfile(SMnode *pMnode) {
   mndAddShowFreeIterHandle(pMnode, TSDB_MGMT_TABLE_CONNS, mndCancelGetNextConn);
   mndAddShowRetrieveHandle(pMnode, TSDB_MGMT_TABLE_QUERIES, mndRetrieveQueries);
   mndAddShowFreeIterHandle(pMnode, TSDB_MGMT_TABLE_QUERIES, mndCancelGetNextQuery);
+  mndAddShowRetrieveHandle(pMnode, TSDB_MGMT_TABLE_APPS, mndRetrieveApps);
+  mndAddShowFreeIterHandle(pMnode, TSDB_MGMT_TABLE_APPS, mndCancelGetNextApp);
 
   return 0;
 }
 
 void mndCleanupProfile(SMnode *pMnode) {
   SProfileMgmt *pMgmt = &pMnode->profileMgmt;
-  if (pMgmt->cache != NULL) {
-    taosCacheCleanup(pMgmt->cache);
-    pMgmt->cache = NULL;
+  if (pMgmt->connCache != NULL) {
+    taosCacheCleanup(pMgmt->connCache);
+    pMgmt->connCache = NULL;
+  }
+
+  if (pMgmt->appCache != NULL) {
+    taosCacheCleanup(pMgmt->appCache);
+    pMgmt->appCache = NULL;
   }
 }

@@ -97,7 +124,7 @@ static SConnObj *mndCreateConn(SMnode *pMnode, const char *user, int8_t connType
 
   char    connStr[255] = {0};
   int32_t len = snprintf(connStr, sizeof(connStr), "%s%d%d%d%s", user, ip, port, pid, app);
-  int32_t connId = mndGenerateUid(connStr, len);
+  uint32_t connId = mndGenerateUid(connStr, len);
   if (startTime == 0) startTime = taosGetTimestampMs();
 
   SConnObj connObj = {.id = connId,

@@ -118,7 +145,7 @@ static SConnObj *mndCreateConn(SMnode *pMnode, const char *user, int8_t connType
   tstrncpy(connObj.app, app, TSDB_APP_NAME_LEN);
 
   int32_t keepTime = tsShellActivityTimer * 3;
-  SConnObj *pConn = taosCachePut(pMgmt->cache, &connId, sizeof(int32_t), &connObj, sizeof(connObj), keepTime * 1000);
+  SConnObj *pConn = taosCachePut(pMgmt->connCache, &connId, sizeof(uint32_t), &connObj, sizeof(connObj), keepTime * 1000);
   if (pConn == NULL) {
     terrno = TSDB_CODE_OUT_OF_MEMORY;
     mError("conn:%d, failed to put into cache since %s, user:%s", connId, user, terrstr());

@@ -140,14 +167,13 @@ static void mndFreeConn(SConnObj *pConn) {
 static SConnObj *mndAcquireConn(SMnode *pMnode, uint32_t connId) {
   SProfileMgmt *pMgmt = &pMnode->profileMgmt;
 
-  SConnObj *pConn = taosCacheAcquireByKey(pMgmt->cache, &connId, sizeof(connId));
+  SConnObj *pConn = taosCacheAcquireByKey(pMgmt->connCache, &connId, sizeof(connId));
   if (pConn == NULL) {
     mDebug("conn:%u, already destroyed", connId);
     return NULL;
   }
 
-  int32_t keepTime = tsShellActivityTimer * 3;
-  pConn->lastAccessTimeMs = keepTime * 1000 + (uint64_t)taosGetTimestampMs();
+  pConn->lastAccessTimeMs = taosGetTimestampMs();
 
   mTrace("conn:%u, acquired from cache, data:%p", pConn->id, pConn);
   return pConn;

@@ -158,7 +184,7 @@ static void mndReleaseConn(SMnode *pMnode, SConnObj *pConn) {
   mTrace("conn:%u, released from cache, data:%p", pConn->id, pConn);
 
   SProfileMgmt *pMgmt = &pMnode->profileMgmt;
-  taosCacheRelease(pMgmt->cache, (void **)&pConn, false);
+  taosCacheRelease(pMgmt->connCache, (void **)&pConn, false);
 }
 
 void *mndGetNextConn(SMnode *pMnode, SCacheIter *pIter) {

@@ -276,6 +302,77 @@ static int32_t mndSaveQueryList(SConnObj *pConn, SQueryHbReqBasic *pBasic) {
   return TSDB_CODE_SUCCESS;
 }
 
+static SAppObj *mndCreateApp(SMnode *pMnode, uint32_t clientIp, SAppHbReq *pReq) {
+  SProfileMgmt *pMgmt = &pMnode->profileMgmt;
+
+  SAppObj app;
+  app.appId = pReq->appId;
+  app.ip = clientIp;
+  app.pid = pReq->pid;
+  strcpy(app.name, pReq->name);
+  app.startTime = pReq->startTime;
+  memcpy(&app.summary, &pReq->summary, sizeof(pReq->summary));
+  app.lastAccessTimeMs = taosGetTimestampMs();
+
+  int32_t  keepTime = tsShellActivityTimer * 3;
+  SAppObj *pApp = taosCachePut(pMgmt->appCache, &pReq->appId, sizeof(pReq->appId), &app, sizeof(app), keepTime * 1000);
+  if (pApp == NULL) {
+    terrno = TSDB_CODE_OUT_OF_MEMORY;
+    mError("failed to app %" PRIx64 " into cache since %s", pReq->appId, terrstr());
+    return NULL;
+  }
+
+  mTrace("app %" PRIx64 " is put into cache", pReq->appId);
+  return pApp;
+}
+
+static void mndFreeApp(SAppObj *pApp) {
+  mTrace("app %" PRIx64 " is destroyed", pApp->appId);
+}
+
+static SAppObj *mndAcquireApp(SMnode *pMnode, int64_t appId) {
+  SProfileMgmt *pMgmt = &pMnode->profileMgmt;
+
+  SAppObj *pApp = taosCacheAcquireByKey(pMgmt->appCache, &appId, sizeof(appId));
+  if (pApp == NULL) {
+    mDebug("app %" PRIx64 " not in cache", appId);
+    return NULL;
+  }
+
+  pApp->lastAccessTimeMs = (uint64_t)taosGetTimestampMs();
+
+  mTrace("app %" PRIx64 " acquired from cache", appId);
+  return pApp;
+}
+
+static void mndReleaseApp(SMnode *pMnode, SAppObj *pApp) {
+  if (pApp == NULL) return;
+  mTrace("release app %" PRIx64 " to cache", pApp->appId);
+
+  SProfileMgmt *pMgmt = &pMnode->profileMgmt;
+  taosCacheRelease(pMgmt->appCache, (void **)&pApp, false);
+}
+
+void *mndGetNextApp(SMnode *pMnode, SCacheIter *pIter) {
+  SAppObj *pApp = NULL;
+  bool     hasNext = taosCacheIterNext(pIter);
+  if (hasNext) {
+    size_t dataLen = 0;
+    pApp = taosCacheIterGetData(pIter, &dataLen);
+  } else {
+    taosCacheDestroyIter(pIter);
+  }
+
+  return pApp;
+}
+
+static void mndCancelGetNextApp(SMnode *pMnode, void *pIter) {
+  if (pIter != NULL) {
+    taosCacheDestroyIter(pIter);
+  }
+}
+
 static SClientHbRsp *mndMqHbBuildRsp(SMnode *pMnode, SClientHbReq *pReq) {
 #if 0
   SClientHbRsp* pRsp = taosMemoryMalloc(sizeof(SClientHbRsp));

@@ -341,25 +438,48 @@ static SClientHbRsp *mndMqHbBuildRsp(SMnode *pMnode, SClientHbReq *pReq) {
   return NULL;
 }
 
+static int32_t mndUpdateAppInfo(SMnode *pMnode, SClientHbReq *pHbReq, SRpcConnInfo *connInfo) {
+  SAppHbReq *pReq = &pHbReq->app;
+  SAppObj   *pApp = mndAcquireApp(pMnode, pReq->appId);
+  if (pApp == NULL) {
+    pApp = mndCreateApp(pMnode, connInfo->clientIp, pReq);
+    if (pApp == NULL) {
+      mError("failed to create new app %" PRIx64 " since %s", pReq->appId, terrstr());
+      return -1;
+    } else {
+      mDebug("a new app %" PRIx64 "created", pReq->appId);
+      mndReleaseApp(pMnode, pApp);
+      return TSDB_CODE_SUCCESS;
+    }
+  }
+
+  memcpy(&pApp->summary, &pReq->summary, sizeof(pReq->summary));
+
+  mndReleaseApp(pMnode, pApp);
+
+  return TSDB_CODE_SUCCESS;
+}
+
 static int32_t mndProcessQueryHeartBeat(SMnode *pMnode, SRpcMsg *pMsg, SClientHbReq *pHbReq,
                                         SClientHbBatchRsp *pBatchRsp) {
   SProfileMgmt *pMgmt = &pMnode->profileMgmt;
   SClientHbRsp  hbRsp = {.connKey = pHbReq->connKey, .status = 0, .info = NULL, .query = NULL};
+  SRpcConnInfo  connInfo = pMsg->info.conn;
+
+  mndUpdateAppInfo(pMnode, pHbReq, &connInfo);
 
   if (pHbReq->query) {
     SQueryHbReqBasic *pBasic = pHbReq->query;
 
-    SRpcConnInfo connInfo = pMsg->info.conn;
-
     SConnObj *pConn = mndAcquireConn(pMnode, pBasic->connId);
     if (pConn == NULL) {
       pConn = mndCreateConn(pMnode, connInfo.user, CONN_TYPE__QUERY, connInfo.clientIp, connInfo.clientPort,
-                            pBasic->pid, pBasic->app, 0);
+                            pHbReq->app.pid, pHbReq->app.name, 0);
       if (pConn == NULL) {
         mError("user:%s, conn:%u is freed and failed to create new since %s", connInfo.user, pBasic->connId, terrstr());
         return -1;
       } else {
-        mDebug("user:%s, conn:%u is freed and create a new conn:%u", connInfo.user, pBasic->connId, pConn->id);
+        mDebug("user:%s, conn:%u is freed, will create a new conn:%u", connInfo.user, pBasic->connId, pConn->id);
       }
     }

@@ -516,17 +636,28 @@ static int32_t mndProcessKillQueryReq(SRpcMsg *pReq) {
     return -1;
   }
 
-  mInfo("kill query msg is received, queryId:%d", killReq.queryId);
+  mInfo("kill query msg is received, queryId:%s", killReq.queryStrId);
+  int32_t  connId = 0;
+  uint64_t queryId = 0;
+  char    *p = strchr(killReq.queryStrId, ':');
+  if (NULL == p) {
+    mError("invalid query id %s", killReq.queryStrId);
+    terrno = TSDB_CODE_MND_INVALID_QUERY_ID;
+    return -1;
+  }
+  *p = 0;
+  connId = taosStr2Int32(killReq.queryStrId, NULL, 16);
+  queryId = taosStr2UInt64(p + 1, NULL, 16);
 
-  SConnObj *pConn = taosCacheAcquireByKey(pMgmt->cache, &killReq.connId, sizeof(int32_t));
+  SConnObj *pConn = taosCacheAcquireByKey(pMgmt->connCache, &connId, sizeof(int32_t));
   if (pConn == NULL) {
-    mError("connId:%d, failed to kill queryId:%d, conn not exist", killReq.connId, killReq.queryId);
+    mError("connId:%x, failed to kill queryId:%" PRIx64 ", conn not exist", connId, queryId);
     terrno = TSDB_CODE_MND_INVALID_CONN_ID;
     return -1;
   } else {
-    mInfo("connId:%d, queryId:%d is killed by user:%s", killReq.connId, killReq.queryId, pReq->info.conn.user);
-    pConn->killId = killReq.queryId;
-    taosCacheRelease(pMgmt->cache, (void **)&pConn, false);
+    mInfo("connId:%x, queryId:%" PRIx64 " is killed by user:%s", connId, queryId, pReq->info.conn.user);
+    pConn->killId = queryId;
+    taosCacheRelease(pMgmt->connCache, (void **)&pConn, false);
     return 0;
   }
 }

@@ -550,15 +681,15 @@ static int32_t mndProcessKillConnReq(SRpcMsg *pReq) {
     return -1;
   }
 
-  SConnObj *pConn = taosCacheAcquireByKey(pMgmt->cache, &killReq.connId, sizeof(int32_t));
+  SConnObj *pConn = taosCacheAcquireByKey(pMgmt->connCache, &killReq.connId, sizeof(uint32_t));
   if (pConn == NULL) {
-    mError("connId:%d, failed to kill connection, conn not exist", killReq.connId);
+    mError("connId:%u, failed to kill connection, conn not exist", killReq.connId);
     terrno = TSDB_CODE_MND_INVALID_CONN_ID;
     return -1;
   } else {
-    mInfo("connId:%d, is killed by user:%s", killReq.connId, pReq->info.conn.user);
+    mInfo("connId:%u, is killed by user:%s", killReq.connId, pReq->info.conn.user);
     pConn->killed = 1;
-    taosCacheRelease(pMgmt->cache, (void **)&pConn, false);
+    taosCacheRelease(pMgmt->connCache, (void **)&pConn, false);
     return TSDB_CODE_SUCCESS;
   }
 }

@@ -572,7 +703,7 @@ static int32_t mndRetrieveConns(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBl
 
   if (pShow->pIter == NULL) {
     SProfileMgmt *pMgmt = &pMnode->profileMgmt;
-    pShow->pIter = taosCacheCreateIter(pMgmt->cache);
+    pShow->pIter = taosCacheCreateIter(pMgmt->connCache);
   }
 
   while (numOfRows < rows) {

@@ -628,7 +759,7 @@ static int32_t mndRetrieveQueries(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *p
 
   if (pShow->pIter == NULL) {
     SProfileMgmt *pMgmt = &pMnode->profileMgmt;
-    pShow->pIter = taosCacheCreateIter(pMgmt->cache);
+    pShow->pIter = taosCacheCreateIter(pMgmt->connCache);
   }
 
   while (numOfRows < rows) {

@@ -667,7 +798,7 @@ static int32_t mndRetrieveQueries(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *p
       colDataAppend(pColInfo, numOfRows, (const char *)app, false);
 
       pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
-      colDataAppend(pColInfo, numOfRows, (const char *)&pQuery->pid, false);
+      colDataAppend(pColInfo, numOfRows, (const char *)&pConn->pid, false);
 
       char user[TSDB_USER_LEN + VARSTR_HEADER_SIZE] = {0};
       STR_TO_VARSTR(user, pConn->user);

@@ -721,6 +852,86 @@ static int32_t mndRetrieveQueries(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *p
   return numOfRows;
 }
 
+static int32_t mndRetrieveApps(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBlock, int32_t rows) {
+  SMnode  *pMnode = pReq->info.node;
+  SSdb    *pSdb = pMnode->pSdb;
+  int32_t  numOfRows = 0;
+  int32_t  cols = 0;
+  SAppObj *pApp = NULL;
+
+  if (pShow->pIter == NULL) {
+    SProfileMgmt *pMgmt = &pMnode->profileMgmt;
+    pShow->pIter = taosCacheCreateIter(pMgmt->appCache);
+  }
+
+  while (numOfRows < rows) {
+    pApp = mndGetNextApp(pMnode, pShow->pIter);
+    if (pApp == NULL) {
+      pShow->pIter = NULL;
+      break;
+    }
+
+    cols = 0;
+
+    SColumnInfoData *pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
+    colDataAppend(pColInfo, numOfRows, (const char *)&pApp->appId, false);
+
+    char ip[TSDB_IPv4ADDR_LEN + 6 + VARSTR_HEADER_SIZE] = {0};
+    sprintf(&ip[VARSTR_HEADER_SIZE], "%s", taosIpStr(pApp->ip));
+    varDataLen(ip) = strlen(&ip[VARSTR_HEADER_SIZE]);
+    pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
+    colDataAppend(pColInfo, numOfRows, (const char *)ip, false);
+
+    pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
+    colDataAppend(pColInfo, numOfRows, (const char *)&pApp->pid, false);
+
+    char name[TSDB_APP_NAME_LEN + 6 + VARSTR_HEADER_SIZE] = {0};
+    sprintf(&name[VARSTR_HEADER_SIZE], "%s", pApp->name);
+    varDataLen(name) = strlen(&name[VARSTR_HEADER_SIZE]);
+    pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
+    colDataAppend(pColInfo, numOfRows, (const char *)name, false);
+
+    pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
+    colDataAppend(pColInfo, numOfRows, (const char *)&pApp->startTime, false);
+
+    pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
+    colDataAppend(pColInfo, numOfRows, (const char *)&pApp->summary.numOfInsertsReq, false);
+
+    pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
+    colDataAppend(pColInfo, numOfRows, (const char *)&pApp->summary.numOfInsertRows, false);
+
+    pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
+    colDataAppend(pColInfo, numOfRows, (const char *)&pApp->summary.insertElapsedTime, false);
+
+    pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
+    colDataAppend(pColInfo, numOfRows, (const char *)&pApp->summary.insertBytes, false);
+
+    pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
+    colDataAppend(pColInfo, numOfRows, (const char *)&pApp->summary.fetchBytes, false);
+
+    pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
+    colDataAppend(pColInfo, numOfRows, (const char *)&pApp->summary.queryElapsedTime, false);
+
+    pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
+    colDataAppend(pColInfo, numOfRows, (const char *)&pApp->summary.numOfSlowQueries, false);
+
+    pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
+    colDataAppend(pColInfo, numOfRows, (const char *)&pApp->summary.totalRequests, false);
+
+    pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
+    colDataAppend(pColInfo, numOfRows, (const char *)&pApp->summary.currentRequests, false);
+
+    pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
+    colDataAppend(pColInfo, numOfRows, (const char *)&pApp->lastAccessTimeMs, false);
+
+    numOfRows++;
+  }
+
+  pShow->numOfRows += numOfRows;
+  return numOfRows;
+}
+
 static void mndCancelGetNextQuery(SMnode *pMnode, void *pIter) {
   if (pIter != NULL) {
     taosCacheDestroyIter(pIter);

@@ -729,5 +940,5 @@ static void mndCancelGetNextQuery(SMnode *pMnode, void *pIter) {
 
 int32_t mndGetNumOfConnections(SMnode *pMnode) {
   SProfileMgmt *pMgmt = &pMnode->profileMgmt;
-  return taosCacheGetNumOfObj(pMgmt->cache);
+  return taosCacheGetNumOfObj(pMgmt->connCache);
 }

@@ -104,6 +104,8 @@ static int32_t convertToRetrieveType(char *name, int32_t len) {
     type = TSDB_MGMT_TABLE_TOPICS;
   } else if (strncasecmp(name, TSDB_PERFS_TABLE_STREAMS, len) == 0) {
     type = TSDB_MGMT_TABLE_STREAMS;
+  } else if (strncasecmp(name, TSDB_PERFS_TABLE_APPS, len) == 0) {
+    type = TSDB_MGMT_TABLE_APPS;
   } else {
     //    ASSERT(0);
   }

@@ -295,8 +295,7 @@ TEST_F(MndTestProfile, 07_KillQueryMsg) {
 
 TEST_F(MndTestProfile, 08_KillQueryMsg_InvalidConn) {
   SKillQueryReq killReq = {0};
-  killReq.connId = 2345;
-  killReq.queryId = 2345;
+  strcpy(killReq.queryStrId, "2345:2345");
 
   int32_t contLen = tSerializeSKillQueryReq(NULL, 0, &killReq);
   void* pReq = rpcMallocCont(contLen);

@@ -111,7 +111,7 @@ int32_t tsdbBegin(STsdb *pTsdb) {
 
 int32_t tsdbCommit(STsdb *pTsdb) {
-
+  if (!pTsdb) return 0;
+
   int32_t    code = 0;
   SCommitH   commith = {0};
   SDFileSet *pSet = NULL;

@@ -495,7 +495,9 @@ static int32_t tsdbCommitToFile(SCommitH *pCommith, SDFileSet *pSet, int fid) {
       break;
     }
 
-    if (pIter && pIter->pTable && (!pIdx || (pIter->pTable->suid <= pIdx->suid || pIter->pTable->uid <= pIdx->uid))) {
+    if (pIter && pIter->pTable &&
+        (!pIdx || ((pIter->pTable->suid < pIdx->suid) ||
+                   ((pIter->pTable->suid == pIdx->suid) && (pIter->pTable->uid <= pIdx->uid))))) {
       if (tsdbCommitToTable(pCommith, mIter) < 0) {
         tsdbCloseCommitFile(pCommith, true);
         // revert the file change

@@ -503,7 +505,7 @@ static int32_t tsdbCommitToFile(SCommitH *pCommith, SDFileSet *pSet, int fid) {
         return -1;
       }
 
-      if (pIdx && (pIter->pTable->uid == pIdx->uid)) {
+      if (pIdx && ((pIter->pTable->uid == pIdx->uid) && (pIter->pTable->suid == pIdx->suid))) {
         ++fIter;
       }
       ++mIter;

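These two hunks fix an ordering bug: the old condition advanced the merge when either the suid or the uid compared low, which mis-orders tables once suids differ, and it matched file-side entries on uid alone. The fix compares the (suid, uid) pair lexicographically. The same logic as a standalone comparator — a sketch, with STableKey standing in for the fields the commit path actually reads:

```c
#include <stdint.h>

// Stand-in for the (suid, uid) key the commit merge walks in order.
typedef struct {
  int64_t suid;
  int64_t uid;
} STableKey;

// Lexicographic compare: suid first, uid breaks ties.
// Returns <0, 0, >0 like strcmp. The buggy form
// (a->suid <= b->suid || a->uid <= b->uid) reports "not greater"
// for a = {2, 1}, b = {1, 9}, even though a sorts after b.
static int tableKeyCmp(const STableKey *a, const STableKey *b) {
  if (a->suid != b->suid) return (a->suid < b->suid) ? -1 : 1;
  if (a->uid != b->uid) return (a->uid < b->uid) ? -1 : 1;
  return 0;
}
```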
@@ -518,6 +520,8 @@ static int32_t tsdbCommitToFile(SCommitH *pCommith, SDFileSet *pSet, int fid) {
         return -1;
       }
       ++fIter;
+    } else {
+      ASSERT(0);
     }
   }

@@ -59,6 +59,7 @@ enum {
   CTG_OP_UPDATE_VG_EPSET,
   CTG_OP_UPDATE_TB_INDEX,
   CTG_OP_DROP_TB_INDEX,
+  CTG_OP_CLEAR_CACHE,
   CTG_OP_MAX
 };

@@ -328,6 +329,10 @@ typedef struct SCtgDropTbIndexMsg {
   char tbName[TSDB_TABLE_NAME_LEN];
 } SCtgDropTbIndexMsg;
 
+typedef struct SCtgClearCacheMsg {
+  SCatalog* pCtg;
+} SCtgClearCacheMsg;
+
 typedef struct SCtgUpdateEpsetMsg {
   SCatalog* pCtg;
   char      dbFName[TSDB_DB_FNAME_LEN];

@@ -471,8 +476,8 @@ typedef struct SCtgOperation {
 #define CTG_API_LEAVE(c) do { int32_t __code = c; CTG_UNLOCK(CTG_READ, &gCtgMgmt.lock); CTG_API_DEBUG("CTG API leave %s", __FUNCTION__); CTG_RET(__code); } while (0)
 #define CTG_API_ENTER() do { CTG_API_DEBUG("CTG API enter %s", __FUNCTION__); CTG_LOCK(CTG_READ, &gCtgMgmt.lock); if (atomic_load_8((int8_t*)&gCtgMgmt.exit)) { CTG_API_LEAVE(TSDB_CODE_CTG_OUT_OF_SERVICE); } } while (0)
 
-void ctgdShowTableMeta(SCatalog* pCtg, const char *tbName, STableMeta* p);
-void ctgdShowClusterCache(SCatalog* pCtg);
+void    ctgdShowTableMeta(SCatalog* pCtg, const char *tbName, STableMeta* p);
+void    ctgdShowClusterCache(SCatalog* pCtg);
+int32_t ctgdShowCacheInfo(void);
 
 int32_t ctgRemoveTbMetaFromCache(SCatalog* pCtg, SName* pTableName, bool syncReq);

@@ -487,8 +492,8 @@ int32_t ctgOpDropTbMeta(SCtgCacheOperation *action);
 int32_t ctgOpUpdateUser(SCtgCacheOperation *action);
 int32_t ctgOpUpdateEpset(SCtgCacheOperation *operation);
 int32_t ctgAcquireVgInfoFromCache(SCatalog* pCtg, const char *dbFName, SCtgDBCache **pCache);
-void ctgReleaseDBCache(SCatalog *pCtg, SCtgDBCache *dbCache);
-void ctgRUnlockVgInfo(SCtgDBCache *dbCache);
+void    ctgReleaseDBCache(SCatalog *pCtg, SCtgDBCache *dbCache);
+void    ctgRUnlockVgInfo(SCtgDBCache *dbCache);
 int32_t ctgTbMetaExistInCache(SCatalog* pCtg, char *dbFName, char* tbName, int32_t *exist);
 int32_t ctgReadTbMetaFromCache(SCatalog* pCtg, SCtgTbMetaCtx* ctx, STableMeta** pTableMeta);
 int32_t ctgReadTbVerFromCache(SCatalog *pCtg, SName *pTableName, int32_t *sver, int32_t *tver, int32_t *tbType, uint64_t *suid, char *stbName);

@@ -502,17 +507,20 @@ int32_t ctgUpdateTbMetaEnqueue(SCatalog* pCtg, STableMetaOutput *output, bool sy
 int32_t ctgUpdateUserEnqueue(SCatalog* pCtg, SGetUserAuthRsp *pAuth, bool syncReq);
 int32_t ctgUpdateVgEpsetEnqueue(SCatalog* pCtg, char *dbFName, int32_t vgId, SEpSet* pEpSet);
 int32_t ctgUpdateTbIndexEnqueue(SCatalog* pCtg, STableIndex **pIndex, bool syncOp);
+int32_t ctgClearCacheEnqueue(SCatalog* pCtg, bool syncOp);
 int32_t ctgMetaRentInit(SCtgRentMgmt *mgmt, uint32_t rentSec, int8_t type);
 int32_t ctgMetaRentAdd(SCtgRentMgmt *mgmt, void *meta, int64_t id, int32_t size);
 int32_t ctgMetaRentGet(SCtgRentMgmt *mgmt, void **res, uint32_t *num, int32_t size);
 int32_t ctgUpdateTbMetaToCache(SCatalog* pCtg, STableMetaOutput* pOut, bool syncReq);
 int32_t ctgStartUpdateThread();
 int32_t ctgRelaunchGetTbMetaTask(SCtgTask *pTask);
-void ctgReleaseVgInfoToCache(SCatalog* pCtg, SCtgDBCache *dbCache);
+void    ctgReleaseVgInfoToCache(SCatalog* pCtg, SCtgDBCache *dbCache);
 int32_t ctgReadTbIndexFromCache(SCatalog* pCtg, SName* pTableName, SArray** pRes);
 int32_t ctgDropTbIndexEnqueue(SCatalog* pCtg, SName* pName, bool syncOp);
 int32_t ctgOpDropTbIndex(SCtgCacheOperation *operation);
 int32_t ctgOpUpdateTbIndex(SCtgCacheOperation *operation);
+int32_t ctgOpClearCache(SCtgCacheOperation *operation);

@ -535,22 +543,22 @@ int32_t ctgMakeAsyncRes(SCtgJob *pJob);
|
|||
int32_t ctgCloneVgInfo(SDBVgInfo *src, SDBVgInfo **dst);
|
||||
int32_t ctgCloneMetaOutput(STableMetaOutput *output, STableMetaOutput **pOutput);
|
||||
int32_t ctgGenerateVgList(SCatalog *pCtg, SHashObj *vgHash, SArray** pList);
|
||||
void ctgFreeJob(void* job);
|
||||
void ctgFreeHandle(SCatalog* pCtg);
|
||||
void ctgFreeVgInfo(SDBVgInfo *vgInfo);
|
||||
void ctgFreeJob(void* job);
|
||||
void ctgFreeHandle(SCatalog* pCtg);
|
||||
void ctgFreeVgInfo(SDBVgInfo *vgInfo);
|
||||
int32_t ctgGetVgInfoFromHashValue(SCatalog *pCtg, SDBVgInfo *dbInfo, const SName *pTableName, SVgroupInfo *pVgroup);
|
||||
void ctgResetTbMetaTask(SCtgTask* pTask);
|
||||
void ctgFreeDbCache(SCtgDBCache *dbCache);
|
||||
void ctgResetTbMetaTask(SCtgTask* pTask);
|
||||
void ctgFreeDbCache(SCtgDBCache *dbCache);
|
||||
int32_t ctgStbVersionSortCompare(const void* key1, const void* key2);
|
||||
int32_t ctgDbVgVersionSortCompare(const void* key1, const void* key2);
|
||||
int32_t ctgStbVersionSearchCompare(const void* key1, const void* key2);
|
||||
int32_t ctgDbVgVersionSearchCompare(const void* key1, const void* key2);
|
||||
void ctgFreeSTableMetaOutput(STableMetaOutput* pOutput);
|
||||
void ctgFreeSTableMetaOutput(STableMetaOutput* pOutput);
|
||||
int32_t ctgUpdateMsgCtx(SCtgMsgCtx* pCtx, int32_t reqType, void* out, char* target);
|
||||
char *ctgTaskTypeStr(CTG_TASK_TYPE type);
|
||||
char * ctgTaskTypeStr(CTG_TASK_TYPE type);
|
||||
int32_t ctgUpdateSendTargetInfo(SMsgSendInfo *pMsgSendInfo, int32_t msgType, SCtgTask* pTask);
|
||||
int32_t ctgCloneTableIndex(SArray* pIndex, SArray** pRes);
|
||||
void ctgFreeSTableIndex(void *info);
|
||||
void ctgFreeSTableIndex(void *info);
|
||||
|
||||
|
||||
extern SCatalogMgmt gCtgMgmt;
|
||||
|
|
|
@ -105,7 +105,7 @@ int32_t ctgRefreshDBVgInfo(SCatalog* pCtg, SRequestConnInfo *pConn, const char*
|
|||
code = ctgGetDBVgInfoFromMnode(pCtg, pConn, &input, &DbOut, NULL);
|
||||
if (code) {
|
||||
if (CTG_DB_NOT_EXIST(code) && (NULL != dbCache)) {
|
||||
ctgDebug("db no longer exist, dbFName:%s, dbId:%" PRIx64, input.db, input.dbId);
|
||||
ctgDebug("db no longer exist, dbFName:%s, dbId:0x%" PRIx64, input.db, input.dbId);
|
||||
ctgDropDbCacheEnqueue(pCtg, input.db, input.dbId);
|
||||
}
|
||||
|
||||
|
@ -571,7 +571,7 @@ int32_t catalogGetHandle(uint64_t clusterId, SCatalog** catalogHandle) {
|
|||
}
|
||||
|
||||
if (NULL == gCtgMgmt.pCluster) {
|
||||
qError("catalog cluster cache are not ready, clusterId:%" PRIx64, clusterId);
|
||||
qError("catalog cluster cache are not ready, clusterId:0x%" PRIx64, clusterId);
|
||||
CTG_ERR_RET(TSDB_CODE_CTG_NOT_READY);
|
||||
}
|
||||
|
||||
|
@ -583,7 +583,7 @@ int32_t catalogGetHandle(uint64_t clusterId, SCatalog** catalogHandle) {
|
|||
|
||||
if (ctg && (*ctg)) {
|
||||
*catalogHandle = *ctg;
|
||||
qDebug("got catalog handle from cache, clusterId:%" PRIx64 ", CTG:%p", clusterId, *ctg);
|
||||
qDebug("got catalog handle from cache, clusterId:0x%" PRIx64 ", CTG:%p", clusterId, *ctg);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
|
@ -612,11 +612,11 @@ int32_t catalogGetHandle(uint64_t clusterId, SCatalog** catalogHandle) {
|
|||
continue;
|
||||
}
|
||||
|
||||
qError("taosHashPut CTG to cache failed, clusterId:%" PRIx64, clusterId);
|
||||
qError("taosHashPut CTG to cache failed, clusterId:0x%" PRIx64, clusterId);
|
||||
CTG_ERR_JRET(TSDB_CODE_CTG_INTERNAL_ERROR);
|
||||
}
|
||||
|
||||
qDebug("add CTG to cache, clusterId:%" PRIx64 ", CTG:%p", clusterId, clusterCtg);
|
||||
qDebug("add CTG to cache, clusterId:0x%" PRIx64 ", CTG:%p", clusterId, clusterCtg);
|
||||
|
||||
break;
|
||||
}
|
||||
|
@ -640,7 +640,7 @@ void catalogFreeHandle(SCatalog* pCtg) {
|
|||
}
|
||||
|
||||
if (taosHashRemove(gCtgMgmt.pCluster, &pCtg->clusterId, sizeof(pCtg->clusterId))) {
|
||||
ctgWarn("taosHashRemove from cluster failed, may already be freed, clusterId:%" PRIx64, pCtg->clusterId);
|
||||
ctgWarn("taosHashRemove from cluster failed, may already be freed, clusterId:0x%" PRIx64, pCtg->clusterId);
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -650,7 +650,7 @@ void catalogFreeHandle(SCatalog* pCtg) {
|
|||
|
||||
ctgFreeHandle(pCtg);
|
||||
|
||||
ctgInfo("handle freed, culsterId:%" PRIx64, clusterId);
|
||||
ctgInfo("handle freed, culsterId:0x%" PRIx64, clusterId);
|
||||
}
|
||||
|
||||
int32_t catalogGetDBVgVersion(SCatalog* pCtg, const char* dbFName, int32_t* version, int64_t* dbId, int32_t* tableNum) {
|
||||
|
@ -1247,6 +1247,23 @@ int32_t catalogUpdateUserAuthInfo(SCatalog* pCtg, SGetUserAuthRsp* pAuth) {
|
|||
CTG_API_LEAVE(ctgUpdateUserEnqueue(pCtg, pAuth, false));
|
||||
}
|
||||
|
||||
int32_t catalogClearCache(void) {
|
||||
CTG_API_ENTER();
|
||||
|
||||
qInfo("start to clear catalog cache");
|
||||
|
||||
if (NULL == gCtgMgmt.pCluster || atomic_load_8((int8_t*)&gCtgMgmt.exit)) {
|
||||
CTG_API_LEAVE(TSDB_CODE_SUCCESS);
|
||||
}
|
||||
|
||||
int32_t code = ctgClearCacheEnqueue(NULL, true);
|
||||
|
||||
qInfo("clear catalog cache end, code: %s", tstrerror(code));
|
||||
|
||||
CTG_API_LEAVE(code);
|
||||
}
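
NOTE: catalogClearCache() is the public entry point behind RESET QUERY CACHE (wired up in execResetQueryCache further down in this commit). A hedged caller-side sketch, not a verbatim caller:

// Illustrative only: drop all cached catalog data on this client.
int32_t code = catalogClearCache();
if (TSDB_CODE_SUCCESS != code) {
  printf("clear catalog cache failed: %s\n", tstrerror(code));
}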

void catalogDestroy(void) {
qInfo("start to destroy catalog");

@ -622,7 +622,7 @@ int32_t ctgHandleTaskEnd(SCtgTask* pTask, int32_t rspCode) {
SCtgJob* pJob = pTask->pJob;
int32_t code = 0;

qDebug("QID:0x%" PRIx64 " task %d end with rsp %s", pJob->queryId, pTask->taskId, tstrerror(rspCode));
qDebug("QID:0x%" PRIx64 " task %d end with res %s", pJob->queryId, pTask->taskId, tstrerror(rspCode));

pTask->code = rspCode;

@ -1276,7 +1276,7 @@ int32_t ctgLaunchJob(SCtgJob *pJob) {
for (int32_t i = 0; i < taskNum; ++i) {
SCtgTask *pTask = taosArrayGet(pJob->pTasks, i);

qDebug("QID:0x%" PRIx64 " start to launch task %d", pJob->queryId, pTask->taskId);
qDebug("QID:0x%" PRIx64 " ctg start to launch task %d", pJob->queryId, pTask->taskId);
CTG_ERR_RET((*gCtgAsyncFps[pTask->type].launchFp)(pTask));
}

@ -69,6 +69,11 @@ SCtgOperation gCtgCacheOperation[CTG_OP_MAX] = {
CTG_OP_DROP_TB_INDEX,
"drop tbIndex",
ctgOpDropTbIndex
},
{
CTG_OP_CLEAR_CACHE,
"clear cache",
ctgOpClearCache
}
};

@ -81,7 +86,7 @@ int32_t ctgRLockVgInfo(SCatalog *pCtg, SCtgDBCache *dbCache, bool *inCache) {
if (dbCache->deleted) {
CTG_UNLOCK(CTG_READ, &dbCache->vgCache.vgLock);

ctgDebug("db is dropping, dbId:%"PRIx64, dbCache->dbId);
ctgDebug("db is dropping, dbId:0x%"PRIx64, dbCache->dbId);

*inCache = false;
return TSDB_CODE_SUCCESS;

@ -92,7 +97,7 @@ int32_t ctgRLockVgInfo(SCatalog *pCtg, SCtgDBCache *dbCache, bool *inCache) {
CTG_UNLOCK(CTG_READ, &dbCache->vgCache.vgLock);

*inCache = false;
ctgDebug("db vgInfo is empty, dbId:%"PRIx64, dbCache->dbId);
ctgDebug("db vgInfo is empty, dbId:0x%"PRIx64, dbCache->dbId);
return TSDB_CODE_SUCCESS;
}

@ -105,7 +110,7 @@ int32_t ctgWLockVgInfo(SCatalog *pCtg, SCtgDBCache *dbCache) {
CTG_LOCK(CTG_WRITE, &dbCache->vgCache.vgLock);

if (dbCache->deleted) {
ctgDebug("db is dropping, dbId:%"PRIx64, dbCache->dbId);
ctgDebug("db is dropping, dbId:0x%"PRIx64, dbCache->dbId);
CTG_UNLOCK(CTG_WRITE, &dbCache->vgCache.vgLock);
CTG_ERR_RET(TSDB_CODE_CTG_DB_DROPPED);
}

@ -280,27 +285,27 @@ int32_t ctgAcquireStbMetaFromCache(SCatalog* pCtg, char *dbFName, uint64_t suid,
int32_t sz = 0;
char* stName = taosHashAcquire(dbCache->stbCache, &suid, sizeof(suid));
if (NULL == stName) {
ctgDebug("stb %" PRIx64 " not in cache, dbFName:%s", suid, dbFName);
ctgDebug("stb 0x%" PRIx64 " not in cache, dbFName:%s", suid, dbFName);
goto _return;
}

pCache = taosHashAcquire(dbCache->tbCache, stName, strlen(stName));
if (NULL == pCache) {
ctgDebug("stb %" PRIx64 " name %s not in cache, dbFName:%s", suid, stName, dbFName);
ctgDebug("stb 0x%" PRIx64 " name %s not in cache, dbFName:%s", suid, stName, dbFName);
taosHashRelease(dbCache->stbCache, stName);
goto _return;
}

CTG_LOCK(CTG_READ, &pCache->metaLock);
if (NULL == pCache->pMeta) {
ctgDebug("stb %" PRIx64 " meta not in cache, dbFName:%s", suid, dbFName);
ctgDebug("stb 0x%" PRIx64 " meta not in cache, dbFName:%s", suid, dbFName);
goto _return;
}

*pDb = dbCache;
*pTb = pCache;

ctgDebug("stb %" PRIx64 " meta got in cache, dbFName:%s", suid, dbFName);
ctgDebug("stb 0x%" PRIx64 " meta got in cache, dbFName:%s", suid, dbFName);

CTG_CACHE_STAT_INC(tbMetaHitNum, 1);

@ -434,14 +439,14 @@ int32_t ctgReadTbMetaFromCache(SCatalog* pCtg, SCtgTbMetaCtx* ctx, STableMeta**
if (NULL == tbCache) {
ctgReleaseTbMetaToCache(pCtg, dbCache, tbCache);
taosMemoryFreeClear(*pTableMeta);
ctgDebug("stb %" PRIx64 " meta not in cache", ctx->tbInfo.suid);
ctgDebug("stb 0x%" PRIx64 " meta not in cache", ctx->tbInfo.suid);
return TSDB_CODE_SUCCESS;
}

STableMeta* stbMeta = tbCache->pMeta;
if (stbMeta->suid != ctx->tbInfo.suid) {
ctgReleaseTbMetaToCache(pCtg, dbCache, tbCache);
ctgError("stb suid %" PRIx64 " in stbCache mis-match, expected suid:%"PRIx64 , stbMeta->suid, ctx->tbInfo.suid);
ctgError("stb suid 0x%" PRIx64 " in stbCache mis-match, expected suid 0x%"PRIx64 , stbMeta->suid, ctx->tbInfo.suid);
CTG_ERR_JRET(TSDB_CODE_CTG_INTERNAL_ERROR);
}

@ -492,7 +497,7 @@ int32_t ctgReadTbVerFromCache(SCatalog *pCtg, SName *pTableName, int32_t *sver,
*sver = tbMeta->sversion;
*tver = tbMeta->tversion;

ctgDebug("Got tb %s ver from cache, dbFName:%s, tbType:%d, sver:%d, tver:%d, suid:%" PRIx64,
ctgDebug("Got tb %s ver from cache, dbFName:%s, tbType:%d, sver:%d, tver:%d, suid:0x%" PRIx64,
pTableName->tname, dbFName, *tbType, *sver, *tver, *suid);

ctgReleaseTbMetaToCache(pCtg, dbCache, tbCache);

@ -507,14 +512,14 @@ int32_t ctgReadTbVerFromCache(SCatalog *pCtg, SName *pTableName, int32_t *sver,
ctgAcquireStbMetaFromCache(pCtg, dbFName, *suid, &dbCache, &tbCache);
if (NULL == tbCache) {
ctgReleaseTbMetaToCache(pCtg, dbCache, tbCache);
ctgDebug("stb %" PRIx64 " meta not in cache", *suid);
ctgDebug("stb 0x%" PRIx64 " meta not in cache", *suid);
return TSDB_CODE_SUCCESS;
}

STableMeta* stbMeta = tbCache->pMeta;
if (stbMeta->suid != *suid) {
ctgReleaseTbMetaToCache(pCtg, dbCache, tbCache);
ctgError("stb suid %" PRIx64 " in stbCache mis-match, expected suid:%" PRIx64 , stbMeta->suid, *suid);
ctgError("stb suid 0x%" PRIx64 " in stbCache mis-match, expected suid:0x%" PRIx64 , stbMeta->suid, *suid);
CTG_ERR_RET(TSDB_CODE_CTG_INTERNAL_ERROR);
}

@ -990,6 +995,33 @@ _return:
}

int32_t ctgClearCacheEnqueue(SCatalog* pCtg, bool syncOp) {
int32_t code = 0;
SCtgCacheOperation *op = taosMemoryCalloc(1, sizeof(SCtgCacheOperation));
op->opId = CTG_OP_CLEAR_CACHE;
op->syncOp = syncOp;

SCtgClearCacheMsg *msg = taosMemoryMalloc(sizeof(SCtgClearCacheMsg));
if (NULL == msg) {
ctgError("malloc %d failed", (int32_t)sizeof(SCtgClearCacheMsg));
CTG_ERR_RET(TSDB_CODE_CTG_MEM_ERROR);
}

msg->pCtg = pCtg;
op->data = msg;

CTG_ERR_JRET(ctgEnqueue(pCtg, op));

return TSDB_CODE_SUCCESS;

_return:

taosMemoryFreeClear(msg);

CTG_RET(code);
}
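
NOTE (hedged): ctgClearCacheEnqueue() follows the same pattern as the other cache operations — allocate an SCtgCacheOperation plus its message and hand both to the update thread via ctgEnqueue(); syncOp decides whether the caller blocks until the operation is processed. A sketch based on the surrounding code:

// NULL pCtg targets every cluster (see ctgOpClearCache below);
// true asks for synchronous completion.
int32_t code = ctgClearCacheEnqueue(NULL, true);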

int32_t ctgMetaRentInit(SCtgRentMgmt *mgmt, uint32_t rentSec, int8_t type) {
mgmt->slotRIdx = 0;
mgmt->slotNum = rentSec / CTG_RENT_SLOT_SECOND;
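// NOTE (hedged, editorial): each rent slot covers CTG_RENT_SLOT_SECOND seconds
// of the rentSec window; in the add/update/remove paths below the write slot
// for an id appears to be derived from something like abs(id % mgmt->slotNum),
// so slotNum bounds how many slots a renewal scan has to walk.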

@ -1019,19 +1051,19 @@ int32_t ctgMetaRentAdd(SCtgRentMgmt *mgmt, void *meta, int64_t id, int32_t size)
if (NULL == slot->meta) {
slot->meta = taosArrayInit(CTG_DEFAULT_RENT_SLOT_SIZE, size);
if (NULL == slot->meta) {
qError("taosArrayInit %d failed, id:%"PRIx64", slot idx:%d, type:%d", CTG_DEFAULT_RENT_SLOT_SIZE, id, widx, mgmt->type);
qError("taosArrayInit %d failed, id:0x%"PRIx64", slot idx:%d, type:%d", CTG_DEFAULT_RENT_SLOT_SIZE, id, widx, mgmt->type);
CTG_ERR_JRET(TSDB_CODE_CTG_MEM_ERROR);
}
}

if (NULL == taosArrayPush(slot->meta, meta)) {
qError("taosArrayPush meta to rent failed, id:%"PRIx64", slot idx:%d, type:%d", id, widx, mgmt->type);
qError("taosArrayPush meta to rent failed, id:0x%"PRIx64", slot idx:%d, type:%d", id, widx, mgmt->type);
CTG_ERR_JRET(TSDB_CODE_CTG_MEM_ERROR);
}

slot->needSort = true;

qDebug("add meta to rent, id:%"PRIx64", slot idx:%d, type:%d", id, widx, mgmt->type);
qDebug("add meta to rent, id:0x%"PRIx64", slot idx:%d, type:%d", id, widx, mgmt->type);

_return:

@ -1047,7 +1079,7 @@ int32_t ctgMetaRentUpdate(SCtgRentMgmt *mgmt, void *meta, int64_t id, int32_t si

CTG_LOCK(CTG_WRITE, &slot->lock);
if (NULL == slot->meta) {
qError("empty meta slot, id:%"PRIx64", slot idx:%d, type:%d", id, widx, mgmt->type);
qError("empty meta slot, id:0x%"PRIx64", slot idx:%d, type:%d", id, widx, mgmt->type);
CTG_ERR_JRET(TSDB_CODE_CTG_INTERNAL_ERROR);
}

@ -1060,20 +1092,20 @@ int32_t ctgMetaRentUpdate(SCtgRentMgmt *mgmt, void *meta, int64_t id, int32_t si

void *orig = taosArraySearch(slot->meta, &id, searchCompare, TD_EQ);
if (NULL == orig) {
qDebug("meta not found in slot, id:%"PRIx64", slot idx:%d, type:%d, size:%d", id, widx, mgmt->type, (int32_t)taosArrayGetSize(slot->meta));
qDebug("meta not found in slot, id:0x%"PRIx64", slot idx:%d, type:%d, size:%d", id, widx, mgmt->type, (int32_t)taosArrayGetSize(slot->meta));
CTG_ERR_JRET(TSDB_CODE_CTG_INTERNAL_ERROR);
}

memcpy(orig, meta, size);

qDebug("meta in rent updated, id:%"PRIx64", slot idx:%d, type:%d", id, widx, mgmt->type);
qDebug("meta in rent updated, id:0x%"PRIx64", slot idx:%d, type:%d", id, widx, mgmt->type);

_return:

CTG_UNLOCK(CTG_WRITE, &slot->lock);

if (code) {
qDebug("meta in rent update failed, will try to add it, code:%x, id:%"PRIx64", slot idx:%d, type:%d", code, id, widx, mgmt->type);
qDebug("meta in rent update failed, will try to add it, code:%x, id:0x%"PRIx64", slot idx:%d, type:%d", code, id, widx, mgmt->type);
CTG_RET(ctgMetaRentAdd(mgmt, meta, id, size));
}

@ -1088,7 +1120,7 @@ int32_t ctgMetaRentRemove(SCtgRentMgmt *mgmt, int64_t id, __compar_fn_t sortComp

CTG_LOCK(CTG_WRITE, &slot->lock);
if (NULL == slot->meta) {
qError("empty meta slot, id:%"PRIx64", slot idx:%d, type:%d", id, widx, mgmt->type);
qError("empty meta slot, id:0x%"PRIx64", slot idx:%d, type:%d", id, widx, mgmt->type);
CTG_ERR_JRET(TSDB_CODE_CTG_INTERNAL_ERROR);
}

@ -1100,13 +1132,13 @@ int32_t ctgMetaRentRemove(SCtgRentMgmt *mgmt, int64_t id, __compar_fn_t sortComp

int32_t idx = taosArraySearchIdx(slot->meta, &id, searchCompare, TD_EQ);
if (idx < 0) {
qError("meta not found in slot, id:%"PRIx64", slot idx:%d, type:%d", id, widx, mgmt->type);
qError("meta not found in slot, id:0x%"PRIx64", slot idx:%d, type:%d", id, widx, mgmt->type);
CTG_ERR_JRET(TSDB_CODE_CTG_INTERNAL_ERROR);
}

taosArrayRemove(slot->meta, idx);

qDebug("meta in rent removed, id:%"PRIx64", slot idx:%d, type:%d", id, widx, mgmt->type);
qDebug("meta in rent removed, id:0x%"PRIx64", slot idx:%d, type:%d", id, widx, mgmt->type);

_return:

@ -1219,11 +1251,11 @@ int32_t ctgAddNewDBCache(SCatalog *pCtg, const char *dbFName, uint64_t dbId) {
SDbVgVersion vgVersion = {.dbId = newDBCache.dbId, .vgVersion = -1};
strncpy(vgVersion.dbFName, dbFName, sizeof(vgVersion.dbFName));

ctgDebug("db added to cache, dbFName:%s, dbId:%"PRIx64, dbFName, dbId);
ctgDebug("db added to cache, dbFName:%s, dbId:0x%"PRIx64, dbFName, dbId);

CTG_ERR_RET(ctgMetaRentAdd(&pCtg->dbRent, &vgVersion, dbId, sizeof(SDbVgVersion)));

ctgDebug("db added to rent, dbFName:%s, vgVersion:%d, dbId:%"PRIx64, dbFName, vgVersion.vgVersion, dbId);
ctgDebug("db added to rent, dbFName:%s, vgVersion:%d, dbId:0x%"PRIx64, dbFName, vgVersion.vgVersion, dbId);

return TSDB_CODE_SUCCESS;

@ -1246,7 +1278,7 @@ void ctgRemoveStbRent(SCatalog* pCtg, SCtgDBCache *dbCache) {
suid = taosHashGetKey(pIter, NULL);

if (TSDB_CODE_SUCCESS == ctgMetaRentRemove(&pCtg->stbRent, *suid, ctgStbVersionSortCompare, ctgStbVersionSearchCompare)) {
ctgDebug("stb removed from rent, suid:%"PRIx64, *suid);
ctgDebug("stb removed from rent, suid:0x%"PRIx64, *suid);
}

pIter = taosHashIterate(dbCache->stbCache, pIter);

@ -1257,7 +1289,7 @@ void ctgRemoveStbRent(SCatalog* pCtg, SCtgDBCache *dbCache) {
int32_t ctgRemoveDBFromCache(SCatalog* pCtg, SCtgDBCache *dbCache, const char* dbFName) {
uint64_t dbId = dbCache->dbId;

ctgInfo("start to remove db from cache, dbFName:%s, dbId:%"PRIx64, dbFName, dbCache->dbId);
ctgInfo("start to remove db from cache, dbFName:%s, dbId:0x%"PRIx64, dbFName, dbCache->dbId);

CTG_LOCK(CTG_WRITE, &dbCache->dbLock);

@ -1268,7 +1300,7 @@ int32_t ctgRemoveDBFromCache(SCatalog* pCtg, SCtgDBCache *dbCache, const char* d
CTG_UNLOCK(CTG_WRITE, &dbCache->dbLock);

CTG_ERR_RET(ctgMetaRentRemove(&pCtg->dbRent, dbId, ctgDbVgVersionSortCompare, ctgDbVgVersionSearchCompare));
ctgDebug("db removed from rent, dbFName:%s, dbId:%"PRIx64, dbFName, dbId);
ctgDebug("db removed from rent, dbFName:%s, dbId:0x%"PRIx64, dbFName, dbId);

if (taosHashRemove(pCtg->dbCache, dbFName, strlen(dbFName))) {
ctgInfo("taosHashRemove from dbCache failed, may be removed, dbFName:%s", dbFName);

@ -1276,7 +1308,7 @@ int32_t ctgRemoveDBFromCache(SCatalog* pCtg, SCtgDBCache *dbCache, const char* d
}

CTG_CACHE_STAT_DEC(dbNum, 1);
ctgInfo("db removed from cache, dbFName:%s, dbId:%"PRIx64, dbFName, dbId);
ctgInfo("db removed from cache, dbFName:%s, dbId:0x%"PRIx64, dbFName, dbId);

return TSDB_CODE_SUCCESS;
}

@ -1339,7 +1371,7 @@ int32_t ctgUpdateRentStbVersion(SCatalog *pCtg, char* dbFName, char* tbName, uin

CTG_ERR_RET(ctgMetaRentUpdate(&pCtg->stbRent, &metaRent, metaRent.suid, sizeof(SSTableVersion), ctgStbVersionSortCompare, ctgStbVersionSearchCompare));

ctgDebug("db %s,%" PRIx64 " stb %s,%" PRIx64 " sver %d tver %d smaVer %d updated to stbRent",
ctgDebug("db %s,0x%" PRIx64 " stb %s,0x%" PRIx64 " sver %d tver %d smaVer %d updated to stbRent",
dbFName, dbId, tbName, suid, metaRent.sversion, metaRent.tversion, metaRent.smaVer);

return TSDB_CODE_SUCCESS;

@ -1349,7 +1381,7 @@ int32_t ctgUpdateRentStbVersion(SCatalog *pCtg, char* dbFName, char* tbName, uin
int32_t ctgWriteTbMetaToCache(SCatalog *pCtg, SCtgDBCache *dbCache, char *dbFName, uint64_t dbId, char *tbName, STableMeta *meta, int32_t metaSize) {
if (NULL == dbCache->tbCache || NULL == dbCache->stbCache) {
taosMemoryFree(meta);
ctgError("db is dropping, dbId:%"PRIx64, dbCache->dbId);
ctgError("db is dropping, dbId:0x%"PRIx64, dbCache->dbId);
CTG_ERR_RET(TSDB_CODE_CTG_DB_DROPPED);
}

@ -1370,10 +1402,10 @@ int32_t ctgWriteTbMetaToCache(SCatalog *pCtg, SCtgDBCache *dbCache, char *dbFNam

if (origType == TSDB_SUPER_TABLE) {
if (taosHashRemove(dbCache->stbCache, &orig->suid, sizeof(orig->suid))) {
ctgError("stb not exist in stbCache, dbFName:%s, stb:%s, suid:%"PRIx64, dbFName, tbName, orig->suid);
ctgError("stb not exist in stbCache, dbFName:%s, stb:%s, suid:0x%"PRIx64, dbFName, tbName, orig->suid);
} else {
CTG_CACHE_STAT_DEC(stblNum, 1);
ctgDebug("stb removed from stbCache, dbFName:%s, stb:%s, suid:%"PRIx64, dbFName, tbName, orig->suid);
ctgDebug("stb removed from stbCache, dbFName:%s, stb:%s, suid:0x%"PRIx64, dbFName, tbName, orig->suid);
}

origSuid = orig->suid;

@ -1407,13 +1439,13 @@ int32_t ctgWriteTbMetaToCache(SCatalog *pCtg, SCtgDBCache *dbCache, char *dbFNam
}

if (origSuid != meta->suid && taosHashPut(dbCache->stbCache, &meta->suid, sizeof(meta->suid), tbName, strlen(tbName) + 1) != 0) {
ctgError("taosHashPut to stable cache failed, suid:%"PRIx64, meta->suid);
ctgError("taosHashPut to stable cache failed, suid:0x%"PRIx64, meta->suid);
CTG_ERR_RET(TSDB_CODE_CTG_MEM_ERROR);
}

CTG_CACHE_STAT_INC(stblNum, 1);

ctgDebug("stb %" PRIx64 " updated to cache, dbFName:%s, tbName:%s, tbType:%d", meta->suid, dbFName, tbName, meta->tableType);
ctgDebug("stb 0x%" PRIx64 " updated to cache, dbFName:%s, tbName:%s, tbType:%d", meta->suid, dbFName, tbName, meta->tableType);

CTG_ERR_RET(ctgUpdateRentStbVersion(pCtg, dbFName, tbName, dbId, meta->suid, pCache));

@ -1424,7 +1456,7 @@ int32_t ctgWriteTbIndexToCache(SCatalog *pCtg, SCtgDBCache *dbCache, char* dbFNa
if (NULL == dbCache->tbCache) {
ctgFreeSTableIndex(*index);
taosMemoryFreeClear(*index);
ctgError("db is dropping, dbId:%"PRIx64, dbCache->dbId);
ctgError("db is dropping, dbId:0x%"PRIx64, dbCache->dbId);
CTG_ERR_RET(TSDB_CODE_CTG_DB_DROPPED);
}

@ -1510,7 +1542,7 @@ int32_t ctgOpUpdateVgroup(SCtgCacheOperation *operation) {
SCtgDBCache *dbCache = NULL;
CTG_ERR_RET(ctgGetAddDBCache(msg->pCtg, dbFName, msg->dbId, &dbCache));
if (NULL == dbCache) {
ctgInfo("conflict db update, ignore this update, dbFName:%s, dbId:%"PRIx64, dbFName, msg->dbId);
ctgInfo("conflict db update, ignore this update, dbFName:%s, dbId:0x%"PRIx64, dbFName, msg->dbId);
CTG_ERR_RET(TSDB_CODE_CTG_INTERNAL_ERROR);
}

@ -1540,7 +1572,7 @@ int32_t ctgOpUpdateVgroup(SCtgCacheOperation *operation) {
vgCache->vgInfo = dbInfo;
msg->dbInfo = NULL;

ctgDebug("db vgInfo updated, dbFName:%s, vgVer:%d, dbId:%"PRIx64, dbFName, vgVersion.vgVersion, vgVersion.dbId);
ctgDebug("db vgInfo updated, dbFName:%s, vgVer:%d, dbId:0x%"PRIx64, dbFName, vgVersion.vgVersion, vgVersion.dbId);

ctgWUnlockVgInfo(dbCache);

@ -1569,7 +1601,7 @@ int32_t ctgOpDropDbCache(SCtgCacheOperation *operation) {
}

if (dbCache->dbId != msg->dbId) {
ctgInfo("dbId already updated, dbFName:%s, dbId:%"PRIx64 ", targetId:%"PRIx64, msg->dbFName, dbCache->dbId, msg->dbId);
ctgInfo("dbId already updated, dbFName:%s, dbId:0x%"PRIx64 ", targetId:0x%"PRIx64, msg->dbFName, dbCache->dbId, msg->dbId);
goto _return;
}

@ -1629,7 +1661,7 @@ int32_t ctgOpUpdateTbMeta(SCtgCacheOperation *operation) {

CTG_ERR_JRET(ctgGetAddDBCache(pCtg, pMeta->dbFName, pMeta->dbId, &dbCache));
if (NULL == dbCache) {
ctgInfo("conflict db update, ignore this update, dbFName:%s, dbId:%" PRIx64, pMeta->dbFName, pMeta->dbId);
ctgInfo("conflict db update, ignore this update, dbFName:%s, dbId:0x%" PRIx64, pMeta->dbFName, pMeta->dbId);
CTG_ERR_JRET(TSDB_CODE_CTG_INTERNAL_ERROR);
}

@ -1673,27 +1705,28 @@ int32_t ctgOpDropStbMeta(SCtgCacheOperation *operation) {
}

if (msg->dbId && (dbCache->dbId != msg->dbId)) {
ctgDebug("dbId already modified, dbFName:%s, current:%"PRIx64", dbId:%"PRIx64", stb:%s, suid:%"PRIx64, msg->dbFName, dbCache->dbId, msg->dbId, msg->stbName, msg->suid);
ctgDebug("dbId already modified, dbFName:%s, current:0x%"PRIx64", dbId:0x%"PRIx64", stb:%s, suid:0x%"PRIx64,
msg->dbFName, dbCache->dbId, msg->dbId, msg->stbName, msg->suid);
return TSDB_CODE_SUCCESS;
}

if (taosHashRemove(dbCache->stbCache, &msg->suid, sizeof(msg->suid))) {
ctgDebug("stb not exist in stbCache, may be removed, dbFName:%s, stb:%s, suid:%"PRIx64, msg->dbFName, msg->stbName, msg->suid);
ctgDebug("stb not exist in stbCache, may be removed, dbFName:%s, stb:%s, suid:0x%"PRIx64, msg->dbFName, msg->stbName, msg->suid);
} else {
CTG_CACHE_STAT_DEC(stblNum, 1);
}

if (taosHashRemove(dbCache->tbCache, msg->stbName, strlen(msg->stbName))) {
ctgError("stb not exist in cache, dbFName:%s, stb:%s, suid:%"PRIx64, msg->dbFName, msg->stbName, msg->suid);
ctgError("stb not exist in cache, dbFName:%s, stb:%s, suid:0x%"PRIx64, msg->dbFName, msg->stbName, msg->suid);
} else {
CTG_CACHE_STAT_DEC(tblNum, 1);
}

ctgInfo("stb removed from cache, dbFName:%s, stbName:%s, suid:%"PRIx64, msg->dbFName, msg->stbName, msg->suid);
ctgInfo("stb removed from cache, dbFName:%s, stbName:%s, suid:0x%"PRIx64, msg->dbFName, msg->stbName, msg->suid);

CTG_ERR_JRET(ctgMetaRentRemove(&msg->pCtg->stbRent, msg->suid, ctgStbVersionSortCompare, ctgStbVersionSearchCompare));

ctgDebug("stb removed from rent, dbFName:%s, stbName:%s, suid:%"PRIx64, msg->dbFName, msg->stbName, msg->suid);
ctgDebug("stb removed from rent, dbFName:%s, stbName:%s, suid:0x%"PRIx64, msg->dbFName, msg->stbName, msg->suid);

_return:

@ -1714,7 +1747,7 @@ int32_t ctgOpDropTbMeta(SCtgCacheOperation *operation) {
}

if (dbCache->dbId != msg->dbId) {
ctgDebug("dbId %" PRIx64 " not match with curId %"PRIx64", dbFName:%s, tbName:%s", msg->dbId, dbCache->dbId, msg->dbFName, msg->tbName);
ctgDebug("dbId 0x%" PRIx64 " not match with curId 0x%"PRIx64", dbFName:%s, tbName:%s", msg->dbId, dbCache->dbId, msg->dbFName, msg->tbName);
return TSDB_CODE_SUCCESS;
}

@ -1898,6 +1931,37 @@ _return:
}

int32_t ctgOpClearCache(SCtgCacheOperation *operation) {
int32_t code = 0;
SCtgClearCacheMsg *msg = operation->data;
SCatalog* pCtg = msg->pCtg;

if (pCtg) {
catalogFreeHandle(pCtg);
goto _return;
}

void* pIter = taosHashIterate(gCtgMgmt.pCluster, NULL);
while (pIter) {
pCtg = *(SCatalog**)pIter;

if (pCtg) {
catalogFreeHandle(pCtg);
}

pIter = taosHashIterate(gCtgMgmt.pCluster, pIter);
}

taosHashClear(gCtgMgmt.pCluster);

_return:

taosMemoryFreeClear(msg);

CTG_RET(code);
}
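
// NOTE: ctgOpClearCache supports two scopes. A non-NULL pCtg frees just that
// cluster's handle; pCtg == NULL iterates gCtgMgmt.pCluster, frees every
// handle, then clears the hash so later catalogGetHandle() calls rebuild
// their entries from scratch.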

void ctgUpdateThreadUnexpectedStopped(void) {
if (!atomic_load_8((int8_t*)&gCtgMgmt.exit) && CTG_IS_LOCKED(&gCtgMgmt.lock) > 0) CTG_UNLOCK(CTG_READ, &gCtgMgmt.lock);
}

@ -1971,11 +2035,10 @@ void* ctgUpdateThreadFunc(void* param) {

CTG_RT_STAT_INC(qDoneNum, 1);

ctgdShowCacheInfo();
ctgdShowClusterCache(pCtg);
}

if (CTG_IS_LOCKED(&gCtgMgmt.lock)) CTG_UNLOCK(CTG_READ, &gCtgMgmt.lock);

qInfo("catalog update thread stopped");

return NULL;

@ -19,7 +19,7 @@
#include "catalogInt.h"

extern SCatalogMgmt gCtgMgmt;
SCtgDebug gCTGDebug = {.lockEnable = true, .apiEnable = true};
SCtgDebug gCTGDebug = {.cacheEnable = true};

void ctgdUserCallback(SMetaData* pResult, void* param, int32_t code) {
ASSERT(*(int32_t*)param == 1);

@ -40,9 +40,9 @@ void ctgdUserCallback(SMetaData* pResult, void* param, int32_t code) {
STableComInfo *c = &p->tableInfo;

if (TSDB_CHILD_TABLE == p->tableType) {
qDebug("table meta: type:%d, vgId:%d, uid:%" PRIx64 ",suid:%" PRIx64, p->tableType, p->vgId, p->uid, p->suid);
qDebug("table meta: type:%d, vgId:%d, uid:0x%" PRIx64 ",suid:0x%" PRIx64, p->tableType, p->vgId, p->uid, p->suid);
} else {
qDebug("table meta: type:%d, vgId:%d, uid:%" PRIx64 ",suid:%" PRIx64 ",sv:%d, tv:%d, tagNum:%d, precision:%d, colNum:%d, rowSize:%d",
qDebug("table meta: type:%d, vgId:%d, uid:0x%" PRIx64 ",suid:0x%" PRIx64 ",sv:%d, tv:%d, tagNum:%d, precision:%d, colNum:%d, rowSize:%d",
p->tableType, p->vgId, p->uid, p->suid, p->sversion, p->tversion, c->numOfTags, c->precision, c->numOfColumns, c->rowSize);
}

@ -75,7 +75,7 @@ void ctgdUserCallback(SMetaData* pResult, void* param, int32_t code) {
num = taosArrayGetSize(pResult->pDbInfo);
for (int32_t i = 0; i < num; ++i) {
SDbInfo *pDb = taosArrayGet(pResult->pDbInfo, i);
qDebug("db %d dbInfo: vgVer:%d, tbNum:%d, dbId:%" PRIx64, i, pDb->vgVer, pDb->tbNum, pDb->dbId);
qDebug("db %d dbInfo: vgVer:%d, tbNum:%d, dbId:0x%" PRIx64, i, pDb->vgVer, pDb->tbNum, pDb->dbId);
}
} else {
qDebug("empty db info");

@ -333,10 +333,10 @@ void ctgdShowTableMeta(SCatalog* pCtg, const char *tbName, STableMeta* p) {
STableComInfo *c = &p->tableInfo;

if (TSDB_CHILD_TABLE == p->tableType) {
ctgDebug("table [%s] meta: type:%d, vgId:%d, uid:%" PRIx64 ",suid:%" PRIx64, tbName, p->tableType, p->vgId, p->uid, p->suid);
ctgDebug("table [%s] meta: type:%d, vgId:%d, uid:0x%" PRIx64 ",suid:0x%" PRIx64, tbName, p->tableType, p->vgId, p->uid, p->suid);
return;
} else {
ctgDebug("table [%s] meta: type:%d, vgId:%d, uid:%" PRIx64 ",suid:%" PRIx64 ",sv:%d, tv:%d, tagNum:%d, precision:%d, colNum:%d, rowSize:%d",
ctgDebug("table [%s] meta: type:%d, vgId:%d, uid:0x%" PRIx64 ",suid:0x%" PRIx64 ",sv:%d, tv:%d, tagNum:%d, precision:%d, colNum:%d, rowSize:%d",
tbName, p->tableType, p->vgId, p->uid, p->suid, p->sversion, p->tversion, c->numOfTags, c->precision, c->numOfColumns, c->rowSize);
}

@ -377,7 +377,7 @@ void ctgdShowDBCache(SCatalog* pCtg, SHashObj *dbHash) {
}
}

ctgDebug("[%d] db [%.*s][%"PRIx64"] %s: metaNum:%d, stbNum:%d, vgVersion:%d, hashMethod:%d, vgNum:%d",
ctgDebug("[%d] db [%.*s][0x%"PRIx64"] %s: metaNum:%d, stbNum:%d, vgVersion:%d, hashMethod:%d, vgNum:%d",
i, (int32_t)len, dbFName, dbCache->dbId, dbCache->deleted?"deleted":"", metaNum, stbNum, vgVersion, hashMethod, vgNum);

pIter = taosHashIterate(dbHash, pIter);

@ -392,13 +392,13 @@ void ctgdShowClusterCache(SCatalog* pCtg) {
return;
}

ctgDebug("## cluster %"PRIx64" %p cache Info BEGIN ##", pCtg->clusterId, pCtg);
ctgDebug("## cluster 0x%"PRIx64" %p cache Info BEGIN ##", pCtg->clusterId, pCtg);
ctgDebug("db:%d meta:%d stb:%d dbRent:%d stbRent:%d", ctgdGetClusterCacheNum(pCtg, CTG_DBG_DB_NUM), ctgdGetClusterCacheNum(pCtg, CTG_DBG_META_NUM),
ctgdGetClusterCacheNum(pCtg, CTG_DBG_STB_NUM), ctgdGetClusterCacheNum(pCtg, CTG_DBG_DB_RENT_NUM), ctgdGetClusterCacheNum(pCtg, CTG_DBG_STB_RENT_NUM));

ctgdShowDBCache(pCtg, pCtg->dbCache);

ctgDebug("## cluster %"PRIx64" %p cache Info END ##", pCtg->clusterId, pCtg);
ctgDebug("## cluster 0x%"PRIx64" %p cache Info END ##", pCtg->clusterId, pCtg);
}

int32_t ctgdShowCacheInfo(void) {

@ -407,6 +407,8 @@ int32_t ctgdShowCacheInfo(void) {
}

CTG_API_ENTER();

qDebug("# total catalog cluster number %d #", taosHashGetSize(gCtgMgmt.pCluster));

SCatalog *pCtg = NULL;
void *pIter = taosHashIterate(gCtgMgmt.pCluster, NULL);

@ -186,13 +186,13 @@ int32_t ctgHandleMsgCallback(void *param, const SDataBuf *pMsg, int32_t rspCode)

SCtgJob* pJob = taosAcquireRef(gCtgMgmt.jobPool, cbParam->refId);
if (NULL == pJob) {
qDebug("job refId %" PRIx64 " already dropped", cbParam->refId);
qDebug("ctg job refId 0x%" PRIx64 " already dropped", cbParam->refId);
goto _return;
}

SCtgTask *pTask = taosArrayGet(pJob->pTasks, cbParam->taskId);

qDebug("QID:0x%" PRIx64 " task %d start to handle rsp %s", pJob->queryId, pTask->taskId, TMSG_INFO(cbParam->reqType + 1));
qDebug("QID:0x%" PRIx64 " ctg task %d start to handle rsp %s", pJob->queryId, pTask->taskId, TMSG_INFO(cbParam->reqType + 1));

CTG_ERR_JRET((*gCtgAsyncFps[pTask->type].handleRspFp)(pTask, cbParam->reqType, pMsg, rspCode));

@ -263,7 +263,7 @@ int32_t ctgAsyncSendMsg(SCatalog* pCtg, SRequestConnInfo *pConn, SCtgTask* pTask
CTG_ERR_JRET(code);
}

ctgDebug("req msg sent, reqId:0x%" PRIx64 ", msg type:%d, %s", pTask->pJob->queryId, msgType, TMSG_INFO(msgType));
ctgDebug("ctg req msg sent, reqId:0x%" PRIx64 ", msg type:%d, %s", pTask->pJob->queryId, msgType, TMSG_INFO(msgType));
return TSDB_CODE_SUCCESS;

_return:

@ -434,7 +434,7 @@ void ctgFreeJob(void* job) {

taosMemoryFree(job);

qDebug("QID:%" PRIx64 ", job %" PRIx64 " freed", qid, rid);
qDebug("QID:0x%" PRIx64 ", ctg job 0x%" PRIx64 " freed", qid, rid);
}

int32_t ctgUpdateMsgCtx(SCtgMsgCtx* pCtx, int32_t reqType, void* out, char* target) {

@ -14,6 +14,7 @@
*/

#include "command.h"
#include "catalog.h"
#include "tdatablock.h"

static int32_t getSchemaBytes(const SSchema* pSchema) {

@ -120,8 +121,7 @@ static int32_t execDescribe(SNode* pStmt, SRetrieveTableRsp** pRsp) {
}

static int32_t execResetQueryCache() {
// todo
return TSDB_CODE_SUCCESS;
return catalogClearCache();
}
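
// NOTE: execResetQueryCache() backs the SQL statement RESET QUERY CACHE
// (dispatched through qExecCommand below); it now delegates to
// catalogClearCache() instead of returning success as a no-op.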

int32_t qExecCommand(SNode* pStmt, SRetrieveTableRsp** pRsp) {

@ -194,6 +194,11 @@ int32_t qExplainGenerateResChildren(SPhysiNode *pNode, SExplainGroup *group, SNo
pPhysiChildren = fillPhysiNode->node.pChildren;
break;
}
case QUERY_NODE_PHYSICAL_PLAN_TABLE_MERGE_SCAN: {
STableMergeScanPhysiNode *mergePhysiNode = (STableMergeScanPhysiNode *)pNode;
pPhysiChildren = mergePhysiNode->scan.node.pChildren;
break;
}
default:
qError("not supported physical node type %d", pNode->type);
QRY_ERR_RET(TSDB_CODE_QRY_APP_ERROR);

@ -398,6 +403,7 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i
break;
}
case QUERY_NODE_PHYSICAL_PLAN_TABLE_SEQ_SCAN:
case QUERY_NODE_PHYSICAL_PLAN_TABLE_MERGE_SCAN:
case QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN: {
STableScanPhysiNode *pTblScanNode = (STableScanPhysiNode *)pNode;
EXPLAIN_ROW_NEW(level, EXPLAIN_TBL_SCAN_FORMAT, pTblScanNode->scan.tableName.tname);

@ -537,6 +537,8 @@ SOperatorInfo* createTableScanOperatorInfo(STableScanPhysiNode* pTableScanNode,
goto _error;
}

//taosSsleep(20);

SDataBlockDescNode* pDescNode = pTableScanNode->scan.node.pOutputDataBlockDesc;

int32_t numOfCols = 0;

@ -172,8 +172,6 @@ const char* nodesNodeName(ENodeType type) {
return "ShowSubscribesStmt";
case QUERY_NODE_SHOW_SMAS_STMT:
return "ShowSmasStmt";
case QUERY_NODE_SHOW_CONFIGS_STMT:
return "ShowConfigsStmt";
case QUERY_NODE_SHOW_QUERIES_STMT:
return "ShowQueriesStmt";
case QUERY_NODE_SHOW_VNODES_STMT:

@ -200,7 +200,6 @@ SNode* nodesMakeNode(ENodeType type) {
case QUERY_NODE_SHOW_CONSUMERS_STMT:
case QUERY_NODE_SHOW_SUBSCRIBES_STMT:
case QUERY_NODE_SHOW_SMAS_STMT:
case QUERY_NODE_SHOW_CONFIGS_STMT:
case QUERY_NODE_SHOW_CONNECTIONS_STMT:
case QUERY_NODE_SHOW_QUERIES_STMT:
case QUERY_NODE_SHOW_VNODES_STMT:

@ -619,7 +618,6 @@ void nodesDestroyNode(SNode* pNode) {
case QUERY_NODE_SHOW_CONSUMERS_STMT:
case QUERY_NODE_SHOW_SUBSCRIBES_STMT:
case QUERY_NODE_SHOW_SMAS_STMT:
case QUERY_NODE_SHOW_CONFIGS_STMT:
case QUERY_NODE_SHOW_CONNECTIONS_STMT:
case QUERY_NODE_SHOW_QUERIES_STMT:
case QUERY_NODE_SHOW_VNODES_STMT:

@ -1451,7 +1451,7 @@ SNode* createKillQueryStmt(SAstCreateContext* pCxt, const SToken* pQueryId) {
CHECK_PARSER_STATUS(pCxt);
SKillQueryStmt* pStmt = (SKillQueryStmt*)nodesMakeNode(QUERY_NODE_KILL_QUERY_STMT);
CHECK_OUT_OF_MEM(pStmt);
strncpy(pStmt->queryId, pQueryId->z, TMIN(pQueryId->n, sizeof(pStmt->queryId) - 1));
trimString(pQueryId->z, pQueryId->n, pStmt->queryId, sizeof(pStmt->queryId) - 1);
return (SNode*)pStmt;
}
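
// NOTE (hedged): unlike the previous strncpy, trimString strips the enclosing
// quote characters while copying, so KILL QUERY 'xxx:yyy' stores xxx:yyy as
// the query id rather than the quoted token.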

@ -387,6 +387,21 @@ static int32_t collectMetaKeyFromShowQueries(SCollectMetaKeyCxt* pCxt, SShowStmt
pCxt->pMetaCache);
}

static int32_t collectMetaKeyFromShowConfigs(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) {
return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_CONFIGS,
pCxt->pMetaCache);
}

static int32_t collectMetaKeyFromShowVariables(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) {
return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_CONFIGS,
pCxt->pMetaCache);
}

static int32_t collectMetaKeyFromShowApps(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) {
return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_PERFORMANCE_SCHEMA_DB, TSDB_PERFS_TABLE_APPS,
pCxt->pMetaCache);
}
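
// NOTE: SHOW APPS reads from the performance schema, so its meta key is
// reserved against TSDB_PERFORMANCE_SCHEMA_DB, while SHOW CONFIGS and
// SHOW VARIABLES above resolve to the information-schema configs table.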

static int32_t collectMetaKeyFromShowTransactions(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) {
return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_PERFORMANCE_SCHEMA_DB, TSDB_PERFS_TABLE_TRANS,
pCxt->pMetaCache);

@ -461,6 +476,10 @@ static int32_t collectMetaKeyFromQuery(SCollectMetaKeyCxt* pCxt, SNode* pStmt) {
return collectMetaKeyFromShowConnections(pCxt, (SShowStmt*)pStmt);
case QUERY_NODE_SHOW_QUERIES_STMT:
return collectMetaKeyFromShowQueries(pCxt, (SShowStmt*)pStmt);
case QUERY_NODE_SHOW_VARIABLE_STMT:
return collectMetaKeyFromShowVariables(pCxt, (SShowStmt*)pStmt);
case QUERY_NODE_SHOW_APPS_STMT:
return collectMetaKeyFromShowApps(pCxt, (SShowStmt*)pStmt);
case QUERY_NODE_SHOW_TRANSACTIONS_STMT:
return collectMetaKeyFromShowTransactions(pCxt, (SShowStmt*)pStmt);
case QUERY_NODE_DELETE_STMT:

@ -3890,7 +3890,7 @@ static int32_t translateDescribe(STranslateContext* pCxt, SDescribeStmt* pStmt)
static int32_t translateKillConnection(STranslateContext* pCxt, SKillStmt* pStmt) {
SKillConnReq killReq = {0};
killReq.connId = pStmt->targetId;
return buildCmdMsg(pCxt, TDMT_MND_KILL_CONN, (FSerializeFunc)tSerializeSKillQueryReq, &killReq);
return buildCmdMsg(pCxt, TDMT_MND_KILL_CONN, (FSerializeFunc)tSerializeSKillConnReq, &killReq);
}

static int32_t translateKillQuery(STranslateContext* pCxt, SKillQueryStmt* pStmt) {

@ -731,6 +731,7 @@ static int32_t opkDoOptimized(SOptimizeContext* pCxt, SSortLogicNode* pSort, SNo
FOREACH(pNode, pSort->node.pParent->pChildren) {
if (nodesEqualNode(pNode, (SNode*)pSort)) {
REPLACE_NODE(pDownNode);
((SLogicNode*)pDownNode)->pParent = pSort->node.pParent;
break;
}
}

@ -177,10 +177,6 @@ char* jobTaskStatusStr(int32_t status) {
return "SUCCEED";
case JOB_TASK_STATUS_FAILED:
return "FAILED";
case JOB_TASK_STATUS_CANCELLING:
return "CANCELLING";
case JOB_TASK_STATUS_CANCELLED:
return "CANCELLED";
case JOB_TASK_STATUS_DROPPING:
return "DROPPING";
default:

@ -44,40 +44,30 @@ int32_t qwDbgValidateStatus(QW_FPARAMS_DEF, int8_t oriStatus, int8_t newStatus,
break;
case JOB_TASK_STATUS_EXECUTING:
if (newStatus != JOB_TASK_STATUS_PARTIAL_SUCCEED && newStatus != JOB_TASK_STATUS_SUCCEED &&
newStatus != JOB_TASK_STATUS_FAILED && newStatus != JOB_TASK_STATUS_CANCELLING &&
newStatus != JOB_TASK_STATUS_CANCELLED && newStatus != JOB_TASK_STATUS_DROPPING) {
newStatus != JOB_TASK_STATUS_FAILED && newStatus != JOB_TASK_STATUS_DROPPING) {
QW_ERR_JRET(TSDB_CODE_QRY_APP_ERROR);
}

break;
case JOB_TASK_STATUS_PARTIAL_SUCCEED:
if (newStatus != JOB_TASK_STATUS_EXECUTING && newStatus != JOB_TASK_STATUS_SUCCEED &&
newStatus != JOB_TASK_STATUS_CANCELLED && newStatus != JOB_TASK_STATUS_FAILED &&
newStatus != JOB_TASK_STATUS_DROPPING) {
newStatus != JOB_TASK_STATUS_FAILED && newStatus != JOB_TASK_STATUS_DROPPING) {
QW_ERR_JRET(TSDB_CODE_QRY_APP_ERROR);
}

break;
case JOB_TASK_STATUS_SUCCEED:
if (newStatus != JOB_TASK_STATUS_CANCELLED && newStatus != JOB_TASK_STATUS_DROPPING &&
newStatus != JOB_TASK_STATUS_FAILED) {
if (newStatus != JOB_TASK_STATUS_DROPPING && newStatus != JOB_TASK_STATUS_FAILED) {
QW_ERR_JRET(TSDB_CODE_QRY_APP_ERROR);
}

break;
case JOB_TASK_STATUS_FAILED:
if (newStatus != JOB_TASK_STATUS_CANCELLED && newStatus != JOB_TASK_STATUS_DROPPING) {
if (newStatus != JOB_TASK_STATUS_DROPPING) {
QW_ERR_JRET(TSDB_CODE_QRY_APP_ERROR);
}
break;

case JOB_TASK_STATUS_CANCELLING:
if (newStatus != JOB_TASK_STATUS_CANCELLED) {
QW_ERR_JRET(TSDB_CODE_QRY_APP_ERROR);
}

break;
case JOB_TASK_STATUS_CANCELLED:
case JOB_TASK_STATUS_DROPPING:
if (newStatus != JOB_TASK_STATUS_FAILED && newStatus != JOB_TASK_STATUS_PARTIAL_SUCCEED) {
QW_ERR_JRET(TSDB_CODE_QRY_APP_ERROR);

@ -614,6 +614,8 @@ int32_t qwProcessCQuery(QW_FPARAMS_DEF, SQWMsg *qwMsg) {
QW_SET_EVENT_PROCESSED(ctx, QW_EVENT_FETCH);

qwBuildAndSendFetchRsp(&qwMsg->connInfo, rsp, dataLen, code);
rsp = NULL;

QW_TASK_DLOG("fetch rsp send, handle:%p, code:%x - %s, dataLen:%d", qwMsg->connInfo.handle, code,
tstrerror(code), dataLen);
} else {

@ -633,7 +635,7 @@ int32_t qwProcessCQuery(QW_FPARAMS_DEF, SQWMsg *qwMsg) {
rsp = NULL;

qwMsg->connInfo = ctx->dataConnInfo;
qwBuildAndSendFetchRsp(&qwMsg->connInfo, rsp, 0, code);
qwBuildAndSendFetchRsp(&qwMsg->connInfo, NULL, 0, code);
QW_TASK_DLOG("fetch rsp send, handle:%p, code:%x - %s, dataLen:%d", qwMsg->connInfo.handle, code, tstrerror(code),
0);
}

@ -48,6 +48,12 @@ enum {
SCH_FETCH_CB,
};

typedef enum {
SCH_OP_NULL = 0,
SCH_OP_EXEC,
SCH_OP_FETCH,
} SCH_OP_TYPE;

typedef struct SSchTrans {
void *pTrans;
void *pHandle;

@ -188,11 +194,15 @@ typedef struct SSchTask {

typedef struct SSchJobAttr {
EExplainMode explainMode;
bool syncSchedule;
bool queryJob;
bool needFlowCtrl;
} SSchJobAttr;

typedef struct {
int32_t op;
bool sync;
} SSchOpStatus;

typedef struct SSchJob {
int64_t refId;
uint64_t queryId;

@ -217,8 +227,8 @@ typedef struct SSchJob {
int8_t status;
SQueryNodeAddr resNode;
tsem_t rspSem;
int8_t userFetch;
int32_t remoteFetch;
SSchOpStatus opStatus;
bool *reqKilled;
SSchTask *fetchTask;
int32_t errCode;
SRWLatch resLock;

@ -227,7 +237,6 @@ typedef struct SSchJob {
int32_t resNumOfRows;
SSchResInfo userRes;
const char *sql;
int32_t userCb;
SQueryProfileSummary summary;
} SSchJob;

@ -285,6 +294,10 @@ extern SSchedulerMgmt schMgmt;
#define SCH_GET_JOB_STATUS(job) atomic_load_8(&(job)->status)
#define SCH_GET_JOB_STATUS_STR(job) jobTaskStatusStr(SCH_GET_JOB_STATUS(job))

#define SCH_JOB_IN_SYNC_OP(job) ((job)->opStatus.op && (job)->opStatus.sync)
#define SCH_JOB_IN_ASYNC_EXEC_OP(job) (((job)->opStatus.op == SCH_OP_EXEC) && (!(job)->opStatus.sync))
#define SCH_JOB_IN_ASYNC_FETCH_OP(job) (((job)->opStatus.op == SCH_OP_FETCH) && (!(job)->opStatus.sync))
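
NOTE (hedged): these macros classify the single in-flight operation recorded in opStatus. An illustrative guard, not a verbatim caller from this commit:

// Only an async-exec job should post its exec result from a callback path.
if (SCH_JOB_IN_ASYNC_EXEC_OP(pJob)) {
  schNotifyUserExecRes(pJob);  // defined later in this commit
}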

#define SCH_SET_JOB_NEED_FLOW_CTRL(_job) (_job)->attr.needFlowCtrl = true
#define SCH_JOB_NEED_FLOW_CTRL(_job) ((_job)->attr.needFlowCtrl)
#define SCH_TASK_NEED_FLOW_CTRL(_job, _task) (SCH_IS_DATA_SRC_QRY_TASK(_task) && SCH_JOB_NEED_FLOW_CTRL(_job) && SCH_IS_LEVEL_UNFINISHED((_task)->level))

@ -356,7 +369,7 @@ int32_t schMakeBrokenLinkVal(SSchJob *pJob, SSchTask *pTask, SRpcBrokenlinkVal *
int32_t schAppendTaskExecNode(SSchJob *pJob, SSchTask *pTask, SQueryNodeAddr *addr, int32_t execIdx);
int32_t schExecStaticExplainJob(SSchedulerReq *pReq, int64_t *job, bool sync);
int32_t schExecJobImpl(SSchedulerReq *pReq, int64_t *job, SQueryResult* pRes, bool sync);
int32_t schChkUpdateJobStatus(SSchJob *pJob, int8_t newStatus);
int32_t schUpdateJobStatus(SSchJob *pJob, int8_t newStatus);
int32_t schCancelJob(SSchJob *pJob);
int32_t schProcessOnJobDropped(SSchJob *pJob, int32_t errCode);
uint64_t schGenTaskId(void);

@ -368,6 +381,8 @@ int32_t schAsyncFetchRows(SSchJob *pJob);
int32_t schUpdateTaskHandle(SSchJob *pJob, SSchTask *pTask, bool dropExecNode, void *handle, int32_t execIdx);
int32_t schProcessOnTaskStatusRsp(SQueryNodeEpId* pEpId, SArray* pStatusList);
void schFreeSMsgSendInfo(SMsgSendInfo *msgSendInfo);
char* schGetOpStr(SCH_OP_TYPE type);
int32_t schBeginOperation(SSchJob *pJob, SCH_OP_TYPE type, bool sync);

#ifdef __cplusplus

@ -21,9 +21,9 @@
#include "tref.h"
#include "trpc.h"

FORCE_INLINE SSchJob *schAcquireJob(int64_t refId) { qDebug("acquire jobId:0x%"PRIx64, refId); return (SSchJob *)taosAcquireRef(schMgmt.jobRef, refId); }
FORCE_INLINE SSchJob *schAcquireJob(int64_t refId) { qDebug("sch acquire jobId:0x%"PRIx64, refId); return (SSchJob *)taosAcquireRef(schMgmt.jobRef, refId); }

FORCE_INLINE int32_t schReleaseJob(int64_t refId) { qDebug("release jobId:0x%"PRIx64, refId); return taosReleaseRef(schMgmt.jobRef, refId); }
FORCE_INLINE int32_t schReleaseJob(int64_t refId) { qDebug("sch release jobId:0x%"PRIx64, refId); return taosReleaseRef(schMgmt.jobRef, refId); }
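
NOTE (hedged): the pair wraps taosAcquireRef/taosReleaseRef on schMgmt.jobRef, so every successful acquire must be balanced by a release. An illustrative pattern:

SSchJob *pJob = schAcquireJob(refId);
if (NULL == pJob) {
  return TSDB_CODE_SCH_STATUS_ERROR;  // job already dropped
}
/* ... operate on pJob ... */
schReleaseJob(refId);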

int32_t schInitTask(SSchJob *pJob, SSchTask *pTask, SSubplan *pPlan, SSchLevel *pLevel) {
pTask->plan = pPlan;

@ -47,14 +47,14 @@ int32_t schInitJob(SSchedulerReq *pReq, SSchJob **pSchJob, SQueryResult* pRes, b
int64_t refId = -1;
SSchJob *pJob = taosMemoryCalloc(1, sizeof(SSchJob));
if (NULL == pJob) {
qError("QID:%" PRIx64 " calloc %d failed", pReq->pDag->queryId, (int32_t)sizeof(SSchJob));
qError("QID:0x%" PRIx64 " calloc %d failed", pReq->pDag->queryId, (int32_t)sizeof(SSchJob));
SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY);
}

pJob->attr.explainMode = pReq->pDag->explainInfo.mode;
pJob->attr.syncSchedule = syncSchedule;
pJob->conn = *pReq->pConn;
pJob->sql = pReq->sql;
pJob->reqKilled = pReq->reqKilled;
pJob->userRes.queryRes = pRes;
pJob->userRes.execFp = pReq->fp;
pJob->userRes.userParam = pReq->cbParam;

@ -108,7 +108,7 @@ int32_t schInitJob(SSchedulerReq *pReq, SSchJob **pSchJob, SQueryResult* pRes, b
atomic_add_fetch_32(&schMgmt.jobNum, 1);

if (NULL == schAcquireJob(refId)) {
SCH_JOB_ELOG("schAcquireJob job failed, refId:%" PRIx64, refId);
SCH_JOB_ELOG("schAcquireJob job failed, refId:0x%" PRIx64, refId);
SCH_ERR_JRET(TSDB_CODE_SCH_STATUS_ERROR);
}

@ -116,7 +116,7 @@ int32_t schInitJob(SSchedulerReq *pReq, SSchJob **pSchJob, SQueryResult* pRes, b

SCH_JOB_DLOG("job refId:0x%" PRIx64" created", pJob->refId);

pJob->status = JOB_TASK_STATUS_NOT_START;
schUpdateJobStatus(pJob, JOB_TASK_STATUS_NOT_START);

*pSchJob = pJob;

@ -155,18 +155,57 @@ void schFreeTask(SSchJob *pJob, SSchTask *pTask) {
}
}

void schUpdateJobErrCode(SSchJob *pJob, int32_t errCode) {
if (TSDB_CODE_SUCCESS == errCode) {
return;
}

int32_t origCode = atomic_load_32(&pJob->errCode);
if (TSDB_CODE_SUCCESS == origCode) {
if (origCode == atomic_val_compare_exchange_32(&pJob->errCode, origCode, errCode)) {
goto _return;
}

origCode = atomic_load_32(&pJob->errCode);
}

if (NEED_CLIENT_HANDLE_ERROR(origCode)) {
return;
}

if (NEED_CLIENT_HANDLE_ERROR(errCode)) {
atomic_store_32(&pJob->errCode, errCode);
goto _return;
}

return;

_return:

SCH_JOB_DLOG("job errCode updated to %x - %s", errCode, tstrerror(errCode));
}
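
// NOTE: the CAS above records the first error observed; afterwards a
// client-actionable code (NEED_CLIENT_HANDLE_ERROR) may still overwrite a
// non-actionable one, but an already client-actionable errCode is never
// downgraded.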

FORCE_INLINE bool schJobNeedToStop(SSchJob *pJob, int8_t *pStatus) {
int8_t status = SCH_GET_JOB_STATUS(pJob);
if (pStatus) {
*pStatus = status;
}

return (status == JOB_TASK_STATUS_FAILED || status == JOB_TASK_STATUS_CANCELLED ||
status == JOB_TASK_STATUS_CANCELLING || status == JOB_TASK_STATUS_DROPPING ||
if (*pJob->reqKilled) {
schUpdateJobStatus(pJob, JOB_TASK_STATUS_DROPPING);
schUpdateJobErrCode(pJob, TSDB_CODE_TSC_QUERY_KILLED);

return true;
}

return (status == JOB_TASK_STATUS_FAILED || status == JOB_TASK_STATUS_DROPPING ||
status == JOB_TASK_STATUS_SUCCEED);
}

int32_t schChkUpdateJobStatus(SSchJob *pJob, int8_t newStatus) {
int32_t schUpdateJobStatus(SSchJob *pJob, int8_t newStatus) {
int32_t code = 0;

int8_t oriStatus = 0;

@ -175,7 +214,11 @@ int32_t schChkUpdateJobStatus(SSchJob *pJob, int8_t newStatus) {
oriStatus = SCH_GET_JOB_STATUS(pJob);

if (oriStatus == newStatus) {
SCH_ERR_JRET(TSDB_CODE_QRY_APP_ERROR);
if (newStatus == JOB_TASK_STATUS_DROPPING) {
SCH_ERR_JRET(TSDB_CODE_SCH_JOB_IS_DROPPING);
}

SCH_ERR_JRET(TSDB_CODE_SCH_IGNORE_ERROR);
}

switch (oriStatus) {

@ -186,14 +229,13 @@ int32_t schChkUpdateJobStatus(SSchJob *pJob, int8_t newStatus) {

break;
case JOB_TASK_STATUS_NOT_START:
if (newStatus != JOB_TASK_STATUS_EXECUTING) {
if (newStatus != JOB_TASK_STATUS_EXECUTING && newStatus != JOB_TASK_STATUS_DROPPING) {
SCH_ERR_JRET(TSDB_CODE_QRY_APP_ERROR);
}

break;
case JOB_TASK_STATUS_EXECUTING:
if (newStatus != JOB_TASK_STATUS_PARTIAL_SUCCEED && newStatus != JOB_TASK_STATUS_FAILED &&
newStatus != JOB_TASK_STATUS_CANCELLING && newStatus != JOB_TASK_STATUS_CANCELLED &&
newStatus != JOB_TASK_STATUS_DROPPING) {
SCH_ERR_JRET(TSDB_CODE_QRY_APP_ERROR);
}

@ -208,13 +250,11 @@ int32_t schChkUpdateJobStatus(SSchJob *pJob, int8_t newStatus) {
break;
case JOB_TASK_STATUS_SUCCEED:
case JOB_TASK_STATUS_FAILED:
case JOB_TASK_STATUS_CANCELLING:
if (newStatus != JOB_TASK_STATUS_DROPPING) {
SCH_ERR_JRET(TSDB_CODE_QRY_APP_ERROR);
}

break;
case JOB_TASK_STATUS_CANCELLED:
case JOB_TASK_STATUS_DROPPING:
SCH_ERR_JRET(TSDB_CODE_QRY_JOB_FREED);
break;

@ -238,8 +278,67 @@ int32_t schChkUpdateJobStatus(SSchJob *pJob, int8_t newStatus) {
_return:

SCH_JOB_ELOG("invalid job status update, from %s to %s", jobTaskStatusStr(oriStatus), jobTaskStatusStr(newStatus));
SCH_ERR_RET(code);
SCH_RET(code);
}

void schEndOperation(SSchJob *pJob) {
int32_t op = atomic_load_32(&pJob->opStatus.op);
if (SCH_OP_NULL == op) {
SCH_JOB_DLOG("job already not in any operation, status:%s", jobTaskStatusStr(pJob->status));
return;
}

atomic_store_32(&pJob->opStatus.op, SCH_OP_NULL);

SCH_JOB_DLOG("job end %s operation", schGetOpStr(op));
}

int32_t schBeginOperation(SSchJob *pJob, SCH_OP_TYPE type, bool sync) {
int32_t code = 0;
int8_t status = 0;

if (schJobNeedToStop(pJob, &status)) {
SCH_JOB_ELOG("abort op %s cause of job need to stop", schGetOpStr(type));
SCH_ERR_JRET(pJob->errCode);
}

if (SCH_OP_NULL != atomic_val_compare_exchange_32(&pJob->opStatus.op, SCH_OP_NULL, type)) {
SCH_JOB_ELOG("job already in %s operation", schGetOpStr(pJob->opStatus.op));
SCH_ERR_JRET(TSDB_CODE_TSC_APP_ERROR);
}

SCH_JOB_DLOG("job start %s operation", schGetOpStr(pJob->opStatus.op));

pJob->opStatus.sync = sync;

switch (type) {
case SCH_OP_EXEC:
SCH_ERR_JRET(schUpdateJobStatus(pJob, JOB_TASK_STATUS_EXECUTING));
break;
case SCH_OP_FETCH:
if (!SCH_JOB_NEED_FETCH(pJob)) {
SCH_JOB_ELOG("no need to fetch data, status:%s", SCH_GET_JOB_STATUS_STR(pJob));
SCH_ERR_JRET(TSDB_CODE_QRY_APP_ERROR);
}

if (status != JOB_TASK_STATUS_PARTIAL_SUCCEED) {
SCH_JOB_ELOG("job status error for fetch, status:%s", jobTaskStatusStr(status));
SCH_ERR_JRET(TSDB_CODE_SCH_STATUS_ERROR);
}
break;
default:
SCH_JOB_ELOG("unknown operation type %d", type);
SCH_ERR_JRET(TSDB_CODE_TSC_APP_ERROR);
}

return TSDB_CODE_SUCCESS;

_return:

schEndOperation(pJob);

SCH_RET(code);
}
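
NOTE (hedged): callers bracket every user-visible operation with these helpers; the CAS on opStatus.op guarantees at most one operation is in flight per job. A simplified sketch, not a verbatim caller:

// Illustrative only: begin, do the work, then clear the op slot.
SCH_ERR_RET(schBeginOperation(pJob, SCH_OP_EXEC, /*sync=*/true));
/* ... drive the job ... */
schEndOperation(pJob);  // resets opStatus.op to SCH_OP_NULL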
|
||||
|
||||
int32_t schBuildTaskRalation(SSchJob *pJob, SHashObj *planToTask) {
|
||||
|
@ -278,7 +377,7 @@ int32_t schBuildTaskRalation(SSchJob *pJob, SHashObj *planToTask) {
|
|||
SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY);
|
||||
}
|
||||
|
||||
SCH_TASK_DLOG("children info, the %d child TID %" PRIx64, n, (*childTask)->taskId);
|
||||
SCH_TASK_DLOG("children info, the %d child TID 0x%" PRIx64, n, (*childTask)->taskId);
|
||||
}
|
||||
|
||||
if (parentNum > 0) {
|
||||
|
@ -312,7 +411,7 @@ int32_t schBuildTaskRalation(SSchJob *pJob, SHashObj *planToTask) {
|
|||
SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY);
|
||||
}
|
||||
|
||||
SCH_TASK_DLOG("parents info, the %d parent TID %" PRIx64, n, (*parentTask)->taskId);
|
||||
SCH_TASK_DLOG("parents info, the %d parent TID 0x%" PRIx64, n, (*parentTask)->taskId);
|
||||
}
|
||||
|
||||
SCH_TASK_DLOG("level:%d, parentNum:%d, childNum:%d", i, parentNum, childNum);
|
||||
|
@ -785,37 +884,6 @@ int32_t schHandleTaskRetry(SSchJob *pJob, SSchTask *pTask) {
|
|||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
void schUpdateJobErrCode(SSchJob *pJob, int32_t errCode) {
|
||||
if (TSDB_CODE_SUCCESS == errCode) {
|
||||
return;
|
||||
}
|
||||
|
||||
int32_t origCode = atomic_load_32(&pJob->errCode);
|
||||
if (TSDB_CODE_SUCCESS == origCode) {
|
||||
if (origCode == atomic_val_compare_exchange_32(&pJob->errCode, origCode, errCode)) {
|
||||
goto _return;
|
||||
}
|
||||
|
||||
origCode = atomic_load_32(&pJob->errCode);
|
||||
}
|
||||
|
||||
if (NEED_CLIENT_HANDLE_ERROR(origCode)) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (NEED_CLIENT_HANDLE_ERROR(errCode)) {
|
||||
atomic_store_32(&pJob->errCode, errCode);
|
||||
goto _return;
|
||||
}
|
||||
|
||||
return;
|
||||
|
||||
_return:
|
||||
|
||||
SCH_JOB_DLOG("job errCode updated to %x - %s", errCode, tstrerror(errCode));
|
||||
}
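
A hedged summary of the precedence rule the CAS loop above implements:

// Error-code precedence in schUpdateJobErrCode() (summary only):
//   new code is SUCCESS                 -> keep the current job error
//   job has no error yet                -> CAS the new code in, retry on race
//   current error is NEED_CLIENT_HANDLE -> keep it, the client must see it
//   new error is NEED_CLIENT_HANDLE     -> it overrides the current error
//   otherwise                           -> the first recorded error wins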
|
||||
|
||||
|
||||
int32_t schSetJobQueryRes(SSchJob* pJob, SQueryResult* pRes) {
|
||||
pRes->code = atomic_load_32(&pJob->errCode);
|
||||
pRes->numOfRows = pJob->resNumOfRows;
|
||||
|
@ -828,7 +896,7 @@ int32_t schSetJobQueryRes(SSchJob* pJob, SQueryResult* pRes) {
|
|||
int32_t schSetJobFetchRes(SSchJob* pJob, void** pData) {
|
||||
int32_t code = 0;
|
||||
if (pJob->resData && ((SRetrieveTableRsp *)pJob->resData)->completed) {
|
||||
SCH_ERR_RET(schChkUpdateJobStatus(pJob, JOB_TASK_STATUS_SUCCEED));
|
||||
SCH_ERR_RET(schUpdateJobStatus(pJob, JOB_TASK_STATUS_SUCCEED));
|
||||
}
|
||||
|
||||
while (true) {
|
||||
|
@ -855,15 +923,17 @@ int32_t schSetJobFetchRes(SSchJob* pJob, void** pData) {
|
|||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
int32_t schNotifyUserQueryRes(SSchJob* pJob) {
|
||||
pJob->userRes.queryRes = taosMemoryCalloc(1, sizeof(*pJob->userRes.queryRes));
|
||||
if (pJob->userRes.queryRes) {
|
||||
schSetJobQueryRes(pJob, pJob->userRes.queryRes);
|
||||
int32_t schNotifyUserExecRes(SSchJob* pJob) {
|
||||
SQueryResult* pRes = taosMemoryCalloc(1, sizeof(SQueryResult));
|
||||
if (pRes) {
|
||||
schSetJobQueryRes(pJob, pRes);
|
||||
}
|
||||
|
||||
(*pJob->userRes.execFp)(pJob->userRes.queryRes, pJob->userRes.userParam, atomic_load_32(&pJob->errCode));
|
||||
schEndOperation(pJob);
|
||||
|
||||
pJob->userRes.queryRes = NULL;
|
||||
SCH_JOB_DLOG("sch start to invoke exec cb, code: %s", tstrerror(pJob->errCode));
|
||||
(*pJob->userRes.execFp)(pRes, pJob->userRes.userParam, atomic_load_32(&pJob->errCode));
|
||||
SCH_JOB_DLOG("sch end from query cb, code: %s", tstrerror(pJob->errCode));
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
@ -871,36 +941,52 @@ int32_t schNotifyUserQueryRes(SSchJob* pJob) {
|
|||
int32_t schNotifyUserFetchRes(SSchJob* pJob) {
|
||||
void* pRes = NULL;
|
||||
|
||||
SCH_ERR_RET(schSetJobFetchRes(pJob, &pRes));
|
||||
schSetJobFetchRes(pJob, &pRes);
|
||||
|
||||
schEndOperation(pJob);
|
||||
|
||||
SCH_JOB_DLOG("sch start to invoke fetch cb, code: %s", tstrerror(pJob->errCode));
|
||||
(*pJob->userRes.fetchFp)(pRes, pJob->userRes.userParam, atomic_load_32(&pJob->errCode));
|
||||
SCH_JOB_DLOG("sch end from fetch cb, code: %s", tstrerror(pJob->errCode));
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
void schPostJobRes(SSchJob *pJob, SCH_OP_TYPE op) {
|
||||
if (SCH_OP_NULL == pJob->opStatus.op) {
|
||||
SCH_JOB_DLOG("job not in any op, no need to post job res, status:%s", jobTaskStatusStr(pJob->status));
|
||||
return;
|
||||
}
|
||||
|
||||
if (op && pJob->opStatus.op != op) {
|
||||
SCH_JOB_ELOG("job in op %s mis-match with expected %s", schGetOpStr(pJob->opStatus.op), schGetOpStr(op));
|
||||
return;
|
||||
}
|
||||
|
||||
if (SCH_JOB_IN_SYNC_OP(pJob)) {
|
||||
tsem_post(&pJob->rspSem);
|
||||
} else if (SCH_JOB_IN_ASYNC_EXEC_OP(pJob)) {
|
||||
schNotifyUserExecRes(pJob);
|
||||
} else if (SCH_JOB_IN_ASYNC_FETCH_OP(pJob)) {
|
||||
schNotifyUserFetchRes(pJob);
|
||||
} else {
|
||||
SCH_JOB_ELOG("job not in any operation, status:%s", jobTaskStatusStr(pJob->status));
|
||||
}
|
||||
}
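
How schPostJobRes() hands a finished operation back to the caller, summarized descriptively (not code from this commit):

// schPostJobRes(pJob, op) delivery paths:
//   sync op in progress     -> tsem_post(&pJob->rspSem) wakes the waiter
//   async EXEC in progress  -> schNotifyUserExecRes() fires userRes.execFp
//   async FETCH in progress -> schNotifyUserFetchRes() fires userRes.fetchFp
// Passing op == 0 (as schProcessOnJobFailureImpl below does) skips the
// op-type check, so a failure is posted to whichever op is active.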
|
||||
|
||||
int32_t schProcessOnJobFailureImpl(SSchJob *pJob, int32_t status, int32_t errCode) {
|
||||
// if already FAILED, no more processing
|
||||
SCH_ERR_RET(schChkUpdateJobStatus(pJob, status));
|
||||
SCH_ERR_RET(schUpdateJobStatus(pJob, status));
|
||||
|
||||
schUpdateJobErrCode(pJob, errCode);
|
||||
|
||||
if (atomic_load_8(&pJob->userFetch) || pJob->attr.syncSchedule) {
|
||||
tsem_post(&pJob->rspSem);
|
||||
}
|
||||
|
||||
|
||||
int32_t code = atomic_load_32(&pJob->errCode);
|
||||
|
||||
SCH_JOB_DLOG("job failed with error: %s", tstrerror(code));
|
||||
|
||||
if (!pJob->attr.syncSchedule) {
|
||||
if (SCH_EXEC_CB == atomic_val_compare_exchange_32(&pJob->userCb, SCH_EXEC_CB, 0)) {
|
||||
schNotifyUserQueryRes(pJob);
|
||||
} else if (SCH_FETCH_CB == atomic_val_compare_exchange_32(&pJob->userCb, SCH_FETCH_CB, 0)) {
|
||||
atomic_val_compare_exchange_8(&pJob->userFetch, 1, 0);
|
||||
schNotifyUserFetchRes(pJob);
|
||||
}
|
||||
if (code) {
|
||||
SCH_JOB_DLOG("job failed with error: %s", tstrerror(code));
|
||||
}
|
||||
|
||||
schPostJobRes(pJob, 0);
|
||||
|
||||
SCH_RET(code);
|
||||
}
|
||||
|
||||
|
@ -918,20 +1004,9 @@ int32_t schProcessOnJobDropped(SSchJob *pJob, int32_t errCode) {
|
|||
int32_t schProcessOnJobPartialSuccess(SSchJob *pJob) {
|
||||
int32_t code = 0;
|
||||
|
||||
SCH_ERR_RET(schChkUpdateJobStatus(pJob, JOB_TASK_STATUS_PARTIAL_SUCCEED));
|
||||
SCH_ERR_RET(schUpdateJobStatus(pJob, JOB_TASK_STATUS_PARTIAL_SUCCEED));
|
||||
|
||||
if (pJob->attr.syncSchedule) {
|
||||
tsem_post(&pJob->rspSem);
|
||||
} else if (SCH_EXEC_CB == atomic_val_compare_exchange_32(&pJob->userCb, SCH_EXEC_CB, 0)) {
|
||||
schNotifyUserQueryRes(pJob);
|
||||
} else if (SCH_FETCH_CB == atomic_val_compare_exchange_32(&pJob->userCb, SCH_FETCH_CB, 0)) {
|
||||
atomic_val_compare_exchange_8(&pJob->userFetch, 1, 0);
|
||||
schNotifyUserFetchRes(pJob);
|
||||
}
|
||||
|
||||
if (atomic_load_8(&pJob->userFetch)) {
|
||||
SCH_ERR_JRET(schFetchFromRemote(pJob));
|
||||
}
|
||||
schPostJobRes(pJob, SCH_OP_EXEC);
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
|
||||
|
@ -940,16 +1015,8 @@ _return:
|
|||
SCH_RET(schProcessOnJobFailure(pJob, code));
|
||||
}
|
||||
|
||||
void schProcessOnDataFetched(SSchJob *job) {
|
||||
atomic_val_compare_exchange_32(&job->remoteFetch, 1, 0);
|
||||
|
||||
if (job->attr.syncSchedule) {
|
||||
tsem_post(&job->rspSem);
|
||||
} else if (SCH_FETCH_CB == atomic_val_compare_exchange_32(&job->userCb, SCH_FETCH_CB, 0)) {
|
||||
atomic_val_compare_exchange_8(&job->userFetch, 1, 0);
|
||||
|
||||
schNotifyUserFetchRes(job);
|
||||
}
|
||||
void schProcessOnDataFetched(SSchJob *pJob) {
|
||||
schPostJobRes(pJob, SCH_OP_FETCH);
|
||||
}
|
||||
|
||||
// Note: no more task error processing, handled inside the function
|
||||
|
@ -1109,7 +1176,7 @@ int32_t schProcessOnTaskSuccess(SSchJob *pJob, SSchTask *pTask) {
|
|||
SCH_UNLOCK(SCH_WRITE, &parent->lock);
|
||||
|
||||
if (SCH_TASK_READY_FOR_LAUNCH(readyNum, parent)) {
|
||||
SCH_TASK_DLOG("all %d children task done, start to launch parent task %" PRIx64, readyNum, parent->taskId);
|
||||
SCH_TASK_DLOG("all %d children task done, start to launch parent task 0x%" PRIx64, readyNum, parent->taskId);
|
||||
SCH_ERR_RET(schLaunchTask(pJob, parent));
|
||||
}
|
||||
}
|
||||
|
@ -1127,15 +1194,8 @@ _return:
|
|||
int32_t schFetchFromRemote(SSchJob *pJob) {
|
||||
int32_t code = 0;
|
||||
|
||||
if (atomic_val_compare_exchange_32(&pJob->remoteFetch, 0, 1) != 0) {
|
||||
SCH_JOB_ELOG("prior fetching not finished, remoteFetch:%d", atomic_load_32(&pJob->remoteFetch));
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
void *resData = atomic_load_ptr(&pJob->resData);
|
||||
if (resData) {
|
||||
atomic_val_compare_exchange_32(&pJob->remoteFetch, 1, 0);
|
||||
|
||||
SCH_JOB_DLOG("res already fetched, res:%p", resData);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
@ -1146,8 +1206,6 @@ int32_t schFetchFromRemote(SSchJob *pJob) {
|
|||
|
||||
_return:
|
||||
|
||||
atomic_val_compare_exchange_32(&pJob->remoteFetch, 1, 0);
|
||||
|
||||
SCH_RET(schProcessOnTaskFailure(pJob, pJob->fetchTask, code));
|
||||
}
|
||||
|
||||
|
@ -1291,7 +1349,7 @@ int32_t schGetTaskFromList(SHashObj *pTaskList, uint64_t taskId, SSchTask **pTas
|
|||
int32_t schGetTaskInJob(SSchJob *pJob, uint64_t taskId, SSchTask **pTask) {
|
||||
schGetTaskFromList(pJob->taskList, taskId, pTask);
|
||||
if (NULL == *pTask) {
|
||||
SCH_JOB_ELOG("task not found in job task list, taskId:%" PRIx64, taskId);
|
||||
SCH_JOB_ELOG("task not found in job task list, taskId:0x%" PRIx64, taskId);
|
||||
SCH_ERR_RET(TSDB_CODE_SCH_INTERNAL_ERROR);
|
||||
}
|
||||
|
||||
|
@ -1382,8 +1440,6 @@ int32_t schLaunchLevelTasks(SSchJob *pJob, SSchLevel *level) {
|
|||
int32_t schLaunchJob(SSchJob *pJob) {
|
||||
SSchLevel *level = taosArrayGet(pJob->levels, pJob->levelIdx);
|
||||
|
||||
SCH_ERR_RET(schChkUpdateJobStatus(pJob, JOB_TASK_STATUS_EXECUTING));
|
||||
|
||||
SCH_ERR_RET(schChkJobNeedFlowCtrl(pJob, level));
|
||||
|
||||
SCH_ERR_RET(schLaunchLevelTasks(pJob, level));
|
||||
|
@ -1466,9 +1522,9 @@ void schFreeJobImpl(void *job) {
|
|||
|
||||
taosMemoryFreeClear(pJob->userRes.queryRes);
|
||||
taosMemoryFreeClear(pJob->resData);
|
||||
taosMemoryFreeClear(pJob);
|
||||
taosMemoryFree(pJob);
|
||||
|
||||
qDebug("QID:0x%" PRIx64 " job freed, refId:%" PRIx64 ", pointer:%p", queryId, refId, pJob);
|
||||
qDebug("QID:0x%" PRIx64 " sch job freed, refId:0x%" PRIx64 ", pointer:%p", queryId, refId, pJob);
|
||||
|
||||
int32_t jobNum = atomic_sub_fetch_32(&schMgmt.jobNum, 1);
|
||||
if (jobNum == 0) {
|
||||
|
@ -1483,26 +1539,36 @@ int32_t schExecJobImpl(SSchedulerReq *pReq, int64_t *job, SQueryResult* pRes, bo
|
|||
|
||||
int32_t code = 0;
|
||||
SSchJob *pJob = NULL;
|
||||
SCH_ERR_RET(schInitJob(pReq, &pJob, pRes, sync));
|
||||
SCH_ERR_JRET(schInitJob(pReq, &pJob, pRes, sync));
|
||||
|
||||
qDebug("QID:0x%" PRIx64 " job refId 0x%"PRIx64 " started", pReq->pDag->queryId, pJob->refId);
|
||||
qDebug("QID:0x%" PRIx64 " sch job refId 0x%"PRIx64 " started", pReq->pDag->queryId, pJob->refId);
|
||||
*job = pJob->refId;
|
||||
|
||||
if (!sync) {
|
||||
pJob->userCb = SCH_EXEC_CB;
|
||||
}
|
||||
SCH_ERR_JRET(schBeginOperation(pJob, SCH_OP_EXEC, sync));
|
||||
|
||||
SCH_ERR_JRET(schLaunchJob(pJob));
|
||||
code = schLaunchJob(pJob);
|
||||
|
||||
if (sync) {
|
||||
SCH_JOB_DLOG("will wait for rsp now, job status:%s", SCH_GET_JOB_STATUS_STR(pJob));
|
||||
tsem_wait(&pJob->rspSem);
|
||||
|
||||
schEndOperation(pJob);
|
||||
} else if (code) {
|
||||
schPostJobRes(pJob, SCH_OP_EXEC);
|
||||
}
|
||||
|
||||
SCH_JOB_DLOG("job exec done, job status:%s, jobId:0x%"PRIx64, SCH_GET_JOB_STATUS_STR(pJob), pJob->refId);
|
||||
SCH_JOB_DLOG("job exec done, job status:%s, jobId:0x%" PRIx64, SCH_GET_JOB_STATUS_STR(pJob), pJob->refId);
|
||||
|
||||
schReleaseJob(pJob->refId);
|
||||
|
||||
SCH_RET(code);
|
||||
|
||||
_return:
|
||||
|
||||
if (!sync) {
|
||||
pReq->fp(NULL, pReq->cbParam, code);
|
||||
}
|
||||
|
||||
schReleaseJob(pJob->refId);
|
||||
|
||||
SCH_RET(code);
|
||||
|
@ -1536,10 +1602,10 @@ int32_t schAsyncExecJob(SSchedulerReq *pReq, int64_t *pJob) {
|
|||
*pJob = 0;
|
||||
|
||||
if (EXPLAIN_MODE_STATIC == pReq->pDag->explainInfo.mode) {
|
||||
SCH_ERR_RET(schExecStaticExplainJob(pReq, pJob, false));
|
||||
} else {
|
||||
SCH_ERR_RET(schExecJobImpl(pReq, pJob, NULL, false));
|
||||
SCH_RET(schExecStaticExplainJob(pReq, pJob, false));
|
||||
}
|
||||
|
||||
SCH_ERR_RET(schExecJobImpl(pReq, pJob, NULL, false));
|
||||
|
||||
return code;
|
||||
}
|
||||
|
@ -1550,19 +1616,29 @@ int32_t schExecStaticExplainJob(SSchedulerReq *pReq, int64_t *job, bool sync) {
|
|||
int32_t code = 0;
|
||||
SSchJob *pJob = taosMemoryCalloc(1, sizeof(SSchJob));
|
||||
if (NULL == pJob) {
|
||||
qError("QID:%" PRIx64 " calloc %d failed", pReq->pDag->queryId, (int32_t)sizeof(SSchJob));
|
||||
SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY);
|
||||
qError("QID:0x%" PRIx64 " calloc %d failed", pReq->pDag->queryId, (int32_t)sizeof(SSchJob));
|
||||
code = TSDB_CODE_QRY_OUT_OF_MEMORY;
|
||||
pReq->fp(NULL, pReq->cbParam, code);
|
||||
SCH_ERR_RET(code);
|
||||
}
|
||||
|
||||
pJob->sql = pReq->sql;
|
||||
pJob->reqKilled = pReq->reqKilled;
|
||||
pJob->attr.queryJob = true;
|
||||
pJob->attr.syncSchedule = sync;
|
||||
pJob->attr.explainMode = pReq->pDag->explainInfo.mode;
|
||||
pJob->queryId = pReq->pDag->queryId;
|
||||
pJob->subPlans = pReq->pDag->pSubplans;
|
||||
pJob->userRes.execFp = pReq->fp;
|
||||
pJob->userRes.userParam = pReq->cbParam;
|
||||
|
||||
schUpdateJobStatus(pJob, JOB_TASK_STATUS_NOT_START);
|
||||
|
||||
code = schBeginOperation(pJob, SCH_OP_EXEC, sync);
|
||||
if (code) {
|
||||
pReq->fp(NULL, pReq->cbParam, code);
|
||||
SCH_ERR_RET(code);
|
||||
}
|
||||
|
||||
SCH_ERR_JRET(qExecStaticExplain(pReq->pDag, (SRetrieveTableRsp **)&pJob->resData));
|
||||
|
||||
int64_t refId = taosAddRef(schMgmt.jobRef, pJob);
|
||||
|
@ -1572,21 +1648,23 @@ int32_t schExecStaticExplainJob(SSchedulerReq *pReq, int64_t *job, bool sync) {
|
|||
}
|
||||
|
||||
if (NULL == schAcquireJob(refId)) {
|
||||
SCH_JOB_ELOG("schAcquireJob job failed, refId:%" PRIx64, refId);
|
||||
SCH_ERR_RET(TSDB_CODE_SCH_STATUS_ERROR);
|
||||
SCH_JOB_ELOG("schAcquireJob job failed, refId:0x%" PRIx64, refId);
|
||||
SCH_ERR_JRET(TSDB_CODE_SCH_STATUS_ERROR);
|
||||
}
|
||||
|
||||
pJob->refId = refId;
|
||||
|
||||
SCH_JOB_DLOG("job refId:%" PRIx64, pJob->refId);
|
||||
SCH_JOB_DLOG("job refId:0x%" PRIx64, pJob->refId);
|
||||
|
||||
pJob->status = JOB_TASK_STATUS_PARTIAL_SUCCEED;
|
||||
|
||||
*job = pJob->refId;
|
||||
SCH_JOB_DLOG("job exec done, job status:%s", SCH_GET_JOB_STATUS_STR(pJob));
|
||||
|
||||
if (!pJob->attr.syncSchedule) {
|
||||
code = schNotifyUserQueryRes(pJob);
|
||||
|
||||
if (!sync) {
|
||||
schPostJobRes(pJob, SCH_OP_EXEC);
|
||||
} else {
|
||||
schEndOperation(pJob);
|
||||
}
|
||||
|
||||
schReleaseJob(pJob->refId);
|
||||
|
@ -1595,56 +1673,29 @@ int32_t schExecStaticExplainJob(SSchedulerReq *pReq, int64_t *job, bool sync) {
|
|||
|
||||
_return:
|
||||
|
||||
schEndOperation(pJob);
|
||||
if (!sync) {
|
||||
pReq->fp(NULL, pReq->cbParam, code);
|
||||
}
|
||||
|
||||
schFreeJobImpl(pJob);
|
||||
|
||||
SCH_RET(code);
|
||||
}
|
||||
|
||||
int32_t schFetchRows(SSchJob *pJob) {
|
||||
int32_t code = 0;
|
||||
|
||||
int8_t status = SCH_GET_JOB_STATUS(pJob);
|
||||
if (status == JOB_TASK_STATUS_DROPPING) {
|
||||
SCH_JOB_ELOG("job is dropping, status:%s", jobTaskStatusStr(status));
|
||||
SCH_ERR_RET(TSDB_CODE_SCH_STATUS_ERROR);
|
||||
}
|
||||
|
||||
if (!SCH_JOB_NEED_FETCH(pJob)) {
|
||||
SCH_JOB_ELOG("no need to fetch data, status:%s", SCH_GET_JOB_STATUS_STR(pJob));
|
||||
SCH_ERR_RET(TSDB_CODE_QRY_APP_ERROR);
|
||||
}
|
||||
|
||||
if (atomic_val_compare_exchange_8(&pJob->userFetch, 0, 1) != 0) {
|
||||
SCH_JOB_ELOG("prior fetching not finished, userFetch:%d", atomic_load_8(&pJob->userFetch));
|
||||
SCH_ERR_RET(TSDB_CODE_QRY_APP_ERROR);
|
||||
}
|
||||
|
||||
if (JOB_TASK_STATUS_FAILED == status || JOB_TASK_STATUS_DROPPING == status) {
|
||||
SCH_JOB_ELOG("job failed or dropping, status:%s", jobTaskStatusStr(status));
|
||||
SCH_ERR_JRET(atomic_load_32(&pJob->errCode));
|
||||
} else if (status == JOB_TASK_STATUS_SUCCEED) {
|
||||
SCH_JOB_DLOG("job already succeed, status:%s", jobTaskStatusStr(status));
|
||||
goto _return;
|
||||
} else if (status != JOB_TASK_STATUS_PARTIAL_SUCCEED) {
|
||||
SCH_JOB_ELOG("job status error for fetch, status:%s", jobTaskStatusStr(status));
|
||||
SCH_ERR_JRET(TSDB_CODE_SCH_STATUS_ERROR);
|
||||
}
|
||||
|
||||
if (!(pJob->attr.explainMode == EXPLAIN_MODE_STATIC)) {
|
||||
SCH_ERR_JRET(schFetchFromRemote(pJob));
|
||||
tsem_wait(&pJob->rspSem);
|
||||
|
||||
status = SCH_GET_JOB_STATUS(pJob);
|
||||
if (JOB_TASK_STATUS_FAILED == status || JOB_TASK_STATUS_DROPPING == status) {
|
||||
SCH_JOB_ELOG("job failed or dropping, status:%s", jobTaskStatusStr(status));
|
||||
SCH_ERR_JRET(atomic_load_32(&pJob->errCode));
|
||||
}
|
||||
}
|
||||
|
||||
SCH_ERR_JRET(schSetJobFetchRes(pJob, pJob->userRes.fetchRes));
|
||||
|
||||
_return:
|
||||
|
||||
atomic_val_compare_exchange_8(&pJob->userFetch, 1, 0);
|
||||
schEndOperation(pJob);
|
||||
|
||||
SCH_RET(code);
|
||||
}
|
||||
|
@ -1652,50 +1703,14 @@ _return:
|
|||
int32_t schAsyncFetchRows(SSchJob *pJob) {
|
||||
int32_t code = 0;
|
||||
|
||||
int8_t status = SCH_GET_JOB_STATUS(pJob);
|
||||
if (status == JOB_TASK_STATUS_DROPPING) {
|
||||
SCH_JOB_ELOG("job is dropping, status:%s", jobTaskStatusStr(status));
|
||||
SCH_ERR_RET(TSDB_CODE_SCH_STATUS_ERROR);
|
||||
}
|
||||
|
||||
if (!SCH_JOB_NEED_FETCH(pJob)) {
|
||||
SCH_JOB_ELOG("no need to fetch data, status:%s", SCH_GET_JOB_STATUS_STR(pJob));
|
||||
SCH_ERR_RET(TSDB_CODE_QRY_APP_ERROR);
|
||||
}
|
||||
|
||||
if (atomic_val_compare_exchange_8(&pJob->userFetch, 0, 1) != 0) {
|
||||
SCH_JOB_ELOG("prior fetching not finished, userFetch:%d", atomic_load_8(&pJob->userFetch));
|
||||
SCH_ERR_RET(TSDB_CODE_QRY_APP_ERROR);
|
||||
}
|
||||
|
||||
if (JOB_TASK_STATUS_FAILED == status || JOB_TASK_STATUS_DROPPING == status) {
|
||||
SCH_JOB_ELOG("job failed or dropping, status:%s", jobTaskStatusStr(status));
|
||||
SCH_ERR_JRET(atomic_load_32(&pJob->errCode));
|
||||
} else if (status == JOB_TASK_STATUS_SUCCEED) {
|
||||
SCH_JOB_DLOG("job already succeed, status:%s", jobTaskStatusStr(status));
|
||||
goto _return;
|
||||
} else if (status != JOB_TASK_STATUS_PARTIAL_SUCCEED) {
|
||||
SCH_JOB_ELOG("job status error for fetch, status:%s", jobTaskStatusStr(status));
|
||||
SCH_ERR_JRET(TSDB_CODE_SCH_STATUS_ERROR);
|
||||
}
|
||||
|
||||
if (pJob->attr.explainMode == EXPLAIN_MODE_STATIC) {
|
||||
atomic_val_compare_exchange_8(&pJob->userFetch, 1, 0);
|
||||
|
||||
SCH_ERR_JRET(schNotifyUserFetchRes(pJob));
|
||||
} else {
|
||||
pJob->userCb = SCH_FETCH_CB;
|
||||
|
||||
SCH_ERR_JRET(schFetchFromRemote(pJob));
|
||||
schPostJobRes(pJob, SCH_OP_FETCH);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
SCH_ERR_RET(schFetchFromRemote(pJob));
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
|
||||
_return:
|
||||
|
||||
atomic_val_compare_exchange_8(&pJob->userFetch, 1, 0);
|
||||
|
||||
SCH_RET(code);
|
||||
}
|
||||
|
||||
|
||||
|
|
|
@ -315,8 +315,6 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t msgType, ch
|
|||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
atomic_val_compare_exchange_32(&pJob->remoteFetch, 1, 0);
|
||||
|
||||
SCH_ERR_JRET(schFetchFromRemote(pJob));
|
||||
|
||||
taosMemoryFreeClear(msg);
|
||||
|
@ -346,7 +344,7 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t msgType, ch
|
|||
}
|
||||
case TDMT_VND_DROP_TASK_RSP: {
|
||||
// SHOULD NEVER REACH HERE
|
||||
SCH_TASK_ELOG("invalid status to handle drop task rsp, refId:%" PRIx64, pJob->refId);
|
||||
SCH_TASK_ELOG("invalid status to handle drop task rsp, refId:0x%" PRIx64, pJob->refId);
|
||||
SCH_ERR_JRET(TSDB_CODE_SCH_INTERNAL_ERROR);
|
||||
break;
|
||||
}
|
||||
|
@ -376,7 +374,7 @@ int32_t schHandleCallback(void *param, const SDataBuf *pMsg, int32_t msgType, in
|
|||
|
||||
SSchJob *pJob = schAcquireJob(pParam->refId);
|
||||
if (NULL == pJob) {
|
||||
qWarn("QID:0x%" PRIx64 ",TID:0x%" PRIx64 "taosAcquireRef job failed, may be dropped, refId:%" PRIx64,
|
||||
qWarn("QID:0x%" PRIx64 ",TID:0x%" PRIx64 "taosAcquireRef job failed, may be dropped, refId:0x%" PRIx64,
|
||||
pParam->queryId, pParam->taskId, pParam->refId);
|
||||
SCH_ERR_JRET(TSDB_CODE_QRY_JOB_FREED);
|
||||
}
|
||||
|
@ -445,7 +443,7 @@ int32_t schHandleExplainCallback(void *param, const SDataBuf *pMsg, int32_t code
|
|||
|
||||
int32_t schHandleDropCallback(void *param, const SDataBuf *pMsg, int32_t code) {
|
||||
SSchTaskCallbackParam *pParam = (SSchTaskCallbackParam *)param;
|
||||
qDebug("QID:%" PRIx64 ",TID:%" PRIx64 " drop task rsp received, code:%x", pParam->queryId, pParam->taskId, code);
|
||||
qDebug("QID:0x%" PRIx64 ",TID:0x%" PRIx64 " drop task rsp received, code:0x%x", pParam->queryId, pParam->taskId, code);
|
||||
taosMemoryFreeClear(param);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
|
|
@ -21,6 +21,18 @@
|
|||
#include "tref.h"
|
||||
#include "trpc.h"
|
||||
|
||||
char* schGetOpStr(SCH_OP_TYPE type) {
|
||||
switch (type) {
|
||||
case SCH_OP_NULL:
|
||||
return "NULL";
|
||||
case SCH_OP_EXEC:
|
||||
return "EXEC";
|
||||
case SCH_OP_FETCH:
|
||||
return "FETCH";
|
||||
default:
|
||||
return "UNKNOWN";
|
||||
}
|
||||
}
|
||||
|
||||
void schCleanClusterHb(void* pTrans) {
|
||||
SCH_LOCK(SCH_WRITE, &schMgmt.hbLock);
|
||||
|
@ -188,7 +200,7 @@ int32_t schUpdateHbConnection(SQueryNodeEpId *epId, SSchTrans *trans) {
|
|||
SCH_UNLOCK(SCH_WRITE, &hb->lock);
|
||||
SCH_UNLOCK(SCH_READ, &schMgmt.hbLock);
|
||||
|
||||
qDebug("hb connection updated, sId:%" PRIx64 ", nodeId:%d, fqdn:%s, port:%d, pTrans:%p, pHandle:%p", schMgmt.sId,
|
||||
qDebug("hb connection updated, sId:0x%" PRIx64 ", nodeId:%d, fqdn:%s, port:%d, pTrans:%p, pHandle:%p", schMgmt.sId,
|
||||
epId->nodeId, epId->ep.fqdn, epId->ep.port, trans->pTrans, trans->pHandle);
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
|
|
|
@ -62,12 +62,14 @@ int32_t schedulerInit(SSchedulerCfg *cfg) {
|
|||
SCH_ERR_RET(TSDB_CODE_QRY_SYS_ERROR);
|
||||
}
|
||||
|
||||
qInfo("scheduler %" PRIx64 " initizlized, maxJob:%u", schMgmt.sId, schMgmt.cfg.maxJobNum);
|
||||
qInfo("scheduler 0x%" PRIx64 " initizlized, maxJob:%u", schMgmt.sId, schMgmt.cfg.maxJobNum);
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
int32_t schedulerExecJob(SSchedulerReq *pReq, int64_t *pJob, SQueryResult *pRes) {
|
||||
qDebug("scheduler sync exec job start");
|
||||
|
||||
if (NULL == pReq || NULL == pJob || NULL == pRes) {
|
||||
SCH_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT);
|
||||
}
|
||||
|
@ -76,21 +78,27 @@ int32_t schedulerExecJob(SSchedulerReq *pReq, int64_t *pJob, SQueryResult *pRes)
|
|||
}
|
||||
|
||||
int32_t schedulerAsyncExecJob(SSchedulerReq *pReq, int64_t *pJob) {
|
||||
qDebug("scheduler async exec job start");
|
||||
|
||||
int32_t code = 0;
|
||||
if (NULL == pReq || NULL == pJob) {
|
||||
code = TSDB_CODE_QRY_INVALID_INPUT;
|
||||
} else {
|
||||
code = schAsyncExecJob(pReq, pJob);
|
||||
SCH_ERR_JRET(TSDB_CODE_QRY_INVALID_INPUT);
|
||||
}
|
||||
|
||||
schAsyncExecJob(pReq, pJob);
|
||||
|
||||
_return:
|
||||
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
pReq->fp(NULL, pReq->cbParam, code);
|
||||
}
|
||||
|
||||
return code;
|
||||
SCH_RET(code);
|
||||
}
|
||||
|
||||
int32_t schedulerFetchRows(int64_t job, void **pData) {
|
||||
qDebug("scheduler sync fetch rows start");
|
||||
|
||||
if (NULL == pData) {
|
||||
SCH_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT);
|
||||
}
|
||||
|
@ -102,7 +110,8 @@ int32_t schedulerFetchRows(int64_t job, void **pData) {
|
|||
SCH_ERR_RET(TSDB_CODE_SCH_STATUS_ERROR);
|
||||
}
|
||||
|
||||
pJob->attr.syncSchedule = true;
|
||||
SCH_ERR_RET(schBeginOperation(pJob, SCH_OP_FETCH, true));
|
||||
|
||||
pJob->userRes.fetchRes = pData;
|
||||
code = schFetchRows(pJob);
|
||||
|
||||
|
@ -112,24 +121,32 @@ int32_t schedulerFetchRows(int64_t job, void **pData) {
|
|||
}
|
||||
|
||||
void schedulerAsyncFetchRows(int64_t job, schedulerFetchCallback fp, void* param) {
|
||||
qDebug("scheduler async fetch rows start");
|
||||
|
||||
int32_t code = 0;
|
||||
if (NULL == fp || NULL == param) {
|
||||
fp(NULL, param, TSDB_CODE_QRY_INVALID_INPUT);
|
||||
return;
|
||||
SCH_ERR_JRET(TSDB_CODE_QRY_INVALID_INPUT);
|
||||
}
|
||||
|
||||
SSchJob *pJob = schAcquireJob(job);
|
||||
if (NULL == pJob) {
|
||||
qError("acquire job from jobRef list failed, may be dropped, jobId:0x%" PRIx64, job);
|
||||
fp(NULL, param, TSDB_CODE_SCH_STATUS_ERROR);
|
||||
return;
|
||||
qError("acquire sch job from job list failed, may be dropped, jobId:0x%" PRIx64, job);
|
||||
SCH_ERR_JRET(TSDB_CODE_SCH_STATUS_ERROR);
|
||||
}
|
||||
|
||||
pJob->attr.syncSchedule = false;
|
||||
SCH_ERR_JRET(schBeginOperation(pJob, SCH_OP_FETCH, false));
|
||||
|
||||
pJob->userRes.fetchFp = fp;
|
||||
pJob->userRes.userParam = param;
|
||||
|
||||
/*code = */schAsyncFetchRows(pJob);
|
||||
SCH_ERR_JRET(schAsyncFetchRows(pJob));
|
||||
|
||||
_return:
|
||||
|
||||
if (code) {
|
||||
fp(NULL, param, code);
|
||||
}
|
||||
|
||||
schReleaseJob(job);
|
||||
}
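
A caller-side sketch of the async fetch API above; demoFetchCb, jobRefId, and userParam are hypothetical names, and the callback signature follows schedulerFetchCallback:

// Hypothetical callback: invoked exactly once, with fetched data on
// success or a NULL result plus a non-zero code on failure.
static void demoFetchCb(void *pResult, void *param, int32_t code) {
  if (code) {
    printf("fetch failed: %s\n", tstrerror(code));
    return;
  }
  // consume pResult here (an SRetrieveTableRsp in the non-explain path)
}

// Call site: every failure (invalid input, dropped job, begin-op error)
// is reported through the callback, so the call itself returns nothing.
schedulerAsyncFetchRows(jobRefId, demoFetchCb, userParam);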
|
||||
|
||||
|
@ -137,12 +154,12 @@ int32_t schedulerGetTasksStatus(int64_t job, SArray *pSub) {
|
|||
int32_t code = 0;
|
||||
SSchJob *pJob = schAcquireJob(job);
|
||||
if (NULL == pJob) {
|
||||
qDebug("acquire job from jobRef list failed, may not started or dropped, refId:%" PRIx64, job);
|
||||
qDebug("acquire job from jobRef list failed, may not started or dropped, refId:0x%" PRIx64, job);
|
||||
SCH_ERR_RET(TSDB_CODE_SCH_STATUS_ERROR);
|
||||
}
|
||||
|
||||
if (pJob->status < JOB_TASK_STATUS_NOT_START || pJob->levelNum <= 0 || NULL == pJob->levels) {
|
||||
qDebug("job not initialized or not executable job, refId:%" PRIx64, job);
|
||||
qDebug("job not initialized or not executable job, refId:0x%" PRIx64, job);
|
||||
SCH_ERR_JRET(TSDB_CODE_SCH_STATUS_ERROR);
|
||||
}
|
||||
|
||||
|
@ -188,21 +205,23 @@ void schedulerStopQueryHb(void *pTrans) {
|
|||
schCleanClusterHb(pTrans);
|
||||
}
|
||||
|
||||
void schedulerFreeJob(int64_t job) {
|
||||
void schedulerFreeJob(int64_t job, int32_t errCode) {
|
||||
SSchJob *pJob = schAcquireJob(job);
|
||||
if (NULL == pJob) {
|
||||
qError("acquire job from jobRef list failed, may be dropped, jobId:0x%" PRIx64, job);
|
||||
return;
|
||||
}
|
||||
|
||||
if (atomic_load_8(&pJob->userFetch) > 0) {
|
||||
schProcessOnJobDropped(pJob, TSDB_CODE_QRY_JOB_FREED);
|
||||
int32_t code = schProcessOnJobDropped(pJob, errCode);
|
||||
if (TSDB_CODE_SCH_JOB_IS_DROPPING == code) {
|
||||
SCH_JOB_DLOG("sch job is already dropping, refId:0x%" PRIx64, job);
|
||||
return;
|
||||
}
|
||||
|
||||
SCH_JOB_DLOG("start to remove job from jobRef list, refId:%" PRIx64, job);
|
||||
SCH_JOB_DLOG("start to remove job from jobRef list, refId:0x%" PRIx64, job);
|
||||
|
||||
if (taosRemoveRef(schMgmt.jobRef, job)) {
|
||||
SCH_JOB_ELOG("remove job from job list failed, refId:%" PRIx64, job);
|
||||
SCH_JOB_ELOG("remove job from job list failed, refId:0x%" PRIx64, job);
|
||||
}
|
||||
|
||||
schReleaseJob(job);
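
With the new errCode parameter the caller can record why the job is torn down; a hedged example (TSDB_CODE_TSC_QUERY_KILLED is from the error table added further below, jobRefId is a placeholder):

/* Free a job after a user cancellation: the code is routed through
   schProcessOnJobDropped() before the ref is removed. The updated
   tests below simply pass 0 to free without recording an error. */
schedulerFreeJob(jobRefId, TSDB_CODE_TSC_QUERY_KILLED);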
|
||||
|
|
|
@ -457,7 +457,7 @@ void schtFreeQueryJob(int32_t freeThread) {
|
|||
int64_t job = queryJobRefId;
|
||||
|
||||
if (job && atomic_val_compare_exchange_64(&queryJobRefId, job, 0)) {
|
||||
schedulerFreeJob(job);
|
||||
schedulerFreeJob(job, 0);
|
||||
if (freeThread) {
|
||||
if (++freeNum % schtTestPrintNum == 0) {
|
||||
printf("FreeNum:%d\n", freeNum);
|
||||
|
@ -724,7 +724,7 @@ TEST(queryTest, normalCase) {
|
|||
|
||||
schReleaseJob(job);
|
||||
|
||||
schedulerFreeJob(job);
|
||||
schedulerFreeJob(job, 0);
|
||||
|
||||
schtFreeQueryDag(&dag);
|
||||
|
||||
|
@ -828,7 +828,7 @@ TEST(queryTest, readyFirstCase) {
|
|||
|
||||
schReleaseJob(job);
|
||||
|
||||
schedulerFreeJob(job);
|
||||
schedulerFreeJob(job, 0);
|
||||
|
||||
schtFreeQueryDag(&dag);
|
||||
|
||||
|
@ -940,7 +940,7 @@ TEST(queryTest, flowCtrlCase) {
|
|||
|
||||
schReleaseJob(job);
|
||||
|
||||
schedulerFreeJob(job);
|
||||
schedulerFreeJob(job, 0);
|
||||
|
||||
schtFreeQueryDag(&dag);
|
||||
|
||||
|
@ -994,7 +994,7 @@ TEST(insertTest, normalCase) {
|
|||
ASSERT_EQ(code, 0);
|
||||
ASSERT_EQ(res.numOfRows, 20);
|
||||
|
||||
schedulerFreeJob(insertJobRefId);
|
||||
schedulerFreeJob(insertJobRefId, 0);
|
||||
|
||||
schedulerDestroy();
|
||||
}
|
||||
|
|
|
@ -132,6 +132,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_TSC_INVALID_INPUT, "Invalid tsc input")
|
|||
TAOS_DEFINE_ERROR(TSDB_CODE_TSC_STMT_API_ERROR, "Stmt API usage error")
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_TSC_STMT_TBNAME_ERROR, "Stmt table name not set")
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_TSC_STMT_CLAUSE_ERROR, "not supported stmt clause")
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_TSC_QUERY_KILLED, "Query killed")
|
||||
|
||||
// mnode-common
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_MND_APP_ERROR, "Mnode internal error")
|
||||
|
@ -455,6 +456,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_CTG_VG_META_MISMATCH, "table meta and vgroup
|
|||
TAOS_DEFINE_ERROR(TSDB_CODE_SCH_STATUS_ERROR, "scheduler status error")
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_SCH_INTERNAL_ERROR, "scheduler internal error")
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_SCH_TIMEOUT_ERROR, "Task timeout")
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_SCH_JOB_IS_DROPPING, "Job is dropping")
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_QW_MSG_ERROR, "Invalid msg order")
|
||||
|
||||
// parser
|
||||
|
|
|
@ -92,7 +92,7 @@ endi
|
|||
if $data5_db != no_strict then # strict
|
||||
return -1
|
||||
endi
|
||||
if $data6_db != 345600 then # duration
|
||||
if $data6_db != 345600m then # duration
|
||||
return -1
|
||||
endi
|
||||
if $data7_db != 1440000m,1440000m,1440000m then # keep
|
||||
|
|
|
@ -34,7 +34,7 @@ endi
|
|||
if $data24 != 1 then
|
||||
return -1
|
||||
endi
|
||||
if $data26 != 2880 then
|
||||
if $data26 != 2880m then
|
||||
return -1
|
||||
endi
|
||||
if $data27 != 14400m,14400m,14400m then
|
||||
|
@ -78,7 +78,7 @@ endi
|
|||
if $data24 != 1 then
|
||||
return -1
|
||||
endi
|
||||
if $data26 != 21600 then
|
||||
if $data26 != 21600m then
|
||||
return -1
|
||||
endi
|
||||
|
||||
|
|
|
@ -113,7 +113,7 @@ endi
|
|||
if $data5_db != no_strict then # strict
|
||||
return -1
|
||||
endi
|
||||
if $data6_db != 14400 then # duration
|
||||
if $data6_db != 14400m then # duration
|
||||
return -1
|
||||
endi
|
||||
if $data7_db != 5256000m,5256000m,5256000m then # keep
|
||||
|
|
|
@ -98,7 +98,7 @@ if $rows != 1 then
|
|||
endi
|
||||
#sql select * from information_schema.`streams`
|
||||
sql select * from information_schema.user_tables
|
||||
if $rows != 28 then
|
||||
if $rows != 29 then
|
||||
return -1
|
||||
endi
|
||||
#sql select * from information_schema.user_table_distributed
|
||||
|
@ -196,7 +196,7 @@ if $rows != 1 then
|
|||
endi
|
||||
#sql select * from performance_schema.`streams`
|
||||
sql select * from information_schema.user_tables
|
||||
if $rows != 28 then
|
||||
if $rows != 29 then
|
||||
return -1
|
||||
endi
|
||||
#sql select * from information_schema.user_table_distributed
|
||||
|
@ -210,4 +210,4 @@ if $rows != 3 then
|
|||
endi
|
||||
|
||||
system sh/exec.sh -n dnode1 -s stop -x SIGINT
|
||||
system sh/exec.sh -n dnode2 -s stop -x SIGINT
|
||||
system sh/exec.sh -n dnode2 -s stop -x SIGINT
|
||||
|
|
|
@ -0,0 +1,198 @@
|
|||
from util.log import *
|
||||
from util.cases import *
|
||||
from util.sql import *
|
||||
import numpy as np
|
||||
import random
|
||||
|
||||
|
||||
class TDTestCase:
|
||||
updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 ,
|
||||
"jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143,
|
||||
"wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"fnDebugFlag":143,
|
||||
"maxTablesPerVnode":2 ,"minTablesPerVnode":2,"tableIncStepPerVnode":2 }
|
||||
|
||||
def init(self, conn, logSql):
|
||||
tdLog.debug("start to execute %s" % __file__)
|
||||
tdSql.init(conn.cursor())
|
||||
self.vnode_disbutes = None
|
||||
self.ts = 1537146000000
|
||||
|
||||
def prepare_datas_of_distribute(self):
|
||||
|
||||
# prepare data for 20 tables distributed across different vgroups
|
||||
tdSql.execute("create database if not exists testdb keep 3650 duration 1000 vgroups 5")
|
||||
tdSql.execute(" use testdb ")
|
||||
tdSql.execute(
|
||||
'''create table stb1
|
||||
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
|
||||
tags (t0 timestamp, t1 int, t2 bigint, t3 smallint, t4 tinyint, t5 float, t6 double, t7 bool, t8 binary(16),t9 nchar(32))
|
||||
'''
|
||||
)
|
||||
|
||||
tdSql.execute(
|
||||
'''
|
||||
create table t1
|
||||
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
|
||||
'''
|
||||
)
|
||||
for i in range(20):
|
||||
tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )')
|
||||
|
||||
for i in range(9):
|
||||
tdSql.execute(
|
||||
f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
|
||||
)
|
||||
tdSql.execute(
|
||||
f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
|
||||
)
|
||||
|
||||
for i in range(1,21):
|
||||
if i ==1 or i == 4:
|
||||
continue
|
||||
else:
|
||||
tbname = "ct"+f'{i}'
|
||||
for j in range(9):
|
||||
tdSql.execute(
|
||||
f"insert into {tbname} values ( now()-{(i+j)*10}s, {1*(j+i)}, {11111*(j+i)}, {111*(j+i)}, {11*(j)}, {1.11*(j+i)}, {11.11*(j+i)}, {(j+i)%2}, 'binary{j}', 'nchar{j}', now()+{1*j}a )"
|
||||
)
|
||||
tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
|
||||
tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
|
||||
tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
|
||||
tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
|
||||
|
||||
tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
|
||||
tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
|
||||
tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
|
||||
|
||||
tdSql.execute(
|
||||
f'''insert into t1 values
|
||||
( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
|
||||
( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a )
|
||||
( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a )
|
||||
( '2021-01-01 01:01:06.000', 3, 33333, 333, 33, 3.33, 33.33, 0, "binary3", "nchar3", now()+3a )
|
||||
( '2021-05-07 01:01:10.000', 4, 44444, 444, 44, 4.44, 44.44, 1, "binary4", "nchar4", now()+4a )
|
||||
( '2021-07-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
|
||||
( '2021-09-30 01:01:16.000', 5, 55555, 555, 55, 5.55, 55.55, 0, "binary5", "nchar5", now()+5a )
|
||||
( '2022-02-01 01:01:20.000', 6, 66666, 666, 66, 6.66, 66.66, 1, "binary6", "nchar6", now()+6a )
|
||||
( '2022-10-28 01:01:26.000', 7, 00000, 000, 00, 0.00, 00.00, 1, "binary7", "nchar7", "1970-01-01 08:00:00.000" )
|
||||
( '2022-12-01 01:01:30.000', 8, -88888, -888, -88, -8.88, -88.88, 0, "binary8", "nchar8", "1969-01-01 01:00:00.000" )
|
||||
( '2022-12-31 01:01:36.000', 9, -99999999999999999, -999, -99, -9.99, -999999999999999999999.99, 1, "binary9", "nchar9", "1900-01-01 00:00:00.000" )
|
||||
( '2023-02-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
|
||||
'''
|
||||
)
|
||||
|
||||
tdLog.info(" prepare data for distributed_aggregate done! ")
|
||||
|
||||
def check_distribute_datas(self):
|
||||
# get vgroup_ids of all vgroups
|
||||
tdSql.query("show vgroups ")
|
||||
vgroups = tdSql.queryResult
|
||||
|
||||
vnode_tables={}
|
||||
|
||||
for vgroup_id in vgroups:
|
||||
vnode_tables[vgroup_id[0]]=[]
|
||||
|
||||
|
||||
# check sub_tables per vnode to make sure sub_tables are distributed across vnodes
|
||||
tdSql.query("show tables like 'ct%'")
|
||||
table_names = tdSql.queryResult
|
||||
tablenames = []
|
||||
for table_name in table_names:
|
||||
vnode_tables[table_name[6]].append(table_name[0])
|
||||
self.vnode_disbutes = vnode_tables
|
||||
|
||||
count = 0
|
||||
for k ,v in vnode_tables.items():
|
||||
if len(v)>=2:
|
||||
count+=1
|
||||
if count < 2:
|
||||
tdLog.exit(" the datas of all not satisfy sub_table has been distributed ")
|
||||
|
||||
def distribute_agg_query(self):
|
||||
# basic filter
|
||||
tdSql.query("select apercentile(c1 , 20) from stb1 where c1 is null")
|
||||
tdSql.checkRows(0)
|
||||
|
||||
tdSql.query("select apercentile(c1 , 20) from stb1 where t1=1")
|
||||
tdSql.checkData(0,0,2.800000000)
|
||||
|
||||
tdSql.query("select apercentile(c1+c2 ,100) from stb1 where c1 =1 ")
|
||||
tdSql.checkData(0,0,11112.000000000)
|
||||
|
||||
tdSql.query("select apercentile(c1 ,10 ) from stb1 where tbname=\"ct2\"")
|
||||
tdSql.checkData(0,0,2.000000000)
|
||||
|
||||
tdSql.query("select apercentile(c1,20) from stb1 partition by tbname")
|
||||
tdSql.checkRows(20)
|
||||
|
||||
tdSql.query("select apercentile(c1,20) from stb1 where t1> 4 partition by tbname")
|
||||
tdSql.checkRows(15)
|
||||
|
||||
# union all
|
||||
tdSql.query("select apercentile(c1,20) from stb1 union all select apercentile(c1,20) from stb1 ")
|
||||
tdSql.checkRows(2)
|
||||
tdSql.checkData(0,0,7.389181281)
|
||||
|
||||
# join
|
||||
|
||||
tdSql.execute(" create database if not exists db ")
|
||||
tdSql.execute(" use db ")
|
||||
tdSql.execute(" create stable st (ts timestamp , c1 int ,c2 float) tags(t1 int) ")
|
||||
tdSql.execute(" create table tb1 using st tags(1) ")
|
||||
tdSql.execute(" create table tb2 using st tags(2) ")
|
||||
|
||||
|
||||
for i in range(10):
|
||||
ts = i*10 + self.ts
|
||||
tdSql.execute(f" insert into tb1 values({ts},{i},{i}.0)")
|
||||
tdSql.execute(f" insert into tb2 values({ts},{i},{i}.0)")
|
||||
|
||||
tdSql.query("select apercentile(tb1.c1,100), apercentile(tb2.c2,100) from tb1, tb2 where tb1.ts=tb2.ts")
|
||||
tdSql.checkRows(1)
|
||||
tdSql.checkData(0,0,9.000000000)
|
||||
tdSql.checkData(0,1,9.000000000)
|
||||
|
||||
# group by
|
||||
tdSql.execute(" use testdb ")
|
||||
tdSql.query(" select max(c1),c1 from stb1 group by t1 ")
|
||||
tdSql.checkRows(20)
|
||||
tdSql.query(" select max(c1),c1 from stb1 group by c1 ")
|
||||
tdSql.checkRows(30)
|
||||
tdSql.query(" select max(c1),c2 from stb1 group by c2 ")
|
||||
tdSql.checkRows(31)
|
||||
|
||||
# partition by tbname or partition by tag
|
||||
tdSql.query("select apercentile(c1 ,10)from stb1 partition by tbname")
|
||||
query_data = tdSql.queryResult
|
||||
|
||||
# nested query support for apercentile
|
||||
tdSql.query("select apercentile(c2+2,10)+1 from (select max(c1) c2 from stb1)")
|
||||
tdSql.checkData(0,0,31.000000000)
|
||||
tdSql.query("select apercentile(c1+2,10)+1 as c2 from (select ts ,c1 ,c2 from stb1)")
|
||||
tdSql.checkData(0,0,7.560701700)
|
||||
tdSql.query("select apercentile(a+2,10)+1 as c2 from (select ts ,abs(c1) a ,c2 from stb1)")
|
||||
tdSql.checkData(0,0,7.560701700)
|
||||
|
||||
# mixup with other functions
|
||||
tdSql.query("select max(c1),count(c1),last(c2,c3),spread(c1), apercentile(c1,10) from stb1")
|
||||
tdSql.checkData(0,0,28)
|
||||
tdSql.checkData(0,1,184)
|
||||
tdSql.checkData(0,2,-99999)
|
||||
tdSql.checkData(0,3,-999)
|
||||
tdSql.checkData(0,4,28.000000000)
|
||||
tdSql.checkData(0,5,4.560701700)
|
||||
|
||||
def run(self):
|
||||
|
||||
self.prepare_datas_of_distribute()
|
||||
self.check_distribute_datas()
|
||||
self.distribute_agg_query()
|
||||
|
||||
|
||||
def stop(self):
|
||||
tdSql.close()
|
||||
tdLog.success("%s successfully executed" % __file__)
|
||||
|
||||
tdCases.addWindows(__file__, TDTestCase())
|
||||
tdCases.addLinux(__file__, TDTestCase())
|
|
@ -0,0 +1,296 @@
|
|||
from util.log import *
|
||||
from util.cases import *
|
||||
from util.sql import *
|
||||
import numpy as np
|
||||
import random
|
||||
|
||||
|
||||
class TDTestCase:
|
||||
updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 ,
|
||||
"jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143,
|
||||
"wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"fnDebugFlag":143,
|
||||
"maxTablesPerVnode":2 ,"minTablesPerVnode":2,"tableIncStepPerVnode":2 }
|
||||
|
||||
def init(self, conn, logSql):
|
||||
tdLog.debug("start to execute %s" % __file__)
|
||||
tdSql.init(conn.cursor())
|
||||
self.vnode_disbutes = None
|
||||
self.ts = 1537146000000
|
||||
|
||||
|
||||
def check_count_functions(self, tbname , col_name):
|
||||
|
||||
max_sql = f"select count({col_name}) from {tbname};"
|
||||
|
||||
same_sql = f"select sum(c) from (select {col_name} ,1 as c from {tbname} where {col_name} is not null) "
|
||||
|
||||
tdSql.query(max_sql)
|
||||
max_result = tdSql.queryResult
|
||||
|
||||
tdSql.query(same_sql)
|
||||
same_result = tdSql.queryResult
|
||||
|
||||
if max_result !=same_result:
|
||||
tdLog.exit(" count function work not as expected, sql : %s "% max_sql)
|
||||
else:
|
||||
tdLog.info(" count function work as expected, sql : %s "% max_sql)
|
||||
|
||||
|
||||
def prepare_datas_of_distribute(self):
|
||||
|
||||
# prepare data for 20 tables distributed across different vgroups
|
||||
tdSql.execute("create database if not exists testdb keep 3650 duration 1000 vgroups 5")
|
||||
tdSql.execute(" use testdb ")
|
||||
tdSql.execute(
|
||||
'''create table stb1
|
||||
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
|
||||
tags (t0 timestamp, t1 int, t2 bigint, t3 smallint, t4 tinyint, t5 float, t6 double, t7 bool, t8 binary(16),t9 nchar(32))
|
||||
'''
|
||||
)
|
||||
|
||||
tdSql.execute(
|
||||
'''
|
||||
create table t1
|
||||
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
|
||||
'''
|
||||
)
|
||||
for i in range(20):
|
||||
tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )')
|
||||
|
||||
for i in range(9):
|
||||
tdSql.execute(
|
||||
f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
|
||||
)
|
||||
tdSql.execute(
|
||||
f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
|
||||
)
|
||||
|
||||
for i in range(1,21):
|
||||
if i ==1 or i == 4:
|
||||
continue
|
||||
else:
|
||||
tbname = "ct"+f'{i}'
|
||||
for j in range(9):
|
||||
tdSql.execute(
|
||||
f"insert into {tbname} values ( now()-{(i+j)*10}s, {1*(j+i)}, {11111*(j+i)}, {111*(j+i)}, {11*(j)}, {1.11*(j+i)}, {11.11*(j+i)}, {(j+i)%2}, 'binary{j}', 'nchar{j}', now()+{1*j}a )"
|
||||
)
|
||||
tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
|
||||
tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
|
||||
tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
|
||||
tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
|
||||
|
||||
tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
|
||||
tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
|
||||
tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
|
||||
|
||||
tdSql.execute(
|
||||
f'''insert into t1 values
|
||||
( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
|
||||
( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a )
|
||||
( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a )
|
||||
( '2021-01-01 01:01:06.000', 3, 33333, 333, 33, 3.33, 33.33, 0, "binary3", "nchar3", now()+3a )
|
||||
( '2021-05-07 01:01:10.000', 4, 44444, 444, 44, 4.44, 44.44, 1, "binary4", "nchar4", now()+4a )
|
||||
( '2021-07-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
|
||||
( '2021-09-30 01:01:16.000', 5, 55555, 555, 55, 5.55, 55.55, 0, "binary5", "nchar5", now()+5a )
|
||||
( '2022-02-01 01:01:20.000', 6, 66666, 666, 66, 6.66, 66.66, 1, "binary6", "nchar6", now()+6a )
|
||||
( '2022-10-28 01:01:26.000', 7, 00000, 000, 00, 0.00, 00.00, 1, "binary7", "nchar7", "1970-01-01 08:00:00.000" )
|
||||
( '2022-12-01 01:01:30.000', 8, -88888, -888, -88, -8.88, -88.88, 0, "binary8", "nchar8", "1969-01-01 01:00:00.000" )
|
||||
( '2022-12-31 01:01:36.000', 9, -99999999999999999, -999, -99, -9.99, -999999999999999999999.99, 1, "binary9", "nchar9", "1900-01-01 00:00:00.000" )
|
||||
( '2023-02-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
|
||||
'''
|
||||
)
|
||||
|
||||
tdLog.info(" prepare data for distributed_aggregate done! ")
|
||||
|
||||
def check_distribute_datas(self):
|
||||
# get vgroup_ids of all vgroups
|
||||
tdSql.query("show vgroups ")
|
||||
vgroups = tdSql.queryResult
|
||||
|
||||
vnode_tables={}
|
||||
|
||||
for vgroup_id in vgroups:
|
||||
vnode_tables[vgroup_id[0]]=[]
|
||||
|
||||
|
||||
# check sub_tables per vnode to make sure sub_tables are distributed across vnodes
|
||||
tdSql.query("show tables like 'ct%'")
|
||||
table_names = tdSql.queryResult
|
||||
tablenames = []
|
||||
for table_name in table_names:
|
||||
vnode_tables[table_name[6]].append(table_name[0])
|
||||
self.vnode_disbutes = vnode_tables
|
||||
|
||||
count = 0
|
||||
for k ,v in vnode_tables.items():
|
||||
if len(v)>=2:
|
||||
count+=1
|
||||
if count < 2:
|
||||
tdLog.exit(" the datas of all not satisfy sub_table has been distributed ")
|
||||
|
||||
def check_count_distribute_diff_vnode(self,col_name):
|
||||
|
||||
vgroup_ids = []
|
||||
for k ,v in self.vnode_disbutes.items():
|
||||
if len(v)>=2:
|
||||
vgroup_ids.append(k)
|
||||
|
||||
distribute_tbnames = []
|
||||
|
||||
for vgroup_id in vgroup_ids:
|
||||
vnode_tables = self.vnode_disbutes[vgroup_id]
|
||||
distribute_tbnames.append(random.sample(vnode_tables,1)[0])
|
||||
tbname_ins = ""
|
||||
for tbname in distribute_tbnames:
|
||||
tbname_ins += "'%s' ,"%tbname
|
||||
|
||||
tbname_filters = tbname_ins[:-1]
|
||||
|
||||
max_sql = f"select count({col_name}) from stb1 where tbname in ({tbname_filters});"
|
||||
|
||||
same_sql = f"select sum(c) from (select {col_name} ,1 as c from stb1 where tbname in ({tbname_filters}) and {col_name} is not null) "
|
||||
|
||||
tdSql.query(max_sql)
|
||||
max_result = tdSql.queryResult
|
||||
|
||||
tdSql.query(same_sql)
|
||||
same_result = tdSql.queryResult
|
||||
|
||||
if max_result !=same_result:
|
||||
tdLog.exit(" count function work not as expected, sql : %s "% max_sql)
|
||||
else:
|
||||
tdLog.info(" count function work as expected, sql : %s "% max_sql)
|
||||
|
||||
def check_count_status(self):
|
||||
# check that the count function works as expected
|
||||
|
||||
tdSql.query("show tables like 'ct%'")
|
||||
table_names = tdSql.queryResult
|
||||
tablenames = []
|
||||
for table_name in table_names:
|
||||
tablenames.append(table_name[0])
|
||||
|
||||
tdSql.query("desc stb1")
|
||||
col_names = tdSql.queryResult
|
||||
|
||||
colnames = []
|
||||
for col_name in col_names:
|
||||
if col_name[1] in ["INT" ,"BIGINT" ,"SMALLINT" ,"TINYINT" , "FLOAT" ,"DOUBLE"]:
|
||||
colnames.append(col_name[0])
|
||||
|
||||
for tablename in tablenames:
|
||||
for colname in colnames:
|
||||
self.check_count_functions(tablename,colname)
|
||||
|
||||
# check count function across different vnodes
|
||||
|
||||
for colname in colnames:
|
||||
if colname.startswith("c"):
|
||||
self.check_count_distribute_diff_vnode(colname)
|
||||
else:
|
||||
# self.check_count_distribute_diff_vnode(colname) # bug for tag
|
||||
pass
|
||||
|
||||
|
||||
def distribute_agg_query(self):
|
||||
# basic filter
|
||||
tdSql.query("select count(c1) from stb1 ")
|
||||
tdSql.checkData(0,0,184)
|
||||
|
||||
tdSql.query("select count(c1) from stb1 where t1=1")
|
||||
tdSql.checkData(0,0,9)
|
||||
|
||||
tdSql.query("select count(c1+c2) from stb1 where c1 =1 ")
|
||||
tdSql.checkData(0,0,2)
|
||||
|
||||
tdSql.query("select count(c1) from stb1 where tbname=\"ct2\"")
|
||||
tdSql.checkData(0,0,9)
|
||||
|
||||
tdSql.query("select count(c1) from stb1 partition by tbname")
|
||||
tdSql.checkRows(20)
|
||||
|
||||
tdSql.query("select count(c1) from stb1 where t1> 4 partition by tbname")
|
||||
tdSql.checkRows(15)
|
||||
|
||||
# union all
|
||||
tdSql.query("select count(c1) from stb1 union all select count(c1) from stb1 ")
|
||||
tdSql.checkRows(2)
|
||||
tdSql.checkData(0,0,184)
|
||||
|
||||
# join
|
||||
|
||||
tdSql.execute(" create database if not exists db ")
|
||||
tdSql.execute(" use db ")
|
||||
tdSql.execute(" create stable st (ts timestamp , c1 int ,c2 float) tags(t1 int) ")
|
||||
tdSql.execute(" create table tb1 using st tags(1) ")
|
||||
tdSql.execute(" create table tb2 using st tags(2) ")
|
||||
|
||||
|
||||
for i in range(10):
|
||||
ts = i*10 + self.ts
|
||||
tdSql.execute(f" insert into tb1 values({ts},{i},{i}.0)")
|
||||
tdSql.execute(f" insert into tb2 values({ts},{i},{i}.0)")
|
||||
|
||||
tdSql.query("select count(tb1.c1), count(tb2.c2) from tb1, tb2 where tb1.ts=tb2.ts")
|
||||
tdSql.checkRows(1)
|
||||
tdSql.checkData(0,0,10)
|
||||
tdSql.checkData(0,1,10)
|
||||
|
||||
# group by
|
||||
tdSql.execute(" use testdb ")
|
||||
|
||||
tdSql.query(" select count(*) from stb1 ")
|
||||
tdSql.checkData(0,0,187)
|
||||
tdSql.query(" select count(*) from stb1 group by t1 ")
|
||||
tdSql.checkRows(20)
|
||||
tdSql.query(" select count(*) from stb1 group by c1 ")
|
||||
tdSql.checkRows(30)
|
||||
tdSql.query(" select count(*) from stb1 group by c2 ")
|
||||
tdSql.checkRows(31)
|
||||
|
||||
# partition by tbname or partition by tag
|
||||
tdSql.query("select max(c1),tbname from stb1 partition by tbname")
|
||||
query_data = tdSql.queryResult
|
||||
|
||||
for row in query_data:
|
||||
tbname = row[1]
|
||||
tdSql.query(" select max(c1) from %s "%tbname)
|
||||
tdSql.checkData(0,0,row[0])
|
||||
|
||||
tdSql.query("select max(c1),tbname from stb1 partition by t1")
|
||||
query_data = tdSql.queryResult
|
||||
|
||||
for row in query_data:
|
||||
tbname = row[1]
|
||||
tdSql.query(" select max(c1) from %s "%tbname)
|
||||
tdSql.checkData(0,0,row[0])
|
||||
|
||||
# nested query support for count
|
||||
tdSql.query("select abs(c2+2)+1 from (select count(c1) c2 from stb1)")
|
||||
tdSql.checkData(0,0,187.000000000)
|
||||
tdSql.query("select count(c1+2) as c2 from (select ts ,c1 ,c2 from stb1)")
|
||||
tdSql.checkData(0,0,184)
|
||||
tdSql.query("select count(a+2) as c2 from (select ts ,abs(c1) a ,c2 from stb1)")
|
||||
tdSql.checkData(0,0,184)
|
||||
|
||||
# mixup with other functions
|
||||
tdSql.query("select max(c1),count(c1),last(c2,c3) from stb1")
|
||||
tdSql.checkData(0,0,28)
|
||||
tdSql.checkData(0,1,184)
|
||||
tdSql.checkData(0,2,-99999)
|
||||
tdSql.checkData(0,3,-999)
|
||||
|
||||
def run(self):
|
||||
|
||||
self.prepare_datas_of_distribute()
|
||||
self.check_distribute_datas()
|
||||
self.check_count_status()
|
||||
self.distribute_agg_query()
|
||||
|
||||
|
||||
def stop(self):
|
||||
tdSql.close()
|
||||
tdLog.success("%s successfully executed" % __file__)
|
||||
|
||||
tdCases.addWindows(__file__, TDTestCase())
|
||||
tdCases.addLinux(__file__, TDTestCase())
|
|
@ -0,0 +1,293 @@
|
|||
from util.log import *
|
||||
from util.cases import *
|
||||
from util.sql import *
|
||||
import numpy as np
|
||||
import random
|
||||
|
||||
|
||||
class TDTestCase:
|
||||
updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 ,
|
||||
"jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143,
|
||||
"wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"fnDebugFlag":143,
|
||||
"maxTablesPerVnode":2 ,"minTablesPerVnode":2,"tableIncStepPerVnode":2 }
|
||||
|
||||
def init(self, conn, logSql):
|
||||
tdLog.debug("start to execute %s" % __file__)
|
||||
tdSql.init(conn.cursor())
|
||||
self.vnode_disbutes = None
|
||||
self.ts = 1537146000000
|
||||
|
||||
|
||||
def check_max_functions(self, tbname , col_name):
|
||||
|
||||
max_sql = f"select max({col_name}) from {tbname};"
|
||||
|
||||
same_sql = f"select {col_name} from {tbname} order by {col_name} desc limit 1"
|
||||
|
||||
tdSql.query(max_sql)
|
||||
max_result = tdSql.queryResult
|
||||
|
||||
tdSql.query(same_sql)
|
||||
same_result = tdSql.queryResult
|
||||
|
||||
if max_result !=same_result:
|
||||
tdLog.exit(" max function work not as expected, sql : %s "% max_sql)
|
||||
else:
|
||||
tdLog.info(" max function work as expected, sql : %s "% max_sql)
|
||||
|
||||
|
||||
def prepare_datas_of_distribute(self):
|
||||
|
||||
# prepare data for 20 tables distributed across different vgroups
|
||||
tdSql.execute("create database if not exists testdb keep 3650 duration 1000 vgroups 5")
|
||||
tdSql.execute(" use testdb ")
|
||||
tdSql.execute(
|
||||
'''create table stb1
|
||||
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
|
||||
tags (t0 timestamp, t1 int, t2 bigint, t3 smallint, t4 tinyint, t5 float, t6 double, t7 bool, t8 binary(16),t9 nchar(32))
|
||||
'''
|
||||
)
|
||||
|
||||
tdSql.execute(
|
||||
'''
|
||||
create table t1
|
||||
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
|
||||
'''
|
||||
)
|
||||
for i in range(20):
|
||||
tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )')
|
||||
|
||||
for i in range(9):
|
||||
tdSql.execute(
|
||||
f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
|
||||
)
|
||||
tdSql.execute(
|
||||
f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
|
||||
)
|
||||
|
||||
for i in range(1,21):
|
||||
if i ==1 or i == 4:
|
||||
continue
|
||||
else:
|
||||
tbname = "ct"+f'{i}'
|
||||
for j in range(9):
|
||||
tdSql.execute(
|
||||
f"insert into {tbname} values ( now()-{(i+j)*10}s, {1*(j+i)}, {11111*(j+i)}, {111*(j+i)}, {11*(j)}, {1.11*(j+i)}, {11.11*(j+i)}, {(j+i)%2}, 'binary{j}', 'nchar{j}', now()+{1*j}a )"
|
||||
)
|
||||
tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
|
||||
tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
|
||||
tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
|
||||
tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
|
||||
|
||||
tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
|
||||
tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
|
||||
tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
|
||||
|
||||
tdSql.execute(
|
||||
f'''insert into t1 values
|
||||
( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
|
||||
( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a )
|
||||
( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a )
|
||||
( '2021-01-01 01:01:06.000', 3, 33333, 333, 33, 3.33, 33.33, 0, "binary3", "nchar3", now()+3a )
|
||||
( '2021-05-07 01:01:10.000', 4, 44444, 444, 44, 4.44, 44.44, 1, "binary4", "nchar4", now()+4a )
|
||||
( '2021-07-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
|
||||
( '2021-09-30 01:01:16.000', 5, 55555, 555, 55, 5.55, 55.55, 0, "binary5", "nchar5", now()+5a )
|
||||
( '2022-02-01 01:01:20.000', 6, 66666, 666, 66, 6.66, 66.66, 1, "binary6", "nchar6", now()+6a )
|
||||
( '2022-10-28 01:01:26.000', 7, 00000, 000, 00, 0.00, 00.00, 1, "binary7", "nchar7", "1970-01-01 08:00:00.000" )
|
||||
( '2022-12-01 01:01:30.000', 8, -88888, -888, -88, -8.88, -88.88, 0, "binary8", "nchar8", "1969-01-01 01:00:00.000" )
|
||||
( '2022-12-31 01:01:36.000', 9, -99999999999999999, -999, -99, -9.99, -999999999999999999999.99, 1, "binary9", "nchar9", "1900-01-01 00:00:00.000" )
|
||||
( '2023-02-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
|
||||
'''
|
||||
)
|
||||
|
||||
tdLog.info(" prepare data for distributed_aggregate done! ")
|
||||
|
||||
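    # note: with vgroups 5 and the low maxTablesPerVnode forced in updatecfgDict,
    # the 20 sub tables are expected to spread over several vnodes; the checks
    # below depend on that layout.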
    def check_distribute_datas(self):
        # get the vgroup_ids of all vgroups
        tdSql.query("show vgroups ")
        vgroups = tdSql.queryResult

        vnode_tables = {}

        for vgroup_id in vgroups:
            vnode_tables[vgroup_id[0]] = []

        # map the sub tables of each vnode, to make sure the sub tables are distributed
        tdSql.query("show tables like 'ct%'")
        table_names = tdSql.queryResult

        for table_name in table_names:
            vnode_tables[table_name[6]].append(table_name[0])
        self.vnode_disbutes = vnode_tables

        count = 0
        for k, v in vnode_tables.items():
            if len(v) >= 2:
                count += 1
        if count < 2:
            tdLog.exit(" the sub tables are not distributed across enough vnodes ")
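    # note: the check above requires at least two vnodes owning two or more sub
    # tables each; otherwise the "distributed" aggregate would effectively be
    # served by a single vnode and prove nothing.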
    def check_max_distribute_diff_vnode(self, col_name):

        vgroup_ids = []
        for k, v in self.vnode_disbutes.items():
            if len(v) >= 2:
                vgroup_ids.append(k)

        distribute_tbnames = []

        # pick one sub table from every vnode that owns at least two of them
        for vgroup_id in vgroup_ids:
            vnode_tables = self.vnode_disbutes[vgroup_id]
            distribute_tbnames.append(random.sample(vnode_tables, 1)[0])
        tbname_ins = ""
        for tbname in distribute_tbnames:
            tbname_ins += "'%s' ," % tbname

        tbname_filters = tbname_ins[:-1]   # strip the trailing comma

        max_sql = f"select max({col_name}) from stb1 where tbname in ({tbname_filters});"

        same_sql = f"select {col_name} from stb1 where tbname in ({tbname_filters}) order by {col_name} desc limit 1"

        tdSql.query(max_sql)
        max_result = tdSql.queryResult

        tdSql.query(same_sql)
        same_result = tdSql.queryResult

        if max_result != same_result:
            tdLog.exit(" max function does not work as expected, sql : %s " % max_sql)
        else:
            tdLog.info(" max function works as expected, sql : %s " % max_sql)
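    # note: the cross-vnode check relies on the identity
    #   max(col) == first row of "select col ... order by col desc limit 1",
    # so the distributed aggregate can be validated against a plain projection
    # over the same subset of sub tables.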
    def check_max_status(self):
        # check that the max function works on every sub table and column

        tdSql.query("show tables like 'ct%'")
        table_names = tdSql.queryResult
        tablenames = []
        for table_name in table_names:
            tablenames.append(table_name[0])

        tdSql.query("desc stb1")
        col_names = tdSql.queryResult

        colnames = []
        for col_name in col_names:
            if col_name[1] in ["INT", "BIGINT", "SMALLINT", "TINYINT", "FLOAT", "DOUBLE"]:
                colnames.append(col_name[0])

        for tablename in tablenames:
            for colname in colnames:
                self.check_max_functions(tablename, colname)

        # check the max function across different vnodes

        for colname in colnames:
            if colname.startswith("c"):
                self.check_max_distribute_diff_vnode(colname)
            else:
                # self.check_max_distribute_diff_vnode(colname) # bug for tag
                pass
    def distribute_agg_query(self):
        # basic filter
        tdSql.query("select max(c1) from stb1 where c1 is null")
        tdSql.checkRows(0)

        tdSql.query("select max(c1) from stb1 where t1=1")
        tdSql.checkData(0, 0, 10)

        tdSql.query("select max(c1+c2) from stb1 where c1 =1 ")
        tdSql.checkData(0, 0, 11112.000000000)

        tdSql.query("select max(c1) from stb1 where tbname=\"ct2\"")
        tdSql.checkData(0, 0, 10)

        tdSql.query("select max(c1) from stb1 partition by tbname")
        tdSql.checkRows(20)

        tdSql.query("select max(c1) from stb1 where t1> 4 partition by tbname")
        tdSql.checkRows(15)

        # union all
        tdSql.query("select max(c1) from stb1 union all select max(c1) from stb1 ")
        tdSql.checkRows(2)
        tdSql.checkData(0, 0, 28)

        # join

        tdSql.execute(" create database if not exists db ")
        tdSql.execute(" use db ")
        tdSql.execute(" create stable st (ts timestamp , c1 int ,c2 float) tags(t1 int) ")
        tdSql.execute(" create table tb1 using st tags(1) ")
        tdSql.execute(" create table tb2 using st tags(2) ")

        for i in range(10):
            ts = i*10 + self.ts
            tdSql.execute(f" insert into tb1 values({ts},{i},{i}.0)")
            tdSql.execute(f" insert into tb2 values({ts},{i},{i}.0)")

        tdSql.query("select max(tb1.c1), tb2.c2 from tb1, tb2 where tb1.ts=tb2.ts")
        tdSql.checkRows(1)
        tdSql.checkData(0, 0, 9)
        tdSql.checkData(0, 1, 9.00000)

        # group by
        tdSql.execute(" use testdb ")
        tdSql.query(" select max(c1),c1 from stb1 group by t1 ")
        tdSql.checkRows(20)
        tdSql.query(" select max(c1),c1 from stb1 group by c1 ")
        tdSql.checkRows(30)
        tdSql.query(" select max(c1),c2 from stb1 group by c2 ")
        tdSql.checkRows(31)

        # partition by tbname or partition by tag
        tdSql.query("select max(c1),tbname from stb1 partition by tbname")
        query_data = tdSql.queryResult

        for row in query_data:
            tbname = row[1]
            tdSql.query(" select max(c1) from %s " % tbname)
            tdSql.checkData(0, 0, row[0])

        tdSql.query("select max(c1),tbname from stb1 partition by t1")
        query_data = tdSql.queryResult

        for row in query_data:
            tbname = row[1]
            tdSql.query(" select max(c1) from %s " % tbname)
            tdSql.checkData(0, 0, row[0])

        # nested query support for max
        tdSql.query("select abs(c2+2)+1 from (select max(c1) c2 from stb1)")
        tdSql.checkData(0, 0, 31.000000000)
        tdSql.query("select max(c1+2)+1 as c2 from (select ts ,c1 ,c2 from stb1)")
        tdSql.checkData(0, 0, 31.000000000)
        tdSql.query("select max(a+2)+1 as c2 from (select ts ,abs(c1) a ,c2 from stb1)")
        tdSql.checkData(0, 0, 31.000000000)

        # mix up with other functions
        tdSql.query("select max(c1),count(c1),last(c2,c3) from stb1")
        tdSql.checkData(0, 0, 28)
        tdSql.checkData(0, 1, 184)
        tdSql.checkData(0, 2, -99999)
        tdSql.checkData(0, 3, -999)

    def run(self):

        self.prepare_datas_of_distribute()
        self.check_distribute_datas()
        self.check_max_status()
        self.distribute_agg_query()


    def stop(self):
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)

tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
@ -0,0 +1,294 @@
from util.log import *
from util.cases import *
from util.sql import *
import numpy as np
import random


class TDTestCase:
    updatecfgDict = {'debugFlag': 143, "cDebugFlag": 143, "uDebugFlag": 143, "rpcDebugFlag": 143, "tmrDebugFlag": 143,
                     "jniDebugFlag": 143, "simDebugFlag": 143, "dDebugFlag": 143, "vDebugFlag": 143, "mDebugFlag": 143, "qDebugFlag": 143,
                     "wDebugFlag": 143, "sDebugFlag": 143, "tsdbDebugFlag": 143, "tqDebugFlag": 143, "fsDebugFlag": 143, "fnDebugFlag": 143,
                     "maxTablesPerVnode": 2, "minTablesPerVnode": 2, "tableIncStepPerVnode": 2}

    def init(self, conn, logSql):
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor())
        self.vnode_disbutes = None
        self.ts = 1537146000000
    def check_min_functions(self, tbname, col_name):

        min_sql = f"select min({col_name}) from {tbname};"

        same_sql = f"select {col_name} from {tbname} where {col_name} is not null order by {col_name} asc limit 1"

        tdSql.query(min_sql)
        min_result = tdSql.queryResult

        tdSql.query(same_sql)
        same_result = tdSql.queryResult

        if min_result != same_result:
            tdLog.exit(" min function does not work as expected, sql : %s " % min_sql)
        else:
            tdLog.info(" min function works as expected, sql : %s " % min_sql)
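    # note: the reference query filters "is not null" before ordering ascending,
    # because min() skips NULL values while a bare "order by ... asc limit 1"
    # could otherwise pick a NULL row.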
    def prepare_datas_of_distribute(self):

        # prepare data for 20 tables distributed across different vgroups
        tdSql.execute("create database if not exists testdb keep 3650 duration 1000 vgroups 5")
        tdSql.execute(" use testdb ")
        tdSql.execute(
            '''create table stb1
            (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
            tags (t0 timestamp, t1 int, t2 bigint, t3 smallint, t4 tinyint, t5 float, t6 double, t7 bool, t8 binary(16),t9 nchar(32))
            '''
        )

        tdSql.execute(
            '''
            create table t1
            (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
            '''
        )
        for i in range(20):
            tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )')

        for i in range(9):
            tdSql.execute(
                f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
            )
            tdSql.execute(
                f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
            )

        for i in range(1, 21):
            if i == 1 or i == 4:
                continue
            else:
                tbname = "ct" + f'{i}'
                for j in range(9):
                    tdSql.execute(
                        f"insert into {tbname} values ( now()-{(i+j)*10}s, {1*(j+i)}, {11111*(j+i)}, {111*(j+i)}, {11*(j)}, {1.11*(j+i)}, {11.11*(j+i)}, {(j+i)%2}, 'binary{j}', 'nchar{j}', now()+{1*j}a )"
                    )
        tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
        tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
        tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
        tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")

        tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
        tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
        tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")

        tdSql.execute(
            f'''insert into t1 values
            ( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            ( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a )
            ( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a )
            ( '2021-01-01 01:01:06.000', 3, 33333, 333, 33, 3.33, 33.33, 0, "binary3", "nchar3", now()+3a )
            ( '2021-05-07 01:01:10.000', 4, 44444, 444, 44, 4.44, 44.44, 1, "binary4", "nchar4", now()+4a )
            ( '2021-07-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            ( '2021-09-30 01:01:16.000', 5, 55555, 555, 55, 5.55, 55.55, 0, "binary5", "nchar5", now()+5a )
            ( '2022-02-01 01:01:20.000', 6, 66666, 666, 66, 6.66, 66.66, 1, "binary6", "nchar6", now()+6a )
            ( '2022-10-28 01:01:26.000', 7, 00000, 000, 00, 0.00, 00.00, 1, "binary7", "nchar7", "1970-01-01 08:00:00.000" )
            ( '2022-12-01 01:01:30.000', 8, -88888, -888, -88, -8.88, -88.88, 0, "binary8", "nchar8", "1969-01-01 01:00:00.000" )
            ( '2022-12-31 01:01:36.000', 9, -99999999999999999, -999, -99, -9.99, -999999999999999999999.99, 1, "binary9", "nchar9", "1900-01-01 00:00:00.000" )
            ( '2023-02-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            '''
        )

        tdLog.info(" prepare data for distributed_aggregate done! ")
    def check_distribute_datas(self):
        # get the vgroup_ids of all vgroups
        tdSql.query("show vgroups ")
        vgroups = tdSql.queryResult

        vnode_tables = {}

        for vgroup_id in vgroups:
            vnode_tables[vgroup_id[0]] = []

        # map the sub tables of each vnode, to make sure the sub tables are distributed
        tdSql.query("show tables like 'ct%'")
        table_names = tdSql.queryResult

        for table_name in table_names:
            vnode_tables[table_name[6]].append(table_name[0])
        self.vnode_disbutes = vnode_tables

        count = 0
        for k, v in vnode_tables.items():
            if len(v) >= 2:
                count += 1
        if count < 2:
            tdLog.exit(" the sub tables are not distributed across enough vnodes ")
    def check_min_distribute_diff_vnode(self, col_name):

        vgroup_ids = []
        for k, v in self.vnode_disbutes.items():
            if len(v) >= 2:
                vgroup_ids.append(k)

        distribute_tbnames = []

        # pick one sub table from every vnode that owns at least two of them
        for vgroup_id in vgroup_ids:
            vnode_tables = self.vnode_disbutes[vgroup_id]
            distribute_tbnames.append(random.sample(vnode_tables, 1)[0])
        tbname_ins = ""
        for tbname in distribute_tbnames:
            tbname_ins += "'%s' ," % tbname

        tbname_filters = tbname_ins[:-1]   # strip the trailing comma

        min_sql = f"select min({col_name}) from stb1 where tbname in ({tbname_filters});"

        same_sql = f"select {col_name} from stb1 where tbname in ({tbname_filters}) and {col_name} is not null order by {col_name} asc limit 1"

        tdSql.query(min_sql)
        min_result = tdSql.queryResult

        tdSql.query(same_sql)
        same_result = tdSql.queryResult

        if min_result != same_result:
            tdLog.exit(" min function does not work as expected, sql : %s " % min_sql)
        else:
            tdLog.info(" min function works as expected, sql : %s " % min_sql)
    def check_min_status(self):
        # check that the min function works on every sub table and column

        tdSql.query("show tables like 'ct%'")
        table_names = tdSql.queryResult
        tablenames = []
        for table_name in table_names:
            tablenames.append(table_name[0])

        tdSql.query("desc stb1")
        col_names = tdSql.queryResult

        colnames = []
        for col_name in col_names:
            if col_name[1] in ["INT", "BIGINT", "SMALLINT", "TINYINT", "FLOAT", "DOUBLE"]:
                colnames.append(col_name[0])

        for tablename in tablenames:
            for colname in colnames:
                self.check_min_functions(tablename, colname)

        # check the min function across different vnodes

        for colname in colnames:
            if colname.startswith("c"):
                self.check_min_distribute_diff_vnode(colname)
            else:
                # self.check_min_distribute_diff_vnode(colname) # bug for tag
                pass
    def distribute_agg_query(self):
        # basic filter
        tdSql.query("select min(c1) from stb1 where c1 is null")
        tdSql.checkRows(0)

        tdSql.query("select min(c1) from stb1 where t1=1")
        tdSql.checkData(0, 0, 2)

        tdSql.query("select min(c1+c2) from stb1 where c1 =1 ")
        tdSql.checkData(0, 0, 11112.000000000)

        tdSql.query("select min(c1) from stb1 where tbname=\"ct2\"")
        tdSql.checkData(0, 0, 2)

        tdSql.query("select min(c1) from stb1 partition by tbname")
        tdSql.checkRows(20)

        tdSql.query("select min(c1) from stb1 where t1> 4 partition by tbname")
        tdSql.checkRows(15)

        # union all
        tdSql.query("select min(c1) from stb1 union all select min(c1) from stb1 ")
        tdSql.checkRows(2)
        tdSql.checkData(0, 0, 0)

        # join

        tdSql.execute(" create database if not exists db ")
        tdSql.execute(" use db ")
        tdSql.execute(" create stable st (ts timestamp , c1 int ,c2 float) tags(t1 int) ")
        tdSql.execute(" create table tb1 using st tags(1) ")
        tdSql.execute(" create table tb2 using st tags(2) ")

        for i in range(10):
            ts = i*10 + self.ts
            tdSql.execute(f" insert into tb1 values({ts},{i},{i}.0)")
            tdSql.execute(f" insert into tb2 values({ts},{i},{i}.0)")

        tdSql.query("select min(tb1.c1), tb2.c2 from tb1, tb2 where tb1.ts=tb2.ts")
        tdSql.checkRows(1)
        tdSql.checkData(0, 0, 0)
        tdSql.checkData(0, 1, 0.00000)

        # group by
        tdSql.execute(" use testdb ")
        tdSql.query(" select min(c1),c1 from stb1 group by t1 ")
        tdSql.checkRows(20)
        tdSql.query(" select min(c1),c1 from stb1 group by c1 ")
        tdSql.checkRows(30)
        tdSql.query(" select min(c1),c2 from stb1 group by c2 ")
        tdSql.checkRows(31)

        # partition by tbname or partition by tag
        tdSql.query("select min(c1),tbname from stb1 partition by tbname")
        query_data = tdSql.queryResult

        for row in query_data:
            tbname = row[1]
            tdSql.query(" select min(c1) from %s " % tbname)
            tdSql.checkData(0, 0, row[0])

        tdSql.query("select min(c1),tbname from stb1 partition by t1")
        query_data = tdSql.queryResult

        for row in query_data:
            tbname = row[1]
            tdSql.query(" select min(c1) from %s " % tbname)
            tdSql.checkData(0, 0, row[0])

        # nested query support for min
        tdSql.query("select abs(c2+2)+1 from (select min(c1) c2 from stb1)")
        tdSql.checkData(0, 0, 3.000000000)
        tdSql.query("select min(c1+2)+1 as c2 from (select ts ,c1 ,c2 from stb1)")
        tdSql.checkData(0, 0, 3.000000000)
        tdSql.query("select min(a+2)+1 as c2 from (select ts ,abs(c1) a ,c2 from stb1)")
        tdSql.checkData(0, 0, 3.000000000)

        # mix up with other functions
        tdSql.query("select max(c1),count(c1),last(c2,c3),min(c1) from stb1")
        tdSql.checkData(0, 0, 28)
        tdSql.checkData(0, 1, 184)
        tdSql.checkData(0, 2, -99999)
        tdSql.checkData(0, 3, -999)
        tdSql.checkData(0, 4, 0)

    def run(self):

        self.prepare_datas_of_distribute()
        self.check_distribute_datas()
        self.check_min_status()
        self.distribute_agg_query()


    def stop(self):
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)

tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
@ -0,0 +1,281 @@
from util.log import *
from util.cases import *
from util.sql import *
import numpy as np
import random


class TDTestCase:
    updatecfgDict = {'debugFlag': 143, "cDebugFlag": 143, "uDebugFlag": 143, "rpcDebugFlag": 143, "tmrDebugFlag": 143,
                     "jniDebugFlag": 143, "simDebugFlag": 143, "dDebugFlag": 143, "vDebugFlag": 143, "mDebugFlag": 143, "qDebugFlag": 143,
                     "wDebugFlag": 143, "sDebugFlag": 143, "tsdbDebugFlag": 143, "tqDebugFlag": 143, "fsDebugFlag": 143, "fnDebugFlag": 143,
                     "maxTablesPerVnode": 2, "minTablesPerVnode": 2, "tableIncStepPerVnode": 2}

    def init(self, conn, logSql):
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor())
        self.vnode_disbutes = None
        self.ts = 1537146000000
    def check_spread_functions(self, tbname, col_name):

        spread_sql = f"select spread({col_name}) from {tbname};"

        same_sql = f"select max({col_name})-min({col_name}) from {tbname}"

        tdSql.query(spread_sql)
        spread_result = tdSql.queryResult

        tdSql.query(same_sql)
        same_result = tdSql.queryResult

        if spread_result != same_result:
            tdLog.exit(" spread function does not work as expected, sql : %s " % spread_sql)
        else:
            tdLog.info(" spread function works as expected, sql : %s " % spread_sql)
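    # note: spread(col) is checked against max(col)-min(col), which is its
    # definition, so the two queries must return the same value on every table
    # and column.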
    def prepare_datas_of_distribute(self):

        # prepare data for 20 tables distributed across different vgroups
        tdSql.execute("create database if not exists testdb keep 3650 duration 1000 vgroups 5")
        tdSql.execute(" use testdb ")
        tdSql.execute(
            '''create table stb1
            (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
            tags (t0 timestamp, t1 int, t2 bigint, t3 smallint, t4 tinyint, t5 float, t6 double, t7 bool, t8 binary(16),t9 nchar(32))
            '''
        )

        tdSql.execute(
            '''
            create table t1
            (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
            '''
        )
        for i in range(20):
            tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )')

        for i in range(9):
            tdSql.execute(
                f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
            )
            tdSql.execute(
                f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
            )

        for i in range(1, 21):
            if i == 1 or i == 4:
                continue
            else:
                tbname = "ct" + f'{i}'
                for j in range(9):
                    tdSql.execute(
                        f"insert into {tbname} values ( now()-{(i+j)*10}s, {1*(j+i)}, {11111*(j+i)}, {111*(j+i)}, {11*(j)}, {1.11*(j+i)}, {11.11*(j+i)}, {(j+i)%2}, 'binary{j}', 'nchar{j}', now()+{1*j}a )"
                    )
        tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
        tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
        tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
        tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")

        tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
        tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
        tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")

        tdSql.execute(
            f'''insert into t1 values
            ( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            ( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a )
            ( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a )
            ( '2021-01-01 01:01:06.000', 3, 33333, 333, 33, 3.33, 33.33, 0, "binary3", "nchar3", now()+3a )
            ( '2021-05-07 01:01:10.000', 4, 44444, 444, 44, 4.44, 44.44, 1, "binary4", "nchar4", now()+4a )
            ( '2021-07-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            ( '2021-09-30 01:01:16.000', 5, 55555, 555, 55, 5.55, 55.55, 0, "binary5", "nchar5", now()+5a )
            ( '2022-02-01 01:01:20.000', 6, 66666, 666, 66, 6.66, 66.66, 1, "binary6", "nchar6", now()+6a )
            ( '2022-10-28 01:01:26.000', 7, 00000, 000, 00, 0.00, 00.00, 1, "binary7", "nchar7", "1970-01-01 08:00:00.000" )
            ( '2022-12-01 01:01:30.000', 8, -88888, -888, -88, -8.88, -88.88, 0, "binary8", "nchar8", "1969-01-01 01:00:00.000" )
            ( '2022-12-31 01:01:36.000', 9, -99999999999999999, -999, -99, -9.99, -999999999999999999999.99, 1, "binary9", "nchar9", "1900-01-01 00:00:00.000" )
            ( '2023-02-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            '''
        )

        tdLog.info(" prepare data for distributed_aggregate done! ")
    def check_distribute_datas(self):
        # get the vgroup_ids of all vgroups
        tdSql.query("show vgroups ")
        vgroups = tdSql.queryResult

        vnode_tables = {}

        for vgroup_id in vgroups:
            vnode_tables[vgroup_id[0]] = []

        # map the sub tables of each vnode, to make sure the sub tables are distributed
        tdSql.query("show tables like 'ct%'")
        table_names = tdSql.queryResult

        for table_name in table_names:
            vnode_tables[table_name[6]].append(table_name[0])
        self.vnode_disbutes = vnode_tables

        count = 0
        for k, v in vnode_tables.items():
            if len(v) >= 2:
                count += 1
        if count < 2:
            tdLog.exit(" the sub tables are not distributed across enough vnodes ")
    def check_spread_distribute_diff_vnode(self, col_name):

        vgroup_ids = []
        for k, v in self.vnode_disbutes.items():
            if len(v) >= 2:
                vgroup_ids.append(k)

        distribute_tbnames = []

        # pick one sub table from every vnode that owns at least two of them
        for vgroup_id in vgroup_ids:
            vnode_tables = self.vnode_disbutes[vgroup_id]
            distribute_tbnames.append(random.sample(vnode_tables, 1)[0])
        tbname_ins = ""
        for tbname in distribute_tbnames:
            tbname_ins += "'%s' ," % tbname

        tbname_filters = tbname_ins[:-1]   # strip the trailing comma

        spread_sql = f"select spread({col_name}) from stb1 where tbname in ({tbname_filters})"

        same_sql = f"select max({col_name}) - min({col_name}) from stb1 where tbname in ({tbname_filters})"

        tdSql.query(spread_sql)
        spread_result = tdSql.queryResult

        tdSql.query(same_sql)
        same_result = tdSql.queryResult

        if spread_result != same_result:
            tdLog.exit(" spread function does not work as expected, sql : %s " % spread_sql)
        else:
            tdLog.info(" spread function works as expected, sql : %s " % spread_sql)
    def check_spread_status(self):
        # check that the spread function works on every sub table and column

        tdSql.query("show tables like 'ct%'")
        table_names = tdSql.queryResult
        tablenames = []
        for table_name in table_names:
            tablenames.append(table_name[0])

        tdSql.query("desc stb1")
        col_names = tdSql.queryResult

        colnames = []
        for col_name in col_names:
            if col_name[1] in ["INT", "BIGINT", "SMALLINT", "TINYINT", "FLOAT", "DOUBLE"]:
                colnames.append(col_name[0])

        for tablename in tablenames:
            for colname in colnames:
                self.check_spread_functions(tablename, colname)

        # check the spread function across different vnodes

        for colname in colnames:
            if colname.startswith("c"):
                self.check_spread_distribute_diff_vnode(colname)
            else:
                # self.check_spread_distribute_diff_vnode(colname) # bug for tag
                pass
    def distribute_agg_query(self):
        # basic filter
        tdSql.query("select spread(c1) from stb1 where c1 is null")
        tdSql.checkRows(0)

        tdSql.query("select spread(c1) from stb1 where t1=1")
        tdSql.checkData(0, 0, 8.000000000)

        tdSql.query("select spread(c1+c2) from stb1 where c1 =1 ")
        tdSql.checkData(0, 0, 0.000000000)

        tdSql.query("select spread(c1) from stb1 where tbname=\"ct2\"")
        tdSql.checkData(0, 0, 8.000000000)

        tdSql.query("select spread(c1) from stb1 partition by tbname")
        tdSql.checkRows(20)

        tdSql.query("select spread(c1) from stb1 where t1> 4 partition by tbname")
        tdSql.checkRows(15)

        # union all
        tdSql.query("select spread(c1) from stb1 union all select max(c1)-min(c1) from stb1 ")
        tdSql.checkRows(2)
        tdSql.checkData(0, 0, 28.000000000)

        # join

        tdSql.execute(" create database if not exists db ")
        tdSql.execute(" use db ")
        tdSql.execute(" create stable st (ts timestamp , c1 int ,c2 float) tags(t1 int) ")
        tdSql.execute(" create table tb1 using st tags(1) ")
        tdSql.execute(" create table tb2 using st tags(2) ")

        for i in range(10):
            ts = i*10 + self.ts
            tdSql.execute(f" insert into tb1 values({ts},{i},{i}.0)")
            tdSql.execute(f" insert into tb2 values({ts},{i},{i}.0)")

        tdSql.query("select spread(tb1.c1), spread(tb2.c2) from tb1, tb2 where tb1.ts=tb2.ts")
        tdSql.checkRows(1)
        tdSql.checkData(0, 0, 9.000000000)
        tdSql.checkData(0, 1, 9.00000)

        # group by
        tdSql.execute(" use testdb ")
        tdSql.query(" select max(c1),c1 from stb1 group by t1 ")
        tdSql.checkRows(20)
        tdSql.query(" select max(c1),c1 from stb1 group by c1 ")
        tdSql.checkRows(30)
        tdSql.query(" select max(c1),c2 from stb1 group by c2 ")
        tdSql.checkRows(31)

        # partition by tbname or partition by tag
        tdSql.query("select spread(c1) from stb1 partition by tbname")
        query_data = tdSql.queryResult

        # nested query support for spread
        tdSql.query("select spread(c2+2)+1 from (select max(c1) c2 from stb1)")
        tdSql.checkData(0, 0, 1.000000000)
        tdSql.query("select spread(c1+2)+1 as c2 from (select ts ,c1 ,c2 from stb1)")
        tdSql.checkData(0, 0, 29.000000000)
        tdSql.query("select spread(a+2)+1 as c2 from (select ts ,abs(c1) a ,c2 from stb1)")
        tdSql.checkData(0, 0, 29.000000000)

        # mix up with other functions
        tdSql.query("select max(c1),count(c1),last(c2,c3),spread(c1) from stb1")
        tdSql.checkData(0, 0, 28)
        tdSql.checkData(0, 1, 184)
        tdSql.checkData(0, 2, -99999)
        tdSql.checkData(0, 3, -999)
        tdSql.checkData(0, 4, 28.000000000)

    def run(self):

        self.prepare_datas_of_distribute()
        self.check_distribute_datas()
        self.check_spread_status()
        self.distribute_agg_query()


    def stop(self):
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)

tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
@ -0,0 +1,278 @@
from util.log import *
from util.cases import *
from util.sql import *
import numpy as np
import random
import os
import sys
import platform


class TDTestCase:
    updatecfgDict = {'debugFlag': 143, "cDebugFlag": 143, "uDebugFlag": 143, "rpcDebugFlag": 143, "tmrDebugFlag": 143,
                     "jniDebugFlag": 143, "simDebugFlag": 143, "dDebugFlag": 143, "vDebugFlag": 143, "mDebugFlag": 143, "qDebugFlag": 143,
                     "wDebugFlag": 143, "sDebugFlag": 143, "tsdbDebugFlag": 143, "tqDebugFlag": 143, "fsDebugFlag": 143, "fnDebugFlag": 143,
                     "maxTablesPerVnode": 2, "minTablesPerVnode": 2, "tableIncStepPerVnode": 2}

    def init(self, conn, logSql):
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor())
        self.vnode_disbutes = None
        self.ts = 1537146000000
    def check_sum_functions(self, tbname, col_name):

        sum_sql = f"select sum({col_name}) from {tbname};"

        same_sql = f"select {col_name} from {tbname} where {col_name} is not null "

        tdSql.query(same_sql)
        pre_data = np.array(tdSql.queryResult)[np.array(tdSql.queryResult) != None]
        if platform.system().lower() == 'windows' and pre_data.dtype == 'int32':
            pre_data = np.array(pre_data, dtype='int64')
        pre_sum = np.sum(pre_data)

        tdSql.query(sum_sql)
        tdSql.checkData(0, 0, pre_sum)
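    # note: on Windows numpy builds integer arrays as int32 by default, so the
    # reference data is widened to int64 first to avoid overflow before summing
    # and comparing with the server-side sum().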
    def prepare_datas_of_distribute(self):

        # prepare data for 20 tables distributed across different vgroups
        tdSql.execute("create database if not exists testdb keep 3650 duration 1000 vgroups 5")
        tdSql.execute(" use testdb ")
        tdSql.execute(
            '''create table stb1
            (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
            tags (t0 timestamp, t1 int, t2 bigint, t3 smallint, t4 tinyint, t5 float, t6 double, t7 bool, t8 binary(16),t9 nchar(32))
            '''
        )

        tdSql.execute(
            '''
            create table t1
            (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
            '''
        )
        for i in range(20):
            tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )')

        for i in range(9):
            tdSql.execute(
                f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
            )
            tdSql.execute(
                f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
            )

        for i in range(1, 21):
            if i == 1 or i == 4:
                continue
            else:
                tbname = "ct" + f'{i}'
                for j in range(9):
                    tdSql.execute(
                        f"insert into {tbname} values ( now()-{(i+j)*10}s, {1*(j+i)}, {11111*(j+i)}, {111*(j+i)}, {11*(j)}, {1.11*(j+i)}, {11.11*(j+i)}, {(j+i)%2}, 'binary{j}', 'nchar{j}', now()+{1*j}a )"
                    )
        tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
        tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
        tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
        tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")

        tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
        tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
        tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")

        tdSql.execute(
            f'''insert into t1 values
            ( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            ( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a )
            ( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a )
            ( '2021-01-01 01:01:06.000', 3, 33333, 333, 33, 3.33, 33.33, 0, "binary3", "nchar3", now()+3a )
            ( '2021-05-07 01:01:10.000', 4, 44444, 444, 44, 4.44, 44.44, 1, "binary4", "nchar4", now()+4a )
            ( '2021-07-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            ( '2021-09-30 01:01:16.000', 5, 55555, 555, 55, 5.55, 55.55, 0, "binary5", "nchar5", now()+5a )
            ( '2022-02-01 01:01:20.000', 6, 66666, 666, 66, 6.66, 66.66, 1, "binary6", "nchar6", now()+6a )
            ( '2022-10-28 01:01:26.000', 7, 00000, 000, 00, 0.00, 00.00, 1, "binary7", "nchar7", "1970-01-01 08:00:00.000" )
            ( '2022-12-01 01:01:30.000', 8, -88888, -888, -88, -8.88, -88.88, 0, "binary8", "nchar8", "1969-01-01 01:00:00.000" )
            ( '2022-12-31 01:01:36.000', 9, -99999999999999999, -999, -99, -9.99, -999999999999999999999.99, 1, "binary9", "nchar9", "1900-01-01 00:00:00.000" )
            ( '2023-02-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            '''
        )

        tdLog.info(" prepare data for distributed_aggregate done! ")
    def check_distribute_datas(self):
        # get the vgroup_ids of all vgroups
        tdSql.query("show vgroups ")
        vgroups = tdSql.queryResult

        vnode_tables = {}

        for vgroup_id in vgroups:
            vnode_tables[vgroup_id[0]] = []

        # map the sub tables of each vnode, to make sure the sub tables are distributed
        tdSql.query("show tables like 'ct%'")
        table_names = tdSql.queryResult

        for table_name in table_names:
            vnode_tables[table_name[6]].append(table_name[0])
        self.vnode_disbutes = vnode_tables

        count = 0
        for k, v in vnode_tables.items():
            if len(v) >= 2:
                count += 1
        if count < 2:
            tdLog.exit(" the sub tables are not distributed across enough vnodes ")
    def check_sum_distribute_diff_vnode(self, col_name):

        vgroup_ids = []
        for k, v in self.vnode_disbutes.items():
            if len(v) >= 2:
                vgroup_ids.append(k)

        distribute_tbnames = []

        for vgroup_id in vgroup_ids:
            vnode_tables = self.vnode_disbutes[vgroup_id]
            distribute_tbnames.append(random.sample(vnode_tables, 1)[0])
        tbname_ins = ""
        for tbname in distribute_tbnames:
            tbname_ins += "'%s' ," % tbname

        tbname_filters = tbname_ins[:-1]

        sum_sql = f"select sum({col_name}) from stb1 where tbname in ({tbname_filters});"

        same_sql = f"select {col_name} from stb1 where tbname in ({tbname_filters}) and {col_name} is not null "

        tdSql.query(same_sql)
        pre_data = np.array(tdSql.queryResult)[np.array(tdSql.queryResult) != None]
        if platform.system().lower() == 'windows' and pre_data.dtype == 'int32':
            pre_data = np.array(pre_data, dtype='int64')
        pre_sum = np.sum(pre_data)

        tdSql.query(sum_sql)
        tdSql.checkData(0, 0, pre_sum)
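    # for illustration, the generated filter looks like
    #   select sum(c1) from stb1 where tbname in ('ct3' ,'ct7' ,'ct12')
    # with one randomly sampled sub table per multi-table vnode (the table
    # names here are hypothetical).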
    def check_sum_status(self):
        # check that the sum function works on every sub table and column

        tdSql.query("show tables like 'ct%'")
        table_names = tdSql.queryResult
        tablenames = []
        for table_name in table_names:
            tablenames.append(table_name[0])

        tdSql.query("desc stb1")
        col_names = tdSql.queryResult

        colnames = []
        for col_name in col_names:
            if col_name[1] in ["INT", "BIGINT", "SMALLINT", "TINYINT", "FLOAT", "DOUBLE"]:
                colnames.append(col_name[0])

        for tablename in tablenames:
            for colname in colnames:
                self.check_sum_functions(tablename, colname)

        # check the sum function across different vnodes

        for colname in colnames:
            if colname.startswith("c"):
                self.check_sum_distribute_diff_vnode(colname)
            else:
                # self.check_sum_distribute_diff_vnode(colname) # bug for tag
                pass
    def distribute_agg_query(self):
        # basic filter
        tdSql.query(" select sum(c1) from stb1 ")
        tdSql.checkData(0, 0, 2592)

        tdSql.query(" select sum(a) from (select sum(c1) a from stb1 partition by tbname) ")
        tdSql.checkData(0, 0, 2592)

        tdSql.query(" select sum(c1) from stb1 where t1=1")
        tdSql.checkData(0, 0, 54)

        tdSql.query("select sum(c1+c2) from stb1 where c1 =1 ")
        tdSql.checkData(0, 0, 22224.000000000)

        tdSql.query("select sum(c1) from stb1 where tbname=\"ct2\"")
        tdSql.checkData(0, 0, 54)

        tdSql.query("select sum(c1) from stb1 partition by tbname")
        tdSql.checkRows(20)

        tdSql.query("select sum(c1) from stb1 where t1> 4 partition by tbname")
        tdSql.checkRows(15)

        # union all
        tdSql.query("select sum(c1) from stb1 union all select sum(c1) from stb1 ")
        tdSql.checkRows(2)
        tdSql.checkData(0, 0, 2592)

        tdSql.query("select sum(a) from (select sum(c1) a from stb1 union all select sum(c1) a from stb1)")
        tdSql.checkRows(1)
        tdSql.checkData(0, 0, 5184)

        # join

        tdSql.execute(" create database if not exists db ")
        tdSql.execute(" use db ")
        tdSql.execute(" create stable st (ts timestamp , c1 int ,c2 float) tags(t1 int) ")
        tdSql.execute(" create table tb1 using st tags(1) ")
        tdSql.execute(" create table tb2 using st tags(2) ")

        for i in range(10):
            ts = i*10 + self.ts
            tdSql.execute(f" insert into tb1 values({ts},{i},{i}.0)")
            tdSql.execute(f" insert into tb2 values({ts},{i},{i}.0)")

        tdSql.query("select sum(tb1.c1), sum(tb2.c2) from tb1, tb2 where tb1.ts=tb2.ts")
        tdSql.checkRows(1)
        tdSql.checkData(0, 0, 45)
        tdSql.checkData(0, 1, 45.000000000)

        # group by
        tdSql.execute(" use testdb ")

        # partition by tbname or partition by tag
        tdSql.query("select sum(c1) from stb1 partition by tbname")
        tdSql.checkRows(20)

        # nested query support for sum
        tdSql.query("select abs(c2+2)+1 from (select sum(c1) c2 from stb1)")
        tdSql.checkData(0, 0, 2595.000000000)
        tdSql.query("select sum(c1+2) as c2 from (select ts ,c1 ,c2 from stb1)")
        tdSql.checkData(0, 0, 2960.000000000)
        tdSql.query("select sum(a+2) as c2 from (select ts ,abs(c1) a ,c2 from stb1)")
        tdSql.checkData(0, 0, 2960.000000000)

        # mix up with other functions
        tdSql.query("select max(c1),count(c1),last(c2,c3),sum(c1+c2) from stb1")
        tdSql.checkData(0, 0, 28)
        tdSql.checkData(0, 1, 184)
        tdSql.checkData(0, 2, -99999)
        tdSql.checkData(0, 3, -999)
        tdSql.checkData(0, 4, 28202310.000000000)

    def run(self):

        self.prepare_datas_of_distribute()
        self.check_distribute_datas()
        self.check_sum_status()
        self.distribute_agg_query()


    def stop(self):
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)

tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
@ -5,198 +5,215 @@ import numpy as np

class TDTestCase:
    updatecfgDict = {'debugFlag': 143, "cDebugFlag": 143, "uDebugFlag": 143, "rpcDebugFlag": 143, "tmrDebugFlag": 143,
                     "jniDebugFlag": 143, "simDebugFlag": 143, "dDebugFlag": 143, "vDebugFlag": 143, "mDebugFlag": 143, "qDebugFlag": 143,
                     "wDebugFlag": 143, "sDebugFlag": 143, "tsdbDebugFlag": 143, "tqDebugFlag": 143, "fsDebugFlag": 143, "fnDebugFlag": 143,
                     "maxTablesPerVnode": 2, "minTablesPerVnode": 2, "tableIncStepPerVnode": 2}

    def init(self, conn, logSql):
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor())

        self.rowNum = 10
        self.ts = 1537146000000

    def prepare_data(self):

        pass

    def run(self):
        self.binary_str = 'taosdata'
        self.nchar_str = '涛思数据'

    def max_check_stb_and_tb_base(self):
        tdSql.prepare()
        intData = []
        floatData = []

        tdSql.execute('''create table stb(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
                    col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned) tags(loc nchar(20))''')
        tdSql.execute('''create table stb(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 tinyint unsigned, col6 smallint unsigned,
                    col7 int unsigned, col8 bigint unsigned, col9 float, col10 double, col11 bool, col12 binary(20), col13 nchar(20)) tags(loc nchar(20))''')
        tdSql.execute("create table stb_1 using stb tags('beijing')")
        tdSql.execute('''create table ntb(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
                    col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned)''')
        for i in range(self.rowNum):
            tdSql.execute("insert into ntb values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)"
                          % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1))
            tdSql.execute(f"insert into stb_1 values(%d, %d, %d, %d, %d, %d, %d, %d, %d, %f, %f, %d, '{self.binary_str}%d', '{self.nchar_str}%d')"
                          % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1))
            intData.append(i + 1)
            floatData.append(i + 0.1)
        for i in range(self.rowNum):
            tdSql.execute("insert into stb_1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)"
                          % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1))
            intData.append(i + 1)
            floatData.append(i + 0.1)
        for i in ['ts', 'col11', 'col12', 'col13']:
            for j in ['db.stb', 'stb', 'db.stb_1', 'stb_1']:
                tdSql.error(f'select max({i}) from {j}')
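        # note: max is only defined for numeric columns; timestamp, bool,
        # binary and nchar columns are expected to be rejected, which is what
        # the tdSql.error checks here and below assert.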
        # max verification
        tdSql.error("select max(ts) from stb_1")
        tdSql.error("select max(ts) from db.stb_1")
        tdSql.error("select max(col7) from stb_1")
        tdSql.error("select max(col7) from db.stb_1")
        tdSql.error("select max(col8) from stb_1")
        tdSql.error("select max(col8) from db.stb_1")
        tdSql.error("select max(col9) from stb_1")
        tdSql.error("select max(col9) from db.stb_1")

        tdSql.query("select max(col1) from stb_1")
        tdSql.checkData(0, 0, np.max(intData))
        tdSql.query("select max(col1) from db.stb_1")
        tdSql.checkData(0, 0, np.max(intData))
        tdSql.query("select max(col2) from stb_1")
        tdSql.checkData(0, 0, np.max(intData))
        tdSql.query("select max(col2) from db.stb_1")
        tdSql.checkData(0, 0, np.max(intData))
        tdSql.query("select max(col3) from stb_1")
        tdSql.checkData(0, 0, np.max(intData))
        tdSql.query("select max(col3) from db.stb_1")
        tdSql.checkData(0, 0, np.max(intData))
        tdSql.query("select max(col4) from stb_1")
        tdSql.checkData(0, 0, np.max(intData))
        tdSql.query("select max(col4) from db.stb_1")
        tdSql.checkData(0, 0, np.max(intData))
        tdSql.query("select max(col11) from stb_1")
        tdSql.checkData(0, 0, np.max(intData))
        tdSql.query("select max(col11) from db.stb_1")
        tdSql.checkData(0, 0, np.max(intData))
        tdSql.query("select max(col12) from stb_1")
        tdSql.checkData(0, 0, np.max(intData))
        tdSql.query("select max(col12) from db.stb_1")
        tdSql.checkData(0, 0, np.max(intData))
        tdSql.query("select max(col13) from stb_1")
        tdSql.checkData(0, 0, np.max(intData))
        tdSql.query("select max(col13) from db.stb_1")
        tdSql.checkData(0, 0, np.max(intData))
        tdSql.query("select max(col14) from stb_1")
        tdSql.checkData(0, 0, np.max(intData))
        tdSql.query("select max(col14) from db.stb_1")
        tdSql.checkData(0, 0, np.max(intData))
        tdSql.query("select max(col5) from stb_1")
        tdSql.checkData(0, 0, np.max(floatData))
        tdSql.query("select max(col5) from db.stb_1")
        tdSql.checkData(0, 0, np.max(floatData))
        tdSql.query("select max(col6) from stb_1")
        tdSql.checkData(0, 0, np.max(floatData))
        tdSql.query("select max(col6) from db.stb_1")
        tdSql.checkData(0, 0, np.max(floatData))
        for i in range(1, 11):
            for j in ['db.stb', 'stb', 'db.stb_1', 'stb_1']:
                tdSql.query(f"select max(col{i}) from {j}")
                if i < 9:
                    tdSql.checkData(0, 0, np.max(intData))
                elif i >= 9:
                    tdSql.checkData(0, 0, np.max(floatData))
        tdSql.query("select max(col1) from stb_1 where col2<=5")
        tdSql.checkData(0, 0, 5)
tdSql.error("select max(ts) from stb")
|
||||
tdSql.error("select max(ts) from db.stb")
|
||||
tdSql.error("select max(col7) from stb")
|
||||
tdSql.error("select max(col7) from db.stb")
|
||||
tdSql.error("select max(col8) from stb")
|
||||
tdSql.error("select max(col8) from db.stb")
|
||||
tdSql.error("select max(col9) from stb")
|
||||
tdSql.error("select max(col9) from db.stb")
|
||||
|
||||
tdSql.query("select max(col1) from stb")
|
||||
tdSql.checkData(0, 0, np.max(intData))
|
||||
tdSql.query("select max(col1) from db.stb")
|
||||
tdSql.checkData(0, 0, np.max(intData))
|
||||
tdSql.query("select max(col2) from stb")
|
||||
tdSql.checkData(0, 0, np.max(intData))
|
||||
tdSql.query("select max(col2) from db.stb")
|
||||
tdSql.checkData(0, 0, np.max(intData))
|
||||
tdSql.query("select max(col3) from stb")
|
||||
tdSql.checkData(0, 0, np.max(intData))
|
||||
tdSql.query("select max(col3) from db.stb")
|
||||
tdSql.checkData(0, 0, np.max(intData))
|
||||
tdSql.query("select max(col4) from stb")
|
||||
tdSql.checkData(0, 0, np.max(intData))
|
||||
tdSql.query("select max(col4) from db.stb")
|
||||
tdSql.checkData(0, 0, np.max(intData))
|
||||
tdSql.query("select max(col11) from stb")
|
||||
tdSql.checkData(0, 0, np.max(intData))
|
||||
tdSql.query("select max(col11) from db.stb")
|
||||
tdSql.checkData(0, 0, np.max(intData))
|
||||
tdSql.query("select max(col12) from stb")
|
||||
tdSql.checkData(0, 0, np.max(intData))
|
||||
tdSql.query("select max(col12) from db.stb")
|
||||
tdSql.checkData(0, 0, np.max(intData))
|
||||
tdSql.query("select max(col13) from stb")
|
||||
tdSql.checkData(0, 0, np.max(intData))
|
||||
tdSql.query("select max(col13) from db.stb")
|
||||
tdSql.checkData(0, 0, np.max(intData))
|
||||
tdSql.query("select max(col14) from stb")
|
||||
tdSql.checkData(0, 0, np.max(intData))
|
||||
tdSql.query("select max(col14) from db.stb")
|
||||
tdSql.checkData(0, 0, np.max(intData))
|
||||
tdSql.query("select max(col5) from stb")
|
||||
tdSql.checkData(0, 0, np.max(floatData))
|
||||
tdSql.query("select max(col5) from db.stb")
|
||||
tdSql.checkData(0, 0, np.max(floatData))
|
||||
tdSql.query("select max(col6) from stb")
|
||||
tdSql.checkData(0, 0, np.max(floatData))
|
||||
tdSql.query("select max(col6) from db.stb")
|
||||
tdSql.checkData(0, 0, np.max(floatData))
|
||||
tdSql.query("select max(col1) from stb where col2<=5")
|
||||
tdSql.checkData(0,0,5)
|
||||
|
||||
|
||||
|
||||
tdSql.error("select max(ts) from ntb")
|
||||
tdSql.error("select max(ts) from db.ntb")
|
||||
tdSql.error("select max(col7) from ntb")
|
||||
tdSql.error("select max(col7) from db.ntb")
|
||||
tdSql.error("select max(col8) from ntb")
|
||||
tdSql.error("select max(col8) from db.ntb")
|
||||
tdSql.error("select max(col9) from ntb")
|
||||
tdSql.error("select max(col9) from db.ntb")
|
||||
|
||||
tdSql.query("select max(col1) from ntb")
|
||||
tdSql.checkData(0, 0, np.max(intData))
|
||||
tdSql.query("select max(col1) from db.ntb")
|
||||
tdSql.checkData(0, 0, np.max(intData))
|
||||
tdSql.query("select max(col2) from ntb")
|
||||
tdSql.checkData(0, 0, np.max(intData))
|
||||
tdSql.query("select max(col2) from db.ntb")
|
||||
tdSql.checkData(0, 0, np.max(intData))
|
||||
tdSql.query("select max(col3) from ntb")
|
||||
tdSql.checkData(0, 0, np.max(intData))
|
||||
tdSql.query("select max(col3) from db.ntb")
|
||||
tdSql.checkData(0, 0, np.max(intData))
|
||||
tdSql.query("select max(col4) from ntb")
|
||||
tdSql.checkData(0, 0, np.max(intData))
|
||||
tdSql.query("select max(col4) from db.ntb")
|
||||
tdSql.checkData(0, 0, np.max(intData))
|
||||
tdSql.query("select max(col11) from ntb")
|
||||
tdSql.checkData(0, 0, np.max(intData))
|
||||
tdSql.query("select max(col11) from db.ntb")
|
||||
tdSql.checkData(0, 0, np.max(intData))
|
||||
tdSql.query("select max(col12) from ntb")
|
||||
tdSql.checkData(0, 0, np.max(intData))
|
||||
tdSql.query("select max(col12) from db.ntb")
|
||||
tdSql.checkData(0, 0, np.max(intData))
|
||||
tdSql.query("select max(col13) from ntb")
|
||||
tdSql.checkData(0, 0, np.max(intData))
|
||||
tdSql.query("select max(col13) from db.ntb")
|
||||
tdSql.checkData(0, 0, np.max(intData))
|
||||
tdSql.query("select max(col14) from ntb")
|
||||
tdSql.checkData(0, 0, np.max(intData))
|
||||
tdSql.query("select max(col14) from db.ntb")
|
||||
tdSql.checkData(0, 0, np.max(intData))
|
||||
tdSql.query("select max(col5) from ntb")
|
||||
tdSql.checkData(0, 0, np.max(floatData))
|
||||
tdSql.query("select max(col5) from db.ntb")
|
||||
tdSql.checkData(0, 0, np.max(floatData))
|
||||
tdSql.query("select max(col6) from ntb")
|
||||
tdSql.checkData(0, 0, np.max(floatData))
|
||||
tdSql.query("select max(col6) from db.ntb")
|
||||
tdSql.checkData(0, 0, np.max(floatData))
|
||||
tdSql.query("select max(col1) from stb_1 where col2<=5")
|
||||
tdSql.execute('drop database db')
|
||||
|
||||
    def max_check_ntb_base(self):
        tdSql.prepare()
        intData = []
        floatData = []
        tdSql.execute('''create table ntb(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 tinyint unsigned, col6 smallint unsigned,
                    col7 int unsigned, col8 bigint unsigned, col9 float, col10 double, col11 bool, col12 binary(20), col13 nchar(20))''')
        for i in range(self.rowNum):
            tdSql.execute(f"insert into ntb values(%d, %d, %d, %d, %d, %d, %d, %d, %d, %f, %f, %d, '{self.binary_str}%d', '{self.nchar_str}%d')"
                          % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1))
            intData.append(i + 1)
            floatData.append(i + 0.1)
        for i in ['ts', 'col11', 'col12', 'col13']:
            for j in ['db.ntb', 'ntb']:
                tdSql.error(f'select max({i}) from {j}')
        for i in range(1, 11):
            for j in ['db.ntb', 'ntb']:
                tdSql.query(f"select max(col{i}) from {j}")
                if i < 9:
                    tdSql.checkData(0, 0, np.max(intData))
                elif i >= 9:
                    tdSql.checkData(0, 0, np.max(floatData))
        tdSql.query("select max(col1) from ntb where col2<=5")
        tdSql.checkData(0, 0, 5)
        tdSql.execute('drop database db')
    def check_max_functions(self, tbname, col_name):

        max_sql = f"select max({col_name}) from {tbname};"

        same_sql = f"select {col_name} from {tbname} order by {col_name} desc limit 1"

        tdSql.query(max_sql)
        max_result = tdSql.queryResult

        tdSql.query(same_sql)
        same_result = tdSql.queryResult

        if max_result != same_result:
            tdLog.exit(" max function does not work as expected, sql : %s " % max_sql)
        else:
            tdLog.info(" max function works as expected, sql : %s " % max_sql)
def support_distributed_aggregate(self):
|
||||
|
||||
# prepate datas for 20 tables distributed at different vgroups
|
||||
tdSql.execute("create database if not exists testdb keep 3650 duration 1000 vgroups 5")
|
||||
tdSql.execute(" use testdb ")
|
||||
tdSql.execute(
|
||||
'''create table stb1
|
||||
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
|
||||
tags (t0 timestamp, t1 int, t2 bigint, t3 smallint, t4 tinyint, t5 float, t6 double, t7 bool, t8 binary(16),t9 nchar(32))
|
||||
'''
|
||||
)
|
||||
|
||||
tdSql.execute(
|
||||
'''
|
||||
create table t1
|
||||
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
|
||||
'''
|
||||
)
|
||||
for i in range(20):
|
||||
tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )')
|
||||
|
||||
for i in range(9):
|
||||
tdSql.execute(
|
||||
f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
|
||||
)
|
||||
tdSql.execute(
|
||||
f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
|
||||
)
|
||||
|
||||
        for i in range(1, 21):
            if i == 1 or i == 4:
                continue
            else:
                tbname = "ct" + f'{i}'
                for j in range(9):
                    tdSql.execute(
                        f"insert into {tbname} values ( now()-{(i+j)*10}s, {1*(j+i)}, {11111*(j+i)}, {111*(j+i)}, {11*(j)}, {1.11*(j+i)}, {11.11*(j+i)}, {(j+i)%2}, 'binary{j}', 'nchar{j}', now()+{1*j}a )"
                    )
        tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
        tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
        tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
        tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")

        tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
        tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
        tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")

        tdSql.execute(
            f'''insert into t1 values
            ( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            ( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a )
            ( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a )
            ( '2021-01-01 01:01:06.000', 3, 33333, 333, 33, 3.33, 33.33, 0, "binary3", "nchar3", now()+3a )
            ( '2021-05-07 01:01:10.000', 4, 44444, 444, 44, 4.44, 44.44, 1, "binary4", "nchar4", now()+4a )
            ( '2021-07-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            ( '2021-09-30 01:01:16.000', 5, 55555, 555, 55, 5.55, 55.55, 0, "binary5", "nchar5", now()+5a )
            ( '2022-02-01 01:01:20.000', 6, 66666, 666, 66, 6.66, 66.66, 1, "binary6", "nchar6", now()+6a )
            ( '2022-10-28 01:01:26.000', 7, 00000, 000, 00, 0.00, 00.00, 1, "binary7", "nchar7", "1970-01-01 08:00:00.000" )
            ( '2022-12-01 01:01:30.000', 8, -88888, -888, -88, -8.88, -88.88, 0, "binary8", "nchar8", "1969-01-01 01:00:00.000" )
            ( '2022-12-31 01:01:36.000', 9, -99999999999999999, -999, -99, -9.99, -999999999999999999999.99, 1, "binary9", "nchar9", "1900-01-01 00:00:00.000" )
            ( '2023-02-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            '''
        )

tdLog.info(" prepare data for distributed_aggregate done! ")
|
||||
|
||||
# get vgroup_ids of all
|
||||
tdSql.query("show vgroups ")
|
||||
vgroups = tdSql.queryResult
|
||||
|
||||
vnode_tables={}
|
||||
|
||||
for vgroup_id in vgroups:
|
||||
vnode_tables[vgroup_id[0]]=[]
|
||||
|
||||
|
||||
# check sub_table of per vnode ,make sure sub_table has been distributed
|
||||
tdSql.query("show tables like 'ct%'")
|
||||
table_names = tdSql.queryResult
|
||||
tablenames = []
|
||||
for table_name in table_names:
|
||||
vnode_tables[table_name[6]].append(table_name[0])
|
||||
|
||||
count = 0
|
||||
for k ,v in vnode_tables.items():
|
||||
if len(v)>=2:
|
||||
count+=1
|
||||
if count < 2:
|
||||
tdLog.exit(" the datas of all not satisfy sub_table has been distributed ")
|
||||
|
||||
        # check that the max function works as expected

        tdSql.query("show tables like 'ct%'")
        table_names = tdSql.queryResult
        tablenames = []
        for table_name in table_names:
            tablenames.append(table_name[0])

        tdSql.query("desc stb1")
        col_names = tdSql.queryResult

        colnames = []
        for col_name in col_names:
            if col_name[1] in ["INT", "BIGINT", "SMALLINT", "TINYINT", "FLOAT", "DOUBLE"]:
                colnames.append(col_name[0])

        for tablename in tablenames:
            for colname in colnames:
                self.check_max_functions(tablename, colname)

        # max function with basic filter
        print(vnode_tables)

    def run(self):

        # max verification
        self.max_check_stb_and_tb_base()
        self.max_check_ntb_base()

        self.support_distributed_aggregate()

    def stop(self):
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)

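The check_max_functions helper above validates the aggregate by cross-checking it against an equivalent non-aggregate query (ORDER BY ... DESC LIMIT 1). A minimal self-contained sketch of that oracle-style check, using an in-memory list instead of a live tdSql connection (the function and its data are illustrative, not part of the test framework):

import numpy as np

def cross_check_max(values):
    # Aggregate path: what "select max(col) from tb" should return.
    agg_result = np.max(values)
    # Oracle path: what "select col from tb order by col desc limit 1" returns.
    oracle_result = sorted(values, reverse=True)[0]
    # The two paths must agree for max() to be considered correct.
    assert agg_result == oracle_result, f"max mismatch: {agg_result} != {oracle_result}"
    return agg_result

print(cross_check_max([1, 5, 3, 5, 2]))  # -> 5
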
@ -192,7 +192,7 @@ class TDTestCase:
        time.sleep(1)

        tdLog.info("start consume processor")
        pollDelay = 100
        pollDelay = 20
        showMsg = 1
        showRow = 1

@ -208,7 +208,7 @@ class TDTestCase:
        os.system(shellCmd)

        # wait for data ready
        prepareEnvThread.join()
        # prepareEnvThread.join()

        tdLog.info("insert process end, and start to check consume result")
        while 1:

@ -322,176 +322,6 @@ class TDTestCase:

        tdLog.printNoPrefix("======== test case 5 end ...... ")

    def tmqCase6(self, cfgPath, buildPath):
        tdLog.printNoPrefix("======== test case 6: Produce while one consumer subscribes to two topics, each containing one db")
        tdLog.info("step 1: create database, stb, ctb and insert data")
        # create and start thread
        parameterDict = {'cfg': '', \
                         'dbName': 'db60', \
                         'vgroups': 4, \
                         'stbName': 'stb', \
                         'ctbNum': 10, \
                         'rowsPerTbl': 5000, \
                         'batchNum': 100, \
                         'startTs': 1640966400000} # 2022-01-01 00:00:00.000
        parameterDict['cfg'] = cfgPath

        self.initConsumerTable()

        tdSql.execute("create database if not exists %s vgroups %d" %(parameterDict['dbName'], parameterDict['vgroups']))

        prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict)
        prepareEnvThread.start()

        parameterDict2 = {'cfg': '', \
                          'dbName': 'db61', \
                          'vgroups': 4, \
                          'stbName': 'stb2', \
                          'ctbNum': 10, \
                          'rowsPerTbl': 5000, \
                          'batchNum': 100, \
                          'startTs': 1640966400000} # 2022-01-01 00:00:00.000
        parameterDict2['cfg'] = cfgPath

        tdSql.execute("create database if not exists %s vgroups %d" %(parameterDict2['dbName'], parameterDict2['vgroups']))

        prepareEnvThread2 = threading.Thread(target=self.prepareEnv, kwargs=parameterDict2)
        prepareEnvThread2.start()

        tdLog.info("create topics from db")
        topicName1 = 'topic_db60'
        topicName2 = 'topic_db61'

        tdSql.execute("create topic %s as database %s" %(topicName1, parameterDict['dbName']))
        tdSql.execute("create topic %s as database %s" %(topicName2, parameterDict2['dbName']))

        consumerId = 0
        expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + parameterDict2["rowsPerTbl"] * parameterDict2["ctbNum"]
        topicList = topicName1 + ',' + topicName2
        ifcheckdata = 0
        ifManualCommit = 0
        keyList = 'group.id:cgrp1,\
                   enable.auto.commit:false,\
                   auto.commit.interval.ms:6000,\
                   auto.offset.reset:earliest'
        self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)

        #consumerId = 1
        #self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)

        event.wait()

        tdLog.info("start consume processor")
        pollDelay = 100
        showMsg = 1
        showRow = 1
        self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow)

        # wait for data ready
        prepareEnvThread.join()
        prepareEnvThread2.join()

        tdLog.info("insert process end, and start to check consume result")
        expectRows = 1
        resultList = self.selectConsumeResult(expectRows)
        totalConsumeRows = 0
        for i in range(expectRows):
            totalConsumeRows += resultList[i]

        if totalConsumeRows != expectrowcnt:
            tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt))
            tdLog.exit("tmq consume rows error!")

        tdSql.query("drop topic %s"%topicName1)
        tdSql.query("drop topic %s"%topicName2)

        tdLog.printNoPrefix("======== test case 6 end ...... ")

    def tmqCase7(self, cfgPath, buildPath):
        tdLog.printNoPrefix("======== test case 7: Produce while two consumers subscribe to two topics, each containing one db")
        tdLog.info("step 1: create database, stb, ctb and insert data")
        # create and start thread
        parameterDict = {'cfg': '', \
                         'dbName': 'db70', \
                         'vgroups': 4, \
                         'stbName': 'stb', \
                         'ctbNum': 10, \
                         'rowsPerTbl': 5000, \
                         'batchNum': 100, \
                         'startTs': 1640966400000} # 2022-01-01 00:00:00.000
        parameterDict['cfg'] = cfgPath

        self.initConsumerTable()

        tdSql.execute("create database if not exists %s vgroups %d" %(parameterDict['dbName'], parameterDict['vgroups']))

        prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict)
        prepareEnvThread.start()

        parameterDict2 = {'cfg': '', \
                          'dbName': 'db71', \
                          'vgroups': 4, \
                          'stbName': 'stb2', \
                          'ctbNum': 10, \
                          'rowsPerTbl': 5000, \
                          'batchNum': 100, \
                          'startTs': 1640966400000} # 2022-01-01 00:00:00.000
        parameterDict2['cfg'] = cfgPath

        tdSql.execute("create database if not exists %s vgroups %d" %(parameterDict2['dbName'], parameterDict2['vgroups']))

        prepareEnvThread2 = threading.Thread(target=self.prepareEnv, kwargs=parameterDict2)
        prepareEnvThread2.start()

        tdLog.info("create topics from db")
        topicName1 = 'topic_db60'
        topicName2 = 'topic_db61'

        tdSql.execute("create topic %s as database %s" %(topicName1, parameterDict['dbName']))
        tdSql.execute("create topic %s as database %s" %(topicName2, parameterDict2['dbName']))

        consumerId = 0
        expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + parameterDict2["rowsPerTbl"] * parameterDict2["ctbNum"]
        topicList = topicName1 + ',' + topicName2
        ifcheckdata = 0
        ifManualCommit = 1
        keyList = 'group.id:cgrp1,\
                   enable.auto.commit:false,\
                   auto.commit.interval.ms:6000,\
                   auto.offset.reset:earliest'
        self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)

        consumerId = 1
        self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)

        event.wait()

        tdLog.info("start consume processor")
        pollDelay = 100
        showMsg = 1
        showRow = 1
        self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow)

        # wait for data ready
        prepareEnvThread.join()
        prepareEnvThread2.join()

        tdLog.info("insert process end, and start to check consume result")
        expectRows = 2
        resultList = self.selectConsumeResult(expectRows)
        totalConsumeRows = 0
        for i in range(expectRows):
            totalConsumeRows += resultList[i]

        if totalConsumeRows != expectrowcnt:
            tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt))
            tdLog.exit("tmq consume rows error!")

        tdSql.query("drop topic %s"%topicName1)
        tdSql.query("drop topic %s"%topicName2)

        tdLog.printNoPrefix("======== test case 7 end ...... ")

    def run(self):
        tdSql.prepare()

@ -505,8 +335,6 @@ class TDTestCase:

        self.tmqCase4(cfgPath, buildPath)
        self.tmqCase5(cfgPath, buildPath)
        self.tmqCase6(cfgPath, buildPath)
        self.tmqCase7(cfgPath, buildPath)

    def stop(self):

@ -72,10 +72,10 @@ class TDTestCase:
            if tdSql.getRows() == expectRows:
                break
            else:
                time.sleep(5)

            time.sleep(5)

        for i in range(expectRows):
            tdLog.info ("ts: %s, consume id: %d, consume msgs: %d, consume rows: %d"%(tdSql.getData(i , 0), tdSql.getData(i , 1), tdSql.getData(i , 2), tdSql.getData(i , 3)))
            tdLog.info ("consume id: %d, consume msgs: %d, consume rows: %d"%(tdSql.getData(i , 1), tdSql.getData(i , 2), tdSql.getData(i , 3)))
            resultList.append(tdSql.getData(i , 3))

        return resultList

@ -85,7 +85,7 @@ class TDTestCase:
            logFile = cfgPath + '/../log/valgrind-tmq.log'
            shellCmd = 'nohup valgrind --log-file=' + logFile
            shellCmd += ' --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all --num-callers=20 -v --workaround-gcc296-bugs=yes '

        if (platform.system().lower() == 'windows'):
            shellCmd = 'mintty -h never -w hide ' + buildPath + '\\build\\bin\\tmq_sim.exe -c ' + cfgPath
            shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName)

@ -97,7 +97,7 @@ class TDTestCase:
        tdLog.info(shellCmd)
        os.system(shellCmd)

    def create_tables(self,tsql, dbName,vgroups,stbName,ctbNum,rowsPerTbl):
    def create_tables(self,tsql, dbName,vgroups,stbName,ctbNum):
        tsql.execute("create database if not exists %s vgroups %d"%(dbName, vgroups))
        tsql.execute("use %s" %dbName)
        tsql.execute("create table if not exists %s (ts timestamp, c1 bigint, c2 binary(16)) tags(t1 int)"%stbName)

@ -151,8 +151,7 @@ class TDTestCase:
                           parameterDict["dbName"],\
                           parameterDict["vgroups"],\
                           parameterDict["stbName"],\
                           parameterDict["ctbNum"],\
                           parameterDict["rowsPerTbl"])
                           parameterDict["ctbNum"])

        self.insert_data(tsql,\
                         parameterDict["dbName"],\

@ -163,16 +162,16 @@ class TDTestCase:
                         parameterDict["startTs"])
        return

    def tmqCase8(self, cfgPath, buildPath):
        tdLog.printNoPrefix("======== test case 8: Produce while one consumer subscribes to one db, including 1 stb")
    def tmqCase6(self, cfgPath, buildPath):
        tdLog.printNoPrefix("======== test case 6: Produce while one consumer subscribes to two topics, each containing one db")
        tdLog.info("step 1: create database, stb, ctb and insert data")
        # create and start thread
        parameterDict = {'cfg': '', \
                         'dbName': 'db8', \
                         'dbName': 'db60', \
                         'vgroups': 4, \
                         'stbName': 'stb', \
                         'ctbNum': 10, \
                         'rowsPerTbl': 10000, \
                         'rowsPerTbl': 5000, \
                         'batchNum': 100, \
                         'startTs': 1640966400000} # 2022-01-01 00:00:00.000
        parameterDict['cfg'] = cfgPath

@ -183,14 +182,32 @@ class TDTestCase:

        prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict)
        prepareEnvThread.start()

        parameterDict2 = {'cfg': '', \
                          'dbName': 'db61', \
                          'vgroups': 4, \
                          'stbName': 'stb2', \
                          'ctbNum': 10, \
                          'rowsPerTbl': 5000, \
                          'batchNum': 100, \
                          'startTs': 1640966400000} # 2022-01-01 00:00:00.000
        parameterDict2['cfg'] = cfgPath

        tdSql.execute("create database if not exists %s vgroups %d" %(parameterDict2['dbName'], parameterDict2['vgroups']))

        prepareEnvThread2 = threading.Thread(target=self.prepareEnv, kwargs=parameterDict2)
        prepareEnvThread2.start()

        tdLog.info("create topics from db")
        topicName1 = 'topic_db1'
        topicName1 = 'topic_db60'
        topicName2 = 'topic_db61'

        tdSql.execute("create topic %s as database %s" %(topicName1, parameterDict['dbName']))
        tdSql.execute("create topic %s as database %s" %(topicName2, parameterDict2['dbName']))

        tdSql.execute("create topic %s as database %s" %(topicName1, parameterDict['dbName']))
        consumerId = 0
        expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] / 2
        topicList = topicName1
        expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + parameterDict2["rowsPerTbl"] * parameterDict2["ctbNum"]
        topicList = topicName1 + ',' + topicName2
        ifcheckdata = 0
        ifManualCommit = 0
        keyList = 'group.id:cgrp1,\

@ -199,6 +216,9 @@ class TDTestCase:
                   auto.offset.reset:earliest'
        self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)

        #consumerId = 1
        #self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)

        event.wait()

        tdLog.info("start consume processor")

@ -208,7 +228,8 @@ class TDTestCase:
        self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow)

        # wait for data ready
        prepareEnvThread.join()
        prepareEnvThread.join()
        prepareEnvThread2.join()

        tdLog.info("insert process end, and start to check consume result")
        expectRows = 1

@ -221,36 +242,21 @@ class TDTestCase:
            tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt))
            tdLog.exit("tmq consume rows error!")

        tdLog.info("again start consume processor")
        self.initConsumerTable()
        expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"]
        self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
        self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow)
        expectRows = 1
        resultList = self.selectConsumeResult(expectRows)
        totalConsumeRows = 0
        for i in range(expectRows):
            totalConsumeRows += resultList[i]

        if totalConsumeRows != expectrowcnt:
            tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt))
            tdLog.exit("tmq consume rows error!")

        tdSql.query("drop topic %s"%topicName1)
        tdSql.query("drop topic %s"%topicName2)

        tdLog.printNoPrefix("======== test case 8 end ...... ")
        tdLog.printNoPrefix("======== test case 6 end ...... ")

    def tmqCase9(self, cfgPath, buildPath):
        tdLog.printNoPrefix("======== test case 9: Produce while one consumer subscribes to one db, including 1 stb")
    def tmqCase7(self, cfgPath, buildPath):
        tdLog.printNoPrefix("======== test case 7: Produce while two consumers subscribe to two topics, each containing one db")
        tdLog.info("step 1: create database, stb, ctb and insert data")
        # create and start thread
        parameterDict = {'cfg': '', \
                         'dbName': 'db9', \
                         'dbName': 'db70', \
                         'vgroups': 4, \
                         'stbName': 'stb', \
                         'ctbNum': 10, \
                         'rowsPerTbl': 10000, \
                         'rowsPerTbl': 5000, \
                         'batchNum': 100, \
                         'startTs': 1640966400000} # 2022-01-01 00:00:00.000
        parameterDict['cfg'] = cfgPath

@ -261,14 +267,32 @@ class TDTestCase:

        prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict)
        prepareEnvThread.start()

        parameterDict2 = {'cfg': '', \
                          'dbName': 'db71', \
                          'vgroups': 4, \
                          'stbName': 'stb2', \
                          'ctbNum': 10, \
                          'rowsPerTbl': 5000, \
                          'batchNum': 100, \
                          'startTs': 1640966400000} # 2022-01-01 00:00:00.000
        parameterDict2['cfg'] = cfgPath

        tdSql.execute("create database if not exists %s vgroups %d" %(parameterDict2['dbName'], parameterDict2['vgroups']))

        prepareEnvThread2 = threading.Thread(target=self.prepareEnv, kwargs=parameterDict2)
        prepareEnvThread2.start()

        tdLog.info("create topics from db")
        topicName1 = 'topic_db1'
        topicName1 = 'topic_db60'
        topicName2 = 'topic_db61'

        tdSql.execute("create topic %s as database %s" %(topicName1, parameterDict['dbName']))
        tdSql.execute("create topic %s as database %s" %(topicName2, parameterDict2['dbName']))

        tdSql.execute("create topic %s as database %s" %(topicName1, parameterDict['dbName']))
        consumerId = 0
        expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] / 2
        topicList = topicName1
        expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + parameterDict2["rowsPerTbl"] * parameterDict2["ctbNum"]
        topicList = topicName1 + ',' + topicName2
        ifcheckdata = 0
        ifManualCommit = 1
        keyList = 'group.id:cgrp1,\

@ -277,86 +301,7 @@ class TDTestCase:
                   auto.offset.reset:earliest'
        self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)

        event.wait()

        tdLog.info("start consume processor")
        pollDelay = 100
        showMsg = 1
        showRow = 1
        self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow)

        # wait for data ready
        prepareEnvThread.join()

        tdLog.info("insert process end, and start to check consume result")
        expectRows = 1
        resultList = self.selectConsumeResult(expectRows)
        totalConsumeRows = 0
        for i in range(expectRows):
            totalConsumeRows += resultList[i]

        tdSql.query("select count(*) from %s.%s" %(parameterDict['dbName'], parameterDict['stbName']))
        countOfStb = tdSql.getData(0,0)
        print ("====total rows of stb: %d"%countOfStb)

        tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt))
        if totalConsumeRows < expectrowcnt:
            tdLog.exit("tmq consume rows error!")

        tdLog.info("again start consume processor")
        self.initConsumerTable()
        expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"]
        self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
        self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow)
        expectRows = 1
        resultList = self.selectConsumeResult(expectRows)
        totalConsumeRows2 = 0
        for i in range(expectRows):
            totalConsumeRows2 += resultList[i]

        tdLog.info("firstly act consume rows: %d"%(totalConsumeRows))
        tdLog.info("secondly act consume rows: %d, expect consume rows: %d"%(totalConsumeRows2, expectrowcnt))
        if totalConsumeRows + totalConsumeRows2 != expectrowcnt:
            tdLog.exit("tmq consume rows error!")

        tdSql.query("drop topic %s"%topicName1)

        tdLog.printNoPrefix("======== test case 9 end ...... ")

    def tmqCase10(self, cfgPath, buildPath):
        tdLog.printNoPrefix("======== test case 10: Produce while one consumer subscribes to one db, including 1 stb")
        tdLog.info("step 1: create database, stb, ctb and insert data")
        # create and start thread
        parameterDict = {'cfg': '', \
                         'dbName': 'db10', \
                         'vgroups': 4, \
                         'stbName': 'stb', \
                         'ctbNum': 10, \
                         'rowsPerTbl': 10000, \
                         'batchNum': 100, \
                         'startTs': 1640966400000} # 2022-01-01 00:00:00.000
        parameterDict['cfg'] = cfgPath

        self.initConsumerTable()

        tdSql.execute("create database if not exists %s vgroups %d" %(parameterDict['dbName'], parameterDict['vgroups']))

        prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict)
        prepareEnvThread.start()

        tdLog.info("create topics from db")
        topicName1 = 'topic_db1'

        tdSql.execute("create topic %s as database %s" %(topicName1, parameterDict['dbName']))
        consumerId = 0
        expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"]
        topicList = topicName1
        ifcheckdata = 0
        ifManualCommit = 1
        keyList = 'group.id:cgrp1,\
                   enable.auto.commit:false,\
                   auto.commit.interval.ms:6000,\
                   auto.offset.reset:earliest'
        consumerId = 1
        self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)

        event.wait()

@ -367,23 +312,12 @@ class TDTestCase:
        showRow = 1
        self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow)

        time.sleep(2)
        tdLog.info("pkill consume processor")
        if (platform.system().lower() == 'windows'):
            os.system("TASKKILL /F /IM tmq_sim.exe")
        else:
            os.system('pkill tmq_sim')
        expectRows = 0
        resultList = self.selectConsumeResult(expectRows)

        # wait for data ready
        prepareEnvThread.join()
        prepareEnvThread2.join()

        tdLog.info("insert process end, and start to check consume result")

        tdLog.info("again start consume processor")
        self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow)

        expectRows = 1
        expectRows = 2
        resultList = self.selectConsumeResult(expectRows)
        totalConsumeRows = 0
        for i in range(expectRows):

@ -393,85 +327,10 @@ class TDTestCase:
            tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt))
            tdLog.exit("tmq consume rows error!")

        time.sleep(15)
        tdSql.query("drop topic %s"%topicName1)
        tdSql.query("drop topic %s"%topicName2)

        tdLog.printNoPrefix("======== test case 10 end ...... ")

    def tmqCase11(self, cfgPath, buildPath):
        tdLog.printNoPrefix("======== test case 11: Produce while one consumer subscribes to one db, including 1 stb")
        tdLog.info("step 1: create database, stb, ctb and insert data")
        # create and start thread
        parameterDict = {'cfg': '', \
                         'dbName': 'db11', \
                         'vgroups': 4, \
                         'stbName': 'stb', \
                         'ctbNum': 10, \
                         'rowsPerTbl': 10000, \
                         'batchNum': 100, \
                         'startTs': 1640966400000} # 2022-01-01 00:00:00.000
        parameterDict['cfg'] = cfgPath

        self.initConsumerTable()

        tdSql.execute("create database if not exists %s vgroups %d" %(parameterDict['dbName'], parameterDict['vgroups']))

        prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict)
        prepareEnvThread.start()

        tdLog.info("create topics from db")
        topicName1 = 'topic_db1'

        tdSql.execute("create topic %s as database %s" %(topicName1, parameterDict['dbName']))
        consumerId = 0
        expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"]
        topicList = topicName1
        ifcheckdata = 0
        ifManualCommit = 1
        keyList = 'group.id:cgrp1,\
                   enable.auto.commit:true,\
                   auto.commit.interval.ms:1000,\
                   auto.offset.reset:earliest'
        self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)

        event.wait()

        tdLog.info("start consume processor")
        pollDelay = 20
        showMsg = 1
        showRow = 1
        self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow)

        time.sleep(6)
        tdLog.info("pkill consume processor")
        if (platform.system().lower() == 'windows'):
            os.system("TASKKILL /F /IM tmq_sim.exe")
        else:
            os.system('pkill tmq_sim')
        expectRows = 0
        resultList = self.selectConsumeResult(expectRows)

        # wait for data ready
        prepareEnvThread.join()
        tdLog.info("insert process end, and start to check consume result")

        tdLog.info("again start consume processor")
        self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow)

        expectRows = 1
        resultList = self.selectConsumeResult(expectRows)
        totalConsumeRows = 0
        for i in range(expectRows):
            totalConsumeRows += resultList[i]

        if totalConsumeRows >= expectrowcnt or totalConsumeRows <= 0:
            tdLog.info("act consume rows: %d, expect consume rows between 0 and %d"%(totalConsumeRows, expectrowcnt))
            tdLog.exit("tmq consume rows error!")

        time.sleep(15)
        tdSql.query("drop topic %s"%topicName1)

        tdLog.printNoPrefix("======== test case 11 end ...... ")
        tdLog.printNoPrefix("======== test case 7 end ...... ")

    def run(self):
        tdSql.prepare()

@ -484,10 +343,9 @@ class TDTestCase:
        cfgPath = buildPath + "/../sim/psim/cfg"
        tdLog.info("cfgPath: %s" % cfgPath)

        self.tmqCase8(cfgPath, buildPath)
        self.tmqCase9(cfgPath, buildPath)
        self.tmqCase10(cfgPath, buildPath)
        self.tmqCase11(cfgPath, buildPath)
        self.tmqCase6(cfgPath, buildPath)
        self.tmqCase7(cfgPath, buildPath)

    def stop(self):
        tdSql.close()

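The consumer checks throughout these cases follow one pattern: poll the consumeresult table until the expected number of result rows appears, then sum the per-consumer row counts. A minimal standalone sketch of that polling loop, with a stubbed fetch_rows() standing in for the live tdSql query (the stub and its data are illustrative only):

import time

def wait_for_results(fetch_rows, expect_rows, interval=5, max_tries=60):
    # Poll until the result table holds one row per finished consumer.
    for _ in range(max_tries):
        rows = fetch_rows()  # stand-in for "select * from cdb.consumeresult"
        if len(rows) == expect_rows:
            # Column 3 of each row is the consumed-row count in the tests above.
            return [r[3] for r in rows]
        time.sleep(interval)
    raise TimeoutError("consumers did not report in time")

# Stubbed example: two consumers, each reporting 50000 consumed rows.
fake_table = [("ts0", 0, 500, 50000, 0), ("ts1", 1, 500, 50000, 0)]
print(wait_for_results(lambda: fake_table, expect_rows=2, interval=0))
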
@ -0,0 +1,347 @@

import taos
import sys
import time
import socket
import os
import platform
import threading

from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import *

class TDTestCase:
    hostname = socket.gethostname()
    #rpcDebugFlagVal = '143'
    #clientCfgDict = {'serverPort': '', 'firstEp': '', 'secondEp':'', 'rpcDebugFlag':'135', 'fqdn':''}
    #clientCfgDict["rpcDebugFlag"] = rpcDebugFlagVal
    #updatecfgDict = {'clientCfg': {}, 'serverPort': '', 'firstEp': '', 'secondEp':'', 'rpcDebugFlag':'135', 'fqdn':''}
    #updatecfgDict["rpcDebugFlag"] = rpcDebugFlagVal
    #print ("===================: ", updatecfgDict)

    def init(self, conn, logSql):
        tdLog.debug(f"start to execute {__file__}")
        tdSql.init(conn.cursor())
        #tdSql.init(conn.cursor(), logSql) # output sql.txt file

    def getBuildPath(self):
        selfPath = os.path.dirname(os.path.realpath(__file__))

        if ("community" in selfPath):
            projPath = selfPath[:selfPath.find("community")]
        else:
            projPath = selfPath[:selfPath.find("tests")]

        for root, dirs, files in os.walk(projPath):
            if ("taosd" in files or "taosd.exe" in files):
                rootRealPath = os.path.dirname(os.path.realpath(root))
                if ("packaging" not in rootRealPath):
                    buildPath = root[:len(root) - len("/build/bin")]
                    break
        return buildPath

    def newcur(self,cfg,host,port):
        user = "root"
        password = "taosdata"
        con=taos.connect(host=host, user=user, password=password, config=cfg ,port=port)
        cur=con.cursor()
        print(cur)
        return cur

    def initConsumerTable(self,cdbName='cdb'):
        tdLog.info("create consume database, and consume info table, and consume result table")
        tdSql.query("create database if not exists %s vgroups 1"%(cdbName))
        tdSql.query("drop table if exists %s.consumeinfo "%(cdbName))
        tdSql.query("drop table if exists %s.consumeresult "%(cdbName))

        tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName)
        tdSql.query("create table %s.consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)"%cdbName)

    def insertConsumerInfo(self,consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifmanualcommit,cdbName='cdb'):
        sql = "insert into %s.consumeinfo values "%cdbName
        sql += "(now, %d, '%s', '%s', %d, %d, %d)"%(consumerId, topicList, keyList, expectrowcnt, ifcheckdata, ifmanualcommit)
        tdLog.info("consume info sql: %s"%sql)
        tdSql.query(sql)

    def selectConsumeResult(self,expectRows,cdbName='cdb'):
        resultList=[]
        while 1:
            tdSql.query("select * from %s.consumeresult"%cdbName)
            #tdLog.info("row: %d, %l64d, %l64d"%(tdSql.getData(0, 1),tdSql.getData(0, 2),tdSql.getData(0, 3))
            if tdSql.getRows() == expectRows:
                break
            else:
                time.sleep(5)

        for i in range(expectRows):
            tdLog.info ("ts: %s, consume id: %d, consume msgs: %d, consume rows: %d"%(tdSql.getData(i , 0), tdSql.getData(i , 1), tdSql.getData(i , 2), tdSql.getData(i , 3)))
            resultList.append(tdSql.getData(i , 3))

        return resultList

    def startTmqSimProcess(self,buildPath,cfgPath,pollDelay,dbName,showMsg=1,showRow=1,cdbName='cdb',valgrind=0):
        if valgrind == 1:
            logFile = cfgPath + '/../log/valgrind-tmq.log'
            shellCmd = 'nohup valgrind --log-file=' + logFile
            shellCmd += ' --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all --num-callers=20 -v --workaround-gcc296-bugs=yes '

        if (platform.system().lower() == 'windows'):
            shellCmd = 'mintty -h never -w hide ' + buildPath + '\\build\\bin\\tmq_sim.exe -c ' + cfgPath
            shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName)
            shellCmd += "> nul 2>&1 &"
        else:
            shellCmd = 'nohup ' + buildPath + '/build/bin/tmq_sim -c ' + cfgPath
            shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName)
            shellCmd += "> /dev/null 2>&1 &"
        tdLog.info(shellCmd)
        os.system(shellCmd)

    def create_tables(self,tsql, dbName,vgroups,stbName,ctbNum,rowsPerTbl):
        tsql.execute("create database if not exists %s vgroups %d"%(dbName, vgroups))
        tsql.execute("use %s" %dbName)
        tsql.execute("create table if not exists %s (ts timestamp, c1 bigint, c2 binary(16)) tags(t1 int)"%stbName)
        pre_create = "create table"
        sql = pre_create
        #tdLog.debug("doing create one stable %s and %d child table in %s ..." %(stbname, count ,dbname))
        for i in range(ctbNum):
            sql += " %s_%d using %s tags(%d)"%(stbName,i,stbName,i+1)
            if (i > 0) and (i%100 == 0):
                tsql.execute(sql)
                sql = pre_create
        if sql != pre_create:
            tsql.execute(sql)

        event.set()
        tdLog.debug("complete to create database[%s], stable[%s] and %d child tables" %(dbName, stbName, ctbNum))
        return

    def insert_data(self,tsql,dbName,stbName,ctbNum,rowsPerTbl,batchNum,startTs):
        tdLog.debug("start to insert data ............")
        tsql.execute("use %s" %dbName)
        pre_insert = "insert into "
        sql = pre_insert

        t = time.time()
        startTs = int(round(t * 1000))
        #tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows))
        for i in range(ctbNum):
            sql += " %s_%d values "%(stbName,i)
            for j in range(rowsPerTbl):
                sql += "(%d, %d, 'tmqrow_%d') "%(startTs + j, j, j)
                if (j > 0) and ((j%batchNum == 0) or (j == rowsPerTbl - 1)):
                    tsql.execute(sql)
                    if j < rowsPerTbl - 1:
                        sql = "insert into %s_%d values " %(stbName,i)
                    else:
                        sql = "insert into "
        #end sql
        if sql != pre_insert:
            #print("insert sql:%s"%sql)
            tsql.execute(sql)
        tdLog.debug("insert data ............ [OK]")
        return

    def prepareEnv(self, **parameterDict):
        print ("input parameters:")
        print (parameterDict)
        # create new connector for my thread
        tsql=self.newcur(parameterDict['cfg'], 'localhost', 6030)
        self.create_tables(tsql,\
                           parameterDict["dbName"],\
                           parameterDict["vgroups"],\
                           parameterDict["stbName"],\
                           parameterDict["ctbNum"],\
                           parameterDict["rowsPerTbl"])

        self.insert_data(tsql,\
                         parameterDict["dbName"],\
                         parameterDict["stbName"],\
                         parameterDict["ctbNum"],\
                         parameterDict["rowsPerTbl"],\
                         parameterDict["batchNum"],\
                         parameterDict["startTs"])
        return

    def tmqCase8(self, cfgPath, buildPath):
        tdLog.printNoPrefix("======== test case 8: Produce while one consumer subscribes to one db, including 1 stb")
        tdLog.info("step 1: create database, stb, ctb and insert data")
        # create and start thread
        parameterDict = {'cfg': '', \
                         'dbName': 'db8', \
                         'vgroups': 4, \
                         'stbName': 'stb', \
                         'ctbNum': 10, \
                         'rowsPerTbl': 10000, \
                         'batchNum': 100, \
                         'startTs': 1640966400000} # 2022-01-01 00:00:00.000
        parameterDict['cfg'] = cfgPath

        self.initConsumerTable()

        tdSql.execute("create database if not exists %s vgroups %d" %(parameterDict['dbName'], parameterDict['vgroups']))

        prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict)
        prepareEnvThread.start()

        tdLog.info("create topics from db")
        topicName1 = 'topic_db1'

        tdSql.execute("create topic %s as database %s" %(topicName1, parameterDict['dbName']))
        consumerId = 0
        expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] / 2
        topicList = topicName1
        ifcheckdata = 0
        ifManualCommit = 0
        keyList = 'group.id:cgrp1,\
                   enable.auto.commit:false,\
                   auto.commit.interval.ms:6000,\
                   auto.offset.reset:earliest'
        self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)

        event.wait()

        tdLog.info("start consume processor")
        pollDelay = 100
        showMsg = 1
        showRow = 1
        self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow)

        # wait for data ready
        prepareEnvThread.join()

        tdLog.info("insert process end, and start to check consume result")
        expectRows = 1
        resultList = self.selectConsumeResult(expectRows)
        totalConsumeRows = 0
        for i in range(expectRows):
            totalConsumeRows += resultList[i]

        if totalConsumeRows != expectrowcnt:
            tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt))
            tdLog.exit("tmq consume rows error!")

        tdLog.info("again start consume processor")
        self.initConsumerTable()
        expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"]
        self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
        self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow)
        expectRows = 1
        resultList = self.selectConsumeResult(expectRows)
        totalConsumeRows = 0
        for i in range(expectRows):
            totalConsumeRows += resultList[i]

        if totalConsumeRows != expectrowcnt:
            tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt))
            tdLog.exit("tmq consume rows error!")

        tdSql.query("drop topic %s"%topicName1)

        tdLog.printNoPrefix("======== test case 8 end ...... ")

    def tmqCase9(self, cfgPath, buildPath):
        tdLog.printNoPrefix("======== test case 9: Produce while one consumer subscribes to one db, including 1 stb")
        tdLog.info("step 1: create database, stb, ctb and insert data")
        # create and start thread
        parameterDict = {'cfg': '', \
                         'dbName': 'db9', \
                         'vgroups': 4, \
                         'stbName': 'stb', \
                         'ctbNum': 10, \
                         'rowsPerTbl': 10000, \
                         'batchNum': 100, \
                         'startTs': 1640966400000} # 2022-01-01 00:00:00.000
        parameterDict['cfg'] = cfgPath

        self.initConsumerTable()

        tdSql.execute("create database if not exists %s vgroups %d" %(parameterDict['dbName'], parameterDict['vgroups']))

        prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict)
        prepareEnvThread.start()

        tdLog.info("create topics from db")
        topicName1 = 'topic_db1'

        tdSql.execute("create topic %s as database %s" %(topicName1, parameterDict['dbName']))
        consumerId = 0
        expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] / 2
        topicList = topicName1
        ifcheckdata = 0
        ifManualCommit = 1
        keyList = 'group.id:cgrp1,\
                   enable.auto.commit:false,\
                   auto.commit.interval.ms:6000,\
                   auto.offset.reset:earliest'
        self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)

        event.wait()

        tdLog.info("start consume processor")
        pollDelay = 100
        showMsg = 1
        showRow = 1
        self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow)

        # wait for data ready
        prepareEnvThread.join()

        tdLog.info("insert process end, and start to check consume result")
        expectRows = 1
        resultList = self.selectConsumeResult(expectRows)
        totalConsumeRows = 0
        for i in range(expectRows):
            totalConsumeRows += resultList[i]

        tdSql.query("select count(*) from %s.%s" %(parameterDict['dbName'], parameterDict['stbName']))
        countOfStb = tdSql.getData(0,0)
        print ("====total rows of stb: %d"%countOfStb)

        tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt))
        if totalConsumeRows < expectrowcnt:
            tdLog.exit("tmq consume rows error!")

        tdLog.info("again start consume processor")
        self.initConsumerTable()
        expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"]
        self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
        self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow)
        expectRows = 1
        resultList = self.selectConsumeResult(expectRows)
        totalConsumeRows2 = 0
        for i in range(expectRows):
            totalConsumeRows2 += resultList[i]

        tdLog.info("firstly act consume rows: %d"%(totalConsumeRows))
        tdLog.info("secondly act consume rows: %d, expect consume rows: %d"%(totalConsumeRows2, expectrowcnt))
        if totalConsumeRows + totalConsumeRows2 != expectrowcnt:
            tdLog.exit("tmq consume rows error!")

        tdSql.query("drop topic %s"%topicName1)

        tdLog.printNoPrefix("======== test case 9 end ...... ")

    def run(self):
        tdSql.prepare()

        buildPath = self.getBuildPath()
        if (buildPath == ""):
            tdLog.exit("taosd not found!")
        else:
            tdLog.info("taosd found in %s" % buildPath)
        cfgPath = buildPath + "/../sim/psim/cfg"
        tdLog.info("cfgPath: %s" % cfgPath)

        self.tmqCase8(cfgPath, buildPath)
        self.tmqCase9(cfgPath, buildPath)

    def stop(self):
        tdSql.close()
        tdLog.success(f"{__file__} successfully executed")

event = threading.Event()

tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())

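The insert_data helper above batches rows into multi-row INSERT statements, flushing every batchNum rows. A compact standalone sketch of that batching strategy, collecting the generated statements into a list instead of executing them (the function name and parameters are illustrative):

def build_insert_batches(stb_name, ctb_num, rows_per_tbl, batch_num, start_ts):
    # Accumulate "insert into tb values (...)(...)" statements, one flush per batch.
    statements = []
    for i in range(ctb_num):
        sql = f"insert into {stb_name}_{i} values "
        for j in range(rows_per_tbl):
            sql += f"({start_ts + j}, {j}, 'tmqrow_{j}') "
            if j > 0 and (j % batch_num == 0 or j == rows_per_tbl - 1):
                statements.append(sql)  # flush the current batch
                sql = f"insert into {stb_name}_{i} values "
        # a trailing partial batch is flushed by the j == rows_per_tbl - 1 case
    return statements

batches = build_insert_batches("stb", ctb_num=2, rows_per_tbl=10, batch_num=4, start_ts=1640966400000)
print(len(batches), batches[0][:60])
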
@ -0,0 +1,337 @@

import taos
import sys
import time
import socket
import os
import platform
import threading

from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import *

class TDTestCase:
    hostname = socket.gethostname()
    #rpcDebugFlagVal = '143'
    #clientCfgDict = {'serverPort': '', 'firstEp': '', 'secondEp':'', 'rpcDebugFlag':'135', 'fqdn':''}
    #clientCfgDict["rpcDebugFlag"] = rpcDebugFlagVal
    #updatecfgDict = {'clientCfg': {}, 'serverPort': '', 'firstEp': '', 'secondEp':'', 'rpcDebugFlag':'135', 'fqdn':''}
    #updatecfgDict["rpcDebugFlag"] = rpcDebugFlagVal
    #print ("===================: ", updatecfgDict)

    def init(self, conn, logSql):
        tdLog.debug(f"start to execute {__file__}")
        tdSql.init(conn.cursor())
        #tdSql.init(conn.cursor(), logSql) # output sql.txt file

    def getBuildPath(self):
        selfPath = os.path.dirname(os.path.realpath(__file__))

        if ("community" in selfPath):
            projPath = selfPath[:selfPath.find("community")]
        else:
            projPath = selfPath[:selfPath.find("tests")]

        for root, dirs, files in os.walk(projPath):
            if ("taosd" in files or "taosd.exe" in files):
                rootRealPath = os.path.dirname(os.path.realpath(root))
                if ("packaging" not in rootRealPath):
                    buildPath = root[:len(root) - len("/build/bin")]
                    break
        return buildPath

    def newcur(self,cfg,host,port):
        user = "root"
        password = "taosdata"
        con=taos.connect(host=host, user=user, password=password, config=cfg ,port=port)
        cur=con.cursor()
        print(cur)
        return cur

    def initConsumerTable(self,cdbName='cdb'):
        tdLog.info("create consume database, and consume info table, and consume result table")
        tdSql.query("create database if not exists %s vgroups 1"%(cdbName))
        tdSql.query("drop table if exists %s.consumeinfo "%(cdbName))
        tdSql.query("drop table if exists %s.consumeresult "%(cdbName))

        tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName)
        tdSql.query("create table %s.consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)"%cdbName)

    def insertConsumerInfo(self,consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifmanualcommit,cdbName='cdb'):
        sql = "insert into %s.consumeinfo values "%cdbName
        sql += "(now, %d, '%s', '%s', %d, %d, %d)"%(consumerId, topicList, keyList, expectrowcnt, ifcheckdata, ifmanualcommit)
        tdLog.info("consume info sql: %s"%sql)
        tdSql.query(sql)

    def selectConsumeResult(self,expectRows,cdbName='cdb'):
        resultList=[]
        while 1:
            tdSql.query("select * from %s.consumeresult"%cdbName)
            #tdLog.info("row: %d, %l64d, %l64d"%(tdSql.getData(0, 1),tdSql.getData(0, 2),tdSql.getData(0, 3))
            if tdSql.getRows() == expectRows:
                break
            else:
                time.sleep(5)

        for i in range(expectRows):
            tdLog.info ("ts: %s, consume id: %d, consume msgs: %d, consume rows: %d"%(tdSql.getData(i , 0), tdSql.getData(i , 1), tdSql.getData(i , 2), tdSql.getData(i , 3)))
            resultList.append(tdSql.getData(i , 3))

        return resultList

    def startTmqSimProcess(self,buildPath,cfgPath,pollDelay,dbName,showMsg=1,showRow=1,cdbName='cdb',valgrind=0):
        if valgrind == 1:
            logFile = cfgPath + '/../log/valgrind-tmq.log'
            shellCmd = 'nohup valgrind --log-file=' + logFile
            shellCmd += ' --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all --num-callers=20 -v --workaround-gcc296-bugs=yes '

        if (platform.system().lower() == 'windows'):
            shellCmd = 'mintty -h never -w hide ' + buildPath + '\\build\\bin\\tmq_sim.exe -c ' + cfgPath
            shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName)
            shellCmd += "> nul 2>&1 &"
        else:
            shellCmd = 'nohup ' + buildPath + '/build/bin/tmq_sim -c ' + cfgPath
            shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName)
            shellCmd += "> /dev/null 2>&1 &"
        tdLog.info(shellCmd)
        os.system(shellCmd)

    def create_tables(self,tsql, dbName,vgroups,stbName,ctbNum,rowsPerTbl):
        tsql.execute("create database if not exists %s vgroups %d"%(dbName, vgroups))
        tsql.execute("use %s" %dbName)
        tsql.execute("create table if not exists %s (ts timestamp, c1 bigint, c2 binary(16)) tags(t1 int)"%stbName)
        pre_create = "create table"
        sql = pre_create
        #tdLog.debug("doing create one stable %s and %d child table in %s ..." %(stbname, count ,dbname))
        for i in range(ctbNum):
            sql += " %s_%d using %s tags(%d)"%(stbName,i,stbName,i+1)
            if (i > 0) and (i%100 == 0):
                tsql.execute(sql)
                sql = pre_create
        if sql != pre_create:
            tsql.execute(sql)

        event.set()
        tdLog.debug("complete to create database[%s], stable[%s] and %d child tables" %(dbName, stbName, ctbNum))
        return

    def insert_data(self,tsql,dbName,stbName,ctbNum,rowsPerTbl,batchNum,startTs):
        tdLog.debug("start to insert data ............")
        tsql.execute("use %s" %dbName)
        pre_insert = "insert into "
        sql = pre_insert

        t = time.time()
        startTs = int(round(t * 1000))
        #tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows))
        for i in range(ctbNum):
            sql += " %s_%d values "%(stbName,i)
            for j in range(rowsPerTbl):
                sql += "(%d, %d, 'tmqrow_%d') "%(startTs + j, j, j)
                if (j > 0) and ((j%batchNum == 0) or (j == rowsPerTbl - 1)):
                    tsql.execute(sql)
                    if j < rowsPerTbl - 1:
                        sql = "insert into %s_%d values " %(stbName,i)
                    else:
                        sql = "insert into "
        #end sql
        if sql != pre_insert:
            #print("insert sql:%s"%sql)
            tsql.execute(sql)
        tdLog.debug("insert data ............ [OK]")
        return

    def prepareEnv(self, **parameterDict):
        print ("input parameters:")
        print (parameterDict)
        # create new connector for my thread
        tsql=self.newcur(parameterDict['cfg'], 'localhost', 6030)
        self.create_tables(tsql,\
                           parameterDict["dbName"],\
                           parameterDict["vgroups"],\
                           parameterDict["stbName"],\
                           parameterDict["ctbNum"],\
                           parameterDict["rowsPerTbl"])

        self.insert_data(tsql,\
                         parameterDict["dbName"],\
                         parameterDict["stbName"],\
                         parameterDict["ctbNum"],\
                         parameterDict["rowsPerTbl"],\
                         parameterDict["batchNum"],\
                         parameterDict["startTs"])
        return

    def tmqCase10(self, cfgPath, buildPath):
        tdLog.printNoPrefix("======== test case 10: Produce while one consumer subscribes to one db, including 1 stb")
        tdLog.info("step 1: create database, stb, ctb and insert data")
        # create and start thread
        parameterDict = {'cfg': '', \
                         'dbName': 'db10', \
                         'vgroups': 4, \
                         'stbName': 'stb', \
                         'ctbNum': 10, \
                         'rowsPerTbl': 10000, \
                         'batchNum': 100, \
                         'startTs': 1640966400000} # 2022-01-01 00:00:00.000
        parameterDict['cfg'] = cfgPath

        self.initConsumerTable()

        tdSql.execute("create database if not exists %s vgroups %d" %(parameterDict['dbName'], parameterDict['vgroups']))

        prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict)
        prepareEnvThread.start()

        tdLog.info("create topics from db")
        topicName1 = 'topic_db1'

        tdSql.execute("create topic %s as database %s" %(topicName1, parameterDict['dbName']))
        consumerId = 0
        expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"]
        topicList = topicName1
        ifcheckdata = 0
        ifManualCommit = 1
        keyList = 'group.id:cgrp1,\
                   enable.auto.commit:false,\
                   auto.commit.interval.ms:6000,\
                   auto.offset.reset:earliest'
        self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)

        event.wait()

        tdLog.info("start consume processor")
        pollDelay = 100
        showMsg = 1
        showRow = 1
        self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow)

        time.sleep(2)
        tdLog.info("pkill consume processor")
        if (platform.system().lower() == 'windows'):
            os.system("TASKKILL /F /IM tmq_sim.exe")
        else:
            os.system('pkill tmq_sim')
        expectRows = 0
        resultList = self.selectConsumeResult(expectRows)

        # wait for data ready
        prepareEnvThread.join()
        tdLog.info("insert process end, and start to check consume result")

        tdLog.info("again start consume processor")
        self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow)

        expectRows = 1
        resultList = self.selectConsumeResult(expectRows)
        totalConsumeRows = 0
        for i in range(expectRows):
            totalConsumeRows += resultList[i]

        if totalConsumeRows != expectrowcnt:
            tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt))
            tdLog.exit("tmq consume rows error!")

        time.sleep(15)
        tdSql.query("drop topic %s"%topicName1)

        tdLog.printNoPrefix("======== test case 10 end ...... ")

    def tmqCase11(self, cfgPath, buildPath):
        tdLog.printNoPrefix("======== test case 11: Produce while one consumer subscribes to one db, including 1 stb")
        tdLog.info("step 1: create database, stb, ctb and insert data")
        # create and start thread
        parameterDict = {'cfg': '', \
                         'dbName': 'db11', \
                         'vgroups': 4, \
                         'stbName': 'stb', \
                         'ctbNum': 10, \
                         'rowsPerTbl': 10000, \
                         'batchNum': 100, \
                         'startTs': 1640966400000} # 2022-01-01 00:00:00.000
        parameterDict['cfg'] = cfgPath

        self.initConsumerTable()

        tdSql.execute("create database if not exists %s vgroups %d" %(parameterDict['dbName'], parameterDict['vgroups']))

        prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict)
        prepareEnvThread.start()

        tdLog.info("create topics from db")
        topicName1 = 'topic_db1'

        tdSql.execute("create topic %s as database %s" %(topicName1, parameterDict['dbName']))
        consumerId = 0
        expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"]
        topicList = topicName1
        ifcheckdata = 0
        ifManualCommit = 1
        keyList = 'group.id:cgrp1,\
                   enable.auto.commit:true,\
                   auto.commit.interval.ms:1000,\
                   auto.offset.reset:earliest'
        self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)

        event.wait()

        tdLog.info("start consume processor")
        pollDelay = 20
        showMsg = 1
        showRow = 1
        self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow)

        time.sleep(6)
        tdLog.info("pkill consume processor")
        if (platform.system().lower() == 'windows'):
            os.system("TASKKILL /F /IM tmq_sim.exe")
        else:
            os.system('pkill tmq_sim')
        expectRows = 0
        resultList = self.selectConsumeResult(expectRows)

        # wait for data ready
        prepareEnvThread.join()
        tdLog.info("insert process end, and start to check consume result")

        tdLog.info("again start consume processor")
        self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow)

        expectRows = 1
        resultList = self.selectConsumeResult(expectRows)
        totalConsumeRows = 0
        for i in range(expectRows):
            totalConsumeRows += resultList[i]

        if totalConsumeRows >= expectrowcnt or totalConsumeRows <= 0:
            tdLog.info("act consume rows: %d, expect consume rows between 0 and %d"%(totalConsumeRows, expectrowcnt))
            tdLog.exit("tmq consume rows error!")

        time.sleep(15)
        tdSql.query("drop topic %s"%topicName1)

        tdLog.printNoPrefix("======== test case 11 end ...... ")

    def run(self):
        tdSql.prepare()

        buildPath = self.getBuildPath()
        if (buildPath == ""):
            tdLog.exit("taosd not found!")
        else:
            tdLog.info("taosd found in %s" % buildPath)
        cfgPath = buildPath + "/../sim/psim/cfg"
        tdLog.info("cfgPath: %s" % cfgPath)

        self.tmqCase10(cfgPath, buildPath)
        self.tmqCase11(cfgPath, buildPath)

    def stop(self):
        tdSql.close()
        tdLog.success(f"{__file__} successfully executed")

event = threading.Event()

tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())

@ -98,6 +98,12 @@ python3 ./test.py -f 2-query/stateduration.py
python3 ./test.py -f 2-query/function_stateduration.py
python3 ./test.py -f 2-query/statecount.py
python3 ./test.py -f 2-query/tail.py
python3 ./test.py -f 2-query/distribute_agg_count.py
python3 ./test.py -f 2-query/distribute_agg_max.py
python3 ./test.py -f 2-query/distribute_agg_min.py
python3 ./test.py -f 2-query/distribute_agg_sum.py
python3 ./test.py -f 2-query/distribute_agg_spread.py
python3 ./test.py -f 2-query/distribute_agg_apercentile.py

python3 ./test.py -f 6-cluster/5dnode1mnode.py
python3 ./test.py -f 6-cluster/5dnode2mnode.py

@ -855,8 +855,7 @@ void shellGetGrantInfo() {
    if (code == TSDB_CODE_OPS_NOT_SUPPORT) {
      fprintf(stdout, "Server is Community Edition, %s\n\n", sinfo);
    } else {
      fprintf(stderr, "Failed to check Server Edition, Reason:0x%04x:%s\n\n", taos_errno(shell.conn),
              taos_errstr(shell.conn));
      fprintf(stderr, "Failed to check Server Edition, Reason:0x%04x:%s\n\n", code, taos_errstr(tres));
    }
    return;
  }
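The shell change above reports the failure using the error code and result handle of the query that actually failed (code, tres) rather than the connection-level error, which may already have been overwritten by a later call. A hedged Python sketch of the same idea using the taospy connector (the helper name is hypothetical, and catching a broad Exception is an assumption about how the connector surfaces errors):

import taos  # TDengine Python connector

def get_grant_info(conn):
    # Query the server edition; report the error from this statement,
    # not whatever error the connection last recorded.
    try:
        result = conn.query("show grants")
        return result.fetch_all()
    except Exception as err:
        # The exception carries the failing statement's own code and message,
        # mirroring the switch from shell.conn to (code, tres) above.
        print(f"Failed to check Server Edition, Reason: {err}")
        return None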