Merge branch '3.0' into 3.0test/jcy

This commit is contained in:
commit 2e1d69ce8c
@@ -52,6 +52,7 @@ SMnode *mndOpen(const char *path, const SMnodeOpt *pOption);
 * @param pMnode The mnode object to close.
 */
 void mndClose(SMnode *pMnode);
+void mndPreClose(SMnode *pMnode);

 /**
 * @brief Start mnode
@@ -69,10 +69,9 @@ typedef void (*_ref_fn_t)(const void *pObj);
 #define T_REF_VAL_GET(x) (x)->_ref.val

 // single writer multiple reader lock
-typedef volatile int64_t SRWLatch;
+typedef volatile int32_t SRWLatch;

 void taosInitRWLatch(SRWLatch *pLatch);
-void taosInitReentrantRWLatch(SRWLatch *pLatch);
 void taosWLockLatch(SRWLatch *pLatch);
 void taosWUnLockLatch(SRWLatch *pLatch);
 void taosRLockLatch(SRWLatch *pLatch);
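For context only, a hedged sketch of the latch shape this type change assumes: a single 32-bit word with the writer flag in bit 30 (TD_RWLATCH_WRITE_FLAG, which appears later in this diff) and the reader count in the low bits. It is a standalone illustration using GCC/Clang atomic builtins, not the taosWLockLatch implementation itself, and all Demo* names are placeholders.

/* Minimal 32-bit write-preferring latch sketch (illustrative, not the commit's code). */
#include <stdint.h>
#include <stdio.h>
#include <sched.h>

typedef volatile int32_t DemoLatch;      /* mirrors: typedef volatile int32_t SRWLatch; */
#define DEMO_WRITE_FLAG 0x40000000       /* mirrors TD_RWLATCH_WRITE_FLAG */

static void demoRLock(DemoLatch *l) {
  for (;;) {
    int32_t v = __atomic_load_n(l, __ATOMIC_SEQ_CST);
    if (v & DEMO_WRITE_FLAG) { sched_yield(); continue; }           /* writer active, wait */
    if (__atomic_compare_exchange_n(l, &v, v + 1, 0,
                                    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) return;
  }
}

static void demoRUnlock(DemoLatch *l) { __atomic_fetch_sub(l, 1, __ATOMIC_SEQ_CST); }

static void demoWLock(DemoLatch *l) {
  /* set the write flag first, then wait until the reader count drains to zero */
  for (;;) {
    int32_t v = __atomic_load_n(l, __ATOMIC_SEQ_CST);
    if (v & DEMO_WRITE_FLAG) { sched_yield(); continue; }
    if (__atomic_compare_exchange_n(l, &v, v | DEMO_WRITE_FLAG, 0,
                                    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) break;
  }
  while (__atomic_load_n(l, __ATOMIC_SEQ_CST) != DEMO_WRITE_FLAG) sched_yield();
}

static void demoWUnlock(DemoLatch *l) { __atomic_store_n(l, 0, __ATOMIC_SEQ_CST); }

int main(void) {
  DemoLatch latch = 0;
  demoRLock(&latch);  demoRUnlock(&latch);
  demoWLock(&latch);  demoWUnlock(&latch);
  printf("final latch value: %d\n", (int)latch);   /* prints 0 */
  return 0;
}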
@@ -169,6 +169,7 @@ typedef struct SReqResultInfo {
 uint32_t numOfRows;
 uint64_t totalRows;
 uint32_t current;
+bool localResultFetched;
 bool completed;
 int32_t precision;
 bool convertUcs4;
@@ -1905,6 +1905,10 @@ int32_t appendTbToReq(SArray* pList, int32_t pos1, int32_t len1, int32_t pos2, i
 tbLen = len1;
 }

+if (dbLen <= 0 || tbLen <= 0) {
+return -1;
+}
+
 if (tNameSetDbName(&name, acctId, dbName, dbLen)) {
 return -1;
 }
@@ -852,23 +852,33 @@ void taos_fetch_rows_a(TAOS_RES *res, __taos_async_fn_t fp, void *param) {
 }

 // all data has returned to App already, no need to try again
-if ((pResultInfo->pData == NULL || pResultInfo->current >= pResultInfo->numOfRows) && pResultInfo->completed) {
+if (pResultInfo->completed && (pRequest->body.queryJob != 0)) {
 pResultInfo->numOfRows = 0;
 pRequest->body.fetchFp(param, pRequest, pResultInfo->numOfRows);
 return;
 }

 // it is a local executed query, no need to do async fetch
-if (pResultInfo->current < pResultInfo->numOfRows && pRequest->body.queryJob == 0) {
+if (pRequest->body.queryJob == 0) {
+ASSERT(pResultInfo->completed && pResultInfo->numOfRows >= 0);
+if (pResultInfo->localResultFetched) {
+pResultInfo->numOfRows = 0;
+pResultInfo->current = 0;
 pRequest->body.fetchFp(param, pRequest, pResultInfo->numOfRows);
+} else {
+pResultInfo->localResultFetched = true;
+pRequest->body.fetchFp(param, pRequest, pResultInfo->numOfRows);
+}
 return;
 }

 SSchedulerReq req = {
 .syncReq = false,
 .fetchFp = fetchCallback,
 .cbParam = pRequest,
 };

 schedulerFetchRows(pRequest->body.queryJob, &req);
 }
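For orientation, a minimal client-side sketch of the async fetch loop this function serves: keep re-arming taos_fetch_rows_a() until the callback reports zero rows. With this change, a locally executed statement (queryJob == 0) delivers its cached rows on the first callback and 0 on the next one via the new localResultFetched flag. The connection parameters and the getchar() wait are placeholders, not part of the commit.

/* Illustrative async fetch loop (sketch, not the commit's code). */
#include <stdio.h>
#include <taos.h>

static void fetch_cb(void *param, TAOS_RES *res, int numOfRows) {
  if (numOfRows > 0) {
    printf("received a batch of %d rows\n", numOfRows);
    taos_fetch_rows_a(res, fetch_cb, param);  /* ask for the next batch */
  } else {
    taos_free_result(res);                    /* 0 rows: the result set is complete */
  }
}

static void query_cb(void *param, TAOS_RES *res, int code) {
  if (code == 0) {
    taos_fetch_rows_a(res, fetch_cb, param);
  } else {
    taos_free_result(res);
  }
}

int main(void) {
  TAOS *conn = taos_connect("localhost", "root", "taosdata", NULL, 0);
  if (conn == NULL) return 1;
  taos_query_a(conn, "show databases", query_cb, NULL);
  getchar();                                  /* crude wait for the async callbacks */
  taos_close(conn);
  return 0;
}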
@@ -880,10 +890,10 @@ void taos_fetch_raw_block_a(TAOS_RES *res, __taos_async_fn_t fp, void *param) {
 SReqResultInfo *pResultInfo = &pRequest->body.resInfo;

 // set the current block is all consumed
-pResultInfo->current = pResultInfo->numOfRows;
 pResultInfo->convertUcs4 = false;

-taos_fetch_rows_a(res, fp, param);
+// it is a local executed query, no need to do async fetch
+taos_fetch_rows_a(pRequest, fp, param);
 }

 const void *taos_get_raw_block(TAOS_RES *res) {
@@ -114,7 +114,7 @@ int32_t tsMinSlidingTime = 10;
 // the maxinum number of distict query result
 int32_t tsMaxNumOfDistinctResults = 1000 * 10000;

-// 1 us for interval time range, changed accordingly
+// 1 database precision unit for interval time range, changed accordingly
 int32_t tsMinIntervalTime = 1;

 // 20sec, the maximum value of stream computing delay, changed accordingly
@@ -150,6 +150,7 @@ static void mmStop(SMnodeMgmt *pMgmt) {
 dDebug("mnode-mgmt start to stop");
 taosThreadRwlockWrlock(&pMgmt->lock);
 pMgmt->stopped = 1;
+mndPreClose(pMgmt->pMnode);
 taosThreadRwlockUnlock(&pMgmt->lock);

 mndStop(pMgmt->pMnode);
@@ -75,11 +75,13 @@ int32_t vmOpenVnode(SVnodeMgmt *pMgmt, SWrapperCfg *pCfg, SVnode *pImpl) {
 void vmCloseVnode(SVnodeMgmt *pMgmt, SVnodeObj *pVnode) {
 char path[TSDB_FILENAME_LEN] = {0};

+vnodePreClose(pVnode->pImpl);
+
 taosThreadRwlockWrlock(&pMgmt->lock);
 taosHashRemove(pMgmt->hash, &pVnode->vgId, sizeof(int32_t));
 taosThreadRwlockUnlock(&pMgmt->lock);

 vmReleaseVnode(pMgmt, pVnode);

 while (pVnode->refCount > 0) taosMsleep(10);
 dTrace("vgId:%d, wait for vnode queue is empty", pVnode->vgId);
@@ -366,6 +366,12 @@ SMnode *mndOpen(const char *path, const SMnodeOpt *pOption) {
 return pMnode;
 }

+void mndPreClose(SMnode *pMnode) {
+if (pMnode != NULL) {
+syncLeaderTransfer(pMnode->syncMgmt.sync);
+}
+}
+
 void mndClose(SMnode *pMnode) {
 if (pMnode != NULL) {
 mDebug("start to close mnode");
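A small sketch of the shutdown ordering the new pre-close hooks assume (hand leadership to a peer while the node can still talk to its group, then stop, then tear down — the same sequence mmStop() and vmCloseVnode() follow elsewhere in this diff); every Demo* name below is an illustrative placeholder, not part of the commit.

/* Shutdown-ordering sketch (illustrative only). */
typedef struct DemoNode { int sync; } DemoNode;

static void demoLeaderTransfer(int sync) { (void)sync; /* propose a leader-transfer entry */ }

static void demoPreClose(DemoNode *node) {
  if (node != NULL) {
    demoLeaderTransfer(node->sync);  /* mirrors mndPreClose()/vnodePreClose() calling syncLeaderTransfer() */
  }
}

static void demoStop(DemoNode *node)  { (void)node; /* stop queues, wait for references to drain */ }
static void demoClose(DemoNode *node) { (void)node; /* free resources */ }

static void demoShutdown(DemoNode *node) {
  demoPreClose(node);   /* 1. move leadership away first */
  demoStop(node);       /* 2. stop accepting work */
  demoClose(node);      /* 3. tear down */
}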
@@ -199,6 +199,7 @@ int32_t mndInitSync(SMnode *pMnode) {
 }

 // decrease election timer
+setPingTimerMS(pMgmt->sync, 5000);
 setElectTimerMS(pMgmt->sync, 600);
 setHeartbeatTimerMS(pMgmt->sync, 300);
@@ -51,6 +51,7 @@ void vnodeCleanup();
 int32_t vnodeCreate(const char *path, SVnodeCfg *pCfg, STfs *pTfs);
 void vnodeDestroy(const char *path, STfs *pTfs);
 SVnode *vnodeOpen(const char *path, STfs *pTfs, SMsgCb msgCb);
+void vnodePreClose(SVnode *pVnode);
 void vnodeClose(SVnode *pVnode);

 int32_t vnodeStart(SVnode *pVnode);
@@ -175,6 +175,12 @@ _err:
 return NULL;
 }

+void vnodePreClose(SVnode *pVnode) {
+if (pVnode) {
+syncLeaderTransfer(pVnode->sync);
+}
+}
+
 void vnodeClose(SVnode *pVnode) {
 if (pVnode) {
 vnodeCommit(pVnode);
@@ -569,7 +569,7 @@ int32_t vnodeSyncOpen(SVnode *pVnode, char *path) {
 return -1;
 }

-setPingTimerMS(pVnode->sync, 3000);
+setPingTimerMS(pVnode->sync, 5000);
 setElectTimerMS(pVnode->sync, 500);
 setHeartbeatTimerMS(pVnode->sync, 100);
 return 0;
@@ -482,33 +482,33 @@ typedef struct SCtgOperation {

 #define CTG_LOCK(type, _lock) do { \
 if (CTG_READ == (type)) { \
-assert(atomic_load_64((_lock)) >= 0); \
+assert(atomic_load_32((_lock)) >= 0); \
-CTG_LOCK_DEBUG("CTG RLOCK%p:%" PRIx64 ", %s:%d B", (_lock), atomic_load_64(_lock), __FILE__, __LINE__); \
+CTG_LOCK_DEBUG("CTG RLOCK%p:%d, %s:%d B", (_lock), atomic_load_32(_lock), __FILE__, __LINE__); \
 taosRLockLatch(_lock); \
-CTG_LOCK_DEBUG("CTG RLOCK%p:%" PRIx64 ", %s:%d E", (_lock), atomic_load_64(_lock), __FILE__, __LINE__); \
+CTG_LOCK_DEBUG("CTG RLOCK%p:%d, %s:%d E", (_lock), atomic_load_32(_lock), __FILE__, __LINE__); \
-assert(atomic_load_64((_lock)) > 0); \
+assert(atomic_load_32((_lock)) > 0); \
 } else { \
-assert(atomic_load_64((_lock)) >= 0); \
+assert(atomic_load_32((_lock)) >= 0); \
-CTG_LOCK_DEBUG("CTG WLOCK%p:%" PRIx64 ", %s:%d B", (_lock), atomic_load_64(_lock), __FILE__, __LINE__); \
+CTG_LOCK_DEBUG("CTG WLOCK%p:%d, %s:%d B", (_lock), atomic_load_32(_lock), __FILE__, __LINE__); \
 taosWLockLatch(_lock); \
-CTG_LOCK_DEBUG("CTG WLOCK%p:%" PRIx64 ", %s:%d E", (_lock), atomic_load_64(_lock), __FILE__, __LINE__); \
+CTG_LOCK_DEBUG("CTG WLOCK%p:%d, %s:%d E", (_lock), atomic_load_32(_lock), __FILE__, __LINE__); \
-assert(atomic_load_64((_lock)) == TD_RWLATCH_WRITE_FLAG_COPY); \
+assert(atomic_load_32((_lock)) == TD_RWLATCH_WRITE_FLAG_COPY); \
 } \
 } while (0)

 #define CTG_UNLOCK(type, _lock) do { \
 if (CTG_READ == (type)) { \
-assert(atomic_load_64((_lock)) > 0); \
+assert(atomic_load_32((_lock)) > 0); \
-CTG_LOCK_DEBUG("CTG RULOCK%p:%" PRIx64 ", %s:%d B", (_lock), atomic_load_64(_lock), __FILE__, __LINE__); \
+CTG_LOCK_DEBUG("CTG RULOCK%p:%d, %s:%d B", (_lock), atomic_load_32(_lock), __FILE__, __LINE__); \
 taosRUnLockLatch(_lock); \
-CTG_LOCK_DEBUG("CTG RULOCK%p:%" PRIx64 ", %s:%d E", (_lock), atomic_load_64(_lock), __FILE__, __LINE__); \
+CTG_LOCK_DEBUG("CTG RULOCK%p:%d, %s:%d E", (_lock), atomic_load_32(_lock), __FILE__, __LINE__); \
-assert(atomic_load_64((_lock)) >= 0); \
+assert(atomic_load_32((_lock)) >= 0); \
 } else { \
-assert(atomic_load_64((_lock)) == TD_RWLATCH_WRITE_FLAG_COPY); \
+assert(atomic_load_32((_lock)) == TD_RWLATCH_WRITE_FLAG_COPY); \
-CTG_LOCK_DEBUG("CTG WULOCK%p:%" PRIx64 ", %s:%d B", (_lock), atomic_load_64(_lock), __FILE__, __LINE__); \
+CTG_LOCK_DEBUG("CTG WULOCK%p:%d, %s:%d B", (_lock), atomic_load_32(_lock), __FILE__, __LINE__); \
 taosWUnLockLatch(_lock); \
-CTG_LOCK_DEBUG("CTG WULOCK%p:%" PRIx64 ", %s:%d E", (_lock), atomic_load_64(_lock), __FILE__, __LINE__); \
+CTG_LOCK_DEBUG("CTG WULOCK%p:%d, %s:%d E", (_lock), atomic_load_32(_lock), __FILE__, __LINE__); \
-assert(atomic_load_64((_lock)) >= 0); \
+assert(atomic_load_32((_lock)) >= 0); \
 } \
 } while (0)
@@ -538,7 +538,7 @@ static int32_t doSetInputDataBlock(SOperatorInfo* pOperator, SqlFunctionCtx* pCt
 return code;
 }

-static int32_t doAggregateImpl(SOperatorInfo* pOperator, TSKEY startTs, SqlFunctionCtx* pCtx) {
+static int32_t doAggregateImpl(SOperatorInfo* pOperator, SqlFunctionCtx* pCtx) {
 for (int32_t k = 0; k < pOperator->exprSupp.numOfExprs; ++k) {
 if (functionNeedToExecute(&pCtx[k])) {
 // todo add a dummy funtion to avoid process check
@@ -2969,25 +2969,10 @@ static int32_t doOpenAggregateOptr(SOperatorInfo* pOperator) {
 // the pDataBlock are always the same one, no need to call this again
 setExecutionContext(pOperator, pOperator->exprSupp.numOfExprs, pBlock->info.groupId, pAggInfo);
 setInputDataBlock(pOperator, pSup->pCtx, pBlock, order, scanFlag, true);
-code = doAggregateImpl(pOperator, 0, pSup->pCtx);
+code = doAggregateImpl(pOperator, pSup->pCtx);
 if (code != 0) {
 longjmp(pTaskInfo->env, code);
 }

-#if 0 // test for encode/decode result info
-if(pOperator->fpSet.encodeResultRow){
-char *result = NULL;
-int32_t length = 0;
-pOperator->fpSet.encodeResultRow(pOperator, &result, &length);
-SAggSupporter* pSup = &pAggInfo->aggSup;
-taosHashClear(pSup->pResultRowHashTable);
-pInfo->resultRowInfo.size = 0;
-pOperator->fpSet.decodeResultRow(pOperator, result);
-if(result){
-taosMemoryFree(result);
-}
-}
-#endif
 }

 closeAllResultRows(&pAggInfo->binfo.resultRowInfo);
@@ -2860,101 +2860,3 @@ _error:
 return NULL;
 }

-static SSDataBlock* doScanLastrow(SOperatorInfo* pOperator) {
-if (pOperator->status == OP_EXEC_DONE) {
-return NULL;
-}
-
-SLastrowScanInfo* pInfo = pOperator->info;
-SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
-
-int32_t size = taosArrayGetSize(pInfo->pTableList);
-if (size == 0) {
-setTaskStatus(pTaskInfo, TASK_COMPLETED);
-return NULL;
-}
-
-// check if it is a group by tbname
-if (size == taosArrayGetSize(pInfo->pTableList)) {
-blockDataCleanup(pInfo->pRes);
-tsdbRetrieveLastRow(pInfo->pLastrowReader, pInfo->pRes, pInfo->pSlotIds);
-return (pInfo->pRes->info.rows == 0) ? NULL : pInfo->pRes;
-} else {
-// todo fetch the result for each group
-}
-
-return pInfo->pRes->info.rows == 0 ? NULL : pInfo->pRes;
-}
-
-static void destroyLastrowScanOperator(void* param, int32_t numOfOutput) {
-SLastrowScanInfo* pInfo = (SLastrowScanInfo*)param;
-blockDataDestroy(pInfo->pRes);
-tsdbLastrowReaderClose(pInfo->pLastrowReader);
-
-taosMemoryFreeClear(param);
-}
-
-SOperatorInfo* createLastrowScanOperator(SLastRowScanPhysiNode* pScanNode, SReadHandle* readHandle, SArray* pTableList,
-SExecTaskInfo* pTaskInfo) {
-SLastrowScanInfo* pInfo = taosMemoryCalloc(1, sizeof(SLastrowScanInfo));
-SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo));
-if (pInfo == NULL || pOperator == NULL) {
-goto _error;
-}
-
-pInfo->pTableList = pTableList;
-pInfo->readHandle = *readHandle;
-pInfo->pRes = createResDataBlock(pScanNode->node.pOutputDataBlockDesc);
-
-int32_t numOfCols = 0;
-pInfo->pColMatchInfo = extractColMatchInfo(pScanNode->pScanCols, pScanNode->node.pOutputDataBlockDesc, &numOfCols,
-COL_MATCH_FROM_COL_ID);
-int32_t* pCols = taosMemoryMalloc(numOfCols * sizeof(int32_t));
-for (int32_t i = 0; i < numOfCols; ++i) {
-SColMatchInfo* pColMatch = taosArrayGet(pInfo->pColMatchInfo, i);
-pCols[i] = pColMatch->colId;
-}
-
-pInfo->pSlotIds = taosMemoryMalloc(numOfCols * sizeof(pInfo->pSlotIds[0]));
-for (int32_t i = 0; i < numOfCols; ++i) {
-SColMatchInfo* pColMatch = taosArrayGet(pInfo->pColMatchInfo, i);
-for (int32_t j = 0; j < pTaskInfo->schemaVer.sw->nCols; ++j) {
-if (pColMatch->colId == pTaskInfo->schemaVer.sw->pSchema[j].colId &&
-pColMatch->colId == PRIMARYKEY_TIMESTAMP_COL_ID) {
-pInfo->pSlotIds[pColMatch->targetSlotId] = -1;
-break;
-}
-
-if (pColMatch->colId == pTaskInfo->schemaVer.sw->pSchema[j].colId) {
-pInfo->pSlotIds[pColMatch->targetSlotId] = j;
-break;
-}
-}
-}
-
-tsdbLastRowReaderOpen(readHandle->vnode, LASTROW_RETRIEVE_TYPE_ALL, pTableList, pCols, numOfCols,
-&pInfo->pLastrowReader);
-taosMemoryFree(pCols);
-
-pOperator->name = "LastrowScanOperator";
-pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_LAST_ROW_SCAN;
-pOperator->blocking = false;
-pOperator->status = OP_NOT_OPENED;
-pOperator->info = pInfo;
-pOperator->pTaskInfo = pTaskInfo;
-pOperator->exprSupp.numOfExprs = taosArrayGetSize(pInfo->pRes->pDataBlock);
-
-initResultSizeInfo(pOperator, 1024);
-blockDataEnsureCapacity(pInfo->pRes, pOperator->resultInfo.capacity);
-
-pOperator->fpSet =
-createOperatorFpSet(operatorDummyOpenFn, doScanLastrow, NULL, NULL, destroyLastrowScanOperator, NULL, NULL, NULL);
-pOperator->cost.openCost = 0;
-return pOperator;
-
-_error:
-pTaskInfo->code = TSDB_CODE_OUT_OF_MEMORY;
-taosMemoryFree(pInfo);
-taosMemoryFree(pOperator);
-return NULL;
-}
@@ -338,6 +338,104 @@ typedef struct SGroupKeyInfo {
 } \
 } while (0)

+#define LIST_ADD_N(_res, _col, _start, _rows, _t, numOfElem) \
+do { \
+_t* d = (_t*)(_col->pData); \
+for (int32_t i = (_start); i < (_rows) + (_start); ++i) { \
+if (((_col)->hasNull) && colDataIsNull_f((_col)->nullbitmap, i)) { \
+continue; \
+}; \
+(_res) += (d)[i]; \
+(numOfElem)++; \
+} \
+} while (0)
+
+#define LIST_SUB_N(_res, _col, _start, _rows, _t, numOfElem) \
+do { \
+_t* d = (_t*)(_col->pData); \
+for (int32_t i = (_start); i < (_rows) + (_start); ++i) { \
+if (((_col)->hasNull) && colDataIsNull_f((_col)->nullbitmap, i)) { \
+continue; \
+}; \
+(_res) -= (d)[i]; \
+(numOfElem)++; \
+} \
+} while (0)
+
+#define LIST_AVG_N(sumT, T) \
+do { \
+T* plist = (T*)pCol->pData; \
+for (int32_t i = start; i < numOfRows + pInput->startRowIndex; ++i) { \
+if (pCol->hasNull && colDataIsNull_f(pCol->nullbitmap, i)) { \
+continue; \
+} \
+\
+numOfElem += 1; \
+pAvgRes->count -= 1; \
+sumT -= plist[i]; \
+} \
+} while (0)
+
+#define LIST_STDDEV_SUB_N(sumT, T) \
+do { \
+T* plist = (T*)pCol->pData; \
+for (int32_t i = start; i < numOfRows + start; ++i) { \
+if (pCol->hasNull && colDataIsNull_f(pCol->nullbitmap, i)) { \
+continue; \
+} \
+numOfElem += 1; \
+pStddevRes->count -= 1; \
+sumT -= plist[i]; \
+pStddevRes->quadraticISum -= plist[i] * plist[i]; \
+} \
+} while (0)
+
+#define LEASTSQR_CAL(p, x, y, index, step) \
+do { \
+(p)[0][0] += (double)(x) * (x); \
+(p)[0][1] += (double)(x); \
+(p)[0][2] += (double)(x) * (y)[index]; \
+(p)[1][2] += (y)[index]; \
+(x) += step; \
+} while (0)
+
+#define STATE_COMP(_op, _lval, _param) STATE_COMP_IMPL(_op, _lval, GET_STATE_VAL(_param))
+
+#define GET_STATE_VAL(param) ((param.nType == TSDB_DATA_TYPE_BIGINT) ? (param.i) : (param.d))
+
+#define STATE_COMP_IMPL(_op, _lval, _rval) \
+do { \
+switch (_op) { \
+case STATE_OPER_LT: \
+return ((_lval) < (_rval)); \
+break; \
+case STATE_OPER_GT: \
+return ((_lval) > (_rval)); \
+break; \
+case STATE_OPER_LE: \
+return ((_lval) <= (_rval)); \
+break; \
+case STATE_OPER_GE: \
+return ((_lval) >= (_rval)); \
+break; \
+case STATE_OPER_NE: \
+return ((_lval) != (_rval)); \
+break; \
+case STATE_OPER_EQ: \
+return ((_lval) == (_rval)); \
+break; \
+default: \
+break; \
+} \
+} while (0)
+
+#define INIT_INTP_POINT(_p, _k, _v) \
+do { \
+(_p).key = (_k); \
+(_p).val = (_v); \
+} while (0)
+
 bool dummyGetEnv(SFunctionNode* UNUSED_PARAM(pFunc), SFuncExecEnv* UNUSED_PARAM(pEnv)) { return true; }

 bool dummyInit(SqlFunctionCtx* UNUSED_PARAM(pCtx), SResultRowEntryInfo* UNUSED_PARAM(pResultInfo)) { return true; }
@@ -499,30 +597,6 @@ int32_t combineFunction(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx) {
 return TSDB_CODE_SUCCESS;
 }

-#define LIST_ADD_N(_res, _col, _start, _rows, _t, numOfElem) \
-do { \
-_t* d = (_t*)(_col->pData); \
-for (int32_t i = (_start); i < (_rows) + (_start); ++i) { \
-if (((_col)->hasNull) && colDataIsNull_f((_col)->nullbitmap, i)) { \
-continue; \
-}; \
-(_res) += (d)[i]; \
-(numOfElem)++; \
-} \
-} while (0)
-
-#define LIST_SUB_N(_res, _col, _start, _rows, _t, numOfElem) \
-do { \
-_t* d = (_t*)(_col->pData); \
-for (int32_t i = (_start); i < (_rows) + (_start); ++i) { \
-if (((_col)->hasNull) && colDataIsNull_f((_col)->nullbitmap, i)) { \
-continue; \
-}; \
-(_res) -= (d)[i]; \
-(numOfElem)++; \
-} \
-} while (0)
-
 int32_t sumFunction(SqlFunctionCtx* pCtx) {
 int32_t numOfElem = 0;
@@ -920,20 +994,6 @@ int32_t avgFunctionMerge(SqlFunctionCtx* pCtx) {
 return TSDB_CODE_SUCCESS;
 }

-#define LIST_AVG_N(sumT, T) \
-do { \
-T* plist = (T*)pCol->pData; \
-for (int32_t i = start; i < numOfRows + pInput->startRowIndex; ++i) { \
-if (pCol->hasNull && colDataIsNull_f(pCol->nullbitmap, i)) { \
-continue; \
-} \
-\
-numOfElem += 1; \
-pAvgRes->count -= 1; \
-sumT -= plist[i]; \
-} \
-} while (0)
-
 int32_t avgInvertFunction(SqlFunctionCtx* pCtx) {
 int32_t numOfElem = 0;
@@ -1884,20 +1944,6 @@ int32_t stddevFunctionMerge(SqlFunctionCtx* pCtx) {
 return TSDB_CODE_SUCCESS;
 }

-#define LIST_STDDEV_SUB_N(sumT, T) \
-do { \
-T* plist = (T*)pCol->pData; \
-for (int32_t i = start; i < numOfRows + start; ++i) { \
-if (pCol->hasNull && colDataIsNull_f(pCol->nullbitmap, i)) { \
-continue; \
-} \
-numOfElem += 1; \
-pStddevRes->count -= 1; \
-sumT -= plist[i]; \
-pStddevRes->quadraticISum -= plist[i] * plist[i]; \
-} \
-} while (0)
-
 int32_t stddevInvertFunction(SqlFunctionCtx* pCtx) {
 int32_t numOfElem = 0;
@@ -2046,15 +2092,6 @@ bool leastSQRFunctionSetup(SqlFunctionCtx* pCtx, SResultRowEntryInfo* pResultInf
 return true;
 }

-#define LEASTSQR_CAL(p, x, y, index, step) \
-do { \
-(p)[0][0] += (double)(x) * (x); \
-(p)[0][1] += (double)(x); \
-(p)[0][2] += (double)(x) * (y)[index]; \
-(p)[1][2] += (y)[index]; \
-(x) += step; \
-} while (0)
-
 int32_t leastSQRFunction(SqlFunctionCtx* pCtx) {
 int32_t numOfElem = 0;
@@ -2733,7 +2770,6 @@ int32_t firstFunction(SqlFunctionCtx* pCtx) {
 }
 }
 pInfo->hasResult = true;
-// DO_UPDATE_TAG_COLUMNS(pCtx, ts);
 pResInfo->numOfRes = 1;
 break;
 }
@@ -2830,7 +2866,6 @@ int32_t lastFunction(SqlFunctionCtx* pCtx) {
 }
 pInfo->hasResult = true;
 pResInfo->numOfRes = 1;
-// DO_UPDATE_TAG_COLUMNS(pCtx, ts);
 }
 break;
 }
@@ -4477,36 +4512,6 @@ static int8_t getStateOpType(char* opStr) {
 return opType;
 }

-#define GET_STATE_VAL(param) ((param.nType == TSDB_DATA_TYPE_BIGINT) ? (param.i) : (param.d))
-
-#define STATE_COMP(_op, _lval, _param) STATE_COMP_IMPL(_op, _lval, GET_STATE_VAL(_param))
-
-#define STATE_COMP_IMPL(_op, _lval, _rval) \
-do { \
-switch (_op) { \
-case STATE_OPER_LT: \
-return ((_lval) < (_rval)); \
-break; \
-case STATE_OPER_GT: \
-return ((_lval) > (_rval)); \
-break; \
-case STATE_OPER_LE: \
-return ((_lval) <= (_rval)); \
-break; \
-case STATE_OPER_GE: \
-return ((_lval) >= (_rval)); \
-break; \
-case STATE_OPER_NE: \
-return ((_lval) != (_rval)); \
-break; \
-case STATE_OPER_EQ: \
-return ((_lval) == (_rval)); \
-break; \
-default: \
-break; \
-} \
-} while (0)
-
 static bool checkStateOp(int8_t op, SColumnInfoData* pCol, int32_t index, SVariant param) {
 char* data = colDataGetData(pCol, index);
 switch (pCol->info.type) {
@@ -5214,12 +5219,6 @@ static double twa_get_area(SPoint1 s, SPoint1 e) {
 return val;
 }

-#define INIT_INTP_POINT(_p, _k, _v) \
-do { \
-(_p).key = (_k); \
-(_p).val = (_v); \
-} while (0)
-
 int32_t twaFunction(SqlFunctionCtx* pCtx) {
 SInputColumnInfoData* pInput = &pCtx->input;
 SColumnInfoData* pInputCol = pInput->pData[0];
@@ -6009,6 +6008,15 @@ int32_t lastrowFunction(SqlFunctionCtx* pCtx) {

 pInfo->hasResult = true;
 pResInfo->numOfRes = 1;
+
+if (pCtx->subsidiaries.num > 0) {
+STuplePos* pTuplePos = (STuplePos*)(pInfo->buf + bytes + sizeof(TSKEY));
+if (!pInfo->hasResult) {
+saveTupleData(pCtx, i, pCtx->pSrcBlock, pTuplePos);
+} else {
+copyTupleData(pCtx, i, pCtx->pSrcBlock, pTuplePos);
+}
+}
 }
 }
@@ -2481,7 +2481,6 @@ static int32_t jsonToSubplan(const SJson* pJson, void* pObj) {
 int32_t code = tjsonToObject(pJson, jkSubplanId, jsonToSubplanId, &pNode->id);
 if (TSDB_CODE_SUCCESS == code) {
 tjsonGetNumberValue(pJson, jkSubplanType, pNode->subplanType, code);
-;
 }
 if (TSDB_CODE_SUCCESS == code) {
 code = tjsonGetIntValue(pJson, jkSubplanMsgType, &pNode->msgType);
@@ -956,7 +956,8 @@ void nodesDestroyNode(SNode* pNode) {
 }
 case QUERY_NODE_PHYSICAL_SUBPLAN: {
 SSubplan* pSubplan = (SSubplan*)pNode;
-nodesDestroyList(pSubplan->pChildren);
+// nodesDestroyList(pSubplan->pChildren);
+nodesClearList(pSubplan->pChildren);
 nodesDestroyNode((SNode*)pSubplan->pNode);
 nodesDestroyNode((SNode*)pSubplan->pDataSink);
 nodesDestroyNode((SNode*)pSubplan->pTagCond);
@@ -972,7 +973,7 @@ void nodesDestroyNode(SNode* pNode) {
 SNode* pElement = NULL;
 FOREACH(pElement, pPlan->pSubplans) {
 if (first) {
-first = false;
+// first = false;
 nodesDestroyNode(pElement);
 } else {
 nodesClearList(((SNodeListNode*)pElement)->pNodeList);
@@ -556,6 +556,7 @@ signed_literal(A) ::= TIMESTAMP NK_STRING(B).
 signed_literal(A) ::= duration_literal(B). { A = releaseRawExprNode(pCxt, B); }
 signed_literal(A) ::= NULL(B). { A = createValueNode(pCxt, TSDB_DATA_TYPE_NULL, &B); }
 signed_literal(A) ::= literal_func(B). { A = releaseRawExprNode(pCxt, B); }
+signed_literal(A) ::= NK_QUESTION(B). { A = createPlaceholderValueNode(pCxt, &B); }

 %type literal_list { SNodeList* }
 %destructor literal_list { nodesDestroyList($$); }
@@ -133,6 +133,9 @@ static int32_t createSName(SName* pName, SToken* pTableName, int32_t acctId, con
 assert(*p == TS_PATH_DELIMITER[0]);

 int32_t dbLen = p - pTableName->z;
+if (dbLen <= 0) {
+return buildInvalidOperationMsg(pMsgBuf, msg2);
+}
 char name[TSDB_DB_FNAME_LEN] = {0};
 strncpy(name, pTableName->z, dbLen);
 dbLen = strdequote(name);
@@ -2173,14 +2173,28 @@ static int64_t getMonthsFromTimeVal(int64_t val, int32_t fromPrecision, char uni
 return -1;
 }

+static const char* getPrecisionStr(uint8_t precision) {
+switch (precision) {
+case TSDB_TIME_PRECISION_MILLI:
+return TSDB_TIME_PRECISION_MILLI_STR;
+case TSDB_TIME_PRECISION_MICRO:
+return TSDB_TIME_PRECISION_MICRO_STR;
+case TSDB_TIME_PRECISION_NANO:
+return TSDB_TIME_PRECISION_NANO_STR;
+default:
+break;
+}
+return "unknown";
+}
+
 static int32_t checkIntervalWindow(STranslateContext* pCxt, SIntervalWindowNode* pInterval) {
 uint8_t precision = ((SColumnNode*)pInterval->pCol)->node.resType.precision;

 SValueNode* pInter = (SValueNode*)pInterval->pInterval;
 bool valInter = TIME_IS_VAR_DURATION(pInter->unit);
-if (pInter->datum.i <= 0 ||
-(!valInter && convertTimePrecision(pInter->datum.i, precision, TSDB_TIME_PRECISION_MICRO) < tsMinIntervalTime)) {
-return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INTER_VALUE_TOO_SMALL, tsMinIntervalTime);
+if (pInter->datum.i <= 0 || (!valInter && pInter->datum.i < tsMinIntervalTime)) {
+return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INTER_VALUE_TOO_SMALL, tsMinIntervalTime,
+getPrecisionStr(precision));
 }

 if (NULL != pInterval->pOffset) {
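A tiny illustration of how the reworked message is expected to render once getPrecisionStr() supplies the unit for the "%d %s" format further down in this diff; the "ms" string is an assumption for a millisecond-precision database (TSDB_TIME_PRECISION_MILLI_STR).

/* Illustrative rendering of the new error format (sketch only). */
#include <stdio.h>
int main(void) {
  int tsMinIntervalTime = 1;      /* same default as in this diff */
  const char *unit = "ms";        /* assumed value of getPrecisionStr() for ms precision */
  printf("Interval cannot be less than %d %s\n", tsMinIntervalTime, unit);
  return 0;
}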
@@ -2754,6 +2768,11 @@ static int32_t translateInsertProject(STranslateContext* pCxt, SInsertStmt* pIns
 }
 }

+if (NULL == pPrimaryKeyExpr) {
+return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_COLUMNS_NUM,
+"Primary timestamp column can not be null");
+}
+
 return addOrderByPrimaryKeyToQuery(pCxt, pPrimaryKeyExpr, pInsert->pQuery);
 }
@@ -2998,8 +3017,7 @@ static int32_t checkDatabaseOptions(STranslateContext* pCxt, const char* pDbName
 int32_t code =
 checkRangeOption(pCxt, "buffer", pOptions->buffer, TSDB_MIN_BUFFER_PER_VNODE, TSDB_MAX_BUFFER_PER_VNODE);
 if (TSDB_CODE_SUCCESS == code) {
-code = checkRangeOption(pCxt, "cacheLast", pOptions->cacheLast, TSDB_MIN_DB_CACHE_LAST,
-TSDB_MAX_DB_CACHE_LAST);
+code = checkRangeOption(pCxt, "cacheLast", pOptions->cacheLast, TSDB_MIN_DB_CACHE_LAST, TSDB_MAX_DB_CACHE_LAST);
 }
 if (TSDB_CODE_SUCCESS == code) {
 code = checkRangeOption(pCxt, "cacheLastSize", pOptions->cacheLastSize, TSDB_MIN_DB_CACHE_LAST_SIZE,
@@ -60,7 +60,7 @@ static char* getSyntaxErrFormat(int32_t errCode) {
 case TSDB_CODE_PAR_EXPRIE_STATEMENT:
 return "This statement is no longer supported";
 case TSDB_CODE_PAR_INTER_VALUE_TOO_SMALL:
-return "Interval cannot be less than %d us";
+return "Interval cannot be less than %d %s";
 case TSDB_CODE_PAR_DB_NOT_SPECIFIED:
 return "Database not specified";
 case TSDB_CODE_PAR_INVALID_IDENTIFIER_NAME:
(File diff suppressed because it is too large.)
@@ -316,34 +316,34 @@ typedef struct SQWorkerMgmt {
 #define QW_LOCK(type, _lock) \
 do { \
 if (QW_READ == (type)) { \
-assert(atomic_load_64((_lock)) >= 0); \
+assert(atomic_load_32((_lock)) >= 0); \
-QW_LOCK_DEBUG("QW RLOCK%p:%" PRIx64 ", %s:%d B", (_lock), atomic_load_64(_lock), __FILE__, __LINE__); \
+QW_LOCK_DEBUG("QW RLOCK%p:%d, %s:%d B", (_lock), atomic_load_32(_lock), __FILE__, __LINE__); \
 taosRLockLatch(_lock); \
-QW_LOCK_DEBUG("QW RLOCK%p:%" PRIx64 ", %s:%d E", (_lock), atomic_load_64(_lock), __FILE__, __LINE__); \
+QW_LOCK_DEBUG("QW RLOCK%p:%d, %s:%d E", (_lock), atomic_load_32(_lock), __FILE__, __LINE__); \
-assert(atomic_load_64((_lock)) > 0); \
+assert(atomic_load_32((_lock)) > 0); \
 } else { \
-assert(atomic_load_64((_lock)) >= 0); \
+assert(atomic_load_32((_lock)) >= 0); \
-QW_LOCK_DEBUG("QW WLOCK%p:%" PRIx64 ", %s:%d B", (_lock), atomic_load_64(_lock), __FILE__, __LINE__); \
+QW_LOCK_DEBUG("QW WLOCK%p:%d, %s:%d B", (_lock), atomic_load_32(_lock), __FILE__, __LINE__); \
 taosWLockLatch(_lock); \
-QW_LOCK_DEBUG("QW WLOCK%p:%" PRIx64 ", %s:%d E", (_lock), atomic_load_64(_lock), __FILE__, __LINE__); \
+QW_LOCK_DEBUG("QW WLOCK%p:%d, %s:%d E", (_lock), atomic_load_32(_lock), __FILE__, __LINE__); \
-assert(atomic_load_64((_lock)) == TD_RWLATCH_WRITE_FLAG_COPY); \
+assert(atomic_load_32((_lock)) == TD_RWLATCH_WRITE_FLAG_COPY); \
 } \
 } while (0)

 #define QW_UNLOCK(type, _lock) \
 do { \
 if (QW_READ == (type)) { \
-assert(atomic_load_64((_lock)) > 0); \
+assert(atomic_load_32((_lock)) > 0); \
-QW_LOCK_DEBUG("QW RULOCK%p:%" PRIx64 ", %s:%d B", (_lock), atomic_load_64(_lock), __FILE__, __LINE__); \
+QW_LOCK_DEBUG("QW RULOCK%p:%d, %s:%d B", (_lock), atomic_load_32(_lock), __FILE__, __LINE__); \
 taosRUnLockLatch(_lock); \
-QW_LOCK_DEBUG("QW RULOCK%p:%" PRIx64 ", %s:%d E", (_lock), atomic_load_64(_lock), __FILE__, __LINE__); \
+QW_LOCK_DEBUG("QW RULOCK%p:%d, %s:%d E", (_lock), atomic_load_32(_lock), __FILE__, __LINE__); \
-assert(atomic_load_64((_lock)) >= 0); \
+assert(atomic_load_32((_lock)) >= 0); \
 } else { \
-assert(atomic_load_64((_lock)) == TD_RWLATCH_WRITE_FLAG_COPY); \
+assert(atomic_load_32((_lock)) == TD_RWLATCH_WRITE_FLAG_COPY); \
-QW_LOCK_DEBUG("QW WULOCK%p:%" PRIx64 ", %s:%d B", (_lock), atomic_load_64(_lock), __FILE__, __LINE__); \
+QW_LOCK_DEBUG("QW WULOCK%p:%d, %s:%d B", (_lock), atomic_load_32(_lock), __FILE__, __LINE__); \
 taosWUnLockLatch(_lock); \
-QW_LOCK_DEBUG("QW WULOCK%p:%" PRIx64 ", %s:%d E", (_lock), atomic_load_64(_lock), __FILE__, __LINE__); \
+QW_LOCK_DEBUG("QW WULOCK%p:%d, %s:%d E", (_lock), atomic_load_32(_lock), __FILE__, __LINE__); \
-assert(atomic_load_64((_lock)) >= 0); \
+assert(atomic_load_32((_lock)) >= 0); \
 } \
 } while (0)
@@ -367,33 +367,33 @@ extern SSchedulerMgmt schMgmt;

 #define SCH_LOCK(type, _lock) do { \
 if (SCH_READ == (type)) { \
-assert(atomic_load_64(_lock) >= 0); \
+assert(atomic_load_32(_lock) >= 0); \
-SCH_LOCK_DEBUG("SCH RLOCK%p:%" PRIx64 ", %s:%d B", (_lock), atomic_load_64(_lock), __FILE__, __LINE__); \
+SCH_LOCK_DEBUG("SCH RLOCK%p:%d, %s:%d B", (_lock), atomic_load_32(_lock), __FILE__, __LINE__); \
 taosRLockLatch(_lock); \
-SCH_LOCK_DEBUG("SCH RLOCK%p:%" PRIx64 ", %s:%d E", (_lock), atomic_load_64(_lock), __FILE__, __LINE__); \
+SCH_LOCK_DEBUG("SCH RLOCK%p:%d, %s:%d E", (_lock), atomic_load_32(_lock), __FILE__, __LINE__); \
-assert(atomic_load_64(_lock) > 0); \
+assert(atomic_load_32(_lock) > 0); \
 } else { \
-assert(atomic_load_64(_lock) >= 0); \
+assert(atomic_load_32(_lock) >= 0); \
-SCH_LOCK_DEBUG("SCH WLOCK%p:%" PRIx64 ", %s:%d B", (_lock), atomic_load_64(_lock), __FILE__, __LINE__); \
+SCH_LOCK_DEBUG("SCH WLOCK%p:%d, %s:%d B", (_lock), atomic_load_32(_lock), __FILE__, __LINE__); \
 taosWLockLatch(_lock); \
-SCH_LOCK_DEBUG("SCH WLOCK%p:%" PRIx64 ", %s:%d E", (_lock), atomic_load_64(_lock), __FILE__, __LINE__); \
+SCH_LOCK_DEBUG("SCH WLOCK%p:%d, %s:%d E", (_lock), atomic_load_32(_lock), __FILE__, __LINE__); \
-assert(atomic_load_64(_lock) & TD_RWLATCH_WRITE_FLAG_COPY); \
+assert(atomic_load_32(_lock) == TD_RWLATCH_WRITE_FLAG_COPY); \
 } \
 } while (0)

 #define SCH_UNLOCK(type, _lock) do { \
 if (SCH_READ == (type)) { \
-assert(atomic_load_64((_lock)) > 0); \
+assert(atomic_load_32((_lock)) > 0); \
-SCH_LOCK_DEBUG("SCH RULOCK%p:%" PRIx64 ", %s:%d B", (_lock), atomic_load_64(_lock), __FILE__, __LINE__); \
+SCH_LOCK_DEBUG("SCH RULOCK%p:%d, %s:%d B", (_lock), atomic_load_32(_lock), __FILE__, __LINE__); \
 taosRUnLockLatch(_lock); \
-SCH_LOCK_DEBUG("SCH RULOCK%p:%" PRIx64 ", %s:%d E", (_lock), atomic_load_64(_lock), __FILE__, __LINE__); \
+SCH_LOCK_DEBUG("SCH RULOCK%p:%d, %s:%d E", (_lock), atomic_load_32(_lock), __FILE__, __LINE__); \
-assert(atomic_load_64((_lock)) >= 0); \
+assert(atomic_load_32((_lock)) >= 0); \
 } else { \
-assert(atomic_load_64((_lock)) & TD_RWLATCH_WRITE_FLAG_COPY); \
+assert(atomic_load_32((_lock)) & TD_RWLATCH_WRITE_FLAG_COPY); \
-SCH_LOCK_DEBUG("SCH WULOCK%p:%" PRIx64 ", %s:%d B", (_lock), atomic_load_64(_lock), __FILE__, __LINE__); \
+SCH_LOCK_DEBUG("SCH WULOCK%p:%d, %s:%d B", (_lock), atomic_load_32(_lock), __FILE__, __LINE__); \
 taosWUnLockLatch(_lock); \
-SCH_LOCK_DEBUG("SCH WULOCK%p:%" PRIx64 ", %s:%d E", (_lock), atomic_load_64(_lock), __FILE__, __LINE__); \
+SCH_LOCK_DEBUG("SCH WULOCK%p:%d, %s:%d E", (_lock), atomic_load_32(_lock), __FILE__, __LINE__); \
-assert(atomic_load_64((_lock)) >= 0); \
+assert(atomic_load_32((_lock)) >= 0); \
 } \
 } while (0)
@@ -58,7 +58,6 @@ int32_t schInitTask(SSchJob *pJob, SSchTask *pTask, SSubplan *pPlan, SSchLevel *
 if (NULL == pTask->execNodes || NULL == pTask->profile.execTime) {
 SCH_ERR_JRET(TSDB_CODE_QRY_OUT_OF_MEMORY);
 }
-taosInitReentrantRWLatch(&pTask->lock);

 SCH_SET_TASK_STATUS(pTask, JOB_TASK_STATUS_INIT);
@@ -260,7 +259,7 @@ int32_t schProcessOnTaskSuccess(SSchJob *pJob, SSchTask *pTask) {
 SSchTask *parent = *(SSchTask **)taosArrayGet(pTask->parents, i);
 int32_t readyNum = atomic_add_fetch_32(&parent->childReady, 1);

-SCH_LOCK_TASK(parent);
+SCH_LOCK(SCH_WRITE, &parent->planLock);
 SDownstreamSourceNode source = {
 .type = QUERY_NODE_DOWNSTREAM_SOURCE,
 .taskId = pTask->taskId,
@@ -270,7 +269,7 @@ int32_t schProcessOnTaskSuccess(SSchJob *pJob, SSchTask *pTask) {
 .fetchMsgType = SCH_FETCH_TYPE(pTask),
 };
 qSetSubplanExecutionNode(parent->plan, pTask->plan->id.groupId, &source);
-SCH_UNLOCK_TASK(parent);
+SCH_UNLOCK(SCH_WRITE, &parent->planLock);

 if (SCH_TASK_READY_FOR_LAUNCH(readyNum, parent)) {
 SCH_TASK_DLOG("all %d children task done, start to launch parent task 0x%" PRIx64, readyNum, parent->taskId);
@@ -30,7 +30,7 @@ extern "C" {

 #define TIMER_MAX_MS 0x7FFFFFFF
 #define ENV_TICK_TIMER_MS 1000
-#define PING_TIMER_MS 1000
+#define PING_TIMER_MS 5000
 #define ELECT_TIMER_MS_MIN 1300
 #define ELECT_TIMER_MS_MAX (ELECT_TIMER_MS_MIN * 2)
 #define ELECT_TIMER_MS_RANGE (ELECT_TIMER_MS_MAX - ELECT_TIMER_MS_MIN)
@@ -273,16 +273,8 @@ int32_t syncLeaderTransfer(int64_t rid) {
 }
 ASSERT(rid == pSyncNode->rid);

-if (pSyncNode->peersNum == 0) {
+int32_t ret = syncNodeLeaderTransfer(pSyncNode);
 taosReleaseRef(tsNodeRefId, pSyncNode->rid);
-terrno = TSDB_CODE_SYN_INTERNAL_ERROR;
-return -1;
-}
-
-SNodeInfo newLeader = (pSyncNode->peersNodeInfo)[0];
-taosReleaseRef(tsNodeRefId, pSyncNode->rid);
-
-int32_t ret = syncLeaderTransferTo(rid, newLeader);
 return ret;
 }
@@ -293,25 +285,8 @@ int32_t syncLeaderTransferTo(int64_t rid, SNodeInfo newLeader) {
 return -1;
 }
 ASSERT(rid == pSyncNode->rid);
-int32_t ret = 0;

-if (pSyncNode->replicaNum == 1) {
-sError("only one replica, cannot drop leader");
-taosReleaseRef(tsNodeRefId, pSyncNode->rid);
-terrno = TSDB_CODE_SYN_ONE_REPLICA;
-return -1;
-}
-
-SyncLeaderTransfer* pMsg = syncLeaderTransferBuild(pSyncNode->vgId);
-pMsg->newLeaderId.addr = syncUtilAddr2U64(newLeader.nodeFqdn, newLeader.nodePort);
-pMsg->newLeaderId.vgId = pSyncNode->vgId;
-pMsg->newNodeInfo = newLeader;
-ASSERT(pMsg != NULL);
-SRpcMsg rpcMsg = {0};
-syncLeaderTransfer2RpcMsg(pMsg, &rpcMsg);
-syncLeaderTransferDestroy(pMsg);
-
-ret = syncNodePropose(pSyncNode, &rpcMsg, false);
+int32_t ret = syncNodeLeaderTransferTo(pSyncNode, newLeader);
 taosReleaseRef(tsNodeRefId, pSyncNode->rid);
 return ret;
 }
@@ -337,6 +312,12 @@ int32_t syncNodeLeaderTransferTo(SSyncNode* pSyncNode, SNodeInfo newLeader) {
 return -1;
 }

+do {
+char logBuf[128];
+snprintf(logBuf, sizeof(logBuf), "begin leader transfer to %s:%u", newLeader.nodeFqdn, newLeader.nodePort);
+syncNodeEventLog(pSyncNode, logBuf);
+} while (0);
+
 SyncLeaderTransfer* pMsg = syncLeaderTransferBuild(pSyncNode->vgId);
 pMsg->newLeaderId.addr = syncUtilAddr2U64(newLeader.nodeFqdn, newLeader.nodePort);
 pMsg->newLeaderId.vgId = pSyncNode->vgId;
@@ -1118,19 +1099,13 @@ void syncNodeStart(SSyncNode* pSyncNode) {
 // Raft 3.6.2 Committing entries from previous terms
 syncNodeAppendNoop(pSyncNode);
 syncMaybeAdvanceCommitIndex(pSyncNode);
+} else {
-return;
-}
-
 syncNodeBecomeFollower(pSyncNode, "first start");

-// int32_t ret = 0;
-// ret = syncNodeStartPingTimer(pSyncNode);
-// ASSERT(ret == 0);
-
-if (gRaftDetailLog) {
-syncNodeLog2("==state change become leader immediately==", pSyncNode);
 }

+int32_t ret = 0;
+ret = syncNodeStartPingTimer(pSyncNode);
+ASSERT(ret == 0);
 }
||||||
void syncNodeStartStandBy(SSyncNode* pSyncNode) {
|
void syncNodeStartStandBy(SSyncNode* pSyncNode) {
|
||||||
|
@ -1147,8 +1122,6 @@ void syncNodeStartStandBy(SSyncNode* pSyncNode) {
|
||||||
void syncNodeClose(SSyncNode* pSyncNode) {
|
void syncNodeClose(SSyncNode* pSyncNode) {
|
||||||
syncNodeEventLog(pSyncNode, "sync close");
|
syncNodeEventLog(pSyncNode, "sync close");
|
||||||
|
|
||||||
// leader transfer
|
|
||||||
|
|
||||||
int32_t ret;
|
int32_t ret;
|
||||||
ASSERT(pSyncNode != NULL);
|
ASSERT(pSyncNode != NULL);
|
||||||
|
|
||||||
|
@@ -1183,14 +1156,6 @@ void syncNodeClose(SSyncNode* pSyncNode) {
 pSyncNode->pNewNodeReceiver = NULL;
 }

-/*
-if (pSyncNode->pSnapshot != NULL) {
-taosMemoryFree(pSyncNode->pSnapshot);
-}
-*/
-
-// tsem_destroy(&pSyncNode->restoreSem);
-
 // free memory in syncFreeNode
 // taosMemoryFree(pSyncNode);
 }
@@ -1255,7 +1220,7 @@ int32_t syncNodeStartPingTimer(SSyncNode* pSyncNode) {
 &pSyncNode->pPingTimer);
 atomic_store_64(&pSyncNode->pingTimerLogicClock, pSyncNode->pingTimerLogicClockUser);
 } else {
-sError("sync env is stop, syncNodeStartPingTimer");
+sError("vgId:%d, start ping timer error, sync env is stop", pSyncNode->vgId);
 }
 return ret;
 }
@@ -1276,7 +1241,7 @@ int32_t syncNodeStartElectTimer(SSyncNode* pSyncNode, int32_t ms) {
 &pSyncNode->pElectTimer);
 atomic_store_64(&pSyncNode->electTimerLogicClock, pSyncNode->electTimerLogicClockUser);
 } else {
-sError("sync env is stop, syncNodeStartElectTimer");
+sError("vgId:%d, start elect timer error, sync env is stop", pSyncNode->vgId);
 }
 return ret;
 }
@@ -1316,7 +1281,7 @@ int32_t syncNodeStartHeartbeatTimer(SSyncNode* pSyncNode) {
 &pSyncNode->pHeartbeatTimer);
 atomic_store_64(&pSyncNode->heartbeatTimerLogicClock, pSyncNode->heartbeatTimerLogicClockUser);
 } else {
-sError("sync env is stop, syncNodeStartHeartbeatTimer");
+sError("vgId:%d, start heartbeat timer error, sync env is stop", pSyncNode->vgId);
 }
 return ret;
 }
@@ -2643,7 +2608,7 @@ const char* syncStr(ESyncState state) {
 static int32_t syncDoLeaderTransfer(SSyncNode* ths, SRpcMsg* pRpcMsg, SSyncRaftEntry* pEntry) {
 SyncLeaderTransfer* pSyncLeaderTransfer = syncLeaderTransferFromRpcMsg2(pRpcMsg);

-syncNodeEventLog(ths, "begin leader transfer");
+syncNodeEventLog(ths, "do leader transfer");

 bool sameId = syncUtilSameId(&(pSyncLeaderTransfer->newLeaderId), &(ths->myRaftId));
 bool sameNodeInfo = strcmp(pSyncLeaderTransfer->newNodeInfo.nodeFqdn, ths->myNodeInfo.nodeFqdn) == 0 &&
@@ -17,6 +17,11 @@
 #include "syncElection.h"
 #include "syncReplication.h"

+int32_t syncNodeTimerRoutine(SSyncNode* ths) {
+syncNodeEventLog(ths, "timer routines ... ");
+return 0;
+}
+
 int32_t syncNodeOnTimeoutCb(SSyncNode* ths, SyncTimeout* pMsg) {
 int32_t ret = 0;
 syncTimeoutLog2("==syncNodeOnTimeoutCb==", pMsg);
@@ -24,8 +29,11 @@ int32_t syncNodeOnTimeoutCb(SSyncNode* ths, SyncTimeout* pMsg) {
 if (pMsg->timeoutType == SYNC_TIMEOUT_PING) {
 if (atomic_load_64(&ths->pingTimerLogicClockUser) <= pMsg->logicClock) {
 ++(ths->pingTimerCounter);
+
 // syncNodePingAll(ths);
-syncNodePingPeers(ths);
+// syncNodePingPeers(ths);
+
+syncNodeTimerRoutine(ths);
 }

 } else if (pMsg->timeoutType == SYNC_TIMEOUT_ELECTION) {
@@ -40,7 +48,7 @@ int32_t syncNodeOnTimeoutCb(SSyncNode* ths, SyncTimeout* pMsg) {
 syncNodeReplicate(ths);
 }
 } else {
-sTrace("unknown timeoutType:%d", pMsg->timeoutType);
+sError("vgId:%d, unknown timeout-type:%d", ths->vgId, pMsg->timeoutType);
 }

 return ret;
@@ -17,10 +17,8 @@
 #include "tlockfree.h"

 #define TD_RWLATCH_WRITE_FLAG 0x40000000
-#define TD_RWLATCH_REENTRANT_FLAG 0x4000000000000000

 void taosInitRWLatch(SRWLatch *pLatch) { *pLatch = 0; }
-void taosInitReentrantRWLatch(SRWLatch *pLatch) { *pLatch = TD_RWLATCH_REENTRANT_FLAG; }

 void taosWLockLatch(SRWLatch *pLatch) {
 SRWLatch oLatch, nLatch;
@ -28,14 +26,8 @@ void taosWLockLatch(SRWLatch *pLatch) {
|
||||||
|
|
||||||
// Set write flag
|
// Set write flag
|
||||||
while (1) {
|
while (1) {
|
||||||
oLatch = atomic_load_64(pLatch);
|
oLatch = atomic_load_32(pLatch);
|
||||||
if (oLatch & TD_RWLATCH_WRITE_FLAG) {
|
if (oLatch & TD_RWLATCH_WRITE_FLAG) {
|
||||||
if (oLatch & TD_RWLATCH_REENTRANT_FLAG) {
|
|
||||||
nLatch = (((oLatch >> 32) + 1) << 32) | (oLatch & 0xFFFFFFFF);
|
|
||||||
if (atomic_val_compare_exchange_64(pLatch, oLatch, nLatch) == oLatch) break;
|
|
||||||
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
nLoops++;
|
nLoops++;
|
||||||
if (nLoops > 1000) {
|
if (nLoops > 1000) {
|
||||||
sched_yield();
|
sched_yield();
|
||||||
|
@ -45,14 +37,14 @@ void taosWLockLatch(SRWLatch *pLatch) {
|
||||||
}
|
}
|
||||||
|
|
||||||
nLatch = oLatch | TD_RWLATCH_WRITE_FLAG;
|
nLatch = oLatch | TD_RWLATCH_WRITE_FLAG;
|
||||||
if (atomic_val_compare_exchange_64(pLatch, oLatch, nLatch) == oLatch) break;
|
if (atomic_val_compare_exchange_32(pLatch, oLatch, nLatch) == oLatch) break;
|
||||||
}
|
}
|
||||||
|
|
||||||
// wait for all reads end
|
// wait for all reads end
|
||||||
nLoops = 0;
|
nLoops = 0;
|
||||||
while (1) {
|
while (1) {
|
||||||
oLatch = atomic_load_64(pLatch);
|
oLatch = atomic_load_32(pLatch);
|
||||||
if (0 == (oLatch & 0xFFFFFFF)) break;
|
if (oLatch == TD_RWLATCH_WRITE_FLAG) break;
|
||||||
nLoops++;
|
nLoops++;
|
||||||
if (nLoops > 1000) {
|
if (nLoops > 1000) {
|
||||||
sched_yield();
|
sched_yield();
|
||||||
|
@ -64,47 +56,27 @@ void taosWLockLatch(SRWLatch *pLatch) {
|
||||||
// no reentrant
|
// no reentrant
|
||||||
int32_t taosWTryLockLatch(SRWLatch *pLatch) {
|
int32_t taosWTryLockLatch(SRWLatch *pLatch) {
|
||||||
SRWLatch oLatch, nLatch;
|
SRWLatch oLatch, nLatch;
|
||||||
oLatch = atomic_load_64(pLatch);
|
oLatch = atomic_load_32(pLatch);
|
||||||
if (oLatch << 2) {
|
if (oLatch) {
|
||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
|
|
||||||
nLatch = oLatch | TD_RWLATCH_WRITE_FLAG;
|
nLatch = oLatch | TD_RWLATCH_WRITE_FLAG;
|
||||||
if (atomic_val_compare_exchange_64(pLatch, oLatch, nLatch) == oLatch) {
|
if (atomic_val_compare_exchange_32(pLatch, oLatch, nLatch) == oLatch) {
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
|
|
||||||
void taosWUnLockLatch(SRWLatch *pLatch) {
|
void taosWUnLockLatch(SRWLatch *pLatch) { atomic_store_32(pLatch, 0); }
|
||||||
SRWLatch oLatch, nLatch, wLatch;
|
|
||||||
|
|
||||||
while (1) {
|
|
||||||
oLatch = atomic_load_64(pLatch);
|
|
||||||
|
|
||||||
if (0 == (oLatch & TD_RWLATCH_REENTRANT_FLAG)) {
|
|
||||||
atomic_store_64(pLatch, 0);
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
|
|
||||||
wLatch = ((oLatch << 2) >> 34);
|
|
||||||
if (wLatch) {
|
|
||||||
nLatch = ((--wLatch) << 32) | TD_RWLATCH_REENTRANT_FLAG | TD_RWLATCH_WRITE_FLAG;
|
|
||||||
} else {
|
|
||||||
nLatch = TD_RWLATCH_REENTRANT_FLAG;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (atomic_val_compare_exchange_64(pLatch, oLatch, nLatch) == oLatch) break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
void taosRLockLatch(SRWLatch *pLatch) {
|
void taosRLockLatch(SRWLatch *pLatch) {
|
||||||
SRWLatch oLatch, nLatch;
|
SRWLatch oLatch, nLatch;
|
||||||
int32_t nLoops = 0;
|
int32_t nLoops = 0;
|
||||||
|
|
||||||
while (1) {
|
while (1) {
|
||||||
oLatch = atomic_load_64(pLatch);
|
oLatch = atomic_load_32(pLatch);
|
||||||
if (oLatch & TD_RWLATCH_WRITE_FLAG) {
|
if (oLatch & TD_RWLATCH_WRITE_FLAG) {
|
||||||
nLoops++;
|
nLoops++;
|
||||||
if (nLoops > 1000) {
|
if (nLoops > 1000) {
|
||||||
|
@ -115,8 +87,8 @@ void taosRLockLatch(SRWLatch *pLatch) {
|
||||||
}
|
}
|
||||||
|
|
||||||
nLatch = oLatch + 1;
|
nLatch = oLatch + 1;
|
||||||
if (atomic_val_compare_exchange_64(pLatch, oLatch, nLatch) == oLatch) break;
|
if (atomic_val_compare_exchange_32(pLatch, oLatch, nLatch) == oLatch) break;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
void taosRUnLockLatch(SRWLatch *pLatch) { atomic_fetch_sub_64(pLatch, 1); }
|
void taosRUnLockLatch(SRWLatch *pLatch) { atomic_fetch_sub_32(pLatch, 1); }
|
||||||
|
|
|
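For context on the latch API simplified above, a minimal caller-side sketch follows (illustration only, not part of this commit): readers share the latch through taosRLockLatch/taosRUnLockLatch while a writer takes exclusive ownership with taosWLockLatch/taosWUnLockLatch. The SharedCounter struct and thread bodies are hypothetical.

/* illustrative usage sketch, not part of this commit */
#include <pthread.h>
#include <stdio.h>
#include "tlockfree.h"

typedef struct {
  SRWLatch latch;   /* single-writer / multiple-reader latch from the diff above */
  int64_t  value;   /* hypothetical shared state protected by the latch */
} SharedCounter;

static SharedCounter gCounter;

static void *readerThread(void *arg) {
  (void)arg;
  taosRLockLatch(&gCounter.latch);      /* many readers may hold this concurrently */
  int64_t snapshot = gCounter.value;    /* read while the read latch is held */
  taosRUnLockLatch(&gCounter.latch);
  printf("read %lld\n", (long long)snapshot);
  return NULL;
}

static void *writerThread(void *arg) {
  (void)arg;
  taosWLockLatch(&gCounter.latch);      /* excludes readers and other writers */
  gCounter.value += 1;                  /* mutate while exclusively held */
  taosWUnLockLatch(&gCounter.latch);
  return NULL;
}

int main(void) {
  taosInitRWLatch(&gCounter.latch);     /* latch starts unlocked (0) */
  pthread_t r, w;
  pthread_create(&w, NULL, writerThread, NULL);
  pthread_create(&r, NULL, readerThread, NULL);
  pthread_join(w, NULL);
  pthread_join(r, NULL);
  return 0;
}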
@@ -0,0 +1,193 @@
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/deploy.sh -n dnode2 -i 2
system sh/deploy.sh -n dnode3 -i 3
system sh/deploy.sh -n dnode4 -i 4

system sh/cfg.sh -n dnode1 -c supportVnodes -v 0

system sh/exec.sh -n dnode1 -s start
system sh/exec.sh -n dnode2 -s start
system sh/exec.sh -n dnode3 -s start
system sh/exec.sh -n dnode4 -s start

$loop_cnt = 0
check_dnode_ready:
  $loop_cnt = $loop_cnt + 1
  sleep 200
  if $loop_cnt == 10 then
    print ====> dnode not ready!
    return -1
  endi
sql show dnodes
print ===> $rows $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
print ===> $rows $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6]
print ===> $rows $data[2][0] $data[2][1] $data[2][2] $data[2][3] $data[2][4] $data[2][5] $data[2][6]
print ===> $rows $data[3][0] $data[3][1] $data[3][2] $data[3][3] $data[3][4] $data[3][5] $data[3][6]
if $data[0][0] != 1 then
  return -1
endi
if $data[0][4] != ready then
  goto check_dnode_ready
endi

sql connect
sql create dnode $hostname port 7200
sql create dnode $hostname port 7300
sql create dnode $hostname port 7400

$loop_cnt = 0
check_dnode_ready_1:
$loop_cnt = $loop_cnt + 1
sleep 200
if $loop_cnt == 10 then
  print ====> dnodes not ready!
  return -1
endi
sql show dnodes
print ===> $rows $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
print ===> $rows $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6]
print ===> $rows $data[2][0] $data[2][1] $data[2][2] $data[2][3] $data[2][4] $data[2][5] $data[2][6]
print ===> $rows $data[3][0] $data[3][1] $data[3][2] $data[3][3] $data[3][4] $data[3][5] $data[3][6]
if $data[0][4] != ready then
  goto check_dnode_ready_1
endi
if $data[1][4] != ready then
  goto check_dnode_ready_1
endi
if $data[2][4] != ready then
  goto check_dnode_ready_1
endi
if $data[3][4] != ready then
  goto check_dnode_ready_1
endi

$replica = 3
$vgroups = 1

print ============= create database
sql create database db replica $replica vgroups $vgroups

$loop_cnt = 0
check_db_ready:
$loop_cnt = $loop_cnt + 1
sleep 200
if $loop_cnt == 100 then
  print ====> db not ready!
  return -1
endi
sql show databases
print ===> rows: $rows
print $data[2][0] $data[2][1] $data[2][2] $data[2][3] $data[2][4] $data[2][5] $data[2][6] $data[2][7] $data[2][8] $data[2][9] $data[2][6] $data[2][11] $data[2][12] $data[2][13] $data[2][14] $data[2][15] $data[2][16] $data[2][17] $data[2][18] $data[2][19]
if $rows != 3 then
  return -1
endi
if $data[2][19] != ready then
  goto check_db_ready
endi

sql use db

$loop_cnt = 0
check_vg_ready:
$loop_cnt = $loop_cnt + 1
sleep 200
if $loop_cnt == 300 then
  print ====> vgroups not ready!
  return -1
endi

sql show vgroups
print ===> rows: $rows
print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] $data[0][7] $data[0][8] $data[0][9] $data[0][10] $data[0][11]

if $rows != $vgroups then
  return -1
endi

if $data[0][4] == leader then
  if $data[0][6] == follower then
    if $data[0][8] == follower then
      print ---- vgroup $data[0][0] leader locate on dnode $data[0][3]
    endi
  endi
elif $data[0][6] == leader then
  if $data[0][4] == follower then
    if $data[0][8] == follower then
      print ---- vgroup $data[0][0] leader locate on dnode $data[0][5]
    endi
  endi
elif $data[0][8] == leader then
  if $data[0][4] == follower then
    if $data[0][6] == follower then
      print ---- vgroup $data[0][0] leader locate on dnode $data[0][7]
    endi
  endi
else
  goto check_vg_ready
endi


vg_ready:
print ====> create stable/child table
sql create table stb (ts timestamp, c1 int, c2 float, c3 double) tags (t1 int)

sql show stables
if $rows != 1 then
  return -1
endi

sql create table ct1 using stb tags(1000)


print ===> write 100 records
$N = 100
$count = 0
while $count < $N
  $ms = 1591200000000 + $count
  sql insert into ct1 values( $ms , $count , 2.1, 3.1)
  $count = $count + 1
endw


#sql flush database db;


sleep 3000


print ===> stop dnode1 dnode2 dnode3 dnode4
system sh/exec.sh -n dnode1 -s stop -x SIGINT
system sh/exec.sh -n dnode2 -s stop -x SIGINT
system sh/exec.sh -n dnode3 -s stop -x SIGINT
system sh/exec.sh -n dnode4 -s stop -x SIGINT



########################################################
print ===> start dnode1 dnode2 dnode3 dnode4
system sh/exec.sh -n dnode1 -s start
system sh/exec.sh -n dnode2 -s start
system sh/exec.sh -n dnode3 -s start
system sh/exec.sh -n dnode4 -s start

sleep 3000

print =============== query data
sql connect
sql use db
sql select * from ct1
print rows: $rows
print $data00 $data01 $data02
if $rows != 100 then
  return -1
endi

#system sh/exec.sh -n dnode1 -s stop -x SIGINT
#system sh/exec.sh -n dnode2 -s stop -x SIGINT
#system sh/exec.sh -n dnode3 -s stop -x SIGINT
#system sh/exec.sh -n dnode4 -s stop -x SIGINT
#########################################################
@@ -0,0 +1,176 @@
# author : wenzhouwww
from util.log import *
from util.sql import *
from util.cases import *

class TDTestCase:
    def init(self, conn, logSql):
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor())

        self.row_nums = 10
        self.tb_nums = 10
        self.ts = 1537146000000

    def prepare_datas(self, stb_name , tb_nums , row_nums ):
        tdSql.execute(" use db ")
        tdSql.execute(f" create stable {stb_name} (ts timestamp , c1 int , c2 bigint , c3 float , c4 double , c5 smallint , c6 tinyint , c7 bool , c8 binary(36) , c9 nchar(36) , uc1 int unsigned,\
            uc2 bigint unsigned ,uc3 smallint unsigned , uc4 tinyint unsigned ) tags(t1 timestamp , t2 int , t3 bigint , t4 float , t5 double , t6 smallint , t7 tinyint , t8 bool , t9 binary(36)\
            , t10 nchar(36) , t11 int unsigned , t12 bigint unsigned ,t13 smallint unsigned , t14 tinyint unsigned ) ")

        for i in range(tb_nums):
            tbname = f"sub_{stb_name}_{i}"
            ts = self.ts + i*10000
            tdSql.execute(f"create table {tbname} using {stb_name} tags ({ts} , {i} , {i}*10 ,{i}*1.0,{i}*1.0 , 1 , 2, 'true', 'binary_{i}' ,'nchar_{i}',{i},{i},10,20 )")

            for row in range(row_nums):
                ts = self.ts + row*1000
                tdSql.execute(f"insert into {tbname} values({ts} , {row} , {row} , {row} , {row} , 1 , 2 , 'true' , 'binary_{row}' , 'nchar_{row}' , {row} , {row} , 1 ,2 )")

            for null in range(5):
                ts = self.ts + row_nums*1000 + null*1000
                tdSql.execute(f"insert into {tbname} values({ts} , NULL , NULL , NULL , NULL , NULL , NULL , NULL , NULL , NULL , NULL , NULL , NULL , NULL )")

    def basic_query(self):
        tdSql.query("select count(*) from stb")
        tdSql.checkData(0,0,(self.row_nums + 5 )*self.tb_nums)
        tdSql.query("select count(c1) from stb")
        tdSql.checkData(0,0,(self.row_nums )*self.tb_nums)
        tdSql.query(" select tbname , count(*) from stb partition by tbname ")
        tdSql.checkRows(self.tb_nums)
        tdSql.query(" select count(c1) from stb group by t1 order by t1 ")
        tdSql.checkRows(self.tb_nums)
        tdSql.error(" select count(c1) from stb group by c1 order by t1 ")
        tdSql.error(" select count(t1) from stb group by c1 order by t1 ")
        tdSql.query(" select count(c1) from stb group by tbname order by tbname ")
        tdSql.checkRows(self.tb_nums)
        # bug need fix
        # tdSql.query(" select count(t1) from stb group by t2 order by t2 ")
        # tdSql.checkRows(self.tb_nums)
        tdSql.query(" select count(c1) from stb group by c1 order by c1 ")
        tdSql.checkRows(self.row_nums+1)

        tdSql.query(" select c1 , count(c1) from stb group by c1 order by c1 ")
        tdSql.checkRows(self.row_nums+1)

        tdSql.query("select count(c1) from stb group by abs(c1) order by abs(c1)")
        tdSql.checkRows(self.row_nums+1)
        tdSql.query("select abs(c1+c3), count(c1+c3) from stb group by abs(c1+c3) order by abs(c1+c3)")
        tdSql.checkRows(self.row_nums+1)
        tdSql.query("select count(c1+c3)+max(c2) ,abs(c1) from stb group by abs(c1) order by abs(c1)")
        tdSql.checkRows(self.row_nums+1)
        tdSql.error("select count(c1+c3)+max(c2) ,abs(c1) ,abs(t1) from stb group by abs(c1) order by abs(t1)+c2")
        tdSql.error("select count(c1+c3)+max(c2) ,abs(c1) from stb group by abs(c1) order by abs(c1)+c2")
        tdSql.query("select abs(c1+c3)+abs(c2) , count(c1+c3)+count(c2) from stb group by abs(c1+c3)+abs(c2) order by abs(c1+c3)+abs(c2)")
        tdSql.checkRows(self.row_nums+1)

        tdSql.query("select count(c1) , count(t2) from stb where abs(c1+t2)=1 partition by tbname")
        tdSql.checkRows(2)
        tdSql.query("select count(c1) from stb where abs(c1+t2)=1 partition by tbname")
        tdSql.checkRows(2)

        tdSql.query("select tbname , count(c1) from stb partition by tbname order by tbname")
        tdSql.checkRows(self.tb_nums)
        tdSql.checkData(0,1,self.row_nums)

        tdSql.error("select tbname , count(c1) from stb partition by t1 order by t1")
        tdSql.error("select tbname , count(t1) from stb partition by t1 order by t1")
        tdSql.error("select tbname , count(t1) from stb partition by t2 order by t2")

        # # bug need fix
        # tdSql.query("select t2 , count(t1) from stb partition by t2 order by t2")
        # tdSql.checkRows(self.tb_nums)

        tdSql.query("select tbname , count(c1) from stb partition by tbname order by tbname")
        tdSql.checkRows(self.tb_nums)
        tdSql.checkData(0,1,self.row_nums)


        tdSql.error("select tbname , count(c1) from stb partition by t2 order by t2")

        tdSql.query("select c2, count(c1) from stb partition by c2 order by c2 desc")
        tdSql.checkRows(self.tb_nums+1)
        tdSql.checkData(0,1,self.tb_nums)

        tdSql.error("select tbname , count(c1) from stb partition by c1 order by c2")


        tdSql.query("select tbname , abs(t2) from stb partition by c2 order by t2")
        tdSql.checkRows(self.tb_nums*(self.row_nums+5))

        tdSql.query("select count(c1) , count(t2) from stb partition by c2 ")
        tdSql.checkRows(self.row_nums+1)
        tdSql.checkData(0,1,self.row_nums)

        tdSql.query("select count(c1) , count(t2) ,c2 from stb partition by c2 order by c2")
        tdSql.checkRows(self.row_nums+1)

        tdSql.query("select count(c1) , count(t1) ,max(c2) ,tbname from stb partition by tbname order by tbname")
        tdSql.checkRows(self.tb_nums)
        tdSql.checkCols(4)

        tdSql.query("select count(c1) , count(t2) ,t1 from stb partition by t1 order by t1")
        tdSql.checkRows(self.tb_nums)
        tdSql.checkData(0,0,self.row_nums)

        # bug need fix
        # tdSql.query("select count(c1) , count(t1) ,abs(c1) from stb partition by abs(c1) order by abs(c1)")
        # tdSql.checkRows(self.row_nums+1)


        tdSql.query("select count(ceil(c2)) , count(floor(t2)) ,count(floor(c2)) from stb partition by abs(c2) order by abs(c2)")
        tdSql.checkRows(self.row_nums+1)


        tdSql.query("select count(ceil(c1-2)) , count(floor(t2+1)) ,max(c2-c1) from stb partition by abs(floor(c1)) order by abs(floor(c1))")
        tdSql.checkRows(self.row_nums+1)


        # interval
        tdSql.query("select count(c1) from stb interval(2s) sliding(1s)")

        # bug need fix

        tdSql.query('select max(c1) from stb where ts>="2022-07-06 16:00:00.000 " and ts < "2022-07-06 17:00:00.000 " interval(50s) sliding(30s) fill(NULL)')

        tdSql.query(" select tbname , count(c1) from stb partition by tbname interval(10s) slimit 5 soffset 1 ")

        tdSql.query("select tbname , count(c1) from stb partition by tbname interval(10s)")

        tdSql.query("select tbname , count(c1) from sub_stb_1 partition by tbname interval(10s)")
        tdSql.checkData(0,0,'sub_stb_1')
        tdSql.checkData(0,1,self.row_nums)

        # tdSql.query(" select tbname , count(c1) from stb partition by tbname order by tbname slimit 5 soffset 0 ")
        # tdSql.checkRows(5)

        # tdSql.query(" select tbname , count(c1) from stb partition by tbname order by tbname slimit 5 soffset 1 ")
        # tdSql.checkRows(5)

        tdSql.query(" select tbname , count(c1) from sub_stb_1 partition by tbname interval(10s) sliding(5s) ")

        tdSql.query(f'select max(c1) from stb where ts>={self.ts} and ts < {self.ts}+10000 partition by tbname interval(50s) sliding(30s)')
        tdSql.query(f'select max(c1) from stb where ts>={self.ts} and ts < {self.ts}+10000 interval(50s) sliding(30s)')
        tdSql.query(f'select tbname , count(c1) from stb where ts>={self.ts} and ts < {self.ts}+10000 partition by tbname interval(50s) sliding(30s)')


    def run(self):
        tdSql.prepare()
        self.prepare_datas("stb",self.tb_nums,self.row_nums)
        self.basic_query()

        # # coverage case for taosd crash about bug fix
        tdSql.query(" select sum(c1) from stb where t2+10 >1 ")
        tdSql.query(" select count(c1),count(t1) from stb where -t2<1 ")
        tdSql.query(" select tbname ,max(ceil(c1)) from stb group by tbname ")
        tdSql.query(" select avg(abs(c1)) , tbname from stb group by tbname ")
        tdSql.query(" select t1,c1 from stb where abs(t2+c1)=1 ")


    def stop(self):
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)


tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
@@ -0,0 +1,189 @@
# author : wenzhouwww
from util.log import *
from util.sql import *
from util.cases import *

class TDTestCase:
    def init(self, conn, logSql):
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor())

        self.row_nums = 10
        self.tb_nums = 10
        self.ts = 1537146000000

    def prepare_datas(self, stb_name , tb_nums , row_nums ):
        tdSql.execute(" use db ")
        tdSql.execute(f" create stable {stb_name} (ts timestamp , c1 int , c2 bigint , c3 float , c4 double , c5 smallint , c6 tinyint , c7 bool , c8 binary(36) , c9 nchar(36) , uc1 int unsigned,\
            uc2 bigint unsigned ,uc3 smallint unsigned , uc4 tinyint unsigned ) tags(t1 timestamp , t2 int , t3 bigint , t4 float , t5 double , t6 smallint , t7 tinyint , t8 bool , t9 binary(36)\
            , t10 nchar(36) , t11 int unsigned , t12 bigint unsigned ,t13 smallint unsigned , t14 tinyint unsigned ) ")

        for i in range(tb_nums):
            tbname = f"sub_{stb_name}_{i}"
            ts = self.ts + i*10000
            tdSql.execute(f"create table {tbname} using {stb_name} tags ({ts} , {i} , {i}*10 ,{i}*1.0,{i}*1.0 , 1 , 2, 'true', 'binary_{i}' ,'nchar_{i}',{i},{i},10,20 )")

            for row in range(row_nums):
                ts = self.ts + row*1000
                tdSql.execute(f"insert into {tbname} values({ts} , {row} , {row} , {row} , {row} , 1 , 2 , 'true' , 'binary_{row}' , 'nchar_{row}' , {row} , {row} , 1 ,2 )")

            for null in range(5):
                ts = self.ts + row_nums*1000 + null*1000
                tdSql.execute(f"insert into {tbname} values({ts} , NULL , NULL , NULL , NULL , NULL , NULL , NULL , NULL , NULL , NULL , NULL , NULL , NULL )")

    def basic_query(self):
        tdSql.query("select count(*) from stb")
        tdSql.checkData(0,0,(self.row_nums + 5 )*self.tb_nums)
        tdSql.query("select max(c1) from stb")
        tdSql.checkData(0,0,(self.row_nums -1))
        tdSql.query(" select tbname , max(c1) from stb partition by tbname ")
        tdSql.checkRows(self.tb_nums)
        tdSql.query(" select max(c1) from stb group by t1 order by t1 ")
        tdSql.checkRows(self.tb_nums)
        tdSql.query(" select max(c1) from stb group by c1 order by t1 ")
        tdSql.query(" select max(t2) from stb group by c1 order by t1 ")
        tdSql.query(" select max(c1) from stb group by tbname order by tbname ")
        tdSql.checkRows(self.tb_nums)
        # bug need fix
        # tdSql.query(" select max(t1) from stb group by t2 order by t2 ")
        # tdSql.checkRows(self.tb_nums)
        tdSql.query(" select max(c1) from stb group by c1 order by c1 ")
        tdSql.checkRows(self.row_nums+1)

        tdSql.query(" select c1 , max(c1) from stb group by c1 order by c1 ")
        tdSql.checkRows(self.row_nums+1)

        # support selective functions
        tdSql.query(" select c1 ,c2 ,c3 , max(c1) ,c4 ,c5 ,t11 from stb group by c1 order by c1 desc ")
        tdSql.checkRows(self.row_nums+1)

        tdSql.query(" select c1, tbname , max(c1) ,c4 ,c5 ,t11 from stb group by c1 order by c1 desc ")
        tdSql.checkRows(self.row_nums+1)

        # bug need fix
        # tdSql.query(" select tbname , max(c1) from sub_stb_1 where c1 is null group by c1 order by c1 desc ")
        # tdSql.checkRows(1)
        # tdSql.checkData(0,0,"sub_stb_1")

        tdSql.query("select max(c1) ,c2 ,t2,tbname from stb group by abs(c1) order by abs(c1)")
        tdSql.checkRows(self.row_nums+1)
        tdSql.query("select abs(c1+c3), count(c1+c3) ,max(c1+t2) from stb group by abs(c1+c3) order by abs(c1+c3)")
        tdSql.checkRows(self.row_nums+1)
        tdSql.query("select max(c1+c3)+min(c2) ,abs(c1) from stb group by abs(c1) order by abs(c1)")
        tdSql.checkRows(self.row_nums+1)
        tdSql.error("select count(c1+c3)+max(c2) ,abs(c1) ,abs(t1) from stb group by abs(c1) order by abs(t1)+c2")
        tdSql.error("select count(c1+c3)+max(c2) ,abs(c1) from stb group by abs(c1) order by abs(c1)+c2")
        tdSql.query("select abs(c1+c3)+abs(c2) , count(c1+c3)+max(c2) from stb group by abs(c1+c3)+abs(c2) order by abs(c1+c3)+abs(c2)")
        tdSql.checkRows(self.row_nums+1)

        tdSql.query(" select max(c1) , max(t2) from stb where abs(c1+t2)=1 partition by tbname ")
        tdSql.checkRows(2)
        tdSql.query(" select max(c1) from stb where abs(c1+t2)=1 partition by tbname ")
        tdSql.checkRows(2)

        tdSql.query(" select tbname , max(c1) from stb partition by tbname order by tbname ")
        tdSql.checkRows(self.tb_nums)
        tdSql.checkData(0,1,self.row_nums-1)

        tdSql.query("select tbname , max(c2) from stb partition by t1 order by t1")
        tdSql.query("select tbname , max(t2) from stb partition by t1 order by t1")
        tdSql.query("select tbname , max(t2) from stb partition by t2 order by t2")

        # # bug need fix
        # tdSql.query("select t2 , max(t2) from stb partition by t2 order by t2")
        # tdSql.checkRows(self.tb_nums)

        tdSql.query("select tbname , max(c1) from stb partition by tbname order by tbname")
        tdSql.checkRows(self.tb_nums)
        tdSql.checkData(0,1,self.row_nums-1)


        tdSql.query("select tbname , max(c1) from stb partition by t2 order by t2")

        tdSql.query("select c2, max(c1) from stb partition by c2 order by c2 desc")
        tdSql.checkRows(self.tb_nums+1)
        tdSql.checkData(0,1,self.row_nums-1)

        tdSql.query("select tbname , max(c1) from stb partition by c1 order by c2")


        tdSql.query("select tbname , abs(t2) from stb partition by c2 order by t2")
        tdSql.checkRows(self.tb_nums*(self.row_nums+5))

        tdSql.query("select max(c1) , count(t2) from stb partition by c2 ")
        tdSql.checkRows(self.row_nums+1)
        tdSql.checkData(0,1,self.row_nums)

        tdSql.query("select count(c1) , max(t2) ,c2 from stb partition by c2 order by c2")
        tdSql.checkRows(self.row_nums+1)

        tdSql.query("select count(c1) , count(t1) ,max(c2) ,tbname from stb partition by tbname order by tbname")
        tdSql.checkRows(self.tb_nums)
        tdSql.checkCols(4)

        tdSql.query("select count(c1) , max(t2) ,t1 from stb partition by t1 order by t1")
        tdSql.checkRows(self.tb_nums)
        tdSql.checkData(0,0,self.row_nums)

        # bug need fix
        # tdSql.query("select count(c1) , max(t1) ,abs(c1) from stb partition by abs(c1) order by abs(c1)")
        # tdSql.checkRows(self.row_nums+1)


        tdSql.query("select max(ceil(c2)) , max(floor(t2)) ,max(floor(c2)) from stb partition by abs(c2) order by abs(c2)")
        tdSql.checkRows(self.row_nums+1)


        tdSql.query("select max(ceil(c1-2)) , max(floor(t2+1)) ,max(c2-c1) from stb partition by abs(floor(c1)) order by abs(floor(c1))")
        tdSql.checkRows(self.row_nums+1)


        # interval
        tdSql.query("select max(c1) from stb interval(2s) sliding(1s)")

        # bug need fix

        tdSql.query('select max(c1) from stb where ts>="2022-07-06 16:00:00.000 " and ts < "2022-07-06 17:00:00.000 " interval(50s) sliding(30s) fill(NULL)')

        tdSql.query(" select tbname , count(c1) from stb partition by tbname interval(10s) slimit 5 soffset 1 ")

        tdSql.query("select tbname , max(c1) from stb partition by tbname interval(10s)")
        tdSql.checkRows(self.row_nums*2)

        tdSql.query("select tbname , count(c1) from sub_stb_1 partition by tbname interval(10s)")
        tdSql.checkData(0,0,'sub_stb_1')
        tdSql.checkData(0,1,self.row_nums)

        # bug need fix
        # tdSql.query(" select tbname , max(c1) from stb partition by tbname order by tbname slimit 5 soffset 0 ")
        # tdSql.checkRows(5)

        # tdSql.query(" select tbname , max(c1) from stb partition by tbname order by tbname slimit 5 soffset 1 ")
        # tdSql.checkRows(5)

        tdSql.query(" select tbname , max(c1) from sub_stb_1 partition by tbname interval(10s) sliding(5s) ")

        tdSql.query(f'select max(c1) from stb where ts>={self.ts} and ts < {self.ts}+1000 interval(50s) sliding(30s)')
        tdSql.query(f'select tbname , max(c1) from stb where ts>={self.ts} and ts < {self.ts}+1000 interval(50s) sliding(30s)')


    def run(self):
        tdSql.prepare()
        self.prepare_datas("stb",self.tb_nums,self.row_nums)
        self.basic_query()

        # # coverage case for taosd crash about bug fix
        tdSql.query(" select sum(c1) from stb where t2+10 >1 ")
        tdSql.query(" select count(c1),count(t1) from stb where -t2<1 ")
        tdSql.query(" select tbname ,max(ceil(c1)) from stb group by tbname ")
        tdSql.query(" select avg(abs(c1)) , tbname from stb group by tbname ")
        tdSql.query(" select t1,c1 from stb where abs(t2+c1)=1 ")


    def stop(self):
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)


tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
@@ -16,9 +16,9 @@ from tmqCommon import *

 class TDTestCase:
     def __init__(self):
-        self.vgroups = 2
-        self.ctbNum = 100
-        self.rowsPerTbl = 10000
+        self.vgroups = 4
+        self.ctbNum = 1000
+        self.rowsPerTbl = 1000

     def init(self, conn, logSql):
         tdLog.debug(f"start to excute {__file__}")

@@ -29,7 +29,7 @@ class TDTestCase:
         paraDict = {'dbName': 'dbt',
                     'dropFlag': 1,
                     'event': '',
-                    'vgroups': 3,
+                    'vgroups': 4,
                     'stbName': 'stb',
                     'colPrefix': 'c',
                     'tagPrefix': 't',

@@ -37,14 +37,14 @@ class TDTestCase:
                     'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
                     'ctbPrefix': 'ctb',
                     'ctbStartIdx': 0,
-                    'ctbNum': 500,
+                    'ctbNum': 1000,
                     'rowsPerTbl': 1000,
-                    'batchNum': 500,
+                    'batchNum': 400,
                     'startTs': 1640966400000,  # 2022-01-01 00:00:00.000
                     'pollDelay': 3,
                     'showMsg': 1,
                     'showRow': 1,
-                    'snapshot': 0}
+                    'snapshot': 1}

         paraDict['vgroups'] = self.vgroups
         paraDict['ctbNum'] = self.ctbNum

@@ -54,20 +54,21 @@ class TDTestCase:
         tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1)
         tdLog.info("create stb")
         tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"])
-        tdLog.info("create ctb")
-        tmqCom.create_ctable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict['ctbPrefix'],
-                             ctbNum=paraDict["ctbNum"],ctbStartIdx=paraDict['ctbStartIdx'])
-        tdLog.info("insert data")
-        tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
-                                               ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
-                                               startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
+        # tdLog.info("create ctb")
+        # tmqCom.create_ctable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict['ctbPrefix'],
+        #                      ctbNum=paraDict["ctbNum"],ctbStartIdx=paraDict['ctbStartIdx'])
+        # tdLog.info("insert data")
+        # tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
+        #                                        ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
+        #                                        startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])

-        tdLog.info("restart taosd to ensure that the data falls into the disk")
-        # tdDnodes.stop(1)
-        # tdDnodes.start(1)
-        tdSql.query("flush database %s"%(paraDict['dbName']))
+        tmqCom.insert_data_with_autoCreateTbl(tdSql,paraDict["dbName"],paraDict["stbName"],"ctbx",paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"])
+
+        # tdLog.info("restart taosd to ensure that the data falls into the disk")
+        # tdSql.query("flush database %s"%(paraDict['dbName']))
         return

+    # insert data via auto-created child tables, then start the consumer
     def tmqCase1(self):
         tdLog.printNoPrefix("======== test case 1: ")
         paraDict = {'dbName': 'dbt',

@@ -90,21 +91,16 @@ class TDTestCase:
                     'showRow': 1,
                     'snapshot': 1}

-        # paraDict['vgroups'] = self.vgroups
-        # paraDict['ctbNum'] = self.ctbNum
-        # paraDict['rowsPerTbl'] = self.rowsPerTbl
+        paraDict['vgroups'] = self.vgroups
+        paraDict['ctbNum'] = self.ctbNum
+        paraDict['rowsPerTbl'] = self.rowsPerTbl

-        tmqCom.initConsumerTable()
-        tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1)
-        tdLog.info("create stb")
-        tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"])
-        tdLog.info("create ctb")
-        tmqCom.create_ctable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict['ctbPrefix'],
-                             ctbNum=paraDict["ctbNum"],ctbStartIdx=paraDict['ctbStartIdx'])
-        tdLog.info("insert data")
-        tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
-                                               ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
-                                               startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
+        # tmqCom.initConsumerTable()
+        # tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1)
+        # tdLog.info("create stb")
+        # tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"])
+        # tdLog.info("insert data by auto create ctb")
+        # tmqCom.insert_data_with_autoCreateTbl(tdSql,paraDict["dbName"],paraDict["stbName"],"ctb",paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"])

         tdLog.info("create topics from stb1")
         topicFromStb1 = 'topic_stb1'

@@ -120,20 +116,13 @@ class TDTestCase:
         ifManualCommit = 0
         keyList = 'group.id:cgrp1,\
                    enable.auto.commit:true,\
-                   auto.commit.interval.ms:500,\
+                   auto.commit.interval.ms:1000,\
                    auto.offset.reset:earliest'
         tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)

         tdLog.info("start consume processor")
         tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])

-        # time.sleep(3)
-        tmqCom.getStartCommitNotifyFromTmqsim()
-        tdLog.info("================= restart dnode ===========================")
-        tdDnodes.stop(1)
-        tdDnodes.start(1)
-        time.sleep(5)
-
         tdLog.info("insert process end, and start to check consume result")
         expectRows = 1
         resultList = tmqCom.selectConsumeResult(expectRows)

@@ -172,23 +161,23 @@ class TDTestCase:
                     'pollDelay': 5,
                     'showMsg': 1,
                     'showRow': 1,
-                    'snapshot': 1}
+                    'snapshot': 0}

-        # paraDict['vgroups'] = self.vgroups
-        # paraDict['ctbNum'] = self.ctbNum
-        # paraDict['rowsPerTbl'] = self.rowsPerTbl
+        paraDict['vgroups'] = self.vgroups
+        paraDict['ctbNum'] = self.ctbNum
+        paraDict['rowsPerTbl'] = self.rowsPerTbl

         tmqCom.initConsumerTable()
-        tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1)
-        tdLog.info("create stb")
-        tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"])
-        tdLog.info("create ctb")
-        tmqCom.create_ctable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict['ctbPrefix'],
-                             ctbNum=paraDict["ctbNum"],ctbStartIdx=paraDict['ctbStartIdx'])
-        tdLog.info("insert data")
-        tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
-                                               ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
-                                               startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
+        # tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1)
+        # tdLog.info("create stb")
+        # tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"])
+        # tdLog.info("create ctb")
+        # tmqCom.create_ctable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict['ctbPrefix'],
+        #                      ctbNum=paraDict["ctbNum"],ctbStartIdx=paraDict['ctbStartIdx'])
+        # tdLog.info("insert data")
+        # tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
+        #                                        ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
+        #                                        startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
         tdLog.info("create topics from stb1")
         topicFromStb1 = 'topic_stb1'
         queryString = "select ts, c1, c2 from %s.%s"%(paraDict['dbName'], paraDict['stbName'])

@@ -211,13 +200,7 @@ class TDTestCase:
         tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])

         tdLog.info("create some new child table and insert data ")
-        tmqCom.insert_data_with_autoCreateTbl(tdSql,paraDict["dbName"],paraDict["stbName"],"ctb",paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"])
+        tmqCom.insert_data_with_autoCreateTbl(tdSql,paraDict["dbName"],paraDict["stbName"],"ctby",paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"])

-        tmqCom.getStartCommitNotifyFromTmqsim()
-        tdLog.info("================= restart dnode ===========================")
-        tdDnodes.stop(1)
-        tdDnodes.start(1)
-        time.sleep(5)
-
         tdLog.info("insert process end, and start to check consume result")
         expectRows = 1

@@ -237,91 +220,13 @@ class TDTestCase:

         tdLog.printNoPrefix("======== test case 2 end ...... ")

-    # insert data via auto-created child tables, then start the consumer
-    def tmqCase3(self):
-        tdLog.printNoPrefix("======== test case 3: ")
-        paraDict = {'dbName': 'dbt',
-                    'dropFlag': 1,
-                    'event': '',
-                    'vgroups': 4,
-                    'stbName': 'stb',
-                    'colPrefix': 'c',
-                    'tagPrefix': 't',
-                    'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
-                    'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
-                    'ctbPrefix': 'ctb',
-                    'ctbStartIdx': 0,
-                    'ctbNum': 1000,
-                    'rowsPerTbl': 1000,
-                    'batchNum': 400,
-                    'startTs': 1640966400000,  # 2022-01-01 00:00:00.000
-                    'pollDelay': 5,
-                    'showMsg': 1,
-                    'showRow': 1,
-                    'snapshot': 1}
-
-        paraDict['vgroups'] = self.vgroups
-        paraDict['ctbNum'] = self.ctbNum
-        paraDict['rowsPerTbl'] = self.rowsPerTbl
-
-        tmqCom.initConsumerTable()
-        tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1)
-        tdLog.info("create stb")
-        tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"])
-        tdLog.info("insert data by auto create ctb")
-        tmqCom.insert_data_with_autoCreateTbl(tdSql,paraDict["dbName"],paraDict["stbName"],"ctb",paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"])
-
-        tdLog.info("create topics from stb1")
-        topicFromStb1 = 'topic_stb1'
-        queryString = "select ts, c1, c2 from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
-        sqlString = "create topic %s as %s" %(topicFromStb1, queryString)
-        tdLog.info("create topic sql: %s"%sqlString)
-        tdSql.execute(sqlString)
-
-        consumerId = 0
-        expectrowcnt = paraDict["rowsPerTbl"] * paraDict["ctbNum"]
-        topicList = topicFromStb1
-        ifcheckdata = 0
-        ifManualCommit = 0
-        keyList = 'group.id:cgrp1,\
-                   enable.auto.commit:true,\
-                   auto.commit.interval.ms:1000,\
-                   auto.offset.reset:earliest'
-        tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
-
-        tdLog.info("start consume processor")
-        tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
-
-        # tdLog.info("================= restart dnode ===========================")
-        # tdDnodes.stop(1)
-        # tdDnodes.start(1)
-        # time.sleep(2)
-
-        tdLog.info("insert process end, and start to check consume result")
-        expectRows = 1
-        resultList = tmqCom.selectConsumeResult(expectRows)
-        totalConsumeRows = 0
-        for i in range(expectRows):
-            totalConsumeRows += resultList[i]
-
-        tdSql.query(queryString)
-        totalRowsInserted = tdSql.getRows()
-
-        if totalConsumeRows != totalRowsInserted:
-            tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, totalRowsInserted))
-            tdLog.exit("tmq consume rows error!")
-
-        tdSql.query("drop topic %s"%topicFromStb1)
-
-        tdLog.printNoPrefix("======== test case 3 end ...... ")
-
-
     def run(self):
         tdSql.prepare()
+        self.prepareTestEnv()
+        self.tmqCase1()
+        # self.tmqCase2() TD-17267

-        # self.tmqCase1()
-        # self.tmqCase2()
-        self.tmqCase3()

     def stop(self):
         tdSql.close()
@ -0,0 +1,253 @@
|
||||||
|
|
||||||
|
import taos
|
||||||
|
import sys
|
||||||
|
import time
|
||||||
|
import socket
|
||||||
|
import os
|
||||||
|
import threading
|
||||||
|
from enum import Enum
|
||||||
|
|
||||||
|
from util.log import *
|
||||||
|
from util.sql import *
|
||||||
|
from util.cases import *
|
||||||
|
from util.dnodes import *
|
||||||
|
sys.path.append("./7-tmq")
|
||||||
|
from tmqCommon import *
|
||||||
|
|
||||||
|
class TDTestCase:
|
||||||
|
def __init__(self):
|
||||||
|
self.vgroups = 2
|
||||||
|
self.ctbNum = 100
|
||||||
|
self.rowsPerTbl = 10000
|
||||||
|
|
||||||
|
def init(self, conn, logSql):
|
||||||
|
tdLog.debug(f"start to excute {__file__}")
|
||||||
|
tdSql.init(conn.cursor(), False)
|
||||||
|
|
||||||
|
def prepareTestEnv(self):
|
||||||
|
tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ")
|
||||||
|
paraDict = {'dbName': 'dbt',
|
||||||
|
'dropFlag': 1,
|
||||||
|
'event': '',
|
||||||
|
'vgroups': 3,
|
||||||
|
'stbName': 'stb',
|
||||||
|
'colPrefix': 'c',
|
||||||
|
'tagPrefix': 't',
|
||||||
|
'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
|
||||||
|
'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
|
||||||
|
'ctbPrefix': 'ctb',
|
||||||
|
'ctbStartIdx': 0,
|
||||||
|
'ctbNum': 500,
|
||||||
|
'rowsPerTbl': 1000,
|
||||||
|
'batchNum': 500,
|
||||||
|
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
|
||||||
|
'pollDelay': 3,
|
||||||
|
'showMsg': 1,
|
||||||
|
'showRow': 1,
|
||||||
|
'snapshot': 0}
|
||||||
|
|
||||||
|
paraDict['vgroups'] = self.vgroups
|
||||||
|
paraDict['ctbNum'] = self.ctbNum
|
||||||
|
paraDict['rowsPerTbl'] = self.rowsPerTbl
|
||||||
|
|
||||||
|
tmqCom.initConsumerTable()
|
||||||
|
tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1)
|
||||||
|
tdLog.info("create stb")
|
||||||
|
tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"])
|
||||||
|
tdLog.info("create ctb")
|
||||||
|
tmqCom.create_ctable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict['ctbPrefix'],
|
||||||
|
ctbNum=paraDict["ctbNum"],ctbStartIdx=paraDict['ctbStartIdx'])
|
||||||
|
tdLog.info("insert data")
|
||||||
|
tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
|
||||||
|
ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
|
||||||
|
startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
|
||||||
|
|
||||||
|
tdLog.info("restart taosd to ensure that the data falls into the disk")
|
||||||
|
# tdDnodes.stop(1)
|
||||||
|
# tdDnodes.start(1)
|
||||||
|
tdSql.query("flush database %s"%(paraDict['dbName']))
|
||||||
|
return
|
||||||
|
|
||||||
|
def tmqCase1(self):
|
||||||
|
tdLog.printNoPrefix("======== test case 1: ")
|
||||||
|
paraDict = {'dbName': 'dbt',
|
||||||
|
'dropFlag': 1,
|
||||||
|
'event': '',
|
||||||
|
'vgroups': 4,
|
||||||
|
'stbName': 'stb',
|
||||||
|
'colPrefix': 'c',
|
||||||
|
'tagPrefix': 't',
|
||||||
|
'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
|
||||||
|
'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
|
||||||
|
'ctbPrefix': 'ctb',
|
||||||
|
'ctbStartIdx': 0,
|
||||||
|
'ctbNum': 1000,
|
||||||
|
'rowsPerTbl': 1000,
|
||||||
|
'batchNum': 400,
|
||||||
|
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
|
||||||
|
'pollDelay': 5,
|
||||||
|
'showMsg': 1,
|
||||||
|
'showRow': 1,
|
||||||
|
'snapshot': 1}
|
||||||
|
|
||||||
|
# paraDict['vgroups'] = self.vgroups
|
||||||
|
# paraDict['ctbNum'] = self.ctbNum
|
||||||
|
# paraDict['rowsPerTbl'] = self.rowsPerTbl
|
||||||
|
|
||||||
|
tmqCom.initConsumerTable()
|
||||||
|
tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1)
|
||||||
|
tdLog.info("create stb")
|
||||||
|
tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"])
|
||||||
|
tdLog.info("create ctb")
|
||||||
|
tmqCom.create_ctable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict['ctbPrefix'],
|
||||||
|
ctbNum=paraDict["ctbNum"],ctbStartIdx=paraDict['ctbStartIdx'])
|
||||||
|
tdLog.info("insert data")
|
||||||
|
tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
|
||||||
|
ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
|
||||||
|
startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
|
||||||
|
|
||||||
|
tdLog.info("create topics from stb1")
|
||||||
|
topicFromStb1 = 'topic_stb1'
|
||||||
|
queryString = "select ts, c1, c2 from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
|
||||||
|
sqlString = "create topic %s as %s" %(topicFromStb1, queryString)
|
||||||
|
tdLog.info("create topic sql: %s"%sqlString)
|
||||||
|
tdSql.execute(sqlString)
|
||||||
|
|
||||||
|
consumerId = 0
|
||||||
|
expectrowcnt = paraDict["rowsPerTbl"] * paraDict["ctbNum"]
|
||||||
|
topicList = topicFromStb1
|
||||||
|
ifcheckdata = 0
|
||||||
|
ifManualCommit = 0
|
||||||
|
keyList = 'group.id:cgrp1,\
|
||||||
|
enable.auto.commit:true,\
|
||||||
|
auto.commit.interval.ms:500,\
|
||||||
|
auto.offset.reset:earliest'
|
||||||
|
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
|
||||||
|
|
||||||
|
tdLog.info("start consume processor")
|
||||||
|
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
|
||||||
|
|
||||||
|
# time.sleep(3)
|
||||||
|
tmqCom.getStartCommitNotifyFromTmqsim()
|
||||||
|
tdLog.info("================= restart dnode ===========================")
|
||||||
|
tdDnodes.stop(1)
|
||||||
|
tdDnodes.start(1)
|
||||||
|
time.sleep(5)
|
||||||
|
|
||||||
|
tdLog.info("insert process end, and start to check consume result")
|
||||||
|
expectRows = 1
|
||||||
|
resultList = tmqCom.selectConsumeResult(expectRows)
|
||||||
|
totalConsumeRows = 0
|
||||||
|
for i in range(expectRows):
|
||||||
|
totalConsumeRows += resultList[i]
|
||||||
|
|
||||||
|
tdSql.query(queryString)
|
||||||
|
totalRowsInserted = tdSql.getRows()
|
||||||
|
|
||||||
|
if totalConsumeRows != totalRowsInserted:
|
||||||
|
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, totalRowsInserted))
|
||||||
|
tdLog.exit("tmq consume rows error!")
|
||||||
|
|
||||||
|
tdSql.query("drop topic %s"%topicFromStb1)
|
||||||
|
|
||||||
|
tdLog.printNoPrefix("======== test case 1 end ...... ")
|
||||||
|
|
||||||
|
def tmqCase2(self):
|
||||||
|
tdLog.printNoPrefix("======== test case 2: ")
|
||||||
|
paraDict = {'dbName': 'dbt',
|
||||||
|
'dropFlag': 1,
|
||||||
|
'event': '',
|
||||||
|
'vgroups': 4,
|
||||||
|
'stbName': 'stb',
|
||||||
|
'colPrefix': 'c',
|
||||||
|
'tagPrefix': 't',
|
||||||
|
'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
|
||||||
|
'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
|
||||||
|
'ctbPrefix': 'ctb',
|
||||||
|
'ctbStartIdx': 0,
|
||||||
|
'ctbNum': 1000,
|
||||||
|
'rowsPerTbl': 1000,
|
||||||
|
'batchNum': 1000,
|
||||||
|
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
|
||||||
|
'pollDelay': 5,
|
||||||
|
'showMsg': 1,
|
||||||
|
'showRow': 1,
|
||||||
|
'snapshot': 1}
|
||||||
|
|
||||||
|
# paraDict['vgroups'] = self.vgroups
|
||||||
|
# paraDict['ctbNum'] = self.ctbNum
|
||||||
|
# paraDict['rowsPerTbl'] = self.rowsPerTbl
|
||||||
|
|
||||||
|
tmqCom.initConsumerTable()
|
||||||
|
tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1)
|
||||||
|
tdLog.info("create stb")
|
||||||
|
tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"])
|
||||||
|
tdLog.info("create ctb")
|
||||||
|
tmqCom.create_ctable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict['ctbPrefix'],
|
||||||
|
ctbNum=paraDict["ctbNum"],ctbStartIdx=paraDict['ctbStartIdx'])
|
||||||
|
tdLog.info("insert data")
|
||||||
|
tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
|
||||||
|
ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
|
||||||
|
startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
|
||||||
|
tdLog.info("create topics from stb1")
|
||||||
|
topicFromStb1 = 'topic_stb1'
|
||||||
|
queryString = "select ts, c1, c2 from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
|
||||||
|
sqlString = "create topic %s as %s" %(topicFromStb1, queryString)
|
||||||
|
tdLog.info("create topic sql: %s"%sqlString)
|
||||||
|
tdSql.execute(sqlString)
|
||||||
|
|
||||||
|
consumerId = 0
|
||||||
|
expectrowcnt = paraDict["rowsPerTbl"] * paraDict["ctbNum"] * 2
|
||||||
|
topicList = topicFromStb1
|
||||||
|
ifcheckdata = 0
|
||||||
|
ifManualCommit = 0
|
||||||
|
keyList = 'group.id:cgrp1,\
|
||||||
|
enable.auto.commit:true,\
|
||||||
|
auto.commit.interval.ms:1000,\
|
||||||
|
auto.offset.reset:earliest'
|
||||||
|
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
|
||||||
|
|
||||||
|
tdLog.info("start consume processor")
|
||||||
|
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
|
||||||
|
|
||||||
|
tdLog.info("create some new child table and insert data ")
|
||||||
|
tmqCom.insert_data_with_autoCreateTbl(tdSql,paraDict["dbName"],paraDict["stbName"],"ctb",paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"])
|
||||||
|
|
||||||
|
tmqCom.getStartCommitNotifyFromTmqsim()
|
||||||
|
tdLog.info("================= restart dnode ===========================")
|
||||||
|
tdDnodes.stop(1)
|
||||||
|
tdDnodes.start(1)
|
||||||
|
time.sleep(5)
|
||||||
|
|
||||||
|
tdLog.info("insert process end, and start to check consume result")
|
||||||
|
expectRows = 1
|
||||||
|
resultList = tmqCom.selectConsumeResult(expectRows)
|
||||||
|
totalConsumeRows = 0
|
||||||
|
for i in range(expectRows):
|
||||||
|
totalConsumeRows += resultList[i]
|
||||||
|
|
||||||
|
tdSql.query(queryString)
|
||||||
|
totalRowsInserted = tdSql.getRows()
|
||||||
|
|
||||||
|
if totalConsumeRows != totalRowsInserted:
|
||||||
|
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, totalRowsInserted))
|
||||||
|
tdLog.exit("tmq consume rows error!")
|
||||||
|
|
||||||
|
tdSql.query("drop topic %s"%topicFromStb1)
|
||||||
|
|
||||||
|
tdLog.printNoPrefix("======== test case 2 end ...... ")
|
||||||
|
|
||||||
|
def run(self):
|
||||||
|
tdSql.prepare()
|
||||||
|
|
||||||
|
self.tmqCase1()
|
||||||
|
self.tmqCase2()
|
||||||
|
|
||||||
|
def stop(self):
|
||||||
|
tdSql.close()
|
||||||
|
tdLog.success(f"{__file__} successfully executed")
|
||||||
|
|
||||||
|
event = threading.Event()
|
||||||
|
|
||||||
|
tdCases.addLinux(__file__, TDTestCase())
|
||||||
|
tdCases.addWindows(__file__, TDTestCase())
|
|
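The consumer keys in keyList above (group.id, enable.auto.commit, auto.commit.interval.ms, auto.offset.reset) are handed to the tmq_sim consumer process as a raw string. For readers who want to reproduce the same subscription outside the test harness, a minimal sketch with the taospy client follows; it assumes the taos.tmq.Consumer API of recent taospy releases and a running TDengine instance where the topic_stb1 topic already exists, so treat it as illustrative rather than part of this commit.

# Minimal sketch (assumption: taospy exposes taos.tmq.Consumer as in its TMQ examples).
# Topic name and consumer keys are taken from the test case above.
from taos.tmq import Consumer

consumer = Consumer({
    "group.id": "cgrp1",
    "enable.auto.commit": "true",
    "auto.commit.interval.ms": "1000",
    "auto.offset.reset": "earliest",
})
consumer.subscribe(["topic_stb1"])

total_rows = 0
try:
    while True:
        msg = consumer.poll(1)           # wait up to 1 second for new data
        if msg is None:
            break                        # nothing arrived within the timeout
        err = msg.error()
        if err is not None:
            raise err
        for block in msg.value():        # each message carries one or more data blocks
            total_rows += len(block.fetchall())
finally:
    consumer.unsubscribe()
    consumer.close()

print("consumed rows:", total_rows)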
@ -118,9 +118,11 @@ python3 ./test.py -f 2-query/distribute_agg_stddev.py
python3 ./test.py -f 2-query/twa.py
python3 ./test.py -f 2-query/irate.py
python3 ./test.py -f 2-query/and_or_for_byte.py
python3 ./test.py -f 2-query/count_partition.py
python3 ./test.py -f 2-query/function_null.py
#python3 ./test.py -f 2-query/queryQnode.py
python3 ./test.py -f 2-query/queryQnode.py
python3 ./test.py -f 2-query/max_partition.py

python3 ./test.py -f 6-cluster/5dnode1mnode.py
#BUG python3 ./test.py -f 6-cluster/5dnode2mnode.py -N 5 -M 3
@ -174,8 +176,8 @@ python3 ./test.py -f 7-tmq/tmqConsFromTsdb-mutilVg-mutilCtb.py
python3 ./test.py -f 7-tmq/tmqConsFromTsdb1-1ctb-funcNFilter.py
python3 ./test.py -f 7-tmq/tmqConsFromTsdb1-mutilVg-mutilCtb-funcNFilter.py
python3 ./test.py -f 7-tmq/tmqConsFromTsdb1-mutilVg-mutilCtb.py
python3 ./test.py -f 7-tmq/tmqAutoCreateTbl.py
#python3 ./test.py -f 7-tmq/tmqDnodeRestart.py

#------------querPolicy 2-----------
@ -263,6 +265,8 @@ python3 ./test.py -f 2-query/distribute_agg_stddev.py -Q 2
python3 ./test.py -f 2-query/twa.py -Q 2
python3 ./test.py -f 2-query/irate.py -Q 2
python3 ./test.py -f 2-query/function_null.py -Q 2
python3 ./test.py -f 2-query/count_partition.py -Q 2
python3 ./test.py -f 2-query/max_partition.py -Q 2

#------------querPolicy 3-----------
@ -348,3 +352,5 @@ python3 ./test.py -f 2-query/distribute_agg_stddev.py -Q 3
python3 ./test.py -f 2-query/twa.py -Q 3
python3 ./test.py -f 2-query/irate.py -Q 3
python3 ./test.py -f 2-query/function_null.py -Q 3
python3 ./test.py -f 2-query/count_partition.py -Q 3
python3 ./test.py -f 2-query/max_partition.py -Q 3
@ -1 +1 @@
Subproject commit 7a94ffab45f08e16f09b3f430fe75d717054adb6
Subproject commit 6dccac192a2ae7dd78718ab926201aab5419327a