add function to frame

commit bf3606f395
@@ -226,9 +226,10 @@ Please note the `taoskeeper` needs to be installed and running to create the `lo
 | Attribute     | Description |
 | ------------- | --------------------------------------------------------------------------------------------------------------- |
 | Applicable    | Client only |
-| Meaning       | When the Last, First, or LastRow function is queried, whether the returned column name contains the function name. |
-| Value Range   | 0 means the function name is included, 1 means it is not included. |
+| Meaning       | When the Last, First, or LastRow function is queried and no alias is specified, the alias is automatically set to the column name (excluding the function name). An order by clause that refers to the column name therefore refers to the function applied to that column. |
+| Value Range   | 1 means the alias is automatically set to the column name (excluding the function name); 0 means the alias is not set automatically. |
 | Default Value | 0 |
+| Notes         | When several of these functions act on the same column at the same time and no alias is specified, an order by clause that refers to the column name causes a column-selection ambiguity error, because the aliases of the columns are identical. |

## Locale Parameters
@@ -215,9 +215,10 @@ taos -C
 | Attribute     | Description |
 | ------------- | ----------------------------------------------------------- |
 | Applicable    | Client only |
-| Meaning       | When the Last, First, or LastRow function is queried, whether the returned column name contains the function name. |
-| Value Range   | 0 means the function name is included, 1 means it is not included. |
+| Meaning       | When the Last, First, or LastRow function is queried and no alias is specified, the alias is automatically set to the column name (excluding the function name). An order by clause that refers to the column name therefore refers to the function applied to that column. |
+| Value Range   | 1 means the alias is automatically set to the column name (excluding the function name); 0 means the alias is not set automatically. |
 | Default Value | 0 |
+| Notes         | When several of these functions act on the same column at the same time and no alias is specified, an order by clause that refers to the column name causes a column-selection conflict, because the aliases of the columns are identical. |

### countAlwaysReturnValue
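The renamed behavior is easiest to see from a client. A minimal sketch, assuming the Python connector and a table `test.tb` with an INT column `c1`; the database, table, and column names here are illustrative, not part of this commit:

```python
import taos  # TDengine Python connector, as imported by the tests in this commit

# Assumes keepColumnName is set in the client-side taos.cfg.
conn = taos.connect()
cur = conn.cursor()
cur.execute("select last(c1) from test.tb")
# keepColumnName = 0 (default): the result column is reported as last(c1)
# keepColumnName = 1: the alias becomes the bare column name c1, so an
# "order by c1" in the same statement refers to last(c1)
print([col[0] for col in cur.description])
```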
@@ -155,6 +155,7 @@ typedef struct STscObj {
   int8_t         biMode;
   int32_t        acctId;
   uint32_t       connId;
+  int32_t        appHbMgrIdx;
   int64_t        id;         // ref ID returned by taosAddRef
   TdThreadMutex  mutex;      // used to protect the operation on db
   int32_t        numOfReqs;  // number of sqlObj bound to this connection
@@ -283,6 +283,7 @@ void *createTscObj(const char *user, const char *auth, const char *db, int32_t c

   pObj->connType = connType;
   pObj->pAppInfo = pAppInfo;
+  pObj->appHbMgrIdx = pAppInfo->pAppHbMgr->idx;
   tstrncpy(pObj->user, user, sizeof(pObj->user));
   memcpy(pObj->pass, auth, TSDB_PASSWORD_LEN);

@@ -30,7 +30,7 @@ typedef struct {
   };
 } SHbParam;

-static SClientHbMgr clientHbMgr = {0};
+SClientHbMgr clientHbMgr = {0};

 static int32_t hbCreateThread();
 static void    hbStopThread();
@@ -1294,9 +1294,8 @@ void hbMgrCleanUp() {

   taosThreadMutexLock(&clientHbMgr.lock);
   appHbMgrCleanup();
-  taosArrayDestroy(clientHbMgr.appHbMgrs);
+  clientHbMgr.appHbMgrs = taosArrayDestroy(clientHbMgr.appHbMgrs);
   taosThreadMutexUnlock(&clientHbMgr.lock);
-  clientHbMgr.appHbMgrs = NULL;
 }

 int hbRegisterConnImpl(SAppHbMgr *pAppHbMgr, SClientHbKey connKey, int64_t clusterId) {
@@ -1335,19 +1334,18 @@ int hbRegisterConn(SAppHbMgr *pAppHbMgr, int64_t tscRefId, int64_t clusterId, in
 }

 void hbDeregisterConn(STscObj *pTscObj, SClientHbKey connKey) {
-  SAppHbMgr    *pAppHbMgr = pTscObj->pAppInfo->pAppHbMgr;
-  SClientHbReq *pReq = taosHashAcquire(pAppHbMgr->activeInfo, &connKey, sizeof(SClientHbKey));
-  if (pReq) {
-    tFreeClientHbReq(pReq);
-    taosHashRemove(pAppHbMgr->activeInfo, &connKey, sizeof(SClientHbKey));
-    taosHashRelease(pAppHbMgr->activeInfo, pReq);
+  taosThreadMutexLock(&clientHbMgr.lock);
+  SAppHbMgr *pAppHbMgr = taosArrayGetP(clientHbMgr.appHbMgrs, pTscObj->appHbMgrIdx);
+  if (pAppHbMgr) {
+    SClientHbReq *pReq = taosHashAcquire(pAppHbMgr->activeInfo, &connKey, sizeof(SClientHbKey));
+    if (pReq) {
+      tFreeClientHbReq(pReq);
+      taosHashRemove(pAppHbMgr->activeInfo, &connKey, sizeof(SClientHbKey));
+      taosHashRelease(pAppHbMgr->activeInfo, pReq);
+      atomic_sub_fetch_32(&pAppHbMgr->connKeyCnt, 1);
+    }
   }
-  if (NULL == pReq) {
-    return;
-  }
-
-  atomic_sub_fetch_32(&pAppHbMgr->connKeyCnt, 1);
+  taosThreadMutexUnlock(&clientHbMgr.lock);
 }

 // set heart beat thread quit mode , if quicByKill 1 then kill thread else quit from inner
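The connection no longer caches a raw `SAppHbMgr*`; it stores `appHbMgrIdx` and resolves the manager from the global `clientHbMgr.appHbMgrs` array while holding `clientHbMgr.lock`, so a manager already destroyed by `hbMgrCleanUp` is observed as absent instead of being dereferenced. A minimal Python model of that lookup-under-lock pattern (names are illustrative, not TDengine API):

```python
import threading

class ClientHbMgr:
    """Global registry; a slot may hold None once its manager is cleaned up."""
    def __init__(self):
        self.lock = threading.Lock()
        self.app_hb_mgrs = []

    def deregister_conn(self, conn):
        with self.lock:  # hold the lock across lookup *and* use
            mgr = self.app_hb_mgrs[conn.app_hb_mgr_idx]  # index stored at create time
            if mgr is None:  # manager already destroyed: nothing to do
                return
            req = mgr.active.pop(conn.key, None)
            if req is not None:
                mgr.conn_key_cnt -= 1  # mirrors atomic_sub_fetch_32 in the C code
```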
@@ -26,6 +26,8 @@
 #include "tname.h"
 #include "tversion.h"

+extern SClientHbMgr clientHbMgr;
+
 static void setErrno(SRequestObj* pRequest, int32_t code) {
   pRequest->code = code;
   terrno = code;
@@ -63,8 +65,9 @@ int32_t processConnectRsp(void* param, SDataBuf* pMsg, int32_t code) {

   STscObj* pTscObj = pRequest->pTscObj;

-  if (NULL == pTscObj->pAppInfo || NULL == pTscObj->pAppInfo->pAppHbMgr) {
-    setErrno(pRequest, TSDB_CODE_TSC_DISCONNECTED);
+  if (NULL == pTscObj->pAppInfo) {
+    code = TSDB_CODE_TSC_DISCONNECTED;
+    setErrno(pRequest, code);
     tsem_post(&pRequest->body.rspSem);
     goto End;
   }
@@ -95,7 +98,8 @@ int32_t processConnectRsp(void* param, SDataBuf* pMsg, int32_t code) {
   }

   if (connectRsp.epSet.numOfEps == 0) {
-    setErrno(pRequest, TSDB_CODE_APP_ERROR);
+    code = TSDB_CODE_APP_ERROR;
+    setErrno(pRequest, code);
     tsem_post(&pRequest->body.rspSem);
     goto End;
   }
@@ -142,7 +146,18 @@ int32_t processConnectRsp(void* param, SDataBuf* pMsg, int32_t code) {
   pTscObj->authVer = connectRsp.authVer;
   pTscObj->whiteListInfo.ver = connectRsp.whiteListVer;

-  hbRegisterConn(pTscObj->pAppInfo->pAppHbMgr, pTscObj->id, connectRsp.clusterId, connectRsp.connType);
+  taosThreadMutexLock(&clientHbMgr.lock);
+  SAppHbMgr* pAppHbMgr = taosArrayGetP(clientHbMgr.appHbMgrs, pTscObj->appHbMgrIdx);
+  if (pAppHbMgr) {
+    hbRegisterConn(pAppHbMgr, pTscObj->id, connectRsp.clusterId, connectRsp.connType);
+  } else {
+    taosThreadMutexUnlock(&clientHbMgr.lock);
+    code = TSDB_CODE_TSC_DISCONNECTED;
+    setErrno(pRequest, code);
+    tsem_post(&pRequest->body.rspSem);
+    goto End;
+  }
+  taosThreadMutexUnlock(&clientHbMgr.lock);

   tscDebug("0x%" PRIx64 " clusterId:%" PRId64 ", totalConn:%" PRId64, pRequest->requestId, connectRsp.clusterId,
            pTscObj->pAppInfo->numOfConns);
@@ -941,7 +941,7 @@ static int32_t tsdbCacheLoadFromRaw(STsdb *pTsdb, tb_uid_t uid, SArray *pLastArr
   }

   if (lastrowTmpIndexArray != NULL) {
-    mergeLastCid(uid, pTsdb, &lastrowTmpColArray, pr, lastrowColIds, lastrowIndex, lastrowSlotIds);
+    mergeLastRowCid(uid, pTsdb, &lastrowTmpColArray, pr, lastrowColIds, lastrowIndex, lastrowSlotIds);
     for (int i = 0; i < taosArrayGetSize(lastrowTmpColArray); i++) {
       taosArrayInsert(pTmpColArray, *(int32_t*)taosArrayGet(lastrowTmpIndexArray, i), taosArrayGet(lastrowTmpColArray, i));
     }
@@ -200,6 +200,7 @@ typedef struct SExchangeInfo {
   uint64_t    self;
   SLimitInfo  limitInfo;
   int64_t     openedTs;  // start exec time stamp, todo: move to SLoadRemoteDataInfo
+  char*       pTaskId;
 } SExchangeInfo;

 typedef struct SScanInfo {
@@ -272,7 +273,8 @@ typedef struct STableScanInfo {
   SSampleExecInfo sample;             // sample execution info
   int32_t         tableStartIndex;    // current group scan start
   int32_t         tableEndIndex;      // current group scan end
-  int32_t         currentGroupIndex;  // current group index of groupOffset
+  int32_t         currentGroupId;
+  int32_t         currentTable;
   int8_t          scanMode;
   int8_t          assignBlockUid;
   uint8_t         countState;         // empty table count state
@@ -260,14 +260,17 @@ static int32_t initDataSource(int32_t numOfSources, SExchangeInfo* pInfo, const
     return TSDB_CODE_SUCCESS;
   }

+  int32_t len = strlen(id) + 1;
+  pInfo->pTaskId = taosMemoryCalloc(1, len);
+  strncpy(pInfo->pTaskId, id, len);
   for (int32_t i = 0; i < numOfSources; ++i) {
     SSourceDataInfo dataInfo = {0};
     dataInfo.status = EX_SOURCE_DATA_NOT_READY;
-    dataInfo.taskId = id;
+    dataInfo.taskId = pInfo->pTaskId;
     dataInfo.index = i;
     SSourceDataInfo* pDs = taosArrayPush(pInfo->pSourceDataInfo, &dataInfo);
     if (pDs == NULL) {
-      taosArrayDestroy(pInfo->pSourceDataInfo);
+      taosArrayDestroyEx(pInfo->pSourceDataInfo, freeSourceDataInfo);
       return TSDB_CODE_OUT_OF_MEMORY;
     }
   }
@@ -383,6 +386,8 @@ void doDestroyExchangeOperatorInfo(void* param) {
   tSimpleHashCleanup(pExInfo->pHashSources);

   tsem_destroy(&pExInfo->ready);
+  taosMemoryFreeClear(pExInfo->pTaskId);
+
   taosMemoryFreeClear(param);
 }

@@ -782,7 +787,7 @@ int32_t addSingleExchangeSource(SOperatorInfo* pOperator, SExchangeOperatorBasic
   if (pIdx->inUseIdx < 0) {
     SSourceDataInfo dataInfo = {0};
     dataInfo.status = EX_SOURCE_DATA_NOT_READY;
-    dataInfo.taskId = GET_TASKID(pOperator->pTaskInfo);
+    dataInfo.taskId = pExchangeInfo->pTaskId;
     dataInfo.index = pIdx->srcIdx;
     dataInfo.pSrcUidList = taosArrayDup(pBasicParam->uidList, NULL);
     dataInfo.srcOpType = pBasicParam->srcOpType;
@@ -646,10 +646,6 @@ int32_t getColInfoResultForGroupby(void* pVnode, SNodeList* group, STableListInf
     }
   }

-  if (initRemainGroups) {
-    pTableListInfo->numOfOuputGroups = taosHashGetSize(pTableListInfo->remainGroups);
-  }
-
   if (tsTagFilterCache) {
     tableList = taosArrayDup(pTableListInfo->pTableList, NULL);
     pAPI->metaFn.metaPutTbGroupToCache(pVnode, pTableListInfo->idInfo.suid, context.digest, tListLen(context.digest),
@@ -2142,6 +2138,8 @@ int32_t buildGroupIdMapForAllTables(STableListInfo* pTableListInfo, SReadHandle*
     pTableListInfo->numOfOuputGroups = numOfTables;
   } else if (groupByTbname && pScanNode->groupOrderScan){
     pTableListInfo->numOfOuputGroups = numOfTables;
+  } else if (groupByTbname && tsCountAlwaysReturnValue && ((STableScanPhysiNode*)pScanNode)->needCountEmptyTable) {
+    pTableListInfo->numOfOuputGroups = numOfTables;
   } else {
     pTableListInfo->numOfOuputGroups = 1;
   }
@@ -2159,6 +2157,8 @@ int32_t buildGroupIdMapForAllTables(STableListInfo* pTableListInfo, SReadHandle*
     return code;
   }

+  if (pScanNode->groupOrderScan) pTableListInfo->numOfOuputGroups = taosArrayGetSize(pTableListInfo->pTableList);
+
   if (groupSort || pScanNode->groupOrderScan) {
     code = sortTableGroup(pTableListInfo);
   }
@@ -1264,7 +1264,7 @@ int32_t qStreamPrepareScan(qTaskInfo_t tinfo, STqOffsetVal* pOffset, int8_t subT
       STableKeyInfo* pTableInfo = tableListGetInfo(pTableListInfo, 0);
       uid = pTableInfo->uid;
       ts = INT64_MIN;
-      pScanInfo->tableEndIndex = 0;
+      pScanInfo->currentTable = 0;
     } else {
       taosRUnLockLatch(&pTaskInfo->lock);
       qError("no table in table list, %s", id);
@@ -1278,16 +1278,16 @@ int32_t qStreamPrepareScan(qTaskInfo_t tinfo, STqOffsetVal* pOffset, int8_t subT
     pInfo->pTableScanOp->resultInfo.totalRows = 0;

     // start from current accessed position
-    // we cannot start from the pScanInfo->tableEndIndex, since the commit offset may cause the rollback of the start
+    // we cannot start from the pScanInfo->currentTable, since the commit offset may cause the rollback of the start
     // position, let's find it from the beginning.
     index = tableListFind(pTableListInfo, uid, 0);
     taosRUnLockLatch(&pTaskInfo->lock);

     if (index >= 0) {
-      pScanInfo->tableEndIndex = index;
+      pScanInfo->currentTable = index;
     } else {
       qError("vgId:%d uid:%" PRIu64 " not found in table list, total:%d, index:%d %s", pTaskInfo->id.vgId, uid,
-             numOfTables, pScanInfo->tableEndIndex, id);
+             numOfTables, pScanInfo->currentTable, id);
       terrno = TSDB_CODE_PAR_INTERNAL_ERROR;
       return -1;
     }
@@ -1310,12 +1310,12 @@ int32_t qStreamPrepareScan(qTaskInfo_t tinfo, STqOffsetVal* pOffset, int8_t subT
     }

     qDebug("tsdb reader created with offset(snapshot) uid:%" PRId64 " ts:%" PRId64 " table index:%d, total:%d, %s",
-           uid, pScanBaseInfo->cond.twindows.skey, pScanInfo->tableEndIndex, numOfTables, id);
+           uid, pScanBaseInfo->cond.twindows.skey, pScanInfo->currentTable, numOfTables, id);
   } else {
     pTaskInfo->storageAPI.tsdReader.tsdSetQueryTableList(pScanBaseInfo->dataReader, &keyInfo, 1);
     pTaskInfo->storageAPI.tsdReader.tsdReaderResetStatus(pScanBaseInfo->dataReader, &pScanBaseInfo->cond);
     qDebug("tsdb reader offset seek snapshot to uid:%" PRId64 " ts %" PRId64 " table index:%d numOfTable:%d, %s",
-           uid, pScanBaseInfo->cond.twindows.skey, pScanInfo->tableEndIndex, numOfTables, id);
+           uid, pScanBaseInfo->cond.twindows.skey, pScanInfo->currentTable, numOfTables, id);
   }

   // restore the key value
@@ -657,33 +657,17 @@ void setTbNameColData(const SSDataBlock* pBlock, SColumnInfoData* pColInfoData,


 static void initNextGroupScan(STableScanInfo* pInfo, STableKeyInfo** pKeyInfo, int32_t* size) {
-  pInfo->tableStartIndex = pInfo->tableEndIndex + 1;
-
-  STableListInfo* pTableListInfo = pInfo->base.pTableListInfo;
-  int32_t         numOfTables = tableListGetSize(pTableListInfo);
-  STableKeyInfo*  pStart = (STableKeyInfo*)tableListGetInfo(pTableListInfo, pInfo->tableStartIndex);
-
-  if (pTableListInfo->oneTableForEachGroup) {
-    pInfo->tableEndIndex = pInfo->tableStartIndex;
-  } else if (pTableListInfo->groupOffset) {
-    pInfo->currentGroupIndex++;
-    if (pInfo->currentGroupIndex + 1 < pTableListInfo->numOfOuputGroups) {
-      pInfo->tableEndIndex = pTableListInfo->groupOffset[pInfo->currentGroupIndex + 1] - 1;
-    } else {
-      pInfo->tableEndIndex = numOfTables - 1;
-    }
-  } else {
-    pInfo->tableEndIndex = numOfTables - 1;
-  }
+  tableListGetGroupList(pInfo->base.pTableListInfo, pInfo->currentGroupId, pKeyInfo, size);
+
+  pInfo->tableStartIndex = TARRAY_ELEM_IDX(pInfo->base.pTableListInfo->pTableList, *pKeyInfo);
+  pInfo->tableEndIndex = (pInfo->tableStartIndex + (*size) - 1);

   if (!pInfo->needCountEmptyTable) {
     pInfo->countState = TABLE_COUNT_STATE_END;
   } else {
     pInfo->countState = TABLE_COUNT_STATE_SCAN;
   }

-  *pKeyInfo = pStart;
-  *size = pInfo->tableEndIndex - pInfo->tableStartIndex + 1;
 }

 void markGroupProcessed(STableScanInfo* pInfo, uint64_t groupId) {
@@ -939,7 +923,7 @@ static SSDataBlock* startNextGroupScan(SOperatorInfo* pOperator) {
   SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
   SStorageAPI*   pAPI = &pTaskInfo->storageAPI;
   int32_t        numOfTables = tableListGetSize(pInfo->base.pTableListInfo);
-  if (pInfo->tableEndIndex + 1 >= numOfTables) {
+  if ((++pInfo->currentGroupId) >= tableListGetOutputGroups(pInfo->base.pTableListInfo)) {
     setOperatorCompleted(pOperator);
     if (pOperator->dynamicTask) {
       taosArrayClear(pInfo->base.pTableListInfo->pTableList);
@@ -978,13 +962,14 @@ static SSDataBlock* groupSeqTableScan(SOperatorInfo* pOperator) {
   int32_t        num = 0;
   STableKeyInfo* pList = NULL;

-  if (pInfo->tableEndIndex == -1) {
+  if (pInfo->currentGroupId == -1) {
     int32_t numOfTables = tableListGetSize(pInfo->base.pTableListInfo);
-    if (pInfo->tableEndIndex + 1 == numOfTables) {
+    if ((++pInfo->currentGroupId) >= tableListGetOutputGroups(pInfo->base.pTableListInfo)) {
       setOperatorCompleted(pOperator);
       return NULL;
     }

     initNextGroupScan(pInfo, &pList, &num);
     ASSERT(pInfo->base.dataReader == NULL);
@@ -1034,7 +1019,7 @@ static SSDataBlock* doTableScan(SOperatorInfo* pOperator) {
       T_LONG_JMP(pTaskInfo->env, code);
     }
     if (pOperator->status == OP_EXEC_DONE) {
-      pInfo->tableEndIndex = -1;
+      pInfo->currentGroupId = -1;
       pOperator->status = OP_OPENED;
       SSDataBlock* result = NULL;
       while (true) {
@@ -1059,23 +1044,23 @@ static SSDataBlock* doTableScan(SOperatorInfo* pOperator) {
     }

     // if no data, switch to next table and continue scan
-    pInfo->tableEndIndex++;
+    pInfo->currentTable++;

     taosRLockLatch(&pTaskInfo->lock);
     numOfTables = tableListGetSize(pInfo->base.pTableListInfo);

-    if (pInfo->tableEndIndex >= numOfTables) {
+    if (pInfo->currentTable >= numOfTables) {
       qDebug("all table checked in table list, total:%d, return NULL, %s", numOfTables, GET_TASKID(pTaskInfo));
       taosRUnLockLatch(&pTaskInfo->lock);
       return NULL;
     }

-    tInfo = *(STableKeyInfo*)tableListGetInfo(pInfo->base.pTableListInfo, pInfo->tableEndIndex);
+    tInfo = *(STableKeyInfo*)tableListGetInfo(pInfo->base.pTableListInfo, pInfo->currentTable);
     taosRUnLockLatch(&pTaskInfo->lock);

     pAPI->tsdReader.tsdSetQueryTableList(pInfo->base.dataReader, &tInfo, 1);
     qDebug("set uid:%" PRIu64 " into scanner, total tables:%d, index:%d/%d %s", tInfo.uid, numOfTables,
-           pInfo->tableEndIndex, numOfTables, GET_TASKID(pTaskInfo));
+           pInfo->currentTable, numOfTables, GET_TASKID(pTaskInfo));

     pAPI->tsdReader.tsdReaderResetStatus(pInfo->base.dataReader, &pInfo->base.cond);
     pInfo->scanTimes = 0;
@@ -1167,9 +1152,10 @@ SOperatorInfo* createTableScanOperatorInfo(STableScanPhysiNode* pTableScanNode,
   if (code != TSDB_CODE_SUCCESS) {
     goto _error;
   }

+  pInfo->currentGroupId = -1;
   pInfo->tableEndIndex = -1;
-  pInfo->currentGroupIndex = -1;
   pInfo->assignBlockUid = pTableScanNode->assignBlockUid;
   pInfo->hasGroupByTag = pTableScanNode->pGroupTags ? true : false;

@@ -1264,6 +1250,7 @@ void resetTableScanInfo(STableScanInfo* pTableScanInfo, STimeWindow* pWin, uint6
   pTableScanInfo->base.cond.startVersion = 0;
   pTableScanInfo->base.cond.endVersion = ver;
   pTableScanInfo->scanTimes = 0;
+  pTableScanInfo->currentGroupId = -1;
   pTableScanInfo->tableEndIndex = -1;
   pTableScanInfo->base.readerAPI.tsdReaderClose(pTableScanInfo->base.dataReader);
   pTableScanInfo->base.dataReader = NULL;
@@ -2167,7 +2154,7 @@ static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) {
       pInfo->pTableScanOp->status = OP_OPENED;

       pTSInfo->scanTimes = 0;
-      pTSInfo->tableEndIndex = -1;
+      pTSInfo->currentGroupId = -1;
     }

     if (pStreamInfo->recoverStep == STREAM_RECOVER_STEP__SCAN1) {
@@ -500,34 +500,27 @@ int32_t backendCopyFiles(char* src, char* dst) {
   // return 0;
 }
 int32_t rebuildFromLocalChkp(char* key, char* chkpPath, int64_t chkpId, char* defaultPath) {
-  int32_t code = -1;
-  int32_t len = strlen(defaultPath) + 32;
-  char*   tmp = taosMemoryCalloc(1, len);
-  sprintf(tmp, "%s%s", defaultPath, "_tmp");
-
-  if (taosIsDir(tmp)) taosRemoveDir(tmp);
-  if (taosIsDir(defaultPath)) taosRenameFile(defaultPath, tmp);
-
-  if (taosIsDir(chkpPath) && isValidCheckpoint(chkpPath)) {
-    if (taosIsDir(tmp)) {
-      taosRemoveDir(tmp);
-    }
+  int32_t code = 0;
+  if (taosIsDir(defaultPath)) {
+    taosRemoveDir(defaultPath);
     taosMkDir(defaultPath);
+    stInfo("succ to clear stream backend %s", defaultPath);
+  }
+
+  if (taosIsDir(chkpPath) && isValidCheckpoint(chkpPath)) {
     code = backendCopyFiles(chkpPath, defaultPath);
     if (code != 0) {
-      stError("failed to restart stream backend from %s, reason: %s", chkpPath, tstrerror(TAOS_SYSTEM_ERROR(errno)));
+      taosRemoveDir(defaultPath);
+      taosMkDir(defaultPath);
+
+      stError("failed to restart stream backend from %s, reason: %s, start to restart from empty path: %s", chkpPath,
+              tstrerror(TAOS_SYSTEM_ERROR(errno)), defaultPath);
+      code = 0;
     } else {
       stInfo("start to restart stream backend at checkpoint path: %s", chkpPath);
     }
   }
-
-  if (code != 0) {
-    if (taosIsDir(defaultPath)) taosRemoveDir(defaultPath);
-    if (taosIsDir(tmp)) taosRenameFile(tmp, defaultPath);
-  } else {
-    taosRemoveDir(tmp);
-  }
-
-  taosMemoryFree(tmp);
   return code;
 }

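The rebuild path drops the `_tmp` rename dance: it now always clears `defaultPath`, copies the checkpoint in if it is valid, and on a failed copy falls back to an empty backend instead of failing the rebuild. A rough Python model of the new control flow, under the assumption that `backendCopyFiles` behaves like a recursive copy:

```python
import os
import shutil

def rebuild_from_local_chkp(chkp_path, default_path, is_valid_checkpoint):
    # Always start from an empty default path ("succ to clear stream backend").
    if os.path.isdir(default_path):
        shutil.rmtree(default_path)
        os.makedirs(default_path)
    if os.path.isdir(chkp_path) and is_valid_checkpoint(chkp_path):
        try:
            shutil.copytree(chkp_path, default_path, dirs_exist_ok=True)
        except OSError:
            # Failed copy: restart from an empty path rather than erroring out.
            shutil.rmtree(default_path, ignore_errors=True)
            os.makedirs(default_path)
    return 0  # the C version now returns 0 on both paths
```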
@@ -90,6 +90,7 @@ SRowBuffPos* createSessionWinBuff(SStreamFileState* pFileState, SSessionKey* pKe
   SRowBuffPos* pNewPos = getNewRowPosForWrite(pFileState);
   memcpy(pNewPos->pKey, pKey, sizeof(SSessionKey));
   pNewPos->needFree = true;
+  pNewPos->beFlushed = true;
   memcpy(pNewPos->pRowBuff, p, *pVLen);
   taosMemoryFree(p);
   return pNewPos;
@@ -217,6 +218,7 @@ int32_t getSessionFlushedBuff(SStreamFileState* pFileState, SSessionKey* pKey, v
   SRowBuffPos* pNewPos = getNewRowPosForWrite(pFileState);
   memcpy(pNewPos->pKey, pKey, sizeof(SSessionKey));
   pNewPos->needFree = true;
+  pNewPos->beFlushed = true;
   void*   pBuff = NULL;
   int32_t code = streamStateSessionGet_rocksdb(getStateFileStore(pFileState), pKey, &pBuff, pVLen);
   if (code != TSDB_CODE_SUCCESS) {
@@ -307,6 +309,7 @@ int32_t allocSessioncWinBuffByNextPosition(SStreamFileState* pFileState, SStream
     }
     pNewPos = getNewRowPosForWrite(pFileState);
     pNewPos->needFree = true;
+    pNewPos->beFlushed = true;
   }

 _end:
@@ -482,6 +485,7 @@ int32_t sessionWinStateGetKVByCur(SStreamStateCur* pCur, SSessionKey* pKey, void
     SRowBuffPos* pNewPos = getNewRowPosForWrite(pCur->pStreamFileState);
     memcpy(pNewPos->pKey, pKey, sizeof(SSessionKey));
     pNewPos->needFree = true;
+    pNewPos->beFlushed = true;
     memcpy(pNewPos->pRowBuff, pData, *pVLen);
     (*pVal) = pNewPos;
   }
@@ -698,6 +698,7 @@ int32_t streamStateSessionPut(SStreamState* pState, const SSessionKey* key, void
       stDebug("===stream===save skey:%" PRId64 ", ekey:%" PRId64 ", groupId:%" PRIu64 ".code:%d", key->win.skey,
               key->win.ekey, key->groupId, code);
     } else {
+      pos->beFlushed = false;
       code = putSessionWinResultBuff(pState->pFileState, value);
     }
   }
@@ -345,9 +345,9 @@ int32_t snapshotReSend(SSyncSnapshotSender *pSender) {

   for (int32_t seq = pSndBuf->cursor + 1; seq < pSndBuf->end; ++seq) {
     SyncSnapBlock *pBlk = pSndBuf->entries[seq % pSndBuf->size];
-    ASSERT(pBlk && !pBlk->acked);
+    ASSERT(pBlk);
     int64_t nowMs = taosGetTimestampMs();
-    if (nowMs < pBlk->sendTimeMs + SYNC_SNAP_RESEND_MS) {
+    if (pBlk->acked || nowMs < pBlk->sendTimeMs + SYNC_SNAP_RESEND_MS) {
       continue;
     }
     if (syncSnapSendMsg(pSender, pBlk->seq, pBlk->pBlock, pBlk->blockLen, 0) != 0) {
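With the relaxed assertion, the resend loop may now encounter blocks that were acked after the cursor advanced; it skips those and only resends blocks whose last send has timed out. A small Python model of the scan (the timeout value is illustrative, not the real constant):

```python
SYNC_SNAP_RESEND_MS = 1000  # illustrative; the real value lives in the sync module

def blocks_to_resend(entries, cursor, end, size, now_ms):
    """Walk the ring buffer in (cursor, end) and pick timed-out, un-acked blocks."""
    picked = []
    for seq in range(cursor + 1, end):
        blk = entries[seq % size]
        assert blk is not None  # presence is still required; being un-acked is not
        if blk["acked"] or now_ms < blk["send_time_ms"] + SYNC_SNAP_RESEND_MS:
            continue
        picked.append(seq)
    return picked
```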
@@ -37,9 +37,6 @@
 // This magic number is the number of 100 nanosecond intervals since January 1, 1601 (UTC)
 // until 00:00:00 January 1, 1970
 static const uint64_t TIMEEPOCH = ((uint64_t)116444736000000000ULL);
-// This magic number is the number of 100 nanosecond intervals since January 1, 1601 (UTC)
-// until 00:00:00 January 1, 1900
-static const uint64_t TIMEEPOCH1900 = ((uint64_t)116445024000000000ULL);

 /*
  * We do not implement alternate representations. However, we always
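The surviving constant can be checked directly: 116444736000000000 is the number of 100-nanosecond intervals between the Windows FILETIME epoch (1601-01-01) and the Unix epoch (1970-01-01). A quick verification in Python:

```python
from datetime import datetime, timezone

win_epoch = datetime(1601, 1, 1, tzinfo=timezone.utc)
unix_epoch = datetime(1970, 1, 1, tzinfo=timezone.utc)
seconds = int((unix_epoch - win_epoch).total_seconds())  # 11644473600
assert seconds * 10_000_000 == 116444736000000000        # TIMEEPOCH
```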
@@ -360,6 +357,7 @@ int32_t taosGetTimeOfDay(struct timeval *tv) {
   t.QuadPart -= TIMEEPOCH;
   tv->tv_sec = t.QuadPart / 10000000;
   tv->tv_usec = (t.QuadPart % 10000000) / 10;
+  return 0;
 #else
   return gettimeofday(tv, NULL);
 #endif
@@ -482,33 +480,51 @@ struct tm *taosLocalTime(const time_t *timep, struct tm *result, char *buf) {
       sprintf(buf, "NaN");
     }
     return NULL;
-  }
-
-  SYSTEMTIME    s;
-  FILETIME      f;
-  LARGE_INTEGER offset;
-  struct tm     tm1;
-  time_t        tt = 0;
-  if (localtime_s(&tm1, &tt) != 0) {
-    if (buf != NULL) {
-      sprintf(buf, "NaN");
+  } else if (*timep < 0) {
+    SYSTEMTIME ss, s;
+    FILETIME   ff, f;
+
+    LARGE_INTEGER offset;
+    struct tm     tm1;
+    time_t        tt = 0;
+    if (localtime_s(&tm1, &tt) != 0) {
+      if (buf != NULL) {
+        sprintf(buf, "NaN");
+      }
+      return NULL;
+    }
+    ss.wYear = tm1.tm_year + 1900;
+    ss.wMonth = tm1.tm_mon + 1;
+    ss.wDay = tm1.tm_mday;
+    ss.wHour = tm1.tm_hour;
+    ss.wMinute = tm1.tm_min;
+    ss.wSecond = tm1.tm_sec;
+    ss.wMilliseconds = 0;
+    SystemTimeToFileTime(&ss, &ff);
+    offset.QuadPart = ff.dwHighDateTime;
+    offset.QuadPart <<= 32;
+    offset.QuadPart |= ff.dwLowDateTime;
+    offset.QuadPart += *timep * 10000000;
+    f.dwLowDateTime = offset.QuadPart & 0xffffffff;
+    f.dwHighDateTime = (offset.QuadPart >> 32) & 0xffffffff;
+    FileTimeToSystemTime(&f, &s);
+    result->tm_sec = s.wSecond;
+    result->tm_min = s.wMinute;
+    result->tm_hour = s.wHour;
+    result->tm_mday = s.wDay;
+    result->tm_mon = s.wMonth - 1;
+    result->tm_year = s.wYear - 1900;
+    result->tm_wday = s.wDayOfWeek;
+    result->tm_yday = 0;
+    result->tm_isdst = 0;
+  } else {
+    if (localtime_s(result, timep) != 0) {
+      if (buf != NULL) {
+        sprintf(buf, "NaN");
+      }
+      return NULL;
     }
-    return NULL;
   }
-  offset.QuadPart = TIMEEPOCH1900;
-  offset.QuadPart += *timep * 10000000;
-  f.dwLowDateTime = offset.QuadPart & 0xffffffff;
-  f.dwHighDateTime = (offset.QuadPart >> 32) & 0xffffffff;
-  FileTimeToSystemTime(&f, &s);
-  result->tm_sec = s.wSecond;
-  result->tm_min = s.wMinute;
-  result->tm_hour = s.wHour;
-  result->tm_mday = s.wDay;
-  result->tm_mon = s.wMonth - 1;
-  result->tm_year = s.wYear - 1900;
-  result->tm_wday = s.wDayOfWeek;
-  result->tm_yday = 0;
-  result->tm_isdst = 0;
 #else
   res = localtime_r(timep, result);
   if (res == NULL && buf != NULL) {
@@ -0,0 +1,105 @@
+import taos
+import sys
+import os
+import subprocess
+import glob
+import shutil
+import time
+
+from frame.log import *
+from frame.cases import *
+from frame.sql import *
+from frame.caseBase import *
+from frame import *
+from frame.autogen import *
+from frame.server.dnodes import *
+from frame.server.cluster import *
+
+
+class TDTestCase(TBase):
+
+    def init(self, conn, logSql, replicaVar=3):
+        super(TDTestCase, self).init(conn, logSql, replicaVar=3, db="snapshot", checkColName="c1")
+        self.valgrind = 0
+        self.childtable_count = 10
+        # tdSql.init(conn.cursor())
+        tdSql.init(conn.cursor(), logSql)  # output sql.txt file
+
+    def run(self):
+        tdSql.prepare()
+        autoGen = AutoGen()
+        autoGen.create_db(self.db, 2, 3)
+        tdSql.execute(f"use {self.db}")
+        autoGen.create_stable(self.stb, 5, 10, 8, 8)
+        autoGen.create_child(self.stb, "d", self.childtable_count)
+        autoGen.insert_data(1000)
+        tdSql.execute(f"flush database {self.db}")
+        clusterDnodes.stoptaosd(3)
+        # clusterDnodes.stoptaosd(1)
+        # clusterDnodes.starttaosd(3)
+        # time.sleep(5)
+        # clusterDnodes.stoptaosd(2)
+        # clusterDnodes.starttaosd(1)
+        # time.sleep(5)
+        autoGen.insert_data(5000, True)
+        tdSql.execute(f"flush database {self.db}")
+
+        # sql = 'show vnodes;'
+        # while True:
+        #     bFinish = True
+        #     param_list = tdSql.query(sql, row_tag=True)
+        #     for param in param_list:
+        #         if param[3] == 'leading' or param[3] == 'following':
+        #             bFinish = False
+        #             break
+        #     if bFinish:
+        #         break
+        self.snapshotAgg()
+        time.sleep(10)
+        clusterDnodes.stopAll()
+        for i in range(1, 4):
+            path = clusterDnodes.getDnodeDir(i)
+            dnodesRootDir = os.path.join(path, "data", "vnode", "vnode*")
+            dirs = glob.glob(dnodesRootDir)
+            for dir in dirs:
+                if os.path.isdir(dir):
+                    tdLog.debug("delete dir: %s " % (dnodesRootDir))
+                    self.remove_directory(os.path.join(dir, "wal"))
+
+        clusterDnodes.starttaosd(1)
+        clusterDnodes.starttaosd(2)
+        clusterDnodes.starttaosd(3)
+        sql = "show vnodes;"
+        time.sleep(10)
+        while True:
+            bFinish = True
+            param_list = tdSql.query(sql, row_tag=True)
+            for param in param_list:
+                if param[3] == 'offline':
+                    tdLog.exit(
+                        "dnode synchronous fail dnode id: %d, vgroup id:%d status offline" % (param[0], param[1]))
+                if param[3] == 'leading' or param[3] == 'following':
+                    bFinish = False
+                    break
+            if bFinish:
+                break
+
+        self.timestamp_step = 1
+        self.insert_rows = 6000
+        self.checkInsertCorrect()
+        self.checkAggCorrect()
+
+    def remove_directory(self, directory):
+        try:
+            shutil.rmtree(directory)
+            tdLog.debug("delete dir: %s " % (directory))
+        except OSError as e:
+            tdLog.exit("delete fail dir: %s " % (directory))
+
+    def stop(self):
+        tdSql.close()
+        tdLog.success(f"{__file__} successfully executed")
+
+
+tdCases.addLinux(__file__, TDTestCase())
+tdCases.addWindows(__file__, TDTestCase())
@@ -119,6 +119,8 @@ class TDTestCase(TBase):
         # stop taosd test taos as server
         sc.dnodeStop(idx)
         etool.exeBinFile("taos", f'-n server', wait=False)
+        time.sleep(3)
+        eos.exe("pkill -9 taos")

     # run
     def run(self):
@@ -0,0 +1,61 @@
+{
+    "filetype": "insert",
+    "cfgdir": "/etc/taos",
+    "host": "127.0.0.1",
+    "port": 6030,
+    "user": "root",
+    "password": "taosdata",
+    "connection_pool_size": 8,
+    "num_of_records_per_req": 4000,
+    "prepared_rand": 10000,
+    "thread_count": 3,
+    "create_table_thread_count": 1,
+    "confirm_parameter_prompt": "no",
+    "databases": [
+        {
+            "dbinfo": {
+                "name": "db",
+                "drop": "no",
+                "vgroups": 3,
+                "replica": 3,
+                "duration": "3d",
+                "wal_retention_period": 1,
+                "wal_retention_size": 1,
+                "stt_trigger": 1
+            },
+            "super_tables": [
+                {
+                    "name": "stb",
+                    "child_table_exists": "yes",
+                    "childtable_count": 6,
+                    "insert_rows": 50000,
+                    "childtable_prefix": "d",
+                    "insert_mode": "taosc",
+                    "timestamp_step": 60000,
+                    "start_timestamp": 1700000000000,
+                    "columns": [
+                        { "type": "bool", "name": "bc" },
+                        { "type": "float", "name": "fc" },
+                        { "type": "double", "name": "dc" },
+                        { "type": "tinyint", "name": "ti" },
+                        { "type": "smallint", "name": "si" },
+                        { "type": "int", "name": "ic" },
+                        { "type": "bigint", "name": "bi" },
+                        { "type": "utinyint", "name": "uti" },
+                        { "type": "usmallint", "name": "usi" },
+                        { "type": "uint", "name": "ui" },
+                        { "type": "ubigint", "name": "ubi" },
+                        { "type": "binary", "name": "bin", "len": 8 },
+                        { "type": "nchar", "name": "nch", "len": 16 }
+                    ],
+                    "tags": [
+                        { "type": "tinyint", "name": "groupid", "max": 10, "min": 1 },
+                        { "name": "location", "type": "binary", "len": 16, "values":
+                            ["San Francisco", "Los Angles", "San Diego", "San Jose", "Palo Alto", "Campbell", "Mountain View", "Sunnyvale", "Santa Clara", "Cupertino"]
+                        }
+                    ]
+                }
+            ]
+        }
+    ]
+}
@@ -188,11 +188,6 @@ class TDTestCase(TBase):
             sql_list.append(item["sql"])
             res_list.append(item["res"])
         tdSql.queryAndCheckResult(sql_list, res_list)
-        # tdSql.query(q["sql"])
-        # if len(q["res"]) == 0:
-        #     assert(len(tdSql.queryResult) == len(q["res"]))
-        # else:
-        #     assert(len(tdSql.queryResult) == len(q["res"]) and tdSql.queryResult == q["res"])

     def test_query_with_other_function(self):
         query_list = [
@@ -211,9 +206,6 @@ class TDTestCase(TBase):
             sql_list.append(item["sql"])
             res_list.append(item["res"])
         tdSql.queryAndCheckResult(sql_list, res_list)
-        # for q in query_list:
-        #     tdSql.query(q["sql"])
-        #     assert(len(tdSql.queryResult) == len(q["res"]) and tdSql.queryResult == q["res"])

     def test_query_with_join(self):
         query_list = [
@@ -268,9 +260,6 @@ class TDTestCase(TBase):
             sql_list.append(item["sql"])
             res_list.append(item["res"])
         tdSql.queryAndCheckResult(sql_list, res_list)
-        # for q in query_list:
-        #     tdSql.query(q["sql"])
-        #     assert(len(tdSql.queryResult) == len(q["res"]) and tdSql.queryResult == q["res"])

     def test_query_with_union(self):
         query_list = [
@@ -333,10 +322,6 @@ class TDTestCase(TBase):
             sql_list.append(item["sql"])
             res_list.append(item["res"])
         tdSql.queryAndCheckResult(sql_list, res_list)
-        # for q in query_list:
-        #     tdSql.query(q["sql"])
-        #     tdLog.debug(q["sql"] + " with res: " + str(tdSql.queryResult))
-        #     assert(len(tdSql.queryResult) == len(q["res"]) and tdSql.queryResult == q["res"])

     def test_query_with_window(self):
         query_list = [
@@ -363,9 +348,6 @@ class TDTestCase(TBase):
             sql_list.append(item["sql"])
             res_list.append(item["res"])
         tdSql.queryAndCheckResult(sql_list, res_list)
-        # for q in query_list:
-        #     tdSql.query(q["sql"])
-        #     assert(len(tdSql.queryResult) == len(q["res"]) and tdSql.queryResult == q["res"])

     def test_nested_query(self):
         query_list = [
@@ -396,10 +378,6 @@ class TDTestCase(TBase):
            sql_list.append(item["sql"])
            res_list.append(item["res"])
         tdSql.queryAndCheckResult(sql_list, res_list)
-        # for q in query_list:
-        #     tdSql.query(q["sql"])
-        #     tdLog.debug(q["sql"] + " with res: " + str(tdSql.queryResult))
-        #     assert(len(tdSql.queryResult) == len(q["res"]) and tdSql.queryResult == q["res"])

     def test_abnormal_query(self):
         # incorrect parameter
@@ -415,32 +393,42 @@ class TDTestCase(TBase):
                 tdSql.error("select elapsed{} from {} group by ".format(param, table))

         # query with unsupported function, like leastsquares、diff、derivative、top、bottom、last_row、interp
-        tdSql.error("select elapsed(leastsquares(c_int, 1, 2)) from st1 group by tbname;")
-        tdSql.error("select elapsed(diff(ts)) from st1;")
-        tdSql.error("select elapsed(derivative(ts, 1s, 1)) from st1 group by tbname order by ts;")
-        tdSql.error("select elapsed(top(ts, 5)) from st1 group by tbname order by ts;")
-        tdSql.error("select top(elapsed(ts), 5) from st1 group by tbname order by ts;")
-        tdSql.error("select elapsed(bottom(ts)) from st1 group by tbname order by ts;")
-        tdSql.error("select bottom(elapsed(ts)) from st1 group by tbname order by ts;")
-        tdSql.error("select elapsed(last_row(ts)) from st1 group by tbname order by ts;")
-        tdSql.error("select elapsed(interp(ts, 0)) from st1 group by tbname order by ts;")
+        unsupported_sql_list = [
+            "select elapsed(leastsquares(c_int, 1, 2)) from st1 group by tbname;",
+            "select elapsed(diff(ts)) from st1;",
+            "select elapsed(derivative(ts, 1s, 1)) from st1 group by tbname order by ts;",
+            "select elapsed(top(ts, 5)) from st1 group by tbname order by ts;",
+            "select top(elapsed(ts), 5) from st1 group by tbname order by ts;",
+            "select elapsed(bottom(ts)) from st1 group by tbname order by ts;",
+            "select bottom(elapsed(ts)) from st1 group by tbname order by ts;",
+            "select elapsed(last_row(ts)) from st1 group by tbname order by ts;",
+            "select elapsed(interp(ts, 0)) from st1 group by tbname order by ts;"
+        ]
+        tdSql.errors(unsupported_sql_list)

         # nested aggregate function
-        tdSql.error("select avg(elapsed(ts, 1s)) from st1 group by tbname order by ts;")
-        tdSql.error("select elapsed(avg(ts), 1s) from st1 group by tbname order by ts;")
-        tdSql.error("select elapsed(sum(ts), 1s) from st1 group by tbname order by ts;")
-        tdSql.error("select elapsed(count(ts), 1s) from st1 group by tbname order by ts;")
-        tdSql.error("select elapsed(min(ts), 1s) from st1 group by tbname order by ts;")
-        tdSql.error("select elapsed(max(ts), 1s) from st1 group by tbname order by ts;")
-        tdSql.error("select elapsed(first(ts), 1s) from st1 group by tbname order by ts;")
-        tdSql.error("select elapsed(last(ts), 1s) from st1 group by tbname order by ts;")
+        nested_sql_list = [
+            "select avg(elapsed(ts, 1s)) from st1 group by tbname order by ts;",
+            "select elapsed(avg(ts), 1s) from st1 group by tbname order by ts;",
+            "select elapsed(sum(ts), 1s) from st1 group by tbname order by ts;",
+            "select elapsed(count(ts), 1s) from st1 group by tbname order by ts;",
+            "select elapsed(min(ts), 1s) from st1 group by tbname order by ts;",
+            "select elapsed(max(ts), 1s) from st1 group by tbname order by ts;",
+            "select elapsed(first(ts), 1s) from st1 group by tbname order by ts;",
+            "select elapsed(last(ts), 1s) from st1 group by tbname order by ts;"
+        ]
+        tdSql.errors(nested_sql_list)

         # other error
-        tdSql.error("select elapsed(ts, 1s) from t1 where ts between '2023-03-01 15:00:00.000' and '2023-03-01 15:01:40.000' interval(10s) fill(next) union select elapsed(ts, 1s) from st2 where ts between '2023-03-01 15:00:00.000' and '2023-03-01 15:01:49.000' interval(5s) fill(prev) group by tbname;")
-        tdSql.error("select elapsed(time ,1s) from (select elapsed(ts,1s) time from st1);")
-        tdSql.error("select elapsed(ts, 1s) from (select elapsed(ts, 1s) ts from st2);")
-        tdSql.error("select elapsed(time, 1s) from (select elapsed(ts, 1s) time from st1 group by tbname);")
-        tdSql.error("select elapsed(ts , 1s) from (select elapsed(ts, 1s) ts from st2 group by tbname);")
-        tdSql.error("select elapsed(ts, 1s) from (select * from st1 where ts between '2023-03-01 15:00:00.000' and '2023-03-01 15:01:40.000' interval(10s) fill(next)) where c_int > 10;")
+        other_sql_list = [
+            "select elapsed(ts, 1s) from t1 where ts between '2023-03-01 15:00:00.000' and '2023-03-01 15:01:40.000' interval(10s) fill(next) union select elapsed(ts, 1s) from st2 where ts between '2023-03-01 15:00:00.000' and '2023-03-01 15:01:49.000' interval(5s) fill(prev) group by tbname;",
+            "select elapsed(time ,1s) from (select elapsed(ts,1s) time from st1);",
+            "select elapsed(ts , 1s) from (select elapsed(ts, 1s) ts from st2);",
+            "select elapsed(time, 1s) from (select elapsed(ts, 1s) time from st1 group by tbname);",
+            "select elapsed(ts , 1s) from (select elapsed(ts, 1s) ts from st2 group by tbname);",
+            "select elapsed(ts, 1s) from (select * from st1 where ts between '2023-03-01 15:00:00.000' and '2023-03-01 15:01:40.000' interval(10s) fill(next)) where c_int > 10;"
+        ]
+        tdSql.errors(other_sql_list)

     def run(self):
         self.prepareData()
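This refactor depends on a batch helper added to the test frame by this commit (hence its title, "add function to frame"). The frame implementation itself is not shown in the diff, so the following is only a sketch of the semantics the call sites imply, not the actual source:

```python
# Hypothetical shape of the helper inside frame/sql.py.
def errors(self, sql_list):
    """Execute each statement and assert that every one returns an error."""
    for sql in sql_list:
        self.error(sql)
```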
@@ -32,7 +32,7 @@
             "childtable_prefix": "d",
             "insert_mode": "taosc",
             "timestamp_step": 30000,
-            "start_timestamp": "2023-10-01 10:00:00",
+            "start_timestamp": 1700000000000,
             "columns": [
                 { "type": "bool", "name": "bc" },
                 { "type": "float", "name": "fc" },
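Switching `start_timestamp` from a local-time string to epoch milliseconds makes the generated data timezone-independent. The new value decodes as follows:

```python
from datetime import datetime, timezone

start_ms = 1700000000000  # start_timestamp from the JSON above
print(datetime.fromtimestamp(start_ms / 1000, tz=timezone.utc))
# 2023-11-14 22:13:20+00:00
```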
@ -34,31 +34,179 @@ class TDTestCase(TBase):
|
||||||
"querySmaOptimize": "1"
|
"querySmaOptimize": "1"
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
def insertData(self):
|
def insertData(self):
|
||||||
tdLog.info(f"insert data.")
|
tdLog.info(f"insert data.")
|
||||||
# taosBenchmark run
|
# taosBenchmark run
|
||||||
jfile = etool.curFile(__file__, "query_basic.json")
|
jfile = etool.curFile(__file__, "query_basic.json")
|
||||||
etool.benchMark(json=jfile)
|
etool.benchMark(json = jfile)
|
||||||
|
|
||||||
tdSql.execute(f"use {self.db}")
|
tdSql.execute(f"use {self.db}")
|
||||||
tdSql.execute("select database();")
|
tdSql.execute("select database();")
|
||||||
# set insert data information
|
# come from query_basic.json
|
||||||
self.childtable_count = 6
|
self.childtable_count = 6
|
||||||
self.insert_rows = 100000
|
self.insert_rows = 100000
|
||||||
self.timestamp_step = 30000
|
self.timestamp_step = 30000
|
||||||
|
self.start_timestamp = 1700000000000
|
||||||
|
|
||||||
|
# write again disorder
|
||||||
|
self.flushDb()
|
||||||
|
jfile = etool.curFile(__file__, "cquery_basic.json")
|
||||||
|
etool.benchMark(json = jfile)
|
||||||
|
|
||||||
|
|
||||||
|
def genTime(self, preCnt, cnt):
|
||||||
|
start = self.start_timestamp + preCnt * self.timestamp_step
|
||||||
|
end = start + self.timestamp_step * cnt
|
||||||
|
return (start, end)
|
||||||
|
|
||||||
|
|
||||||
|
    def doWindowQuery(self):
        pre = f"select count(ts) from {self.stb} "
        # case1 operators "between ... and" and ">= ... <=" are the same
        cnt = 6000
        s, e = self.genTime(12000, cnt)
        sql1 = f"{pre} where ts between {s} and {e} "
        sql2 = f"{pre} where ts >= {s} and ts <= {e} "
        expectCnt = (cnt + 1) * self.childtable_count
        tdSql.checkFirstValue(sql1, expectCnt)
        tdSql.checkFirstValue(sql2, expectCnt)

        # case2 no overlap "or" left
        cnt1 = 120
        s1, e1 = self.genTime(4000, cnt1)
        cnt2 = 3000
        s2, e2 = self.genTime(10000, cnt2)
        sql = f"{pre} where (ts >= {s1} and ts < {e1}) or (ts >= {s2} and ts < {e2})"
        expectCnt = (cnt1 + cnt2) * self.childtable_count
        tdSql.checkFirstValue(sql, expectCnt)

        # case3 no overlap "or" right
        cnt1 = 300
        s1, e1 = self.genTime(17000, cnt1)
        cnt2 = 8000
        s2, e2 = self.genTime(70000, cnt2)
        sql = f"{pre} where (ts > {s1} and ts <= {e1}) or (ts > {s2} and ts <= {e2})"
        expectCnt = (cnt1 + cnt2) * self.childtable_count
        tdSql.checkFirstValue(sql, expectCnt)

        # case4 overlap "or"
        cnt1 = 1000
        s1, e1 = self.genTime(9000, cnt1)
        cnt2 = 1000
        s2, e2 = self.genTime(9000 + 500, cnt2)
        sql = f"{pre} where (ts > {s1} and ts <= {e1}) or (ts > {s2} and ts <= {e2})"
        expectCnt = (cnt1 + 500) * self.childtable_count  # expect=1500
        tdSql.checkFirstValue(sql, expectCnt)

        # case5 overlap "or" boundary hollow->solid (open end meets closed start)
        cnt1 = 3000
        s1, e1 = self.genTime(45000, cnt1)
        cnt2 = 2000
        s2, e2 = self.genTime(45000 + cnt1, cnt2)
        sql = f"{pre} where (ts > {s1} and ts <= {e1}) or (ts > {s2} and ts <= {e2})"
        expectCnt = (cnt1 + cnt2) * self.childtable_count
        tdSql.checkFirstValue(sql, expectCnt)

        # case6 overlap "or" boundary solid->solid (both endpoints closed)
        cnt1 = 300
        s1, e1 = self.genTime(55000, cnt1)
        cnt2 = 500
        s2, e2 = self.genTime(55000 + cnt1, cnt2)
        sql = f"{pre} where (ts >= {s1} and ts <= {e1}) or (ts >= {s2} and ts <= {e2})"
        expectCnt = (cnt1 + cnt2 + 1) * self.childtable_count
        tdSql.checkFirstValue(sql, expectCnt)

        # case7 overlap "and"
        cnt1 = 1000
        s1, e1 = self.genTime(40000, cnt1)
        cnt2 = 1000
        s2, e2 = self.genTime(40000 + 500, cnt2)
        sql = f"{pre} where (ts > {s1} and ts <= {e1}) and (ts > {s2} and ts <= {e2})"
        expectCnt = cnt1 / 2 * self.childtable_count
        tdSql.checkFirstValue(sql, expectCnt)

        # case8 overlap "and" boundary hollow->solid solid->hollow
        cnt1 = 3000
        s1, e1 = self.genTime(45000, cnt1)
        cnt2 = 2000
        s2, e2 = self.genTime(45000 + cnt1, cnt2)
        sql = f"{pre} where (ts > {s1} and ts <= {e1}) and (ts >= {s2} and ts < {e2})"
        expectCnt = 1 * self.childtable_count
        tdSql.checkFirstValue(sql, expectCnt)

        # case9 no overlap "and"
        cnt1 = 6000
        s1, e1 = self.genTime(20000, cnt1)
        cnt2 = 300
        s2, e2 = self.genTime(70000, cnt2)
        sql = f"{pre} where (ts > {s1} and ts <= {e1}) and (ts >= {s2} and ts <= {e2})"
        expectCnt = 0
        tdSql.checkFirstValue(sql, expectCnt)

        # case10 cnt1 contains cnt2, "and"
        cnt1 = 5000
        s1, e1 = self.genTime(25000, cnt1)
        cnt2 = 400
        s2, e2 = self.genTime(28000, cnt2)
        sql = f"{pre} where (ts > {s1} and ts <= {e1}) and (ts >= {s2} and ts < {e2})"
        expectCnt = cnt2 * self.childtable_count
        tdSql.checkFirstValue(sql, expectCnt)
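    # Cross-check of the expectations above (our summary, assuming gap-free
    # per-child data): a union of time ranges counts each timestamp once and
    # an intersection counts only shared ones, so case4 expects
    # 1000 + 500 = 1500 rows per child table and case8 exactly 1 (the single
    # shared boundary timestamp), each multiplied by childtable_count.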
    def queryMax(self, colname):
        sql = f"select max({colname}) from {self.stb}"
        tdSql.query(sql)
        return tdSql.getData(0, 0)
    def checkMax(self):
        # max for tsdbRetrieveDatablockSMA2 coverage
        colname = "ui"
        max = self.queryMax(colname)

        # insert over max
        sql = f"insert into d0(ts, {colname}) values"
        for i in range(1, 5):
            sql += f" (now + {i}s, {max+i})"
        tdSql.execute(sql)
        self.flushDb()

        expectMax = max + 4
        for i in range(1, 5):
            realMax = self.queryMax(colname)
            if realMax != expectMax:
                tdLog.exit(f"Max value not expected. expect:{expectMax} real:{realMax}")

            # query ts list
            sql = f"select ts from d0 where ui={expectMax}"
            tdSql.query(sql)
            tss = tdSql.getColData(0)
            for ts in tss:
                # delete
                sql = f"delete from d0 where ts = '{ts}'"
                tdSql.execute(sql)
            expectMax -= 1

        self.checkInsertCorrect()
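    # Intent note (ours, inferred from the coverage comment above): each pass
    # deletes the current maximum, so the next queryMax() must recompute max()
    # from the remaining rows instead of reusing a cached block-level SMA,
    # which exercises tsdbRetrieveDatablockSMA2.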
    def doQuery(self):
        tdLog.info(f"do query.")
        self.doWindowQuery()

        # max
        self.checkMax()

        # __group_key
        sql = f"select count(*),_group_key(uti),uti from {self.stb} partition by uti"
        tdSql.query(sql)
        # column index 1 value same with 2
        tdSql.checkSameColumn(1, 2)

        sql = f"select count(*),_group_key(usi),usi from {self.stb} group by usi limit 100;"
        tdSql.query(sql)
        tdSql.checkSameColumn(1, 2)

        # tail
        sql1 = "select ts,ui from d0 order by ts desc limit 5 offset 2;"
@@ -75,6 +223,20 @@ class TDTestCase(TBase):
        sql2 = "select bi from stb where bi is not null order by bi desc limit 10;"
        self.checkSameResult(sql1, sql2)

        # distributed expect values
        expects = {
            "Block_Rows"    : 6*100000,
            "Total_Tables"  : 6,
            "Total_Vgroups" : 3
        }
        self.waitTransactionZero()
        reals = self.getDistributed(self.stb)
        for k in expects.keys():
            v = expects[k]
            if int(reals[k]) != v:
                tdLog.exit(f"distribute {k} expect: {v} real: {reals[k]}")

    # run
    def run(self):
        tdLog.debug(f"start to execute {__file__}")
@@ -162,14 +162,19 @@ class AutoGen:
            tdLog.info(f" insert data i={i}")
            values = ""

        tdLog.info(f" insert child data {child_name} finished, insert rows={cnt}")
        return ts

    def insert_data(self, cnt, bContinue=False):
        if not bContinue:
            self.ts = 1600000000000

        currTs = 1600000000000
        for i in range(self.child_cnt):
            name = f"{self.child_name}{i}"
            currTs = self.insert_data_child(name, cnt, self.batch_size, 1)

        self.ts = currTs
        tdLog.info(f" insert data ok, child table={self.child_cnt} insert rows={cnt}")

    # insert same timestamp to all child tables
@@ -29,7 +29,7 @@ class TBase:
    #
    # init
    def init(self, conn, logSql, replicaVar=1, db="db", stb="stb", checkColName="ic"):
        # save param
        self.replicaVar = int(replicaVar)
        tdSql.init(conn.cursor(), True)
@@ -41,14 +41,14 @@ class TBase:
        self.mLevelDisk = 0

        # test case information
        self.db = db
        self.stb = stb

        # sql
        self.sqlSum = f"select sum({checkColName}) from {self.stb}"
        self.sqlMax = f"select max({checkColName}) from {self.stb}"
        self.sqlMin = f"select min({checkColName}) from {self.stb}"
        self.sqlAvg = f"select avg({checkColName}) from {self.stb}"
        self.sqlFirst = f"select first(ts) from {self.stb}"
        self.sqlLast = f"select last(ts) from {self.stb}"
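        # Example (ours; values are hypothetical): a subclass can now target
        # its own schema via
        #   self.init(conn, logSql, replicaVar, db="curdb", stb="meters", checkColName="fc")
        # and the aggregate checks above follow that column automatically.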
@@ -136,7 +136,7 @@ class TBase:
        tdSql.checkAgg(sql, self.childtable_count)

        # check step
        sql = f"select * from (select diff(ts) as dif from {self.stb} partition by tbname order by ts desc) where dif != {self.timestamp_step}"
        tdSql.query(sql)
        tdSql.checkRows(0)
@@ -193,10 +193,10 @@ class TBase:
        # sql
        rows1 = tdSql.query(sql1, queryTimes=2)
        res1 = copy.deepcopy(tdSql.res)

        tdSql.query(sql2, queryTimes=2)
        res2 = tdSql.res

        rowlen1 = len(res1)
        rowlen2 = len(res2)
@@ -229,9 +229,9 @@ class TBase:
    #
    # get vgroups
    def getVGroup(self, dbName):
        vgidList = []
        sql = f"select vgroup_id from information_schema.ins_vgroups where db_name='{dbName}'"
        res = tdSql.getResult(sql)
        rows = len(res)
        for i in range(rows):
@@ -239,6 +239,29 @@ class TBase:
        return vgidList

    # get distributed rows
    def getDistributed(self, tbName):
        sql = f"show table distributed {tbName}"
        tdSql.query(sql)
        dics = {}
        for i in range(tdSql.getRows()):
            row = tdSql.getData(i, 0)
            row = row.replace('[', '').replace(']', '')
            items = row.split(' ')
            for item in items:
                v = item.split('=')
                if len(v) == 2:
                    dics[v[0]] = v[1]
            if i > 5:
                break
        print(dics)
        return dics
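    # Parsing example (ours; the sample line is illustrative, not captured
    # output): a row such as "Total_Blocks=[8] Total_Rows=[600000]" loses its
    # brackets, is split on spaces, and each "key=value" pair lands in dics,
    # so dics["Total_Rows"] == "600000" (values remain strings; callers cast).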

    #
@@ -269,3 +292,15 @@ class TBase:
        if len(lists) == 0:
            tdLog.exit(f"list is empty {tips}")

    #
    # str util
    #
    # convert a list to a sql format string
    def listSql(self, lists, sepa=","):
        strs = ""
        for ls in lists:
            if strs != "":
                strs += sepa
            strs += f"'{ls}'"
        return strs
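    # Usage sketch (ours): listSql(["d0", "d1"]) returns "'d0','d1'", which is
    # handy for IN clauses, e.g.
    #   f"select * from {self.stb} where tbname in ({self.listSql(tbs)})"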
@@ -795,7 +795,7 @@ class TDCom:
    def getOneRow(self, location, containElm):
        res_list = list()
        if 0 <= location < tdSql.queryRows:
            for row in tdSql.res:
                if row[location] == containElm:
                    res_list.append(row)
            return res_list
@@ -943,7 +943,7 @@ class TDCom:
        """drop all streams
        """
        tdSql.query("show streams")
        stream_name_list = list(map(lambda x: x[0], tdSql.res))
        for stream_name in stream_name_list:
            tdSql.execute(f'drop stream if exists {stream_name};')
@@ -962,7 +962,7 @@ class TDCom:
        """drop all databases
        """
        tdSql.query("show databases;")
        db_list = list(map(lambda x: x[0], tdSql.res))
        for dbname in db_list:
            if dbname not in self.white_list and "telegraf" not in dbname:
                tdSql.execute(f'drop database if exists `{dbname}`')
@@ -1412,7 +1412,7 @@ class TDCom:
            input_function (str): scalar
        """
        tdSql.query(sql)
        res = tdSql.res
        if input_function in ["acos", "asin", "atan", "cos", "log", "pow", "sin", "sqrt", "tan"]:
            tdSql.checkEqual(res[1][1], "DOUBLE")
            tdSql.checkEqual(res[2][1], "DOUBLE")
@@ -1490,7 +1490,7 @@ class TDCom:
            bigint: bigint-ts
        """
        tdSql.query(f'select cast({str_ts} as bigint)')
        return tdSql.res[0][0]

    def cast_query_data(self, query_data):
        """cast query-result for existed-stb
@@ -1514,7 +1514,7 @@ class TDCom:
                        tdSql.query(f'select cast("{v}" as binary(6))')
                    else:
                        tdSql.query(f'select cast("{v}" as {" ".join(col_tag_type_list[i].strip().split(" ")[1:])})')
                    query_data_l[i] = tdSql.res[0][0]
                else:
                    query_data_l[i] = v
            nl.append(tuple(query_data_l))
@@ -1566,9 +1566,9 @@ class TDCom:
        if tag_value_list:
            dvalue = len(self.tag_type_str.split(',')) - defined_tag_count
        tdSql.query(sql1)
        res1 = tdSql.res
        tdSql.query(sql2)
        res2 = self.cast_query_data(tdSql.res) if tag_value_list or use_exist_stb else tdSql.res
        tdSql.sql = sql1
        new_list = list()
        if tag_value_list:
@@ -1601,10 +1601,10 @@ class TDCom:
                tdLog.info("query retrying ...")
                new_list = list()
                tdSql.query(sql1)
                res1 = tdSql.res
                tdSql.query(sql2)
                # res2 = tdSql.res
                res2 = self.cast_query_data(tdSql.res) if tag_value_list or use_exist_stb else tdSql.res
                tdSql.sql = sql1

                if tag_value_list:
@@ -1643,10 +1643,10 @@ class TDCom:
                tdLog.info("query retrying ...")
                new_list = list()
                tdSql.query(sql1)
                res1 = tdSql.res
                tdSql.query(sql2)
                # res2 = tdSql.res
                res2 = self.cast_query_data(tdSql.res) if tag_value_list or use_exist_stb else tdSql.res
                tdSql.sql = sql1

                if tag_value_list:
@@ -13,23 +13,24 @@ from frame.common import *

class ClusterDnodes(TDDnodes):
    """rewrite TDDnodes and make ClusterDnodes a TDDnodes child class"""
    def __init__(self):
        super(ClusterDnodes, self).__init__()
        self.simDeployed = False
        self.testCluster = False
        self.valgrind = 0
        self.killValgrind = 1

    def init(self, dnodes_lists, deployPath, masterIp):
        self.dnodes = dnodes_lists  # dnode must be TDDnode instance
        super(ClusterDnodes, self).init(deployPath, masterIp)

clusterDnodes = ClusterDnodes()
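# Call-order sketch (ours, mirroring the test.py changes later in this
# commit): the module-level singleton replaces per-run construction.
#   clusterDnodes.init(dnodes_lists, deployPath, masterIp)
#   clusterDnodes.stopAll()
#   for dnode in clusterDnodes.dnodes:
#       clusterDnodes.deploy(dnode.index, updateCfgDict)
#   for dnode in clusterDnodes.dnodes:
#       clusterDnodes.starttaosd(dnode.index)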

class ConfigureyCluster:
    """This will create a defined number of dnodes and create a cluster;
    at the same time, it returns the TDDnodes list: dnodes."""
    hostname = socket.gethostname()

    def __init__(self):
        self.dnodes = []
        self.dnodeNums = 5
        self.independent = True
        self.startPort = 6030
@@ -86,10 +87,9 @@ class ConfigureyCluster:
        count = 0
        while count < 5:
            tdSql.query("select * from information_schema.ins_dnodes")
            status = 0
            for i in range(self.dnodeNums):
                if tdSql.res[i][4] == "ready":
                    status += 1
            # tdLog.debug(status)
@@ -251,6 +251,11 @@ class TDDnodes:
        dnodesRootDir = "%s/sim" % (self.path)
        return dnodesRootDir

    def getDnodeDir(self, index):
        self.check(index)
        dnodesDir = "%s/sim/dnode%d" % (self.path, index)
        return dnodesDir

    def getSimCfgPath(self):
        return self.sim.getCfgDir()
@@ -78,42 +78,10 @@ class TDSql:
            self.cursor.execute(s)
            time.sleep(2)

    #
    # do execute
    #

    def errors(self, sql_list, expected_error_id_list=None, expected_error_info_list=None):
        """Execute the sql query and check the error info, expected error id or info should keep the same order with sql list,
@@ -152,7 +120,9 @@ class TDSql:
                    self.checkRows(0)
                else:
                    self.checkRows(len(expect_result_list[index]))
                    for row in range(len(expect_result_list[index])):
                        for col in range(len(expect_result_list[index][row])):
                            self.checkData(row, col, expect_result_list[index][row][col])
            except Exception as ex:
                raise(ex)
@@ -162,22 +132,22 @@ class TDSql:
        while i <= queryTimes:
            try:
                self.cursor.execute(sql)
                self.res = self.cursor.fetchall()
                self.queryRows = len(self.res)
                self.queryCols = len(self.cursor.description)

                if count_expected_res is not None:
                    counter = 0
                    while count_expected_res != self.res[0][0]:
                        self.cursor.execute(sql)
                        self.res = self.cursor.fetchall()
                        if counter < queryTimes:
                            counter += 0.5
                            time.sleep(0.5)
                        else:
                            return False
                if row_tag:
                    return self.res
                return self.queryRows
            except Exception as e:
                tdLog.notice("Try to query again, query times: %d " % i)
@@ -190,6 +160,58 @@ class TDSql:
                time.sleep(1)
                pass

    def executeTimes(self, sql, times):
        for i in range(times):
            try:
                return self.cursor.execute(sql)
            except BaseException:
                time.sleep(1)
                continue

    def execute(self, sql, queryTimes=30, show=False):
        self.sql = sql
        if show:
            tdLog.info(sql)
        i = 1
        while i <= queryTimes:
            try:
                self.affectedRows = self.cursor.execute(sql)
                return self.affectedRows
            except Exception as e:
                tdLog.notice("Try to execute sql again, query times: %d " % i)
                if i == queryTimes:
                    caller = inspect.getframeinfo(inspect.stack()[1][0])
                    args = (caller.filename, caller.lineno, sql, repr(e))
                    tdLog.notice("%s(%d) failed: sql:%s, %s" % args)
                    raise Exception(repr(e))
                i += 1
                time.sleep(1)

    # execute many sql
    def executes(self, sqls, queryTimes=30, show=False):
        for sql in sqls:
            self.execute(sql, queryTimes, show)
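    # Usage sketch (ours): execute() retries up to queryTimes times with a 1s
    # pause and re-raises the last error only on the final attempt; executes()
    # applies the same policy to a batch, e.g.
    #   tdSql.executes(["drop database if exists db", "create database db"])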

    def waitedQuery(self, sql, expectRows, timeout):
        tdLog.info("sql: %s, try to retrieve %d rows in %d seconds" % (sql, expectRows, timeout))
        self.sql = sql
        try:
            for i in range(timeout):
                self.cursor.execute(sql)
                self.res = self.cursor.fetchall()
                self.queryRows = len(self.res)
                self.queryCols = len(self.cursor.description)
                tdLog.info("sql: %s, try to retrieve %d rows, get %d rows" % (sql, expectRows, self.queryRows))
                if self.queryRows >= expectRows:
                    return (self.queryRows, i)
                time.sleep(1)
        except Exception as e:
            caller = inspect.getframeinfo(inspect.stack()[1][0])
            args = (caller.filename, caller.lineno, sql, repr(e))
            tdLog.notice("%s(%d) failed: sql:%s, %s" % args)
            raise Exception(repr(e))
        return (self.queryRows, timeout)

    def is_err_sql(self, sql):
        err_flag = True
@@ -200,6 +222,69 @@ class TDSql:
        return False if err_flag else True

    def error(self, sql, expectedErrno=None, expectErrInfo=None):
        caller = inspect.getframeinfo(inspect.stack()[1][0])
        expectErrNotOccured = True

        try:
            self.cursor.execute(sql)
        except BaseException as e:
            expectErrNotOccured = False
            self.errno = e.errno
            error_info = repr(e)
            self.error_info = ','.join(error_info[error_info.index('(')+1:-1].split(",")[:-1]).replace("'", "")
            # self.error_info = (','.join(error_info.split(",")[:-1]).split("(",1)[1:][0]).replace("'","")

        if expectErrNotOccured:
            tdLog.exit("%s(%d) failed: sql:%s, expect error not occurred" % (caller.filename, caller.lineno, sql))
        else:
            self.queryRows = 0
            self.queryCols = 0
            self.res = None

            if expectedErrno != None:
                if expectedErrno == self.errno:
                    tdLog.info("sql:%s, expected errno %s occurred" % (sql, expectedErrno))
                else:
                    tdLog.exit("%s(%d) failed: sql:%s, errno %s occurred, but not expected errno %s" % (caller.filename, caller.lineno, sql, self.errno, expectedErrno))
            else:
                tdLog.info("sql:%s, expect error occurred" % (sql))

            if expectErrInfo != None:
                if expectErrInfo == self.error_info or expectErrInfo in self.error_info:
                    tdLog.info("sql:%s, expected expectErrInfo %s occurred" % (sql, expectErrInfo))
                else:
                    tdLog.exit("%s(%d) failed: sql:%s, expectErrInfo %s occurred, but not expected errno %s" % (caller.filename, caller.lineno, sql, self.error_info, expectErrInfo))
            else:
                tdLog.info("sql:%s, expect error occurred" % (sql))

        return self.error_info
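    # Usage sketch (ours): both expectations are optional and checked
    # independently, e.g. (the error text below is an example, not a real
    # message)
    #   tdSql.error("create database db", expectErrInfo="Database already exists")
    # exits the case when the statement unexpectedly succeeds or the captured
    # info does not match.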

    #
    # get session
    #

    def getData(self, row, col):
        self.checkRowCol(row, col)
        return self.res[row][col]

    def getColData(self, col):
        colDatas = []
        for i in range(self.queryRows):
            colDatas.append(self.res[i][col])
        return colDatas

    def getResult(self, sql):
        self.sql = sql
        try:
            self.cursor.execute(sql)
            self.res = self.cursor.fetchall()
        except Exception as e:
            caller = inspect.getframeinfo(inspect.stack()[1][0])
            args = (caller.filename, caller.lineno, sql, repr(e))
            tdLog.notice("%s(%d) failed: sql:%s, %s" % args)
            raise Exception(repr(e))
        return self.res

    def getVariable(self, search_attr):
        '''
        get variable of search_attr access "show variables"
@@ -234,29 +319,19 @@ class TDSql:
            return col_name_list, col_type_list
        return col_name_list

    def getRows(self):
        return self.queryRows

    # get first value
    def getFirstValue(self, sql):
        self.query(sql)
        return self.getData(0, 0)

    #
    # check session
    #

    def checkRows(self, expectRows):
        if self.queryRows == expectRows:
            tdLog.info("sql:%s, queryRows:%d == expect:%d" % (self.sql, self.queryRows, expectRows))
@@ -314,26 +389,26 @@ class TDSql:
        self.checkRowCol(row, col)

        if self.res[row][col] != data:
            if self.cursor.istype(col, "TIMESTAMP"):
                # suppose user want to check nanosecond timestamp if a longer data passed
                if isinstance(data, str):
                    if (len(data) >= 28):
                        if self.res[row][col] == _parse_ns_timestamp(data):
                            if(show):
                                tdLog.info("check successfully")
                        else:
                            caller = inspect.getframeinfo(inspect.stack()[1][0])
                            args = (caller.filename, caller.lineno, self.sql, row, col, self.res[row][col], data)
                            tdLog.exit("%s(%d) failed: sql:%s row:%d col:%d data:%s != expect:%s" % args)
                    else:
                        if self.res[row][col].astimezone(datetime.timezone.utc) == _parse_datetime(data).astimezone(datetime.timezone.utc):
                            # tdLog.info(f"sql:{self.sql}, row:{row} col:{col} data:{self.res[row][col]} == expect:{data}")
                            if(show):
                                tdLog.info("check successfully")
                        else:
                            caller = inspect.getframeinfo(inspect.stack()[1][0])
                            args = (caller.filename, caller.lineno, self.sql, row, col, self.res[row][col], data)
                            tdLog.exit("%s(%d) failed: sql:%s row:%d col:%d data:%s != expect:%s" % args)
                    return
                elif isinstance(data, int):
@@ -345,72 +420,72 @@ class TDSql:
                        precision = 'ns'
                    else:
                        caller = inspect.getframeinfo(inspect.stack()[1][0])
                        args = (caller.filename, caller.lineno, self.sql, row, col, self.res[row][col], data)
                        tdLog.exit("%s(%d) failed: sql:%s row:%d col:%d data:%s != expect:%s" % args)
                        return
                    success = False
                    if precision == 'ms':
                        dt_obj = self.res[row][col]
                        ts = int(int((dt_obj-datetime.datetime.fromtimestamp(0,dt_obj.tzinfo)).total_seconds())*1000) + int(dt_obj.microsecond/1000)
                        if ts == data:
                            success = True
                    elif precision == 'us':
                        dt_obj = self.res[row][col]
                        ts = int(int((dt_obj-datetime.datetime.fromtimestamp(0,dt_obj.tzinfo)).total_seconds())*1e6) + int(dt_obj.microsecond)
                        if ts == data:
                            success = True
                    elif precision == 'ns':
                        if data == self.res[row][col]:
                            success = True
                    if success:
                        if(show):
                            tdLog.info("check successfully")
                    else:
                        caller = inspect.getframeinfo(inspect.stack()[1][0])
                        args = (caller.filename, caller.lineno, self.sql, row, col, self.res[row][col], data)
                        tdLog.exit("%s(%d) failed: sql:%s row:%d col:%d data:%s != expect:%s" % args)
                    return
                elif isinstance(data, datetime.datetime):
                    dt_obj = self.res[row][col]
                    delt_data = data-datetime.datetime.fromtimestamp(0,data.tzinfo)
                    delt_result = self.res[row][col] - datetime.datetime.fromtimestamp(0,self.res[row][col].tzinfo)
                    if delt_data == delt_result:
                        if(show):
                            tdLog.info("check successfully")
                    else:
                        caller = inspect.getframeinfo(inspect.stack()[1][0])
                        args = (caller.filename, caller.lineno, self.sql, row, col, self.res[row][col], data)
                        tdLog.exit("%s(%d) failed: sql:%s row:%d col:%d data:%s != expect:%s" % args)
                    return
                else:
                    caller = inspect.getframeinfo(inspect.stack()[1][0])
                    args = (caller.filename, caller.lineno, self.sql, row, col, self.res[row][col], data)
                    tdLog.exit("%s(%d) failed: sql:%s row:%d col:%d data:%s != expect:%s" % args)

            if str(self.res[row][col]) == str(data):
                # tdLog.info(f"sql:{self.sql}, row:{row} col:{col} data:{self.res[row][col]} == expect:{data}")
                if(show):
                    tdLog.info("check successfully")
                return

            elif isinstance(data, float):
                if abs(data) >= 1 and abs((self.res[row][col] - data) / data) <= 0.000001:
                    # tdLog.info(f"sql:{self.sql}, row:{row} col:{col} data:{self.res[row][col]} == expect:{data}")
                    if(show):
                        tdLog.info("check successfully")
                elif abs(data) < 1 and abs(self.res[row][col] - data) <= 0.000001:
                    # tdLog.info(f"sql:{self.sql}, row:{row} col:{col} data:{self.res[row][col]} == expect:{data}")
                    if(show):
                        tdLog.info("check successfully")
                else:
                    caller = inspect.getframeinfo(inspect.stack()[1][0])
                    args = (caller.filename, caller.lineno, self.sql, row, col, self.res[row][col], data)
                    tdLog.exit("%s(%d) failed: sql:%s row:%d col:%d data:%s != expect:%s" % args)
                return
            else:
                caller = inspect.getframeinfo(inspect.stack()[1][0])
                args = (caller.filename, caller.lineno, self.sql, row, col, self.res[row][col], data)
                tdLog.exit("%s(%d) failed: sql:%s row:%d col:%d data:%s != expect:%s" % args)
        if(show):
            tdLog.info("check successfully")
@@ -438,23 +513,23 @@ class TDSql:
    def checkDataNoExit(self, row, col, data):
        if self.checkRowColNoExit(row, col) == False:
            return False
        if self.res[row][col] != data:
            if self.cursor.istype(col, "TIMESTAMP"):
                # suppose user want to check nanosecond timestamp if a longer data passed
                if (len(data) >= 28):
                    if pd.to_datetime(self.res[row][col]) == pd.to_datetime(data):
                        return True
                else:
                    if self.res[row][col] == _parse_datetime(data):
                        return True
                return False

            if str(self.res[row][col]) == str(data):
                return True
            elif isinstance(data, float):
                if abs(data) >= 1 and abs((self.res[row][col] - data) / data) <= 0.000001:
                    return True
                elif abs(data) < 1 and abs(self.res[row][col] - data) <= 0.000001:
                    return True
                else:
                    return False
@@ -478,56 +553,6 @@ class TDSql:
        self.query(sql)
        self.checkData(row, col, data)

    def checkAffectedRows(self, expectAffectedRows):
        if self.affectedRows != expectAffectedRows:
            caller = inspect.getframeinfo(inspect.stack()[1][0])
@@ -585,17 +610,22 @@ class TDSql:
    def checkAgg(self, sql, expectCnt):
        self.query(sql)
        self.checkData(0, 0, expectCnt)

    # expect first value
    def checkFirstValue(self, sql, expect):
        self.query(sql)
        self.checkData(0, 0, expect)

    # colIdx1 value same with colIdx2
    def checkSameColumn(self, c1, c2):
        for i in range(self.queryRows):
            if self.res[i][c1] != self.res[i][c2]:
                tdLog.exit(f"Not same. row={i} col1={c1} col2={c2}. {self.res[i][c1]}!={self.res[i][c2]}")
        tdLog.info(f"check {self.queryRows} rows two column value same. column index [{c1},{c2}]")

    #
    # others session
    #

    def get_times(self, time_str, precision="ms"):
        caller = inspect.getframeinfo(inspect.stack()[1][0])
@ -405,15 +405,15 @@ if __name__ == "__main__":
|
||||||
else :
|
else :
|
||||||
tdLog.debug("create an cluster with %s nodes and make %s dnode as independent mnode"%(dnodeNums,mnodeNums))
|
tdLog.debug("create an cluster with %s nodes and make %s dnode as independent mnode"%(dnodeNums,mnodeNums))
|
||||||
dnodeslist = cluster.configure_cluster(dnodeNums=dnodeNums, mnodeNums=mnodeNums, independentMnode=independentMnode, level=level, disk=disk)
|
dnodeslist = cluster.configure_cluster(dnodeNums=dnodeNums, mnodeNums=mnodeNums, independentMnode=independentMnode, level=level, disk=disk)
|
||||||
tdDnodes = ClusterDnodes(dnodeslist)
|
clusterDnodes.init(dnodeslist, deployPath, masterIp)
|
||||||
tdDnodes.init(deployPath, masterIp)
|
clusterDnodes.setTestCluster(testCluster)
|
||||||
tdDnodes.setTestCluster(testCluster)
|
clusterDnodes.setValgrind(valgrind)
|
||||||
tdDnodes.setValgrind(valgrind)
|
clusterDnodes.setAsan(asan)
|
||||||
tdDnodes.stopAll()
|
clusterDnodes.stopAll()
|
||||||
for dnode in tdDnodes.dnodes:
|
for dnode in clusterDnodes.dnodes:
|
||||||
tdDnodes.deploy(dnode.index, updateCfgDict)
|
clusterDnodes.deploy(dnode.index, updateCfgDict)
|
||||||
for dnode in tdDnodes.dnodes:
|
for dnode in clusterDnodes.dnodes:
|
||||||
tdDnodes.starttaosd(dnode.index)
|
clusterDnodes.starttaosd(dnode.index)
|
||||||
tdCases.logSql(logSql)
|
tdCases.logSql(logSql)
|
||||||
|
|
||||||
if restful or websocket:
|
if restful or websocket:
|
||||||
conn = taosws.connect(f"taosws://root:taosdata@{host}:6041")
|
conn = taosws.connect(f"taosws://root:taosdata@{host}:6041")
|
||||||
else:
|
else:
|
||||||
conn = taos.connect(host=f"{host}", config=tdDnodes.getSimCfgPath())
|
conn = taos.connect(host=f"{host}", config=tdDnodes.getSimCfgPath())
|
||||||
# tdSql.init(conn.cursor())
|
|
||||||
# tdSql.execute("create qnode on dnode 1")
|
|
||||||
# tdSql.execute('alter local "queryPolicy" "%d"'%queryPolicy)
|
|
||||||
# tdSql.query("show local variables;")
|
|
||||||
# for i in range(tdSql.queryRows):
|
|
||||||
# if tdSql.queryResult[i][0] == "queryPolicy" :
|
|
||||||
# if int(tdSql.queryResult[i][1]) == int(queryPolicy):
|
|
||||||
# tdLog.info('alter queryPolicy to %d successfully'%queryPolicy)
|
|
||||||
# else :
|
|
||||||
# tdLog.debug(tdSql.queryResult)
|
|
||||||
# tdLog.exit("alter queryPolicy to %d failed"%queryPolicy)
|
|
||||||
|
|
||||||
cursor = conn.cursor()
|
cursor = conn.cursor()
|
||||||
cursor.execute("create qnode on dnode 1")
|
cursor.execute("create qnode on dnode 1")
@@ -591,16 +580,15 @@ if __name__ == "__main__":
            print(independentMnode, "independentMnode value")
            # create dnode list
            dnodeslist = cluster.configure_cluster(dnodeNums=dnodeNums, mnodeNums=mnodeNums, independentMnode=independentMnode, level=level, disk=disk)
            clusterDnodes.init(dnodeslist, deployPath, masterIp)
            clusterDnodes.setTestCluster(testCluster)
            clusterDnodes.setValgrind(valgrind)
            clusterDnodes.setAsan(asan)
            clusterDnodes.stopAll()
            for dnode in clusterDnodes.dnodes:
                clusterDnodes.deploy(dnode.index, updateCfgDict)
            for dnode in clusterDnodes.dnodes:
                clusterDnodes.starttaosd(dnode.index)

            tdCases.logSql(logSql)

        if restful or websocket:
@@ -3,7 +3,13 @@
#NA,NA,y or n,script,./test.sh -f tsim/user/basic.sim

#unit-test

archOs=$(arch)
if [[ $archOs =~ "aarch64" ]]; then
,,n,unit-test,bash test.sh
else
,,y,unit-test,bash test.sh
fi

#
# army-test
@@ -12,6 +18,9 @@
,,y,army,./pytest.sh python3 ./test.py -f enterprise/s3/s3_basic.py -L 3 -D 1
,,y,army,./pytest.sh python3 ./test.py -f community/cluster/snapshot.py -N 3 -L 3 -D 2
,,y,army,./pytest.sh python3 ./test.py -f community/query/function/test_func_elapsed.py
,,y,army,./pytest.sh python3 ./test.py -f community/cluster/incSnapshot.py -N 3 -L 3 -D 2
,,y,army,./pytest.sh python3 ./test.py -f community/query/query_basic.py -N 3

,,n,army,python3 ./test.py -f community/cmdline/fullopt.py
@@ -1247,7 +1256,9 @@ e
,,y,script,./test.sh -f tsim/sma/tsmaCreateInsertQuery.sim
,,y,script,./test.sh -f tsim/sma/rsmaCreateInsertQuery.sim
,,y,script,./test.sh -f tsim/sma/rsmaCreateInsertQueryDelete.sim

### refactor stream backend, open case after rsma refactored
#,,y,script,./test.sh -f tsim/sma/rsmaPersistenceRecovery.sim
,,y,script,./test.sh -f tsim/sync/vnodesnapshot-rsma-test.sim
,,n,script,./test.sh -f tsim/valgrind/checkError1.sim
,,n,script,./test.sh -f tsim/valgrind/checkError2.sim
@@ -97,7 +97,24 @@ class TDSql:
                i += 1
                time.sleep(1)
                pass

    def no_error(self, sql):
        caller = inspect.getframeinfo(inspect.stack()[1][0])
        expectErrOccurred = False

        try:
            self.cursor.execute(sql)
        except BaseException as e:
            expectErrOccurred = True
            self.errno = e.errno
            error_info = repr(e)
            self.error_info = ','.join(error_info[error_info.index('(') + 1:-1].split(",")[:-1]).replace("'", "")

        if expectErrOccurred:
            tdLog.exit("%s(%d) failed: sql:%s, unexpected error '%s' occurred" % (caller.filename, caller.lineno, sql, self.error_info))
        else:
            tdLog.info("sql:%s, check passed, no ErrInfo occurred" % (sql))

    def error(self, sql, expectedErrno=None, expectErrInfo=None, fullMatched=True):
        caller = inspect.getframeinfo(inspect.stack()[1][0])
        expectErrNotOccured = True

@@ -126,9 +143,9 @@ class TDSql:
            if expectErrInfo != None:
                if expectErrInfo == self.error_info:
                    tdLog.info("sql:%s, expected ErrInfo '%s' occurred" % (sql, expectErrInfo))
                else:
                    tdLog.exit("%s(%d) failed: sql:%s, ErrInfo '%s' occurred, but not expected ErrInfo '%s'" % (caller.filename, caller.lineno, sql, self.error_info, expectErrInfo))
        else:
            if expectedErrno != None:
                if expectedErrno in self.errno:

@@ -138,9 +155,9 @@ class TDSql:
            if expectErrInfo != None:
                if expectErrInfo in self.error_info:
                    tdLog.info("sql:%s, expected ErrInfo '%s' occurred" % (sql, expectErrInfo))
                else:
                    tdLog.exit("%s(%d) failed: sql:%s, ErrInfo %s occurred, but not expected ErrInfo '%s'" % (caller.filename, caller.lineno, sql, self.error_info, expectErrInfo))

        return self.error_info
@@ -13,6 +13,8 @@ sql create table ts3 using st tags(3,2,2);
sql create table ts4 using st tags(4,2,2);
sql create stream stream_t1 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamtST1 as select _wstart, count(*) c1, count(d) c2 , sum(a) c3 , max(b) c4, min(c) c5 from st partition by tbname interval(10s);

sleep 1000

sql insert into ts1 values(1648791213001,1,12,3,1.0);
sql insert into ts2 values(1648791213001,1,12,3,1.0);
@@ -280,7 +280,9 @@
./test.sh -f tsim/sma/tsmaCreateInsertQuery.sim
./test.sh -f tsim/sma/rsmaCreateInsertQuery.sim
./test.sh -f tsim/sma/rsmaCreateInsertQueryDelete.sim

### refactor stream backend, open case after rsma refactored
#./test.sh -f tsim/sma/rsmaPersistenceRecovery.sim
./test.sh -f tsim/sync/vnodesnapshot-rsma-test.sim
./test.sh -f tsim/valgrind/checkError1.sim
./test.sh -f tsim/valgrind/checkError2.sim
@@ -0,0 +1,81 @@
{
    "filetype": "insert",
    "cfgdir": "/etc/taos",
    "host": "127.0.0.1",
    "port": 6030,
    "user": "root",
    "password": "taosdata",
    "connection_pool_size": 8,
    "num_of_records_per_req": 2000,
    "thread_count": 2,
    "create_table_thread_count": 10,
    "result_file": "./insert_res_mix.txt",
    "confirm_parameter_prompt": "no",
    "insert_interval": 0,
    "check_sql": "yes",
    "continue_if_fail": "yes",
    "databases": [
        {
            "dbinfo": {
                "name": "curdb",
                "drop": "yes",
                "vgroups": 2,
                "replica": 1,
                "precision": "ms",
                "stt_trigger": 8,
                "minRows": 100,
                "maxRows": 4096
            },
            "super_tables": [
                {
                    "name": "meters",
                    "child_table_exists": "no",
                    "childtable_count": 5,
                    "insert_rows": 100000,
                    "childtable_prefix": "d",
                    "insert_mode": "taosc",
                    "insert_interval": 0,
                    "timestamp_step": 1000,
                    "start_timestamp": "2022-09-01 10:00:00",
                    "disorder_ratio": 60,
                    "update_ratio": 70,
                    "delete_ratio": 30,
                    "disorder_fill_interval": 300,
                    "update_fill_interval": 25,
                    "generate_row_rule": 2,
                    "columns": [
                        { "type": "bool", "name": "bc"},
                        { "type": "float", "name": "fc", "max": 1, "min": 0 },
                        { "type": "double", "name": "dc", "max": 1, "min": 0 },
                        { "type": "tinyint", "name": "ti", "max": 100, "min": 0 },
                        { "type": "smallint", "name": "si", "max": 100, "min": 0 },
                        { "type": "int", "name": "ic", "max": 100, "min": 0 },
                        { "type": "bigint", "name": "bi", "max": 100, "min": 0 },
                        { "type": "utinyint", "name": "uti", "max": 100, "min": 0 },
                        { "type": "usmallint", "name": "usi", "max": 100, "min": 0 },
                        { "type": "uint", "name": "ui", "max": 100, "min": 0 },
                        { "type": "ubigint", "name": "ubi", "max": 100, "min": 0 },
                        { "type": "binary", "name": "bin", "len": 32},
                        { "type": "nchar", "name": "nch", "len": 64}
                    ],
                    "tags": [
                        {
                            "type": "tinyint",
                            "name": "groupid",
                            "max": 10,
                            "min": 1
                        },
                        {
                            "name": "location",
                            "type": "binary",
                            "len": 16,
                            "values": ["San Francisco", "Los Angles", "San Diego",
                                "San Jose", "Palo Alto", "Campbell", "Mountain View",
                                "Sunnyvale", "Santa Clara", "Cupertino"]
                        }
                    ]
                }
            ]
        }
    ]
}
|
|
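The JSON above exercises taosBenchmark's mixed-write path: generate_row_rule 2 combined with disorder_ratio 60, update_ratio 70 and delete_ratio 30 interleaves out-of-order, updating and deleting writes against curdb.meters. A minimal sketch of driving such a config from a test, in the os.system style this commit uses (whether this file is the 0-others/com_alltypedata.json referenced in the next hunk is an assumption):

    import os

    # Run the mixed-write benchmark config, then sanity-check the target super table.
    ret = os.system("taosBenchmark -f 0-others/com_alltypedata.json -y")
    assert ret == 0, "taosBenchmark run failed"
    os.system("taos -s 'select count(*) from curdb.meters'")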
@ -152,6 +152,13 @@ class TDTestCase:
tdLog.info(f" LD_LIBRARY_PATH=/usr/lib taosBenchmark -t {tableNumbers} -n {recordNumbers1} -y ")
os.system(f"LD_LIBRARY_PATH=/usr/lib taosBenchmark -t {tableNumbers} -n {recordNumbers1} -y ")
os.system("LD_LIBRARY_PATH=/usr/lib taos -s 'flush database test '")

os.system("LD_LIBRARY_PATH=/usr/lib taosBenchmark -f 0-others/com_alltypedata.json -y")
os.system("LD_LIBRARY_PATH=/usr/lib taos -s 'flush database curdb '")
os.system("LD_LIBRARY_PATH=/usr/lib taos -s 'select count(*) from curdb.meters '")
os.system("LD_LIBRARY_PATH=/usr/lib taos -s 'select sum(fc) from curdb.meters '")
os.system("LD_LIBRARY_PATH=/usr/lib taos -s 'select avg(ic) from curdb.meters '")
os.system("LD_LIBRARY_PATH=/usr/lib taos -s 'select min(ui) from curdb.meters '")
os.system("LD_LIBRARY_PATH=/usr/lib taos -s 'select max(bi) from curdb.meters '")

# os.system(f"LD_LIBRARY_PATH=/usr/lib taos -s 'use test;create stream current_stream into current_stream_output_stb as select _wstart as `start`, _wend as wend, max(current) as max_current from meters where voltage <= 220 interval (5s);' ")
# os.system('LD_LIBRARY_PATH=/usr/lib taos -s "use test;create stream power_stream into power_stream_output_stb as select ts, concat_ws(\\".\\", location, tbname) as meter_location, current*voltage*cos(phase) as active_power, current*voltage*sin(phase) as reactive_power from meters partition by tbname;" ')
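The calls above are fire-and-forget: os.system discards output, so a failed query would pass silently. A hedged sketch of the same pattern with an exit-status check, standard library only (the helper name is hypothetical, not part of this commit):

    import subprocess

    def run_checked(cmd: str) -> str:
        # Run a shell command and fail loudly on a non-zero exit code.
        result = subprocess.run(cmd, shell=True, capture_output=True, text=True)
        if result.returncode != 0:
            raise RuntimeError(f"command failed: {cmd}\n{result.stderr}")
        return result.stdout

    print(run_checked("LD_LIBRARY_PATH=/usr/lib taos -s 'select count(*) from curdb.meters'"))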
@ -0,0 +1,275 @@
from urllib.parse import uses_relative
import taos
import sys
import os
import time
import platform
import inspect
import subprocess
import distro  # needed for distro.id() in run(); assumed available in the test environment
from taos.tmq import Consumer

from pathlib import Path
from util.log import *
from util.sql import *
from util.cases import *
from util.common import *  # provides tdCom, used in run(); assumed from the test framework
from util.dnodes import *
from util.dnodes import TDDnodes
from util.dnodes import TDDnode
from util.cluster import *

BASEVERSION = "3.0.2.3"


class TDTestCase:
    def caseDescription(self):
        f'''
        3.0 data compatibility test
        case1: basedata version is {BASEVERSION}
        '''
        return

    def init(self, conn, logSql, replicaVar=1):
        self.replicaVar = int(replicaVar)
        tdLog.debug(f"start to execute {__file__}")
        tdSql.init(conn.cursor())
        self.deletedDataSql = '''drop database if exists deldata;create database deldata duration 300 stt_trigger 1;use deldata;
            create table deldata.stb1 (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) tags (t1 int);
            create table deldata.ct1 using deldata.stb1 tags ( 1 );
            insert into deldata.ct1 values ( now()-0s, 0, 0, 0, 0, 0.0, 0.0, 0, 'binary0', 'nchar0', now()+0a ) ( now()-10s, 1, 11111, 111, 11, 1.11, 11.11, 1, 'binary1', 'nchar1', now()+1a ) ( now()-20s, 2, 22222, 222, 22, 2.22, 22.22, 0, 'binary2', 'nchar2', now()+2a ) ( now()-30s, 3, 33333, 333, 33, 3.33, 33.33, 1, 'binary3', 'nchar3', now()+3a );
            select avg(c1) from deldata.ct1;
            delete from deldata.stb1;
            flush database deldata;
            insert into deldata.ct1 values ( now()-0s, 0, 0, 0, 0, 0.0, 0.0, 0, 'binary0', 'nchar0', now()+0a ) ( now()-10s, 1, 11111, 111, 11, 1.11, 11.11, 1, 'binary1', 'nchar1', now()+1a ) ( now()-20s, 2, 22222, 222, 22, 2.22, 22.22, 0, 'binary2', 'nchar2', now()+2a ) ( now()-30s, 3, 33333, 333, 33, 3.33, 33.33, 1, 'binary3', 'nchar3', now()+3a );
            delete from deldata.ct1;
            insert into deldata.ct1 values ( now()-0s, 0, 0, 0, 0, 0.0, 0.0, 0, 'binary0', 'nchar0', now()+0a );
            flush database deldata;'''

    def checkProcessPid(self, processName):
        # Poll `ps` once per second for up to 60 s until the process is gone.
        i = 0
        while i < 60:
            print(f"wait stop {processName}")
            processPid = subprocess.getstatusoutput(f'ps aux|grep {processName} |grep -v "grep"|awk \'{{print $2}}\'')[1]
            print(f"times:{i},{processName}-pid:{processPid}")
            if processPid == "":
                break
            i += 1
            time.sleep(1)
        else:
            print(f'{processName} is not stopped in 60s')

    def getBuildPath(self):
        selfPath = os.path.dirname(os.path.realpath(__file__))

        if ("community" in selfPath):
            projPath = selfPath[:selfPath.find("community")]
        else:
            projPath = selfPath[:selfPath.find("tests")]

        self.projPath = projPath
        for root, dirs, files in os.walk(projPath):
            if ("taosd" in files or "taosd.exe" in files):
                rootRealPath = os.path.dirname(os.path.realpath(root))
                if ("packaging" not in rootRealPath):
                    buildPath = root[:len(root) - len("/build/bin")]
                    break
        return buildPath

    def getCfgPath(self):
        buildPath = self.getBuildPath()
        selfPath = os.path.dirname(os.path.realpath(__file__))

        if ("community" in selfPath):
            cfgPath = buildPath + "/../sim/dnode1/cfg/"
        else:
            cfgPath = buildPath + "/../sim/dnode1/cfg/"

        return cfgPath

    def installTaosd(self, bPath, cPath):
        # os.system(f"rmtaos && mkdir -p {self.getBuildPath()}/build/lib/temp && mv {self.getBuildPath()}/build/lib/libtaos.so* {self.getBuildPath()}/build/lib/temp/ ")
        # os.system(f" mv {bPath}/build {bPath}/build_bak ")
        # os.system(f"mv {self.getBuildPath()}/build/lib/libtaos.so {self.getBuildPath()}/build/lib/libtaos.so_bak ")
        # os.system(f"mv {self.getBuildPath()}/build/lib/libtaos.so.1 {self.getBuildPath()}/build/lib/libtaos.so.1_bak ")

        packagePath = "/usr/local/src/"
        dataPath = cPath + "/../data/"
        if platform.system() == "Linux" and platform.machine() == "aarch64":
            packageName = "TDengine-server-" + BASEVERSION + "-Linux-arm64.tar.gz"
        else:
            packageName = "TDengine-server-" + BASEVERSION + "-Linux-x64.tar.gz"
        packageTPath = packageName.split("-Linux-")[0]
        my_file = Path(f"{packagePath}/{packageName}")
        if not my_file.exists():
            print(f"{packageName} does not exist")
            tdLog.info(f"cd {packagePath} && wget https://www.tdengine.com/assets-download/3.0/{packageName}")
            os.system(f"cd {packagePath} && wget https://www.tdengine.com/assets-download/3.0/{packageName}")
        else:
            print(f"{packageName} already exists")
        os.system(f" cd {packagePath} && tar xvf {packageName} && cd {packageTPath} && ./install.sh -e no ")
        tdDnodes.stop(1)
        print(f"start taosd: rm -rf {dataPath}/* && nohup taosd -c {cPath} & ")
        os.system(f"rm -rf {dataPath}/* && nohup taosd -c {cPath} & ")
        time.sleep(5)

    def buildTaosd(self, bPath):
        # os.system(f"mv {bPath}/build_bak {bPath}/build ")
        os.system(f" cd {bPath} ")

    def is_list_same_as_ordered_list(self, unordered_list, ordered_list):
        sorted_list = sorted(unordered_list)
        return sorted_list == ordered_list

    def run(self):
        scriptsPath = os.path.dirname(os.path.realpath(__file__))
        distro_id = distro.id()
        if distro_id == "alpine":
            tdLog.info(f"alpine skip compatibility test")
            return True
        if platform.system().lower() == 'windows':
            tdLog.info(f"Windows skip compatibility test")
            return True
        bPath = self.getBuildPath()
        cPath = self.getCfgPath()
        dbname = "test"
        stb = f"{dbname}.meters"
        os.system("echo 'debugFlag 143' > /etc/taos/taos.cfg ")
        tableNumbers = 100
        recordNumbers1 = 100
        recordNumbers2 = 1000

        # tdsqlF=tdCom.newTdSql()
        # print(tdsqlF)
        # tdsqlF.query(f"SELECT SERVER_VERSION();")
        # print(tdsqlF.query(f"SELECT SERVER_VERSION();"))
        # oldServerVersion=tdsqlF.queryResult[0][0]
        # tdLog.info(f"Base server version is {oldServerVersion}")
        # tdsqlF.query(f"SELECT CLIENT_VERSION();")
        # # the oldClientVersion can't be updated in the same python process, so the version is the newly compiled version
        # oldClientVersion=tdsqlF.queryResult[0][0]
        # tdLog.info(f"Base client version is {oldClientVersion}")
        # baseVersion = "3.0.1.8"

        tdLog.printNoPrefix(f"==========step1:prepare and check data in old version-{BASEVERSION}")
        os.system(f"rm -rf {cPath}/../data")
        print(self.projPath)
        # this data file is special for coverage test in 192.168.1.96
        os.system(f"cp -r {self.projPath}/../comp_testdata/data/ {self.projPath}/sim/dnode1")
        tdDnodes.stop(1)
        tdDnodes.start(1)

        tdsql = tdCom.newTdSql()
        tdsql.query(f"SELECT SERVER_VERSION();")
        nowServerVersion = tdsql.queryResult[0][0]
        tdLog.printNoPrefix(f"==========step3:prepare and check data in new version-{nowServerVersion}")
        tdsql.query(f"select count(*) from {stb}")
        tdsql.checkData(0, 0, tableNumbers * recordNumbers1)
        # tdsql.query("show streams;")
        # os.system(f"taosBenchmark -t {tableNumbers} -n {recordNumbers2} -y ")
        # tdsql.query("show streams;")
        # tdsql.query(f"select count(*) from {stb}")
        # tdsql.checkData(0,0,tableNumbers*recordNumbers2)

        # check db4096
        tdsql.query("select count(*) from db4096.stb0")
        tdsql.checkData(0, 0, 50000)

        # check deleted data
        tdsql.execute("insert into deldata.ct1 values ( now()-0s, 0, 0, 0, 0, 0.0, 0.0, 0, 'binary0', 'nchar0', now()+0a ) ( now()-10s, 1, 11111, 111, 11, 1.11, 11.11, 1, 'binary1', 'nchar1', now()+1a ) ( now()-20s, 2, 22222, 222, 22, 2.22, 22.22, 0, 'binary2', 'nchar2', now()+2a ) ( now()-30s, 3, 33333, 333, 33, 3.33, 33.33, 1, 'binary3', 'nchar3', now()+3a );")
        tdsql.execute("flush database deldata;")
        tdsql.query("select avg(c1) from deldata.ct1;")

        tdsql = tdCom.newTdSql()
        tdLog.printNoPrefix("==========step4:verify backticks in taos Sql-TD18542")
        tdsql.execute("drop database if exists db")
        tdsql.execute("create database db")
        tdsql.execute("use db")
        tdsql.execute("create stable db.stb1 (ts timestamp, c1 int) tags (t1 int);")
        tdsql.execute("insert into db.ct1 using db.stb1 TAGS(1) values(now(),11);")
        tdsql.error(" insert into `db.ct2` using db.stb1 TAGS(9) values(now(),11);")
        tdsql.error(" insert into db.`db.ct2` using db.stb1 TAGS(9) values(now(),11);")
        tdsql.execute("insert into `db`.ct3 using db.stb1 TAGS(3) values(now(),13);")
        tdsql.query("select * from db.ct3")
        tdsql.checkData(0, 1, 13)
        tdsql.execute("insert into db.`ct4` using db.stb1 TAGS(4) values(now(),14);")
        tdsql.query("select * from db.ct4")
        tdsql.checkData(0, 1, 14)

        # check retentions
        tdsql = tdCom.newTdSql()
        tdsql.query("describe information_schema.ins_databases;")
        qRows = tdsql.queryRows
        comFlag = True
        j = 0
        while comFlag:
            for i in range(qRows):
                if tdsql.queryResult[i][0] == "retentions":
                    print("parameters include retentions")
                    comFlag = False
                    break
                else:
                    comFlag = True
                    j = j + 1
            if j == qRows:
                print("parameters don't include retentions")
                caller = inspect.getframeinfo(inspect.stack()[0][0])
                args = (caller.filename, caller.lineno)
                tdLog.exit("%s(%d) failed" % args)

        # check stream
        tdsql.query("show streams;")
        tdsql.checkRows(0)

        # check TS-3131
        tdsql.query("select *,tbname from d0.almlog where mcid='m0103';")
        tdsql.checkRows(6)
        expectList = [0, 3003, 20031, 20032, 20033, 30031]
        resultList = []
        for i in range(6):
            resultList.append(tdsql.queryResult[i][3])
        print(resultList)
        if self.is_list_same_as_ordered_list(resultList, expectList):
            print("The unordered list is the same as the ordered list.")
        else:
            tdLog.exit("The unordered list is not the same as the ordered list.")
        tdsql.execute("insert into test.d80 values (now+1s, 11, 103, 0.21);")
        tdsql.execute("insert into test.d9 values (now+5s, 4.3, 104, 0.4);")

        # check tmq
        conn = taos.connect()

        consumer = Consumer(
            {
                "group.id": "tg75",
                "client.id": "124",
                "td.connect.user": "root",
                "td.connect.pass": "taosdata",
                "enable.auto.commit": "true",
                "experimental.snapshot.enable": "true",
            }
        )
        consumer.subscribe(["tmq_test_topic"])

        while True:
            res = consumer.poll(10)
            if not res:
                break
            err = res.error()
            if err is not None:
                raise err
            val = res.value()

            for block in val:
                print(block.fetchall())
        tdsql.query("show topics;")
        tdsql.checkRows(1)

    def stop(self):
        tdSql.close()
        tdLog.success(f"{__file__} successfully executed")


tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
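The deletedDataSql script defined in init() is not replayed anywhere in this file, so it is presumably consumed by a sibling case. A minimal sketch of replaying such a semicolon-separated script through the framework's tdSql handle (the driver loop is assumed, not part of the commit; splitting on semicolons is safe here because no statement embeds one):

    # Hypothetical replay loop for self.deletedDataSql.
    for stmt in self.deletedDataSql.split(";"):
        stmt = stmt.strip()
        if stmt:
            tdSql.execute(stmt)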
@ -861,11 +861,56 @@ class TDTestCase:

self.support_super_table_test()

def initLastRowDelayTest(self, dbname="db"):
tdSql.execute(f"drop database if exists {dbname} ")
create_db_sql = f"create database if not exists {dbname} keep 3650 duration 1000 cachemodel 'NONE' REPLICA 1"
tdSql.execute(create_db_sql)

time.sleep(3)
tdSql.execute(f"use {dbname}")
tdSql.execute(f'create stable {dbname}.st(ts timestamp, v_int int, v_float float) TAGS (ctname varchar(32))')

tdSql.execute(f"create table {dbname}.ct1 using {dbname}.st tags('ct1')")
tdSql.execute(f"create table {dbname}.ct2 using {dbname}.st tags('ct2')")

tdSql.execute(f"insert into {dbname}.st(tbname,ts,v_float, v_int) values('ct1',1630000000000,86,86)")
tdSql.execute(f"insert into {dbname}.st(tbname,ts,v_float, v_int) values('ct1',1630000021255,59,59)")
tdSql.execute(f'flush database {dbname}')
tdSql.execute(f'select last(*) from {dbname}.st')
tdSql.execute(f'select last_row(*) from {dbname}.st')
tdSql.execute(f"insert into {dbname}.st(tbname,ts) values('ct1',1630000091255)")
tdSql.execute(f'flush database {dbname}')
tdSql.execute(f'select last(*) from {dbname}.st')
tdSql.execute(f'select last_row(*) from {dbname}.st')
tdSql.execute(f'alter database {dbname} cachemodel "both"')
tdSql.query(f'select last(*) from {dbname}.st')
tdSql.checkData(0, 1, 59)

tdSql.query(f'select last_row(*) from {dbname}.st')
tdSql.checkData(0, 1, None)
tdSql.checkData(0, 2, None)

tdLog.printNoPrefix("========== delay test init success ==============")

def lastRowDelayTest(self, dbname="db"):
tdLog.printNoPrefix("========== delay test start ==============")

tdSql.execute(f"use {dbname}")

tdSql.query(f'select last(*) from {dbname}.st')
tdSql.checkData(0, 1, 59)

tdSql.query(f'select last_row(*) from {dbname}.st')
tdSql.checkData(0, 1, None)
tdSql.checkData(0, 2, None)

def run(self): # sourcery skip: extract-duplicate-method, remove-redundant-fstring
# tdSql.prepare()

tdLog.printNoPrefix("==========step1:create table ==============")

self.initLastRowDelayTest("DELAYTEST")

# cache_last 0
self.prepare_datas("'NONE' ")
self.prepare_tag_datas("'NONE'")

@ -890,6 +935,8 @@ class TDTestCase:
self.insert_datas_and_check_abs(self.tb_nums,self.row_nums,self.time_step,"'BOTH'")
self.basic_query()

self.lastRowDelayTest("DELAYTEST")

def stop(self):
tdSql.close()
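The delay test turns on the last/last_row cache only after the data is in place: the final write at ts 1630000091255 supplies just the timestamp, so after 'alter database ... cachemodel "both"' last_row(*) must return NULL for v_int and v_float while last(*) still sees 59. The core assertion, condensed (names taken from the hunk above):

    tdSql.execute('alter database DELAYTEST cachemodel "both"')
    tdSql.query("select last(*) from DELAYTEST.st")
    tdSql.checkData(0, 1, 59)        # newest non-NULL v_int
    tdSql.query("select last_row(*) from DELAYTEST.st")
    tdSql.checkData(0, 1, None)      # the newest row carries NULL values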
@ -278,19 +278,19 @@ class TDTestCase:

def queryOrderByAgg(self):

tdSql.query("SELECT COUNT(*) FROM t1 order by COUNT(*)")
tdSql.no_error("SELECT COUNT(*) FROM t1 order by COUNT(*)")

tdSql.query("SELECT COUNT(*) FROM t1 order by last(c2)")
tdSql.no_error("SELECT COUNT(*) FROM t1 order by last(c2)")

tdSql.query("SELECT c1 FROM t1 order by last(ts)")
tdSql.no_error("SELECT c1 FROM t1 order by last(ts)")

tdSql.query("SELECT ts FROM t1 order by last(ts)")
tdSql.no_error("SELECT ts FROM t1 order by last(ts)")

tdSql.query("SELECT last(ts), ts, c1 FROM t1 order by 2")
tdSql.no_error("SELECT last(ts), ts, c1 FROM t1 order by 2")

tdSql.query("SELECT ts, last(ts) FROM t1 order by last(ts)")
tdSql.no_error("SELECT ts, last(ts) FROM t1 order by last(ts)")

tdSql.query(f"SELECT * FROM t1 order by last(ts)")
tdSql.no_error(f"SELECT * FROM t1 order by last(ts)")

tdSql.query(f"SELECT last(ts) as t2, ts FROM t1 order by 1")
tdSql.checkRows(1)
@ -302,6 +302,18 @@ class TDTestCase:

tdSql.error(f"SELECT last(ts) as t2, ts FROM t1 order by last(t2)")

def queryOrderByAmbiguousName(self):
tdSql.error(sql="select c1 as name, c2 as name, c3 from t1 order by name", expectErrInfo='ambiguous', fullMatched=False)

tdSql.error(sql="select c1, c2 as c1, c3 from t1 order by c1", expectErrInfo='ambiguous', fullMatched=False)

tdSql.error(sql='select last(ts), last(c1) as name ,last(c2) as name,last(c3) from t1 order by name', expectErrInfo='ambiguous', fullMatched=False)

tdSql.no_error("select c1 as name, c2 as c1, c3 from t1 order by c1")

tdSql.no_error('select c1 as name from (select c1, c2 as name from st) order by name')

# run
def run(self):

@ -317,6 +329,8 @@ class TDTestCase:
# agg
self.queryOrderByAgg()

# td-28332
self.queryOrderByAmbiguousName()

# stop
def stop(self):
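For illustration, the ambiguity these new cases assert can be reproduced directly with the Python connector (a sketch under assumed table and column names; two result columns sharing one alias leave the order by reference ambiguous):

    import taos

    conn = taos.connect()
    try:
        conn.query("select c1 as name, c2 as name, c3 from t1 order by name")
    except Exception as e:
        print("expected ambiguous-column error:", e)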
@ -51,7 +51,8 @@ class TDTestCase:
tdCom.create_ctable(tdSql, dbname=paraDict["dbName"],stbname=paraDict["stbName"],tag_elm_list=paraDict['tagSchema'],count=paraDict["ctbNum"], default_ctbname_prefix=paraDict['ctbPrefix'])
tdLog.info("insert data")
tmqCom.insert_data(tdSql,paraDict["dbName"],paraDict["ctbPrefix"],paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"],paraDict["startTs"])
tdSql.execute("insert into ctb0 values(now,1,'');")

tdLog.info("create topics from stb with filter")
queryString = "select ts,c1,c2 from %s.%s" %(paraDict['dbName'], paraDict['stbName'])
sqlString = "create topic %s as stable %s.%s" %(topicNameList[0], paraDict["dbName"],paraDict["stbName"])