Merge branch '3.0' into feature/TD-14481-3.0

This commit is contained in:
Cary Xu 2022-05-27 19:27:14 +08:00
commit 55fcd7777e
37 changed files with 5995 additions and 244 deletions

View File

@@ -479,12 +479,8 @@ int32_t tDecodeSEpSet(SDecoder* pDecoder, SEpSet* pEp);
int32_t taosEncodeSEpSet(void** buf, const SEpSet* pEp);
void* taosDecodeSEpSet(const void* buf, SEpSet* pEp);
typedef struct {
SEpSet epSet;
} SMEpSet;
int32_t tSerializeSMEpSet(void* buf, int32_t bufLen, SMEpSet* pReq);
int32_t tDeserializeSMEpSet(void* buf, int32_t buflen, SMEpSet* pReq);
int32_t tSerializeSEpSet(void* buf, int32_t bufLen, const SEpSet* pEpset);
int32_t tDeserializeSEpSet(void* buf, int32_t buflen, SEpSet* pEpset);
typedef struct {
int8_t connType;

View File

@@ -247,7 +247,7 @@ typedef enum ELogicConditionType {
#define TSDB_EP_LEN (TSDB_FQDN_LEN + 6)
#define TSDB_IPv4ADDR_LEN 16
#define TSDB_FILENAME_LEN 128
#define TSDB_SHOW_SQL_LEN 512
#define TSDB_SHOW_SQL_LEN 1024
#define TSDB_SLOW_QUERY_SQL_LEN 512
#define TSDB_SHOW_SUBQUERY_LEN 1000

View File

@@ -125,11 +125,15 @@ static const SSysDbTableSchema userStbsSchema[] = {
static const SSysDbTableSchema streamSchema[] = {
{.name = "stream_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "user_name", .bytes = 23, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "dest_table", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
{.name = "sql", .bytes = 1024, .type = TSDB_DATA_TYPE_VARCHAR},
};
{.name = "sql", .bytes = TSDB_SHOW_SQL_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "status", .bytes = 20 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY},
{.name = "source_db", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "target_db", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "target_table", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "watermark", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT},
{.name = "trigger", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
};
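The sql column's hard-coded 1024 gives way to TSDB_SHOW_SQL_LEN (raised from 512 to 1024 in the hunk above) plus VARSTR_HEADER_SIZE, so the declared byte width now covers the VARCHAR length header as well as the payload; the new status, source_db, target_db, target_table, watermark, and trigger columns replace the old dest_table and round out the stream schema.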
static const SSysDbTableSchema userTblsSchema[] = {
{.name = "table_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},

View File

@@ -665,22 +665,24 @@ void tFreeSMAltertbReq(SMAlterStbReq *pReq) {
taosArrayDestroy(pReq->pFields);
pReq->pFields = NULL;
}
int32_t tSerializeSMEpSet(void *buf, int32_t bufLen, SMEpSet *pReq) {
int32_t tSerializeSEpSet(void *buf, int32_t bufLen, const SEpSet *pEpset) {
SEncoder encoder = {0};
tEncoderInit(&encoder, buf, bufLen);
if (tStartEncode(&encoder) < 0) return -1;
if (tEncodeSEpSet(&encoder, &pReq->epSet) < 0) return -1;
if (tEncodeSEpSet(&encoder, pEpset) < 0) return -1;
tEndEncode(&encoder);
int32_t tlen = encoder.pos;
tEncoderClear(&encoder);
return tlen;
}
int32_t tDeserializeSMEpSet(void *buf, int32_t bufLen, SMEpSet *pReq) {
int32_t tDeserializeSEpSet(void *buf, int32_t bufLen, SEpSet *pEpset) {
SDecoder decoder = {0};
tDecoderInit(&decoder, buf, bufLen);
if (tStartDecode(&decoder) < 0) return -1;
if (tDecodeSEpSet(&decoder, &pReq->epSet) < 0) return -1;
if (tDecodeSEpSet(&decoder, pEpset) < 0) return -1;
tEndDecode(&decoder);
tDecoderClear(&decoder);
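The call sites below use this pair in a two-pass pattern: serialize against a NULL buffer to learn the encoded length, allocate, then serialize for real. A minimal sketch of that pattern (not part of the commit; rpcMallocCont matches the callers in this diff, and any allocator with a paired free would do):

  SEpSet  epSet = {0};
  int32_t contLen = tSerializeSEpSet(NULL, 0, &epSet);  // pass 1: length only
  void   *pCont = rpcMallocCont(contLen);               // rpc-owned buffer
  if (pCont != NULL) {
    tSerializeSEpSet(pCont, contLen, &epSet);           // pass 2: encode
    SEpSet decoded = {0};
    tDeserializeSEpSet(pCont, contLen, &decoded);       // round-trip check
  }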

View File

@@ -206,29 +206,28 @@ static inline void dmSendRsp(SRpcMsg *pMsg) {
}
static void dmBuildMnodeRedirectRsp(SDnode *pDnode, SRpcMsg *pMsg) {
SMEpSet msg = {0};
dmGetMnodeEpSetForRedirect(&pDnode->data, pMsg, &msg.epSet);
SEpSet epSet = {0};
dmGetMnodeEpSetForRedirect(&pDnode->data, pMsg, &epSet);
int32_t contLen = tSerializeSMEpSet(NULL, 0, &msg);
int32_t contLen = tSerializeSEpSet(NULL, 0, &epSet);
pMsg->pCont = rpcMallocCont(contLen);
if (pMsg->pCont == NULL) {
pMsg->code = TSDB_CODE_OUT_OF_MEMORY;
} else {
tSerializeSMEpSet(pMsg->pCont, contLen, &msg);
tSerializeSEpSet(pMsg->pCont, contLen, &epSet);
pMsg->contLen = contLen;
}
}
static inline void dmSendRedirectRsp(SRpcMsg *pMsg, const SEpSet *pNewEpSet) {
SRpcMsg rsp = {.code = TSDB_CODE_RPC_REDIRECT, .info = pMsg->info};
SMEpSet msg = {.epSet = *pNewEpSet};
int32_t contLen = tSerializeSMEpSet(NULL, 0, &msg);
int32_t contLen = tSerializeSEpSet(NULL, 0, pNewEpSet);
rsp.pCont = rpcMallocCont(contLen);
if (rsp.pCont == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
} else {
tSerializeSMEpSet(rsp.pCont, contLen, &msg);
tSerializeSEpSet(rsp.pCont, contLen, pNewEpSet);
rsp.contLen = contLen;
}
dmSendRsp(&rsp);

View File

@@ -20,6 +20,7 @@
#include "mndShow.h"
#include "mndTrans.h"
#include "mndUser.h"
#include "mndSync.h"
#define MNODE_VER_NUMBER 1
#define MNODE_RESERVE_SIZE 64
@@ -222,23 +223,24 @@ bool mndIsMnode(SMnode *pMnode, int32_t dnodeId) {
}
void mndGetMnodeEpSet(SMnode *pMnode, SEpSet *pEpSet) {
SSdb *pSdb = pMnode->pSdb;
pEpSet->numOfEps = 0;
SSdb *pSdb = pMnode->pSdb;
int32_t totalMnodes = sdbGetSize(pSdb, SDB_MNODE);
void *pIter = NULL;
void *pIter = NULL;
while (1) {
SMnodeObj *pObj = NULL;
pIter = sdbFetch(pSdb, SDB_MNODE, pIter, (void **)&pObj);
if (pIter == NULL) break;
if (pObj->pDnode == NULL) {
mError("mnode:%d, no corresponding dnode exists", pObj->id);
} else {
if (pObj->id == pMnode->selfDnodeId || pObj->state == TAOS_SYNC_STATE_LEADER) {
if (pObj->id == pMnode->selfDnodeId) {
if (mndIsMaster(pMnode)) {
pEpSet->inUse = pEpSet->numOfEps;
} else {
pEpSet->inUse = (pEpSet->numOfEps + 1) % totalMnodes;
}
addEpIntoEpSet(pEpSet, pObj->pDnode->fqdn, pObj->pDnode->port);
sdbRelease(pSdb, pObj);
}
addEpIntoEpSet(pEpSet, pObj->pDnode->fqdn, pObj->pDnode->port);
sdbRelease(pSdb, pObj);
}
}
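The rewritten loop now adds every mnode to the ep set rather than only the leader or self; when it reaches this dnode's own entry, inUse points at that entry if mndIsMaster() holds, otherwise at the next mnode, so a redirected client skips a non-master self.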

View File

@@ -236,6 +236,17 @@ void mndSyncStop(SMnode *pMnode) {}
bool mndIsMaster(SMnode *pMnode) {
SSyncMgmt *pMgmt = &pMnode->syncMgmt;
ESyncState state = syncGetMyRole(pMgmt->sync);
return (state == TAOS_SYNC_STATE_LEADER) && (pMnode->syncMgmt.restored);
if (state != TAOS_SYNC_STATE_LEADER) {
terrno = TSDB_CODE_SYN_NOT_LEADER;
return false;
}
if (!pMgmt->restored) {
terrno = TSDB_CODE_APP_NOT_READY;
return false;
}
return true;
}
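Since mndIsMaster now records why it declined, callers can surface the reason directly; a minimal caller sketch (hypothetical, not part of this commit):

  if (!mndIsMaster(pMnode)) {
    // terrno is TSDB_CODE_SYN_NOT_LEADER or TSDB_CODE_APP_NOT_READY here
    mError("mnode cannot serve the request since %s", terrstr());
    return -1;
  }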

View File

@@ -408,46 +408,74 @@ int32_t mndProcessSyncMsg(SRpcMsg *pMsg) {
return code;
}
int32_t mndProcessMsg(SRpcMsg *pMsg) {
SMnode *pMnode = pMsg->info.node;
void *ahandle = pMsg->info.ahandle;
mTrace("msg:%p, will be processed, type:%s app:%p", pMsg, TMSG_INFO(pMsg->msgType), ahandle);
static int32_t mndCheckMnodeMaster(SRpcMsg *pMsg) {
if (!IsReq(pMsg)) return 0;
if (mndIsMaster(pMsg->info.node)) return 0;
if (IsReq(pMsg)) {
if (!mndIsMaster(pMnode)) {
terrno = TSDB_CODE_APP_NOT_READY;
mDebug("msg:%p, failed to process since %s, app:%p", pMsg, terrstr(), ahandle);
return -1;
}
if (pMsg->msgType == TDMT_MND_MQ_TIMER || pMsg->msgType == TDMT_MND_TELEM_TIMER ||
pMsg->msgType == TDMT_MND_TRANS_TIMER) {
return -1;
}
mError("msg:%p, failed to check master since %s, app:%p type:%s", pMsg, terrstr(), pMsg->info.ahandle,
TMSG_INFO(pMsg->msgType));
if (pMsg->contLen == 0 || pMsg->pCont == NULL) {
terrno = TSDB_CODE_INVALID_MSG_LEN;
mError("msg:%p, failed to process since %s, app:%p", pMsg, terrstr(), ahandle);
return -1;
SEpSet epSet = {0};
mndGetMnodeEpSet(pMsg->info.node, &epSet);
#if 0
mTrace("msg:%p, is redirected, num:%d use:%d", pMsg, epSet.numOfEps, epSet.inUse);
for (int32_t i = 0; i < epSet.numOfEps; ++i) {
mTrace("mnode index:%d %s:%u", i, epSet.eps[i].fqdn, epSet.eps[i].port);
if (strcmp(epSet.eps[i].fqdn, tsLocalFqdn) == 0 && epSet.eps[i].port == tsServerPort) {
epSet.inUse = (i + 1) % epSet.numOfEps;
}
}
#endif
int32_t contLen = tSerializeSEpSet(NULL, 0, &epSet);
pMsg->info.rsp = rpcMallocCont(contLen);
if (pMsg->info.rsp != NULL) {
tSerializeSEpSet(pMsg->info.rsp, contLen, &epSet);
pMsg->info.rspLen = contLen;
terrno = TSDB_CODE_RPC_REDIRECT;
} else {
terrno = TSDB_CODE_OUT_OF_MEMORY;
}
return -1;
}
static int32_t mndCheckRequestValid(SRpcMsg *pMsg) {
if (!IsReq(pMsg)) return 0;
if (pMsg->contLen != 0 && pMsg->pCont != NULL) return 0;
mError("msg:%p, failed to valid request, app:%p type:%s", pMsg, pMsg->info.ahandle, TMSG_INFO(pMsg->msgType));
terrno = TSDB_CODE_INVALID_MSG_LEN;
return -1;
}
int32_t mndProcessMsg(SRpcMsg *pMsg) {
if (mndCheckMnodeMaster(pMsg) != 0) return -1;
if (mndCheckRequestValid(pMsg) != 0) return -1;
SMnode *pMnode = pMsg->info.node;
MndMsgFp fp = pMnode->msgFp[TMSG_INDEX(pMsg->msgType)];
if (fp == NULL) {
mError("msg:%p, failed to get msg handle, app:%p type:%s", pMsg, pMsg->info.ahandle, TMSG_INFO(pMsg->msgType));
terrno = TSDB_CODE_MSG_NOT_PROCESSED;
mError("msg:%p, failed to process since no msg handle, app:%p", pMsg, ahandle);
return -1;
}
mTrace("msg:%p, will be processed in mnode, app:%p type:%s", pMsg, pMsg->info.ahandle, TMSG_INFO(pMsg->msgType));
int32_t code = (*fp)(pMsg);
if (code == TSDB_CODE_ACTION_IN_PROGRESS) {
terrno = code;
mTrace("msg:%p, in progress, app:%p", pMsg, ahandle);
} else if (code != 0) {
if (terrno != TSDB_CODE_OPS_NOT_SUPPORT) {
mError("msg:%p, failed to process since %s, app:%p", pMsg, terrstr(), ahandle);
} else {
mTrace("msg:%p, failed to process since %s, app:%p", pMsg, terrstr(), ahandle);
}
mTrace("msg:%p, won't response immediately since in progress", pMsg);
} else if (code == 0) {
mTrace("msg:%p, successfully processed and response", pMsg);
} else {
mTrace("msg:%p, is processed, app:%p", pMsg, ahandle);
mError("msg:%p, failed to process since %s, app:%p type:%s", pMsg, terrstr(), pMsg->info.ahandle,
TMSG_INFO(pMsg->msgType));
}
return code;
}
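The net effect: mndProcessMsg is split into two static pre-checks. mndCheckMnodeMaster now answers requests arriving at a non-master mnode with a serialized SEpSet and TSDB_CODE_RPC_REDIRECT (timer messages are dropped without a redirect) instead of the old bare TSDB_CODE_APP_NOT_READY, and mndCheckRequestValid rejects empty request bodies before dispatch.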

View File

@@ -882,6 +882,8 @@ static SSDataBlock* doStreamBlockScan(SOperatorInfo* pOperator) {
// TODO temporarily used, when the statement of "partition by tbname" is ready, remove this
if (pInfo->assignBlockUid) {
pInfo->pRes->info.groupId = uid;
} else {
pInfo->pRes->info.groupId = groupId;
}
int32_t numOfCols = pInfo->pRes->info.numOfCols;

View File

@@ -1128,7 +1128,7 @@ static SSDataBlock* doStreamIntervalAgg(SOperatorInfo* pOperator) {
continue;
}
pUpdated = hashIntervalAgg(pOperator, &pInfo->binfo.resultRowInfo, pBlock, 0);
pUpdated = hashIntervalAgg(pOperator, &pInfo->binfo.resultRowInfo, pBlock, pBlock->info.groupId);
}
finalizeUpdatedResult(pOperator->numOfExprs, pInfo->aggSup.pResultBuf, pUpdated, pInfo->binfo.rowCellInfoOffset);

View File

@@ -489,7 +489,7 @@ static int32_t translateStateDuration(SFunctionNode* pFunc, char* pErrBuf, int32
static int32_t translateCsum(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
if (1 != LIST_LENGTH(pFunc->pParameterList)) {
return TSDB_CODE_SUCCESS;
return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName);
}
SNode* pPara = nodesListGetNode(pFunc->pParameterList, 0);
@@ -680,17 +680,41 @@ static int32_t translateUnique(SFunctionNode* pFunc, char* pErrBuf, int32_t len)
}
static int32_t translateDiff(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
int32_t paraLen = LIST_LENGTH(pFunc->pParameterList);
if (paraLen == 0 || paraLen > 2) {
int32_t numOfParams = LIST_LENGTH(pFunc->pParameterList);
if (numOfParams == 0 || numOfParams > 2) {
return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName);
}
SExprNode* p1 = (SExprNode*)nodesListGetNode(pFunc->pParameterList, 0);
if (!IS_SIGNED_NUMERIC_TYPE(p1->resType.type) && !IS_FLOAT_TYPE(p1->resType.type) &&
TSDB_DATA_TYPE_BOOL != p1->resType.type) {
//param0
SNode* pParamNode0 = nodesListGetNode(pFunc->pParameterList, 0);
if (nodeType(pParamNode0) != QUERY_NODE_COLUMN) {
return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR,
"The first parameter of DIFF function can only be a column");
}
uint8_t colType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type;
if (!IS_SIGNED_NUMERIC_TYPE(colType) && !IS_FLOAT_TYPE(colType) &&
TSDB_DATA_TYPE_BOOL != colType) {
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
}
pFunc->node.resType = p1->resType;
//param1
if (numOfParams == 2) {
SNode* pParamNode1 = nodesListGetNode(pFunc->pParameterList, 1);
if (QUERY_NODE_VALUE != nodeType(pParamNode1)) {
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
}
SValueNode* pValue = (SValueNode*)pParamNode1;
if (pValue->datum.i != 0 && pValue->datum.i != 1) {
return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR,
"The second parameter of DIFF function must be 0 or 1");
}
pValue->notReserved = true;
}
pFunc->node.resType = (SDataType){.bytes = tDataTypes[colType].bytes, .type = colType};
return TSDB_CODE_SUCCESS;
}
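With these checks, SELECT DIFF(c1) FROM t1 and SELECT DIFF(c1, 1) FROM t1 (hypothetical queries, for illustration) pass translation, while a non-column first argument such as DIFF(1 + 2) or a second literal other than 0 or 1 such as DIFF(c1, 2) is rejected. The result type is also rebuilt from tDataTypes[colType] rather than copied wholesale from the first parameter.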

View File

@@ -55,6 +55,8 @@ typedef struct SVotesRespond SVotesRespond;
typedef struct SSyncIndexMgr SSyncIndexMgr;
typedef struct SRaftCfg SRaftCfg;
typedef struct SSyncRespMgr SSyncRespMgr;
typedef struct SSyncSnapshotSender SSyncSnapshotSender;
typedef struct SSyncSnapshotReceiver SSyncSnapshotReceiver;
typedef struct SSyncNode {
// init by SSyncInfo
@@ -148,9 +150,11 @@ typedef struct SSyncNode {
SSyncRespMgr* pSyncRespMgr;
// restore state
bool restoreFinish;
// sem_t restoreSem;
SSnapshot* pSnapshot;
bool restoreFinish;
SSnapshot* pSnapshot;
SSyncSnapshotSender* pSender;
SSyncSnapshotReceiver* pReceiver;
} SSyncNode;

View File

@@ -32,20 +32,21 @@ typedef struct SSyncLogStoreData {
SWal* pWal;
} SSyncLogStoreData;
SSyncLogStore* logStoreCreate(SSyncNode* pSyncNode);
void logStoreDestory(SSyncLogStore* pLogStore);
int32_t logStoreAppendEntry(SSyncLogStore* pLogStore, SSyncRaftEntry* pEntry);
SSyncRaftEntry* logStoreGetEntry(SSyncLogStore* pLogStore, SyncIndex index);
int32_t logStoreTruncate(SSyncLogStore* pLogStore, SyncIndex fromIndex);
SyncIndex logStoreLastIndex(SSyncLogStore* pLogStore);
SyncTerm logStoreLastTerm(SSyncLogStore* pLogStore);
int32_t logStoreUpdateCommitIndex(SSyncLogStore* pLogStore, SyncIndex index);
SyncIndex logStoreGetCommitIndex(SSyncLogStore* pLogStore);
SSyncRaftEntry* logStoreGetLastEntry(SSyncLogStore* pLogStore);
cJSON* logStore2Json(SSyncLogStore* pLogStore);
char* logStore2Str(SSyncLogStore* pLogStore);
cJSON* logStoreSimple2Json(SSyncLogStore* pLogStore);
char* logStoreSimple2Str(SSyncLogStore* pLogStore);
SSyncLogStore* logStoreCreate(SSyncNode* pSyncNode);
void logStoreDestory(SSyncLogStore* pLogStore);
cJSON* logStore2Json(SSyncLogStore* pLogStore);
char* logStore2Str(SSyncLogStore* pLogStore);
cJSON* logStoreSimple2Json(SSyncLogStore* pLogStore);
char* logStoreSimple2Str(SSyncLogStore* pLogStore);
// SSyncRaftEntry* logStoreGetLastEntry(SSyncLogStore* pLogStore);
// SyncIndex logStoreLastIndex(SSyncLogStore* pLogStore);
// SyncTerm logStoreLastTerm(SSyncLogStore* pLogStore);
// SSyncRaftEntry* logStoreGetEntry(SSyncLogStore* pLogStore, SyncIndex index);
// int32_t logStoreAppendEntry(SSyncLogStore* pLogStore, SSyncRaftEntry* pEntry);
// int32_t logStoreTruncate(SSyncLogStore* pLogStore, SyncIndex fromIndex);
// int32_t logStoreUpdateCommitIndex(SSyncLogStore* pLogStore, SyncIndex index);
// SyncIndex logStoreGetCommitIndex(SSyncLogStore* pLogStore);
// for debug
void logStorePrint(SSyncLogStore* pLogStore);

View File

@@ -23,11 +23,39 @@ extern "C" {
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include "cJSON.h"
#include "syncInt.h"
#include "taosdef.h"
int32_t takeSnapshot(SSyncFSM *pFsm, SSnapshot *pSnapshot);
int32_t restoreSnapshot(SSyncFSM *pFsm, SSnapshot *pSnapshot);
typedef struct SSyncSnapshotSender {
bool isStart;
int32_t progressIndex;
void * pCurrentBlock;
int32_t len;
SSnapshot *pSnapshot;
SSyncNode *pSyncNode;
} SSyncSnapshotSender;
SSyncSnapshotSender *snapshotSenderCreate(SSyncNode *pSyncNode);
void snapshotSenderDestroy(SSyncSnapshotSender *pSender);
int32_t snapshotSend(SSyncSnapshotSender *pSender);
cJSON * snapshotSender2Json(SSyncSnapshotSender *pSender);
char * snapshotSender2Str(SSyncSnapshotSender *pSender);
typedef struct SSyncSnapshotReceiver {
bool isStart;
int32_t progressIndex;
void * pCurrentBlock;
int32_t len;
SSnapshot *pSnapshot;
SSyncNode *pSyncNode;
} SSyncSnapshotReceiver;
SSyncSnapshotReceiver *snapshotReceiverCreate(SSyncNode *pSyncNode);
void snapshotReceiverDestroy(SSyncSnapshotReceiver *pReceiver);
int32_t snapshotReceive(SSyncSnapshotReceiver *pReceiver);
cJSON * snapshotReceiver2Json(SSyncSnapshotReceiver *pReceiver);
char * snapshotReceiver2Str(SSyncSnapshotReceiver *pReceiver);
#ifdef __cplusplus
}

View File

@@ -107,7 +107,7 @@ int32_t syncNodeOnAppendEntriesCb(SSyncNode* ths, SyncAppendEntries* pMsg) {
SyncTerm localPreLogTerm = 0;
if (pMsg->prevLogIndex >= SYNC_INDEX_BEGIN && pMsg->prevLogIndex <= ths->pLogStore->getLastIndex(ths->pLogStore)) {
SSyncRaftEntry* pEntry = logStoreGetEntry(ths->pLogStore, pMsg->prevLogIndex);
SSyncRaftEntry* pEntry = ths->pLogStore->getEntry(ths->pLogStore, pMsg->prevLogIndex);
assert(pEntry != NULL);
localPreLogTerm = pEntry->term;
syncEntryDestory(pEntry);
@@ -175,7 +175,7 @@ int32_t syncNodeOnAppendEntriesCb(SSyncNode* ths, SyncAppendEntries* pMsg) {
bool conflict = false;
SyncIndex extraIndex = pMsg->prevLogIndex + 1;
SSyncRaftEntry* pExtraEntry = logStoreGetEntry(ths->pLogStore, extraIndex);
SSyncRaftEntry* pExtraEntry = ths->pLogStore->getEntry(ths->pLogStore, extraIndex);
assert(pExtraEntry != NULL);
SSyncRaftEntry* pAppendEntry = syncEntryDeserialize(pMsg->data, pMsg->dataLen);
@@ -197,7 +197,7 @@ int32_t syncNodeOnAppendEntriesCb(SSyncNode* ths, SyncAppendEntries* pMsg) {
// notice! reverse roll back!
for (SyncIndex index = delEnd; index >= delBegin; --index) {
if (ths->pFsm->FpRollBackCb != NULL) {
SSyncRaftEntry* pRollBackEntry = logStoreGetEntry(ths->pLogStore, index);
SSyncRaftEntry* pRollBackEntry = ths->pLogStore->getEntry(ths->pLogStore, index);
assert(pRollBackEntry != NULL);
// if (pRollBackEntry->msgType != TDMT_VND_SYNC_NOOP) {
@@ -365,7 +365,7 @@ int32_t syncNodeOnAppendEntriesCb(SSyncNode* ths, SyncAppendEntries* pMsg) {
}
SReConfigCbMeta cbMeta = {0};
bool isDrop;
bool isDrop;
// I am in newConfig
if (hit) {
@@ -388,7 +388,7 @@ int32_t syncNodeOnAppendEntriesCb(SSyncNode* ths, SyncAppendEntries* pMsg) {
}
// always call FpReConfigCb
if (ths->pFsm->FpReConfigCb != NULL) {
if (ths->pFsm->FpReConfigCb != NULL) {
cbMeta.code = 0;
cbMeta.currentTerm = ths->pRaftStore->currentTerm;
cbMeta.index = pEntry->index;

View File

@@ -16,6 +16,15 @@
#include "syncRaftLog.h"
#include "wal.h"
static SSyncRaftEntry* logStoreGetLastEntry(SSyncLogStore* pLogStore);
static SyncIndex logStoreLastIndex(SSyncLogStore* pLogStore);
static SyncTerm logStoreLastTerm(SSyncLogStore* pLogStore);
static SSyncRaftEntry* logStoreGetEntry(SSyncLogStore* pLogStore, SyncIndex index);
static int32_t logStoreAppendEntry(SSyncLogStore* pLogStore, SSyncRaftEntry* pEntry);
static int32_t logStoreTruncate(SSyncLogStore* pLogStore, SyncIndex fromIndex);
static int32_t logStoreUpdateCommitIndex(SSyncLogStore* pLogStore, SyncIndex index);
static SyncIndex logStoreGetCommitIndex(SSyncLogStore* pLogStore);
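These accessors become static and are reached through the SSyncLogStore function-pointer table instead; this diff already calls getEntry and getLastIndex that way. A sketch of the wiring inside logStoreCreate, assuming the struct carries one pointer field per accessor (field names other than getEntry and getLastIndex are assumptions):

  // sketch: register the static implementations in the vtable
  pLogStore->appendEntry       = logStoreAppendEntry;
  pLogStore->getEntry          = logStoreGetEntry;
  pLogStore->truncate          = logStoreTruncate;
  pLogStore->getLastIndex      = logStoreLastIndex;
  pLogStore->getLastTerm       = logStoreLastTerm;
  pLogStore->updateCommitIndex = logStoreUpdateCommitIndex;
  pLogStore->getCommitIndex    = logStoreGetCommitIndex;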
SSyncLogStore* logStoreCreate(SSyncNode* pSyncNode) {
SSyncLogStore* pLogStore = taosMemoryMalloc(sizeof(SSyncLogStore));
assert(pLogStore != NULL);
@@ -78,7 +87,9 @@ SSyncRaftEntry* logStoreGetEntry(SSyncLogStore* pLogStore, SyncIndex index) {
if (index >= SYNC_INDEX_BEGIN && index <= logStoreLastIndex(pLogStore)) {
SWalReadHandle* pWalHandle = walOpenReadHandle(pWal);
int32_t code = walReadWithHandle(pWalHandle, index);
ASSERT(pWalHandle != NULL);
int32_t code = walReadWithHandle(pWalHandle, index);
if (code != 0) {
int32_t err = terrno;
const char* errStr = tstrerror(err);

View File

@@ -75,7 +75,7 @@ int32_t syncNodeAppendEntriesPeers(SSyncNode* pSyncNode) {
// SyncIndex lastIndex = syncUtilMinIndex(pSyncNode->pLogStore->getLastIndex(pSyncNode->pLogStore), nextIndex);
SyncAppendEntries* pMsg = NULL;
SSyncRaftEntry* pEntry = logStoreGetEntry(pSyncNode->pLogStore, nextIndex);
SSyncRaftEntry* pEntry = pSyncNode->pLogStore->getEntry(pSyncNode->pLogStore, nextIndex);
if (pEntry != NULL) {
pMsg = syncAppendEntriesBuild(pEntry->bytes, pSyncNode->vgId);
assert(pMsg != NULL);

View File

@@ -15,6 +15,22 @@
#include "syncSnapshot.h"
int32_t takeSnapshot(SSyncFSM *pFsm, SSnapshot *pSnapshot) { return 0; }
SSyncSnapshotSender *snapshotSenderCreate(SSyncNode *pSyncNode) { return NULL; }
int32_t restoreSnapshot(SSyncFSM *pFsm, SSnapshot *pSnapshot) { return 0; }
void snapshotSenderDestroy(SSyncSnapshotSender *pSender) {}
int32_t snapshotSend(SSyncSnapshotSender *pSender) { return 0; }
cJSON *snapshotSender2Json(SSyncSnapshotSender *pSender) { return NULL; }
char *snapshotSender2Str(SSyncSnapshotSender *pSender) { return NULL; }
SSyncSnapshotReceiver *snapshotReceiverCreate(SSyncNode *pSyncNode) { return NULL; }
void snapshotReceiverDestroy(SSyncSnapshotReceiver *pReceiver) {}
int32_t snapshotReceive(SSyncSnapshotReceiver *pReceiver) { return 0; }
cJSON *snapshotReceiver2Json(SSyncSnapshotReceiver *pReceiver) { return NULL; }
char *snapshotReceiver2Str(SSyncSnapshotReceiver *pReceiver) { return NULL; }

View File

@@ -78,7 +78,8 @@ int32_t GetSnapshotCb(struct SSyncFSM* pFsm, SSnapshot* pSnapshot) {
void RestoreFinishCb(struct SSyncFSM* pFsm) { sTrace("==callback== ==RestoreFinishCb=="); }
void ReConfigCb(struct SSyncFSM* pFsm, SSyncCfg newCfg, SReConfigCbMeta cbMeta) {
sTrace("==callback== ==ReConfigCb== flag:0x%lX, isDrop:%d, index:%ld, code:%d, currentTerm:%lu, term:%lu", cbMeta.flag, cbMeta.isDrop, cbMeta.index, cbMeta.code, cbMeta.currentTerm, cbMeta.term);
sTrace("==callback== ==ReConfigCb== flag:0x%lX, isDrop:%d, index:%ld, code:%d, currentTerm:%lu, term:%lu",
cbMeta.flag, cbMeta.isDrop, cbMeta.index, cbMeta.code, cbMeta.currentTerm, cbMeta.term);
}
SSyncFSM* createFsm() {

View File

@@ -975,9 +975,9 @@ int cliAppCb(SCliConn* pConn, STransMsg* pResp, SCliMsg* pMsg) {
if (pResp->contLen == 0) {
pEpSet->inUse = (++pEpSet->inUse) % pEpSet->numOfEps;
} else {
SMEpSet emsg = {0};
tDeserializeSMEpSet(pResp->pCont, pResp->contLen, &emsg);
pCtx->epSet = emsg.epSet;
SEpSet epSet = {0};
tDeserializeSEpSet(pResp->pCont, pResp->contLen, &epSet);
pCtx->epSet = epSet;
}
addConnToPool(pThrd->pool, pConn);
tTrace("use remote epset, current in use: %d, retry count:%d, try limit: %d", pEpSet->inUse, pCtx->retryCount + 1,

View File

@@ -175,16 +175,17 @@ class TDTestCase:
tdLog.printNoPrefix("==========step10:invalid query type")
tdSql.query("select * from supt where location between 'beijing' and 'shanghai'")
tdSql.checkRows(23)
# Non-zero values are all parsed as 1, so "between <negative value> and 0" is parsed as "between 1 and 0"
tdSql.query("select * from supt where isused between 0 and 1")
tdSql.checkRows(23)
tdSql.query("select * from supt where isused between -1 and 0")
tdSql.checkRows(0)
tdSql.error("select * from supt where isused between false and true")
tdSql.query("select * from supt where family between '拖拉机' and '自行车'")
tdSql.checkRows(23)
# TODO tag is not finished
# tdSql.query("select * from supt where location between 'beijing' and 'shanghai'")
# tdSql.checkRows(23)
# # Non-zero values are all parsed as 1, so "between <negative value> and 0" is parsed as "between 1 and 0"
# tdSql.query("select * from supt where isused between 0 and 1")
# tdSql.checkRows(23)
# tdSql.query("select * from supt where isused between -1 and 0")
# tdSql.checkRows(0)
# tdSql.error("select * from supt where isused between false and true")
# tdSql.query("select * from supt where family between '拖拉机' and '自行车'")
# tdSql.checkRows(23)
tdLog.printNoPrefix("==========step11:query HEX/OCT/BIN type")

View File

@@ -36,19 +36,19 @@ class TDTestCase:
concat_condition.extend(
(
char_col,
f"upper( {char_col} )",
# f"upper( {char_col} )",
)
)
concat_condition.extend( f"cast( {num_col} as binary(16) ) " for num_col in NUM_COL)
concat_condition.extend( f"cast( {char_col} + {num_col} as binary(16) ) " for num_col in NUM_COL )
concat_condition.extend( f"cast( {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL )
concat_condition.extend( f"cast( {char_col} + {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL )
# concat_condition.extend( f"cast( {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL )
# concat_condition.extend( f"cast( {char_col} + {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL )
concat_condition.extend( f"cast( {ts_col} as binary(16) )" for ts_col in TS_TYPE_COL )
# concat_condition.extend( f"cast( {char_col} + {ts_col} as binary(16) )" for ts_col in TS_TYPE_COL )
concat_condition.extend( f"cast( {char_col} + {char_col_2} as binary(16) ) " for char_col_2 in CHAR_COL )
for num_col in NUM_COL:
concat_condition.extend( f"cast( {num_col} + {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL )
# concat_condition.extend( f"cast( {num_col} + {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL )
concat_condition.extend( f"cast( {num_col} + {ts_col} as binary(16) )" for ts_col in TS_TYPE_COL if num_col is not FLOAT_COL and num_col is not DOUBLE_COL)
concat_condition.extend( f"cast( {bool_col} + {ts_col} as binary(16) )" for bool_col in BOOLEAN_COL for ts_col in TS_TYPE_COL )
@@ -96,7 +96,6 @@ class TDTestCase:
[ tdSql.query(f"select concat( {','.join( condition ) }) from {tbname} {where_condition} {group} ") for group in groups ]
def __concat_err_check(self,tbname):
sqls = []
@@ -139,7 +138,11 @@ class TDTestCase:
def __test_current(self): # sourcery skip: use-itertools-product
tdLog.printNoPrefix("==========current sql condition check , must return query ok==========")
tbname = ["ct1", "ct2", "ct4", "t1", "stb1"]
tbname = [
"ct1",
"ct2",
"ct4",
]
for tb in tbname:
for i in range(2,8):
self.__concat_check(tb,i)
@@ -147,7 +150,10 @@ class TDTestCase:
def __test_error(self):
tdLog.printNoPrefix("==========err sql condition check , must return error==========")
tbname = ["ct1", "ct2", "ct4", "t1", "stb1"]
tbname = [
"t1",
"stb1",
]
for tb in tbname:
for errsql in self.__concat_err_check(tb):

View File

@@ -0,0 +1,293 @@
from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import *
PRIMARY_COL = "ts"
INT_COL = "c1"
BINT_COL = "c2"
SINT_COL = "c3"
TINT_COL = "c4"
FLOAT_COL = "c5"
DOUBLE_COL = "c6"
BOOL_COL = "c7"
BINARY_COL = "c8"
NCHAR_COL = "c9"
TS_COL = "c10"
NUM_COL = [ INT_COL, BINT_COL, SINT_COL, TINT_COL, FLOAT_COL, DOUBLE_COL, ]
CHAR_COL = [ BINARY_COL, NCHAR_COL, ]
BOOLEAN_COL = [ BOOL_COL, ]
TS_TYPE_COL = [ TS_COL, ]
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
def __concat_condition(self): # sourcery skip: extract-method
concat_condition = []
for char_col in CHAR_COL:
concat_condition.extend(
(
char_col,
# f"upper( {char_col} )",
)
)
concat_condition.extend( f"cast( {num_col} as binary(16) ) " for num_col in NUM_COL)
concat_condition.extend( f"cast( {char_col} + {num_col} as binary(16) ) " for num_col in NUM_COL )
# concat_condition.extend( f"cast( {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL )
# concat_condition.extend( f"cast( {char_col} + {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL )
concat_condition.extend( f"cast( {ts_col} as binary(16) )" for ts_col in TS_TYPE_COL )
# concat_condition.extend( f"cast( {char_col} + {ts_col} as binary(16) )" for ts_col in TS_TYPE_COL )
concat_condition.extend( f"cast( {char_col} + {char_col_2} as binary(16) ) " for char_col_2 in CHAR_COL )
for num_col in NUM_COL:
# concat_condition.extend( f"cast( {num_col} + {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL )
concat_condition.extend( f"cast( {num_col} + {ts_col} as binary(16) )" for ts_col in TS_TYPE_COL if num_col is not FLOAT_COL and num_col is not DOUBLE_COL)
concat_condition.extend( f"cast( {bool_col} + {ts_col} as binary(16) )" for bool_col in BOOLEAN_COL for ts_col in TS_TYPE_COL )
concat_condition.append('''"test1234!@#$%^&*():'><?/.,][}{"''')
return concat_condition
def __where_condition(self, col):
# return f" where count({col}) > 0 "
return ""
def __concat_num(self, concat_lists, num):
return [ concat_lists[i] for i in range(num) ]
def __group_condition(self, col, having = ""):
return f" group by {col} having {having}" if having else f" group by {col} "
def __concat_check(self, tbname, num):
concat_condition = self.__concat_condition()
for i in range(len(concat_condition) - num + 1 ):
condition = self.__concat_num(concat_condition[i:], num)
concat_filter = f"concat( {','.join( condition ) }) "
where_condition = self.__where_condition(condition[0])
# group_having = self.__group_condition(condition[0], having=f"{condition[0]} is not null " )
concat_group_having = self.__group_condition(concat_filter, having=f"{concat_filter} is not null " )
# group_no_having= self.__group_condition(condition[0] )
concat_group_no_having= self.__group_condition(concat_filter)
groups = ["", concat_group_having, concat_group_no_having]
if num > 8 or num < 2 :
[tdSql.error(f"select concat( {','.join( condition ) }) from {tbname} {where_condition} {group} ") for group in groups ]
break
tdSql.query(f"select {','.join(condition)} from {tbname} ")
rows = tdSql.queryRows
concat_data = []
for m in range(rows):
concat_data.append("".join(tdSql.queryResult[m])) if tdSql.getData(m, 0) else concat_data.append(None)
tdSql.query(f"select concat( {','.join( condition ) }) from {tbname} ")
tdSql.checkRows(rows)
for j in range(tdSql.queryRows):
assert tdSql.getData(j, 0) in concat_data
[ tdSql.query(f"select concat( {','.join( condition ) }) from {tbname} {where_condition} {group} ") for group in groups ]
def __concat_err_check(self,tbname):
sqls = []
for char_col in CHAR_COL:
sqls.extend(
(
f"select concat( {char_col} ) from {tbname} ",
f"select concat(ceil( {char_col} )) from {tbname} ",
f"select {char_col} from {tbname} group by concat( {char_col} ) ",
)
)
sqls.extend( f"select concat( {char_col} , {num_col} ) from {tbname} " for num_col in NUM_COL )
sqls.extend( f"select concat( {char_col} , {ts_col} ) from {tbname} " for ts_col in TS_TYPE_COL )
sqls.extend( f"select concat( {char_col} , {bool_col} ) from {tbname} " for bool_col in BOOLEAN_COL )
sqls.extend( f"select concat( {ts_col}, {bool_col} ) from {tbname} " for ts_col in TS_TYPE_COL for bool_col in BOOLEAN_COL )
sqls.extend( f"select concat( {num_col} , {ts_col} ) from {tbname} " for num_col in NUM_COL for ts_col in TS_TYPE_COL)
sqls.extend( f"select concat( {num_col} , {bool_col} ) from {tbname} " for num_col in NUM_COL for bool_col in BOOLEAN_COL)
sqls.extend( f"select concat( {num_col} , {num_col} ) from {tbname} " for num_col in NUM_COL for num_col in NUM_COL)
sqls.extend( f"select concat( {ts_col}, {ts_col} ) from {tbname} " for ts_col in TS_TYPE_COL for ts_col in TS_TYPE_COL )
sqls.extend( f"select concat( {bool_col}, {bool_col} ) from {tbname} " for bool_col in BOOLEAN_COL for bool_col in BOOLEAN_COL )
sqls.extend( f"select concat( {char_col} + {char_col_2} ) from {tbname} " for char_col in CHAR_COL for char_col_2 in CHAR_COL )
sqls.extend( f"select concat({char_col}, 11) from {tbname} " for char_col in CHAR_COL )
sqls.extend( f"select concat({num_col}, '1') from {tbname} " for num_col in NUM_COL )
sqls.extend( f"select concat({ts_col}, '1') from {tbname} " for ts_col in TS_TYPE_COL )
sqls.extend( f"select concat({bool_col}, '1') from {tbname} " for bool_col in BOOLEAN_COL )
sqls.extend( f"select concat({char_col},'1') from {tbname} interval(2d) sliding(1d)" for char_col in CHAR_COL )
sqls.extend(
(
f"select concat() from {tbname} ",
f"select concat(*) from {tbname} ",
f"select concat(ccccccc) from {tbname} ",
f"select concat(111) from {tbname} ",
)
)
return sqls
def __test_current(self): # sourcery skip: use-itertools-product
tdLog.printNoPrefix("==========current sql condition check , must return query ok==========")
tbname = [
"t1",
"stb1",
]
for tb in tbname:
for i in range(2,8):
self.__concat_check(tb,i)
tdLog.printNoPrefix(f"==========current sql condition check in {tb}, col num: {i} over==========")
def __test_error(self):
tdLog.printNoPrefix("==========err sql condition check , must return error==========")
tbname = [
"ct1",
"ct4",
]
for tb in tbname:
for errsql in self.__concat_err_check(tb):
tdSql.error(sql=errsql)
self.__concat_check(tb,1)
self.__concat_check(tb,9)
tdLog.printNoPrefix(f"==========err sql condition check in {tb} over==========")
def all_test(self):
self.__test_current()
self.__test_error()
def __create_tb(self):
tdSql.prepare()
tdLog.printNoPrefix("==========step1:create table")
create_stb_sql = f'''create table stb1(
ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
) tags (t1 int)
'''
create_ntb_sql = f'''create table t1(
ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
)
'''
tdSql.execute(create_stb_sql)
tdSql.execute(create_ntb_sql)
for i in range(4):
tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
def __insert_data(self, rows):
now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000)
for i in range(rows):
tdSql.execute(
f"insert into ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
f"insert into ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
f"insert into ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
f'''insert into ct1 values
( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar_测试_0', { now_time + 8 } )
( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar_测试_9', { now_time + 9 } )
'''
)
tdSql.execute(
f'''insert into ct4 values
( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
(
{ now_time + 5184000000}, {pow(2,31)-pow(2,15)}, {pow(2,63)-pow(2,30)}, 32767, 127,
{ 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000}
)
(
{ now_time + 2592000000 }, {pow(2,31)-pow(2,16)}, {pow(2,63)-pow(2,31)}, 32766, 126,
{ 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000}
)
'''
)
tdSql.execute(
f'''insert into ct2 values
( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
(
{ now_time + 5184000000 }, { -1 * pow(2,31) + pow(2,15) }, { -1 * pow(2,63) + pow(2,30) }, -32766, -126,
{ -1 * 3.2 * pow(10,38) }, { -1.2 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 }
)
(
{ now_time + 2592000000 }, { -1 * pow(2,31) + pow(2,16) }, { -1 * pow(2,63) + pow(2,31) }, -32767, -127,
{ - 3.3 * pow(10,38) }, { -1.3 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 }
)
'''
)
for i in range(rows):
insert_data = f'''insert into t1 values
( { now_time - i * 3600000 }, {i}, {i * 11111}, { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2},
"binary_{i}", "nchar_测试_{i}", { now_time - 1000 * i } )
'''
tdSql.execute(insert_data)
tdSql.execute(
f'''insert into t1 values
( { now_time + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - (( rows // 2 ) * 60 + 30) * 60000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3600000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7200000 }, { pow(2,31) - pow(2,15) }, { pow(2,63) - pow(2,30) }, 32767, 127,
{ 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 },
"binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 }
)
(
{ now_time + 3600000 } , { pow(2,31) - pow(2,16) }, { pow(2,63) - pow(2,31) }, 32766, 126,
{ 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 },
"binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 }
)
'''
)
def run(self):
tdSql.prepare()
tdLog.printNoPrefix("==========step1:create table")
self.__create_tb()
tdLog.printNoPrefix("==========step2:insert data")
self.rows = 10
self.__insert_data(self.rows)
tdLog.printNoPrefix("==========step3:all check")
self.all_test()
tdDnodes.stop(1)
tdDnodes.start(1)
tdSql.execute("use db")
tdLog.printNoPrefix("==========step4:after wal, all check again ")
self.all_test()
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())

View File

@@ -36,22 +36,22 @@ class TDTestCase:
concat_ws_condition.extend(
(
char_col,
f"upper( {char_col} )",
# f"upper( {char_col} )",
)
)
concat_ws_condition.extend( f"cast( {num_col} as binary(16) ) " for num_col in NUM_COL)
concat_ws_condition.extend( f"cast( {char_col} + {num_col} as binary(16) ) " for num_col in NUM_COL )
concat_ws_condition.extend( f"cast( {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL )
concat_ws_condition.extend( f"cast( {char_col} + {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL )
# concat_ws_condition.extend( f"cast( {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL )
# concat_ws_condition.extend( f"cast( {char_col} + {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL )
concat_ws_condition.extend( f"cast( {ts_col} as binary(16) )" for ts_col in TS_TYPE_COL )
# concat_ws_condition.extend( f"cast( {char_col} + {ts_col} as binary(16) )" for ts_col in TS_TYPE_COL )
concat_ws_condition.extend( f"cast( {char_col} + {char_col_2} as binary(16) ) " for char_col_2 in CHAR_COL )
for num_col in NUM_COL:
concat_ws_condition.extend( f"cast( {num_col} + {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL )
# concat_ws_condition.extend( f"cast( {num_col} + {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL )
concat_ws_condition.extend( f"cast( {num_col} + {ts_col} as binary(16) )" for ts_col in TS_TYPE_COL if num_col is not FLOAT_COL and num_col is not DOUBLE_COL)
concat_ws_condition.extend( f"cast( {bool_col} + {ts_col} as binary(16) )" for bool_col in BOOLEAN_COL for ts_col in TS_TYPE_COL )
# concat_ws_condition.extend( f"cast( {bool_col} + {ts_col} as binary(16) )" for bool_col in BOOLEAN_COL for ts_col in TS_TYPE_COL )
concat_ws_condition.append('''"test1234!@#$%^&*():'><?/.,][}{"''')
@@ -139,7 +139,10 @@ class TDTestCase:
def __test_current(self): # sourcery skip: use-itertools-product
tdLog.printNoPrefix("==========current sql condition check , must return query ok==========")
tbname = ["ct1", "ct2", "ct4", "t1", "stb1"]
tbname = [
"t1",
"stb1"
]
for tb in tbname:
for i in range(2,8):
self.__concat_ws_check(tb,i)
@@ -147,7 +150,11 @@ class TDTestCase:
def __test_error(self):
tdLog.printNoPrefix("==========err sql condition check , must return error==========")
tbname = ["ct1", "ct2", "ct4", "t1", "stb1"]
tbname = [
"ct1",
"ct2",
"ct4",
]
for tb in tbname:
for errsql in self.__concat_ws_err_check(tb):

View File

@@ -0,0 +1,294 @@
from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import *
PRIMARY_COL = "ts"
INT_COL = "c1"
BINT_COL = "c2"
SINT_COL = "c3"
TINT_COL = "c4"
FLOAT_COL = "c5"
DOUBLE_COL = "c6"
BOOL_COL = "c7"
BINARY_COL = "c8"
NCHAR_COL = "c9"
TS_COL = "c10"
NUM_COL = [ INT_COL, BINT_COL, SINT_COL, TINT_COL, FLOAT_COL, DOUBLE_COL, ]
CHAR_COL = [ BINARY_COL, NCHAR_COL, ]
BOOLEAN_COL = [ BOOL_COL, ]
TS_TYPE_COL = [ TS_COL, ]
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
def __concat_ws_condition(self): # sourcery skip: extract-method
concat_ws_condition = []
for char_col in CHAR_COL:
concat_ws_condition.extend(
(
char_col,
# f"upper( {char_col} )",
)
)
concat_ws_condition.extend( f"cast( {num_col} as binary(16) ) " for num_col in NUM_COL)
concat_ws_condition.extend( f"cast( {char_col} + {num_col} as binary(16) ) " for num_col in NUM_COL )
# concat_ws_condition.extend( f"cast( {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL )
# concat_ws_condition.extend( f"cast( {char_col} + {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL )
concat_ws_condition.extend( f"cast( {ts_col} as binary(16) )" for ts_col in TS_TYPE_COL )
# concat_ws_condition.extend( f"cast( {char_col} + {ts_col} as binary(16) )" for ts_col in TS_TYPE_COL )
concat_ws_condition.extend( f"cast( {char_col} + {char_col_2} as binary(16) ) " for char_col_2 in CHAR_COL )
for num_col in NUM_COL:
# concat_ws_condition.extend( f"cast( {num_col} + {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL )
concat_ws_condition.extend( f"cast( {num_col} + {ts_col} as binary(16) )" for ts_col in TS_TYPE_COL if num_col is not FLOAT_COL and num_col is not DOUBLE_COL)
# concat_ws_condition.extend( f"cast( {bool_col} + {ts_col} as binary(16) )" for bool_col in BOOLEAN_COL for ts_col in TS_TYPE_COL )
concat_ws_condition.append('''"test1234!@#$%^&*():'><?/.,][}{"''')
return concat_ws_condition
def __where_condition(self, col):
# return f" where count({col}) > 0 "
return ""
def __concat_ws_num(self, concat_ws_lists, num):
return [ concat_ws_lists[i] for i in range(num) ]
def __group_condition(self, col, having = ""):
return f" group by {col} having {having}" if having else f" group by {col} "
def __concat_ws_check(self, tbname, num):
concat_ws_condition = self.__concat_ws_condition()
for i in range(len(concat_ws_condition) - num + 1 ):
condition = self.__concat_ws_num(concat_ws_condition[i:], num)
concat_ws_filter = f"concat_ws('_', {','.join( condition ) }) "
where_condition = self.__where_condition(condition[0])
# group_having = self.__group_condition(condition[0], having=f"{condition[0]} is not null " )
concat_ws_group_having = self.__group_condition(concat_ws_filter, having=f"{concat_ws_filter} is not null " )
# group_no_having= self.__group_condition(condition[0] )
concat_ws_group_no_having= self.__group_condition(concat_ws_filter)
groups = ["", concat_ws_group_having, concat_ws_group_no_having]
if num > 8 or num < 2 :
[tdSql.error(f"select concat_ws('_', {','.join( condition ) }) from {tbname} {where_condition} {group} ") for group in groups ]
break
tdSql.query(f"select {','.join(condition)} from {tbname} ")
rows = tdSql.queryRows
concat_ws_data = []
for m in range(rows):
concat_ws_data.append("_".join(tdSql.queryResult[m])) if tdSql.getData(m, 0) else concat_ws_data.append(None)
tdSql.query(f"select concat_ws('_', {','.join( condition ) }) from {tbname} ")
tdSql.checkRows(rows)
for j in range(tdSql.queryRows):
assert tdSql.getData(j, 0) in concat_ws_data
[ tdSql.query(f"select concat_ws('_', {','.join( condition ) }) from {tbname} {where_condition} {group} ") for group in groups ]
def __concat_ws_err_check(self,tbname):
sqls = []
for char_col in CHAR_COL:
sqls.extend(
(
f"select concat_ws('_', {char_col} ) from {tbname} ",
f"select concat_ws('_', ceil( {char_col} )) from {tbname} ",
f"select {char_col} from {tbname} group by concat_ws('_', {char_col} ) ",
)
)
sqls.extend( f"select concat_ws('_', {char_col} , {num_col} ) from {tbname} " for num_col in NUM_COL )
sqls.extend( f"select concat_ws('_', {char_col} , {ts_col} ) from {tbname} " for ts_col in TS_TYPE_COL )
sqls.extend( f"select concat_ws('_', {char_col} , {bool_col} ) from {tbname} " for bool_col in BOOLEAN_COL )
sqls.extend( f"select concat_ws('_', {ts_col}, {bool_col} ) from {tbname} " for ts_col in TS_TYPE_COL for bool_col in BOOLEAN_COL )
sqls.extend( f"select concat_ws('_', {num_col} , {ts_col} ) from {tbname} " for num_col in NUM_COL for ts_col in TS_TYPE_COL)
sqls.extend( f"select concat_ws('_', {num_col} , {bool_col} ) from {tbname} " for num_col in NUM_COL for bool_col in BOOLEAN_COL)
sqls.extend( f"select concat_ws('_', {num_col} , {num_col} ) from {tbname} " for num_col in NUM_COL for num_col in NUM_COL)
sqls.extend( f"select concat_ws('_', {ts_col}, {ts_col} ) from {tbname} " for ts_col in TS_TYPE_COL for ts_col in TS_TYPE_COL )
sqls.extend( f"select concat_ws('_', {bool_col}, {bool_col} ) from {tbname} " for bool_col in BOOLEAN_COL for bool_col in BOOLEAN_COL )
sqls.extend( f"select concat_ws('_', {char_col} + {char_col_2} ) from {tbname} " for char_col in CHAR_COL for char_col_2 in CHAR_COL )
sqls.extend( f"select concat_ws('_', {char_col}, 11) from {tbname} " for char_col in CHAR_COL )
sqls.extend( f"select concat_ws('_', {num_col}, '1') from {tbname} " for num_col in NUM_COL )
sqls.extend( f"select concat_ws('_', {ts_col}, '1') from {tbname} " for ts_col in TS_TYPE_COL )
sqls.extend( f"select concat_ws('_', {bool_col}, '1') from {tbname} " for bool_col in BOOLEAN_COL )
sqls.extend( f"select concat_ws('_', {char_col},'1') from {tbname} interval(2d) sliding(1d)" for char_col in CHAR_COL )
sqls.extend(
(
f"select concat_ws('_', ) from {tbname} ",
f"select concat_ws('_', *) from {tbname} ",
f"select concat_ws('_', ccccccc) from {tbname} ",
f"select concat_ws('_', 111) from {tbname} ",
)
)
return sqls
def __test_current(self): # sourcery skip: use-itertools-product
tdLog.printNoPrefix("==========current sql condition check , must return query ok==========")
tbname = [
"ct1",
"ct2",
"ct4",
]
for tb in tbname:
for i in range(2,8):
self.__concat_ws_check(tb,i)
tdLog.printNoPrefix(f"==========current sql condition check in {tb}, col num: {i} over==========")
def __test_error(self):
tdLog.printNoPrefix("==========err sql condition check , must return error==========")
tbname = [
"t1",
"stb1"
]
for tb in tbname:
for errsql in self.__concat_ws_err_check(tb):
tdSql.error(sql=errsql)
self.__concat_ws_check(tb,1)
self.__concat_ws_check(tb,9)
tdLog.printNoPrefix(f"==========err sql condition check in {tb} over==========")
def all_test(self):
self.__test_current()
self.__test_error()
def __create_tb(self):
tdSql.prepare()
tdLog.printNoPrefix("==========step1:create table")
create_stb_sql = f'''create table stb1(
ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
) tags (t1 int)
'''
create_ntb_sql = f'''create table t1(
ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
)
'''
tdSql.execute(create_stb_sql)
tdSql.execute(create_ntb_sql)
for i in range(4):
tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
def __insert_data(self, rows):
now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000)
for i in range(rows):
tdSql.execute(
f"insert into ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
f"insert into ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
f"insert into ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
f'''insert into ct1 values
( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar_测试_0', { now_time + 8 } )
( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar_测试_9', { now_time + 9 } )
'''
)
tdSql.execute(
f'''insert into ct4 values
( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
(
{ now_time + 5184000000}, {pow(2,31)-pow(2,15)}, {pow(2,63)-pow(2,30)}, 32767, 127,
{ 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000}
)
(
{ now_time + 2592000000 }, {pow(2,31)-pow(2,16)}, {pow(2,63)-pow(2,31)}, 32766, 126,
{ 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000}
)
'''
)
tdSql.execute(
f'''insert into ct2 values
( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
(
{ now_time + 5184000000 }, { -1 * pow(2,31) + pow(2,15) }, { -1 * pow(2,63) + pow(2,30) }, -32766, -126,
{ -1 * 3.2 * pow(10,38) }, { -1.2 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 }
)
(
{ now_time + 2592000000 }, { -1 * pow(2,31) + pow(2,16) }, { -1 * pow(2,63) + pow(2,31) }, -32767, -127,
{ - 3.3 * pow(10,38) }, { -1.3 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 }
)
'''
)
for i in range(rows):
insert_data = f'''insert into t1 values
( { now_time - i * 3600000 }, {i}, {i * 11111}, { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2},
"binary_{i}", "nchar_测试_{i}", { now_time - 1000 * i } )
'''
tdSql.execute(insert_data)
tdSql.execute(
f'''insert into t1 values
( { now_time + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - (( rows // 2 ) * 60 + 30) * 60000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3600000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7200000 }, { pow(2,31) - pow(2,15) }, { pow(2,63) - pow(2,30) }, 32767, 127,
{ 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 },
"binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 }
)
(
{ now_time + 3600000 } , { pow(2,31) - pow(2,16) }, { pow(2,63) - pow(2,31) }, 32766, 126,
{ 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 },
"binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 }
)
'''
)
def run(self):
tdSql.prepare()
tdLog.printNoPrefix("==========step1:create table")
self.__create_tb()
tdLog.printNoPrefix("==========step2:insert data")
self.rows = 10
self.__insert_data(self.rows)
tdLog.printNoPrefix("==========step3:all check")
self.all_test()
tdDnodes.stop(1)
tdDnodes.start(1)
tdSql.execute("use db")
tdLog.printNoPrefix("==========step4:after wal, all check again ")
self.all_test()
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())

File diff suppressed because it is too large

View File

@@ -0,0 +1,361 @@
import datetime
from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import *
PRIMARY_COL = "ts"
INT_COL = "c1"
BINT_COL = "c2"
SINT_COL = "c3"
TINT_COL = "c4"
FLOAT_COL = "c5"
DOUBLE_COL = "c6"
BOOL_COL = "c7"
BINARY_COL = "c8"
NCHAR_COL = "c9"
TS_COL = "c10"
NUM_COL = [ INT_COL, BINT_COL, SINT_COL, TINT_COL, FLOAT_COL, DOUBLE_COL, ]
CHAR_COL = [ BINARY_COL, NCHAR_COL, ]
BOOLEAN_COL = [ BOOL_COL, ]
TS_TYPE_COL = [ TS_COL, ]
ALL_COL = [ INT_COL, BINT_COL, SINT_COL, TINT_COL, FLOAT_COL, DOUBLE_COL, BOOL_COL, BINARY_COL, NCHAR_COL, TS_COL ]
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
def __query_condition(self,tbname):
query_condition = [f"cast({col} as bigint)" for col in ALL_COL]
for num_col in NUM_COL:
query_condition.extend(
(
f"{tbname}.{num_col}",
f"abs( {tbname}.{num_col} )",
f"acos( {tbname}.{num_col} )",
f"asin( {tbname}.{num_col} )",
f"atan( {tbname}.{num_col} )",
f"avg( {tbname}.{num_col} )",
f"ceil( {tbname}.{num_col} )",
f"cos( {tbname}.{num_col} )",
f"count( {tbname}.{num_col} )",
f"floor( {tbname}.{num_col} )",
f"log( {tbname}.{num_col}, {tbname}.{num_col})",
f"max( {tbname}.{num_col} )",
f"min( {tbname}.{num_col} )",
f"pow( {tbname}.{num_col}, 2)",
f"round( {tbname}.{num_col} )",
f"sum( {tbname}.{num_col} )",
f"sin( {tbname}.{num_col} )",
f"sqrt( {tbname}.{num_col} )",
f"tan( {tbname}.{num_col} )",
f"cast( {tbname}.{num_col} as timestamp)",
)
)
query_condition.extend((f"{num_col} + {any_col}" for any_col in ALL_COL))
for char_col in CHAR_COL:
query_condition.extend(
(
f"count({tbname}.{char_col})",
f"sum(cast({tbname}.{char_col}) as bigint)",
f"max(cast({tbname}.{char_col}) as bigint)",
f"min(cast({tbname}.{char_col}) as bigint)",
f"avg(cast({tbname}.{char_col}) as bigint)",
)
)
# query_condition.extend(
# (
# 1010,
# )
# )
return query_condition
def __join_condition(self, tb_list, filter=PRIMARY_COL, INNER=False):
table_reference = tb_list[0]
join_condition = table_reference
join = "inner join" if INNER else "join"
for i in range(len(tb_list[1:])):
join_condition += f" {join} {tb_list[i+1]} on {table_reference}.{filter}={tb_list[i+1]}.{filter}"
return join_condition
def __where_condition(self, col=None, tbname=None, query_conditon=None):
if query_conditon and isinstance(query_conditon, str):
if query_conditon.startswith("count"):
query_conditon = query_conditon[6:-1]
elif query_conditon.startswith("max"):
query_conditon = query_conditon[4:-1]
elif query_conditon.startswith("sum"):
query_conditon = query_conditon[4:-1]
elif query_conditon.startswith("min"):
query_conditon = query_conditon[4:-1]
if query_conditon:
return f" where {query_conditon} is not null"
if col in NUM_COL:
return f" where abs( {tbname}.{col} ) >= 0"
if col in CHAR_COL:
return f" where lower( {tbname}.{col} ) like 'bina%' or lower( {tbname}.{col} ) like '_cha%' "
if col in BOOLEAN_COL:
return f" where {tbname}.{col} in (false, true) "
if col in TS_TYPE_COL or col in PRIMARY_COL:
return f" where cast( {tbname}.{col} as binary(16) ) is not null "
return ""
def __group_condition(self, col, having = None):
if isinstance(col, str):
if col.startswith("count"):
col = col[6:-1]
elif col.startswith("max"):
col = col[4:-1]
elif col.startswith("sum"):
col = col[4:-1]
elif col.startswith("min"):
col = col[4:-1]
return f" group by {col} having {having}" if having else f" group by {col} "
def __single_sql(self, select_clause, from_clause, where_condition="", group_condition=""):
if isinstance(select_clause, str) and "on" not in from_clause and select_clause.split(".")[0] != from_clause.split(".")[0]:
return
return f"select hyperloglog({select_clause}) from {from_clause} {where_condition} {group_condition}"
@property
def __tb_list(self):
return [
"ct1",
"ct4",
"t1",
"ct2",
"stb1",
]
def sql_list(self):
sqls = []
__no_join_tblist = self.__tb_list
for tb in __no_join_tblist:
select_claus_list = self.__query_condition(tb)
for select_claus in select_claus_list:
group_claus = self.__group_condition(col=select_claus)
where_claus = self.__where_condition(query_conditon=select_claus)
having_claus = self.__group_condition(col=select_claus, having=f"{select_claus} is not null")
sqls.extend(
(
self.__single_sql(select_claus, tb, where_claus, having_claus),
self.__single_sql(select_claus, tb,),
self.__single_sql(select_claus, tb, where_condition=where_claus),
self.__single_sql(select_claus, tb, group_condition=group_claus),
)
)
# return filter(None, sqls)
return list(filter(None, sqls))
def __get_type(self, col):
if tdSql.cursor.istype(col, "BOOL"):
return "BOOL"
if tdSql.cursor.istype(col, "INT"):
return "INT"
if tdSql.cursor.istype(col, "BIGINT"):
return "BIGINT"
if tdSql.cursor.istype(col, "TINYINT"):
return "TINYINT"
if tdSql.cursor.istype(col, "SMALLINT"):
return "SMALLINT"
if tdSql.cursor.istype(col, "FLOAT"):
return "FLOAT"
if tdSql.cursor.istype(col, "DOUBLE"):
return "DOUBLE"
if tdSql.cursor.istype(col, "BINARY"):
return "BINARY"
if tdSql.cursor.istype(col, "NCHAR"):
return "NCHAR"
if tdSql.cursor.istype(col, "TIMESTAMP"):
return "TIMESTAMP"
if tdSql.cursor.istype(col, "JSON"):
return "JSON"
if tdSql.cursor.istype(col, "TINYINT UNSIGNED"):
return "TINYINT UNSIGNED"
if tdSql.cursor.istype(col, "SMALLINT UNSIGNED"):
return "SMALLINT UNSIGNED"
if tdSql.cursor.istype(col, "INT UNSIGNED"):
return "INT UNSIGNED"
if tdSql.cursor.istype(col, "BIGINT UNSIGNED"):
return "BIGINT UNSIGNED"
def spread_check(self):
sqls = self.sql_list()
tdLog.printNoPrefix("===step 1: curent case, must return query OK")
for i in range(len(sqls)):
tdLog.info(f"sql: {sqls[i]}")
tdSql.query(sqls[i])
def __test_current(self):
tdSql.query("select hyperloglog(ts) from ct1")
tdSql.checkRows(1)
tdSql.query("select hyperloglog(c1) from ct2")
tdSql.checkRows(1)
tdSql.query("select hyperloglog(c1) from ct4 group by c1")
tdSql.checkRows(self.rows + 3)
tdSql.query("select hyperloglog(c1) from ct4 group by c7")
tdSql.checkRows(3)
tdSql.query("select hyperloglog(ct2.c1) from ct4 join ct2 on ct4.ts=ct2.ts")
tdSql.checkRows(1)
tdSql.checkData(0, 0, self.rows + 2)
tdSql.query("select hyperloglog(c1), c1 from stb1 group by c1")
for i in range(tdSql.queryRows):
if tdSql.queryResult[i][1] is not None:
    tdSql.checkData(i, 0, 1)
else:
    tdSql.checkData(i, 0, 0)
self.hyperloglog_check()
def __test_error(self):
tdLog.printNoPrefix("===step 0: err case, must return err")
tdSql.error( "select hyperloglog() from ct1" )
tdSql.error( "select hyperloglog(c1, c2) from ct2" )
tdSql.error( "select hyperloglog(1) from ct2" )
tdSql.error( f"select hyperloglog({NUM_COL[0]}, {NUM_COL[1]}) from ct4" )
tdSql.error( ''' select hyperloglog(['c1 + c1', 'c1 + c2', 'c1 + c3', 'c1 + c4', 'c1 + c5', 'c1 + c6', 'c1 + c7', 'c1 + c8', 'c1 + c9', 'c1 + c10'])
from ct1
where ['c1 + c1', 'c1 + c2', 'c1 + c3', 'c1 + c4', 'c1 + c5', 'c1 + c6', 'c1 + c7', 'c1 + c8', 'c1 + c9', 'c1 + c10'] is not null
group by ['c1 + c1', 'c1 + c2', 'c1 + c3', 'c1 + c4', 'c1 + c5', 'c1 + c6', 'c1 + c7', 'c1 + c8', 'c1 + c9', 'c1 + c10']
having ['c1 + c1', 'c1 + c2', 'c1 + c3', 'c1 + c4', 'c1 + c5', 'c1 + c6', 'c1 + c7', 'c1 + c8', 'c1 + c9', 'c1 + c10'] is not null ''' )
def all_test(self):
self.__test_error()
self.__test_current()
def __create_tb(self):
tdLog.printNoPrefix("==========step1:create table")
create_stb_sql = f'''create table stb1(
ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
) tags (t1 int)
'''
create_ntb_sql = f'''create table t1(
ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
)
'''
tdSql.execute(create_stb_sql)
tdSql.execute(create_ntb_sql)
for i in range(4):
tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
def __insert_data(self, rows):
now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000)
for i in range(rows):
tdSql.execute(
f"insert into ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
f"insert into ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
f"insert into ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
f'''insert into ct1 values
( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar_测试_0', { now_time + 8 } )
( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar_测试_9', { now_time + 9 } )
'''
)
tdSql.execute(
f'''insert into ct4 values
( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
(
{ now_time + 5184000000}, {pow(2,31)-pow(2,15)}, {pow(2,63)-pow(2,30)}, 32767, 127,
{ 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000}
)
(
{ now_time + 2592000000 }, {pow(2,31)-pow(2,16)}, {pow(2,63)-pow(2,31)}, 32766, 126,
{ 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000}
)
'''
)
tdSql.execute(
f'''insert into ct2 values
( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
(
{ now_time + 5184000000 }, { -1 * pow(2,31) + pow(2,15) }, { -1 * pow(2,63) + pow(2,30) }, -32766, -126,
{ -1 * 3.2 * pow(10,38) }, { -1.2 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 }
)
(
{ now_time + 2592000000 }, { -1 * pow(2,31) + pow(2,16) }, { -1 * pow(2,63) + pow(2,31) }, -32767, -127,
{ - 3.3 * pow(10,38) }, { -1.3 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 }
)
'''
)
for i in range(rows):
insert_data = f'''insert into t1 values
( { now_time - i * 3600000 }, {i}, {i * 11111}, { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2},
"binary_{i}", "nchar_测试_{i}", { now_time - 1000 * i } )
'''
tdSql.execute(insert_data)
tdSql.execute(
f'''insert into t1 values
( { now_time + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - (( rows // 2 ) * 60 + 30) * 60000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3600000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7200000 }, { pow(2,31) - pow(2,15) }, { pow(2,63) - pow(2,30) }, 32767, 127,
{ 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 },
"binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 }
)
(
{ now_time + 3600000 } , { pow(2,31) - pow(2,16) }, { pow(2,63) - pow(2,31) }, 32766, 126,
{ 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 },
"binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 }
)
'''
)
def run(self):
tdSql.prepare()
tdLog.printNoPrefix("==========step1:create table")
self.__create_tb()
tdLog.printNoPrefix("==========step2:insert data")
self.rows = 10
self.__insert_data(self.rows)
tdLog.printNoPrefix("==========step3:all check")
self.all_test()
tdDnodes.stop(1)
tdDnodes.start(1)
tdSql.execute("use db")
tdLog.printNoPrefix("==========step4:after wal, all check again ")
self.all_test()
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())

View File

@@ -36,17 +36,14 @@ class TDTestCase:
query_condition.extend(
(
f"{tbname}.{char_col}",
f"upper( {tbname}.{char_col} )",
# f"upper( {tbname}.{char_col} )",
)
)
query_condition.extend( f"cast( {tbname}.{un_char_col} as binary(16) ) " for un_char_col in NUM_COL)
query_condition.extend( f"cast( {tbname}.{char_col} + {tbname}.{char_col_2} as binary(32) ) " for char_col_2 in CHAR_COL )
query_condition.extend( f"cast( {tbname}.{char_col} + {tbname}.{un_char_col} as binary(32) ) " for un_char_col in NUM_COL )
for num_col in NUM_COL:
query_condition.extend(
(
f"{tbname}.{num_col}",
f"sin( {tbname}.{num_col} )"
f"sin( {tbname}.{num_col} )",
)
)
query_condition.extend( f"{tbname}.{num_col} + {tbname}.{num_col_1} " for num_col_1 in NUM_COL )
@@ -55,41 +52,115 @@ class TDTestCase:
return query_condition
def __join_condition(self, tb_list, filter=PRIMARY_COL):
# sourcery skip: flip-comparison
if 1 == len(tb_list):
join_filter = f"{tb_list[0]}.{filter} = {tb_list[0]}.{filter} "
elif 2 == len(tb_list):
join_filter = f"{tb_list[0]}.{filter} = {tb_list[1]}.{filter} "
else:
join_filter = f"{tb_list[0]}.{filter} = {tb_list[1]}.{filter} "
for i in range(1, len(tb_list)-1 ):
join_filter += f"and {tb_list[i]}.{filter} = {tb_list[i+1]}.{filter}"
def __join_condition(self, tb_list, filter=PRIMARY_COL, INNER=False):
table_reference = tb_list[0]
join_condition = table_reference
join = "inner join" if INNER else "join"
for i in range(len(tb_list[1:])):
join_condition += f" {join} {tb_list[i+1]} on {table_reference}.{filter}={tb_list[i+1]}.{filter}"
return join_filter
return join_condition
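# A hedged sketch of the string __join_condition builds; the _demo_* name,
# the on_col parameter, and the table names below are illustrative only.
def _demo_join_condition(tb_list, on_col="ts", inner=False):
    # the first table anchors every on-clause, mirroring the loop above
    join = "inner join" if inner else "join"
    cond = tb_list[0]
    for tb in tb_list[1:]:
        cond += f" {join} {tb} on {tb_list[0]}.{on_col}={tb}.{on_col}"
    return cond

print(_demo_join_condition(["ct1", "ct4"]))
# -> ct1 join ct4 on ct1.ts=ct4.ts
print(_demo_join_condition(["ct1", "ct4", "t1"], inner=True))
# -> ct1 inner join ct4 on ct1.ts=ct4.ts inner join t1 on ct1.ts=t1.ts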
def __where_condition(self, col, tbname):
def __where_condition(self, col=None, tbname=None, query_conditon=None):
if query_conditon and isinstance(query_conditon, str):
if query_conditon.startswith("count"):
query_conditon = query_conditon[6:-1]
elif query_conditon.startswith("max"):
query_conditon = query_conditon[4:-1]
elif query_conditon.startswith("sum"):
query_conditon = query_conditon[4:-1]
elif query_conditon.startswith("min"):
query_conditon = query_conditon[4:-1]
if query_conditon:
return f" where {query_conditon} is not null"
if col in NUM_COL:
return f" abs( {tbname}.{col} ) >= 0"
elif col in CHAR_COL:
return f" lower( {tbname}.{col} ) like 'bina%' or lower( {tbname}.{col} ) like '_cha%' "
elif col in BOOLEAN_COL:
return f" {tbname}.{col} in (false, true) "
elif col in TS_TYPE_COL or col in PRIMARY_COL:
return f" cast( {tbname}.{col} as binary(16) ) is not null "
else:
return ""
return f" where abs( {tbname}.{col} ) >= 0"
if col in CHAR_COL:
return f" where lower( {tbname}.{col} ) like 'bina%' or lower( {tbname}.{col} ) like '_cha%' "
if col in BOOLEAN_COL:
return f" where {tbname}.{col} in (false, true) "
if col in TS_TYPE_COL or col in PRIMARY_COL:
return f" where cast( {tbname}.{col} as binary(16) ) is not null "
def __group_condition(self, tbname, col, having = ""):
return ""
def __group_condition(self, col, having = None):
if isinstance(col, str):
if col.startswith("count"):
col = col[6:-1]
elif col.startswith("max"):
col = col[4:-1]
elif col.startswith("sum"):
col = col[4:-1]
elif col.startswith("min"):
col = col[4:-1]
return f" group by {col} having {having}" if having else f" group by {col} "
def __join_check(self, tblist, checkrows, join_flag=True):
def __gen_sql(self, select_clause, from_clause, where_condition="", group_condition=""):
if isinstance(select_clause, str) and "on" not in from_clause and select_clause.split(".")[0] != from_clause.split(".")[0]:
return
return f"select {select_clause} from {from_clause} {where_condition} {group_condition}"
@property
def __join_tblist(self):
return [
# ["ct1", "ct2"],
["ct1", "ct4"],
["ct1", "t1"],
# ["ct2", "ct4"],
# ["ct2", "t1"],
# ["ct4", "t1"],
# ["ct1", "ct2", "ct4"],
# ["ct1", "ct2", "t1"],
# ["ct1", "ct4", "t1"],
# ["ct2", "ct4", "t1"],
# ["ct1", "ct2", "ct4", "t1"],
]
@property
def __sqls_list(self):
sqls = []
__join_tblist = self.__join_tblist
for join_tblist in __join_tblist:
for join_tb in join_tblist:
select_claus_list = self.__query_condition(join_tb)
for select_claus in select_claus_list:
group_claus = self.__group_condition( col=select_claus)
where_claus = self.__where_condition( query_conditon=select_claus )
having_claus = self.__group_condition( col=select_claus, having=f"{select_claus} is not null" )
sqls.extend(
(
# self.__gen_sql(select_claus, self.__join_condition(join_tblist), where_claus, group_claus),
self.__gen_sql(select_claus, self.__join_condition(join_tblist), where_claus, having_claus),
self.__gen_sql(select_claus, self.__join_condition(join_tblist), where_claus),
# self.__gen_sql(select_claus, self.__join_condition(join_tblist), group_claus),
self.__gen_sql(select_claus, self.__join_condition(join_tblist), having_claus),
self.__gen_sql(select_claus, self.__join_condition(join_tblist)),
# self.__gen_sql(select_claus, self.__join_condition(join_tblist, INNER=True), where_claus, group_claus),
self.__gen_sql(select_claus, self.__join_condition(join_tblist, INNER=True), where_claus, having_claus),
self.__gen_sql(select_claus, self.__join_condition(join_tblist, INNER=True), where_claus, ),
self.__gen_sql(select_claus, self.__join_condition(join_tblist, INNER=True), having_claus ),
# self.__gen_sql(select_claus, self.__join_condition(join_tblist, INNER=True), group_claus ),
self.__gen_sql(select_claus, self.__join_condition(join_tblist, INNER=True) ),
)
)
return list(filter(None, sqls))
def __join_check(self,):
tdLog.printNoPrefix("==========current sql condition check , must return query ok==========")
for i in range(len(self.__sqls_list)):
tdSql.query(self.__sqls_list[i])
# if i % 10 == 0 :
# tdLog.success(f"{i} sql is already executed success !")
def __join_check_old(self, tblist, checkrows, join_flag=True):
query_conditions = self.__query_condition(tblist[0])
join_condition = self.__join_condition(tb_list=tblist) if join_flag else " "
for condition in query_conditions:
where_condition = self.__where_condition(col=condition, tbname=tblist[0])
group_having = self.__group_condition(tbname=tblist[0], col=condition, having=f"{condition} is not null " )
group_no_having= self.__group_condition(tbname=tblist[0], col=condition )
group_having = self.__group_condition(col=condition, having=f"{condition} is not null " )
group_no_having= self.__group_condition(col=condition )
groups = ["", group_having, group_no_having]
for group_condition in groups:
if where_condition:
@@ -116,23 +187,6 @@ class TDTestCase:
tdSql.query(sql=sql)
# tdSql.checkRows(checkrows)
def __test_current(self):
# sourcery skip: extract-duplicate-method, inline-immediately-returned-variable
tdLog.printNoPrefix("==========current sql condition check , must return query ok==========")
tblist_1 = ["ct1", "ct2"]
self.__join_check(tblist_1, 1)
tdLog.printNoPrefix(f"==========current sql condition check in {tblist_1} over==========")
tblist_2 = ["ct2", "ct4"]
self.__join_check(tblist_2, self.rows)
tdLog.printNoPrefix(f"==========current sql condition check in {tblist_2} over==========")
tblist_3 = ["t1", "ct4"]
self.__join_check(tblist_3, 1)
tdLog.printNoPrefix(f"==========current sql condition check in {tblist_3} over==========")
tblist_4 = ["t1", "ct1"]
self.__join_check(tblist_4, 1)
tdLog.printNoPrefix(f"==========current sql condition check in {tblist_4} over==========")
def __test_error(self):
# sourcery skip: extract-duplicate-method, move-assign-in-block
tdLog.printNoPrefix("==========err sql condition check , must return error==========")
@@ -141,17 +195,17 @@ class TDTestCase:
err_list_3 = ["ct1","ct4", "t1"]
err_list_4 = ["ct2","ct4", "t1"]
err_list_5 = ["ct1", "ct2","ct4", "t1"]
self.__join_check(err_list_1, -1)
self.__join_check_old(err_list_1, -1)
tdLog.printNoPrefix(f"==========err sql condition check in {err_list_1} over==========")
self.__join_check(err_list_2, -1)
self.__join_check_old(err_list_2, -1)
tdLog.printNoPrefix(f"==========err sql condition check in {err_list_2} over==========")
self.__join_check(err_list_3, -1)
self.__join_check_old(err_list_3, -1)
tdLog.printNoPrefix(f"==========err sql condition check in {err_list_3} over==========")
self.__join_check(err_list_4, -1)
self.__join_check_old(err_list_4, -1)
tdLog.printNoPrefix(f"==========err sql condition check in {err_list_4} over==========")
self.__join_check(err_list_5, -1)
self.__join_check_old(err_list_5, -1)
tdLog.printNoPrefix(f"==========err sql condition check in {err_list_5} over==========")
self.__join_check(["ct2", "ct4"], -1, join_flag=False)
self.__join_check_old(["ct2", "ct4"], -1, join_flag=False)
tdLog.printNoPrefix("==========err sql condition check in has no join condition over==========")
tdSql.error( f"select c1, c2 from ct2, ct4 where ct2.{PRIMARY_COL}=ct4.{PRIMARY_COL}" )
@@ -172,7 +226,7 @@ class TDTestCase:
def all_test(self):
self.__test_current()
self.__join_check()
self.__test_error()

View File

@@ -0,0 +1,357 @@
import datetime
from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import *
PRIMARY_COL = "ts"
INT_COL = "c1"
BINT_COL = "c2"
SINT_COL = "c3"
TINT_COL = "c4"
FLOAT_COL = "c5"
DOUBLE_COL = "c6"
BOOL_COL = "c7"
BINARY_COL = "c8"
NCHAR_COL = "c9"
TS_COL = "c10"
NUM_COL = [ INT_COL, BINT_COL, SINT_COL, TINT_COL, FLOAT_COL, DOUBLE_COL, ]
CHAR_COL = [ BINARY_COL, NCHAR_COL, ]
BOOLEAN_COL = [ BOOL_COL, ]
TS_TYPE_COL = [ TS_COL, ]
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), True)
def __query_condition(self,tbname):
query_condition = []
for char_col in CHAR_COL:
query_condition.extend(
(
f"{tbname}.{char_col}",
# f"upper( {tbname}.{char_col} )",
)
)
query_condition.extend( f"cast( {tbname}.{un_char_col} as binary(16) ) " for un_char_col in NUM_COL)
for num_col in NUM_COL:
query_condition.extend(
(
f"sin( {tbname}.{num_col} )",
)
)
query_condition.extend( f"{tbname}.{num_col} + {tbname}.{num_col_1} " for num_col_1 in NUM_COL )
query_condition.append(''' "test1234!@#$%^&*():'><?/.,][}{" ''')
return query_condition
def __join_condition(self, tb_list, filter=PRIMARY_COL, INNER=False):
table_reference = tb_list[0]
join_condition = table_reference
join = "inner join" if INNER else "join"
for i in range(len(tb_list[1:])):
join_condition += f" {join} {tb_list[i+1]} on {table_reference}.{filter}={tb_list[i+1]}.{filter}"
return join_condition
def __where_condition(self, col=None, tbname=None, query_conditon=None):
if query_conditon and isinstance(query_conditon, str):
if query_conditon.startswith("count"):
query_conditon = query_conditon[6:-1]
elif query_conditon.startswith("max"):
query_conditon = query_conditon[4:-1]
elif query_conditon.startswith("sum"):
query_conditon = query_conditon[4:-1]
elif query_conditon.startswith("min"):
query_conditon = query_conditon[4:-1]
if query_conditon:
return f" where {query_conditon} is not null"
if col in NUM_COL:
return f" where abs( {tbname}.{col} ) >= 0"
if col in CHAR_COL:
return f" where lower( {tbname}.{col} ) like 'bina%' or lower( {tbname}.{col} ) like '_cha%' "
if col in BOOLEAN_COL:
return f" where {tbname}.{col} in (false, true) "
if col in TS_TYPE_COL or col in PRIMARY_COL:
return f" where cast( {tbname}.{col} as binary(16) ) is not null "
return ""
def __group_condition(self, col, having = None):
if isinstance(col, str):
if col.startswith("count"):
col = col[6:-1]
elif col.startswith("max"):
col = col[4:-1]
elif col.startswith("sum"):
col = col[4:-1]
elif col.startswith("min"):
col = col[4:-1]
return f" group by {col} having {having}" if having else f" group by {col} "
def __gen_sql(self, select_clause, from_clause, where_condition="", group_condition=""):
if isinstance(select_clause, str) and "on" not in from_clause and select_clause.split(".")[0] != from_clause.split(".")[0]:
return
return f"select {select_clause} from {from_clause} {where_condition} {group_condition}"
@property
def __join_tblist(self):
return [
# ["ct1", "ct2"],
# ["ct1", "ct4"],
# ["ct1", "t1"],
["ct2", "ct4"],
# ["ct2", "t1"],
["ct4", "t1"],
# ["ct1", "ct2", "ct4"],
# ["ct1", "ct2", "t1"],
# ["ct1", "ct4", "t1"],
# ["ct2", "ct4", "t1"],
# ["ct1", "ct2", "ct4", "t1"],
]
@property
def __sqls_list(self):
sqls = []
__join_tblist = self.__join_tblist
for join_tblist in __join_tblist:
for join_tb in join_tblist:
select_claus_list = self.__query_condition(join_tb)
for select_claus in select_claus_list:
group_claus = self.__group_condition( col=select_claus)
where_claus = self.__where_condition( query_conditon=select_claus )
having_claus = self.__group_condition( col=select_claus, having=f"{select_claus} is not null" )
sqls.extend(
(
# self.__gen_sql(select_claus, self.__join_condition(join_tblist), where_claus, group_claus),
self.__gen_sql(select_claus, self.__join_condition(join_tblist), where_claus, having_claus),
self.__gen_sql(select_claus, self.__join_condition(join_tblist), where_claus),
# self.__gen_sql(select_claus, self.__join_condition(join_tblist), group_claus),
self.__gen_sql(select_claus, self.__join_condition(join_tblist), having_claus),
self.__gen_sql(select_claus, self.__join_condition(join_tblist)),
# self.__gen_sql(select_claus, self.__join_condition(join_tblist, INNER=True), where_claus, group_claus),
self.__gen_sql(select_claus, self.__join_condition(join_tblist, INNER=True), where_claus, having_claus),
self.__gen_sql(select_claus, self.__join_condition(join_tblist, INNER=True), where_claus, ),
self.__gen_sql(select_claus, self.__join_condition(join_tblist, INNER=True), having_claus ),
# self.__gen_sql(select_claus, self.__join_condition(join_tblist, INNER=True), group_claus ),
self.__gen_sql(select_claus, self.__join_condition(join_tblist, INNER=True) ),
)
)
return list(filter(None, sqls))
def __join_check(self,):
tdLog.printNoPrefix("==========current sql condition check , must return query ok==========")
for i in range(len(self.__sqls_list)):
tdSql.query(self.__sqls_list[i])
# if i % 10 == 0 :
# tdLog.success(f"{i} sql is already executed success !")
def __join_check_old(self, tblist, checkrows, join_flag=True):
query_conditions = self.__query_condition(tblist[0])
join_condition = self.__join_condition(tb_list=tblist) if join_flag else " "
for condition in query_conditions:
where_condition = self.__where_condition(col=condition, tbname=tblist[0])
group_having = self.__group_condition(col=condition, having=f"{condition} is not null " )
group_no_having= self.__group_condition(col=condition )
groups = ["", group_having, group_no_having]
for group_condition in groups:
if where_condition:
sql = f" select {condition} from {tblist[0]},{tblist[1]} where {join_condition} and {where_condition} {group_condition} "
else:
sql = f" select {condition} from {tblist[0]},{tblist[1]} where {join_condition} {group_condition} "
if not join_flag :
tdSql.error(sql=sql)
break
if len(tblist) == 2:
if "ct1" in tblist or "t1" in tblist:
self.__join_current(sql, checkrows)
elif where_condition or "not null" in group_condition:
self.__join_current(sql, checkrows + 2 )
elif group_condition:
self.__join_current(sql, checkrows + 3 )
else:
self.__join_current(sql, checkrows + 5 )
if len(tblist) > 2 or len(tblist) < 1:
tdSql.error(sql=sql)
def __join_current(self, sql, checkrows):
tdSql.query(sql=sql)
# tdSql.checkRows(checkrows)
def __test_error(self):
# sourcery skip: extract-duplicate-method, move-assign-in-block
tdLog.printNoPrefix("==========err sql condition check , must return error==========")
err_list_1 = ["ct1","ct2", "ct4"]
err_list_2 = ["ct1","ct2", "t1"]
err_list_3 = ["ct1","ct4", "t1"]
err_list_4 = ["ct2","ct4", "t1"]
err_list_5 = ["ct1", "ct2","ct4", "t1"]
self.__join_check_old(err_list_1, -1)
tdLog.printNoPrefix(f"==========err sql condition check in {err_list_1} over==========")
self.__join_check_old(err_list_2, -1)
tdLog.printNoPrefix(f"==========err sql condition check in {err_list_2} over==========")
self.__join_check_old(err_list_3, -1)
tdLog.printNoPrefix(f"==========err sql condition check in {err_list_3} over==========")
self.__join_check_old(err_list_4, -1)
tdLog.printNoPrefix(f"==========err sql condition check in {err_list_4} over==========")
self.__join_check_old(err_list_5, -1)
tdLog.printNoPrefix(f"==========err sql condition check in {err_list_5} over==========")
self.__join_check_old(["ct2", "ct4"], -1, join_flag=False)
tdLog.printNoPrefix("==========err sql condition check in has no join condition over==========")
tdSql.error( f"select c1, c2 from ct2, ct4 where ct2.{PRIMARY_COL}=ct4.{PRIMARY_COL}" )
tdSql.error( f"select ct2.c1, ct2.c2 from ct2, ct4 where ct2.{INT_COL}=ct4.{INT_COL}" )
tdSql.error( f"select ct2.c1, ct2.c2 from ct2, ct4 where ct2.{TS_COL}=ct4.{TS_COL}" )
tdSql.error( f"select ct2.c1, ct2.c2 from ct2, ct4 where ct2.{PRIMARY_COL}=ct4.{TS_COL}" )
tdSql.error( f"select ct2.c1, ct1.c2 from ct2, ct4 where ct2.{PRIMARY_COL}=ct4.{PRIMARY_COL}" )
tdSql.error( f"select ct2.c1, ct4.c2 from ct2, ct4 where ct2.{PRIMARY_COL}=ct4.{PRIMARY_COL} and c1 is not null " )
tdSql.error( f"select ct2.c1, ct4.c2 from ct2, ct4 where ct2.{PRIMARY_COL}=ct4.{PRIMARY_COL} and ct1.c1 is not null " )
tbname = ["ct1", "ct2", "ct4", "t1"]
# for tb in tbname:
# for errsql in self.__join_err_check(tb):
# tdSql.error(sql=errsql)
# tdLog.printNoPrefix(f"==========err sql condition check in {tb} over==========")
def all_test(self):
self.__join_check()
self.__test_error()
def __create_tb(self):
tdSql.prepare()
tdLog.printNoPrefix("==========step1:create table")
create_stb_sql = f'''create table stb1(
ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
) tags (t1 int)
'''
create_ntb_sql = f'''create table t1(
ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
)
'''
tdSql.execute(create_stb_sql)
tdSql.execute(create_ntb_sql)
for i in range(4):
tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
def __insert_data(self, rows):
now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000)
for i in range(rows):
tdSql.execute(
f"insert into ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
f"insert into ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
f"insert into ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
f'''insert into ct1 values
( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar_测试_0', { now_time + 8 } )
( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar_测试_9', { now_time + 9 } )
'''
)
tdSql.execute(
f'''insert into ct4 values
( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
(
{ now_time + 5184000000}, {pow(2,31)-pow(2,15)}, {pow(2,63)-pow(2,30)}, 32767, 127,
{ 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000}
)
(
{ now_time + 2592000000 }, {pow(2,31)-pow(2,16)}, {pow(2,63)-pow(2,31)}, 32766, 126,
{ 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000}
)
'''
)
tdSql.execute(
f'''insert into ct2 values
( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
(
{ now_time + 5184000000 }, { -1 * pow(2,31) + pow(2,15) }, { -1 * pow(2,63) + pow(2,30) }, -32766, -126,
{ -1 * 3.2 * pow(10,38) }, { -1.2 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 }
)
(
{ now_time + 2592000000 }, { -1 * pow(2,31) + pow(2,16) }, { -1 * pow(2,63) + pow(2,31) }, -32767, -127,
{ - 3.3 * pow(10,38) }, { -1.3 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 }
)
'''
)
for i in range(rows):
insert_data = f'''insert into t1 values
( { now_time - i * 3600000 }, {i}, {i * 11111}, { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2},
"binary_{i}", "nchar_测试_{i}", { now_time - 1000 * i } )
'''
tdSql.execute(insert_data)
tdSql.execute(
f'''insert into t1 values
( { now_time + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - (( rows // 2 ) * 60 + 30) * 60000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3600000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7200000 }, { pow(2,31) - pow(2,15) }, { pow(2,63) - pow(2,30) }, 32767, 127,
{ 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 },
"binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 }
)
(
{ now_time + 3600000 } , { pow(2,31) - pow(2,16) }, { pow(2,63) - pow(2,31) }, 32766, 126,
{ 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 },
"binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 }
)
'''
)
def run(self):
tdSql.prepare()
tdLog.printNoPrefix("==========step1:create table")
self.__create_tb()
tdLog.printNoPrefix("==========step2:insert data")
self.rows = 10
self.__insert_data(self.rows)
tdLog.printNoPrefix("==========step3:all check")
self.all_test()
tdDnodes.stop(1)
tdDnodes.start(1)
tdSql.execute("use db")
tdLog.printNoPrefix("==========step4:after wal, all check again ")
self.all_test()
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())

View File

@@ -0,0 +1,358 @@
import datetime
from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import *
PRIMARY_COL = "ts"
INT_COL = "c1"
BINT_COL = "c2"
SINT_COL = "c3"
TINT_COL = "c4"
FLOAT_COL = "c5"
DOUBLE_COL = "c6"
BOOL_COL = "c7"
BINARY_COL = "c8"
NCHAR_COL = "c9"
TS_COL = "c10"
NUM_COL = [ INT_COL, BINT_COL, SINT_COL, TINT_COL, FLOAT_COL, DOUBLE_COL, ]
CHAR_COL = [ BINARY_COL, NCHAR_COL, ]
BOOLEAN_COL = [ BOOL_COL, ]
TS_TYPE_COL = [ TS_COL, ]
ALL_COL = [ INT_COL, BINT_COL, SINT_COL, TINT_COL, FLOAT_COL, DOUBLE_COL, BOOL_COL, BINARY_COL, NCHAR_COL, TS_COL ]
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
def __query_condition(self,tbname):
query_condition = [f"cast({col} as bigint)" for col in ALL_COL]
for num_col in NUM_COL:
query_condition.extend(
(
f"{tbname}.{num_col}",
f"abs( {tbname}.{num_col} )",
f"acos( {tbname}.{num_col} )",
f"asin( {tbname}.{num_col} )",
f"atan( {tbname}.{num_col} )",
f"avg( {tbname}.{num_col} )",
f"ceil( {tbname}.{num_col} )",
f"cos( {tbname}.{num_col} )",
f"count( {tbname}.{num_col} )",
f"floor( {tbname}.{num_col} )",
f"log( {tbname}.{num_col}, {tbname}.{num_col})",
f"max( {tbname}.{num_col} )",
f"min( {tbname}.{num_col} )",
f"pow( {tbname}.{num_col}, 2)",
f"round( {tbname}.{num_col} )",
f"sum( {tbname}.{num_col} )",
f"sin( {tbname}.{num_col} )",
f"sqrt( {tbname}.{num_col} )",
f"tan( {tbname}.{num_col} )",
f"cast( {tbname}.{num_col} as timestamp)",
)
)
query_condition.extend( f"{num_col} + {any_col}" for any_col in ALL_COL )
for char_col in CHAR_COL:
query_condition.extend(
(
f"count({tbname}.{char_col})",
f"sum(cast({tbname}.{char_col}) as bigint)",
f"max(cast({tbname}.{char_col}) as bigint)",
f"min(cast({tbname}.{char_col}) as bigint)",
f"avg(cast({tbname}.{char_col}) as bigint)",
)
)
query_condition.extend(
(
1010,
)
)
return query_condition
def __join_condition(self, tb_list, filter=PRIMARY_COL, INNER=False):
table_reference = tb_list[0]
join_condition = table_reference
join = "inner join" if INNER else "join"
for i in range(len(tb_list[1:])):
join_condition += f" {join} {tb_list[i+1]} on {table_reference}.{filter}={tb_list[i+1]}.{filter}"
return join_condition
def __where_condition(self, col=None, tbname=None, query_conditon=None):
if query_conditon and isinstance(query_conditon, str):
if query_conditon.startswith("count"):
query_conditon = query_conditon[6:-1]
elif query_conditon.startswith("max"):
query_conditon = query_conditon[4:-1]
elif query_conditon.startswith("sum"):
query_conditon = query_conditon[4:-1]
elif query_conditon.startswith("min"):
query_conditon = query_conditon[4:-1]
if query_conditon:
return f" where {query_conditon} is not null"
if col in NUM_COL:
return f" where abs( {tbname}.{col} ) >= 0"
if col in CHAR_COL:
return f" where lower( {tbname}.{col} ) like 'bina%' or lower( {tbname}.{col} ) like '_cha%' "
if col in BOOLEAN_COL:
return f" where {tbname}.{col} in (false, true) "
if col in TS_TYPE_COL or col in PRIMARY_COL:
return f" where cast( {tbname}.{col} as binary(16) ) is not null "
return ""
def __group_condition(self, col, having = None):
if isinstance(col, str):
if col.startswith("count"):
col = col[6:-1]
elif col.startswith("max"):
col = col[4:-1]
elif col.startswith("sum"):
col = col[4:-1]
elif col.startswith("min"):
col = col[4:-1]
return f" group by {col} having {having}" if having else f" group by {col} "
def __single_sql(self, select_clause, from_clause, where_condition="", group_condition=""):
if isinstance(select_clause, str) and "on" not in from_clause and select_clause.split(".")[0] != from_clause.split(".")[0]:
return
return f"select spread({select_clause}) from {from_clause} {where_condition} {group_condition}"
@property
def __tb_list(self):
return [
"ct1",
"ct4",
"t1",
"ct2",
"stb1",
]
def sql_list(self):
sqls = []
__no_join_tblist = self.__tb_list
for tb in __no_join_tblist:
select_claus_list = self.__query_condition(tb)
for select_claus in select_claus_list:
group_claus = self.__group_condition(col=select_claus)
where_claus = self.__where_condition(query_conditon=select_claus)
having_claus = self.__group_condition(col=select_claus, having=f"{select_claus} is not null")
sqls.extend(
(
self.__single_sql(select_claus, tb, where_claus, having_claus),
self.__single_sql(select_claus, tb,),
self.__single_sql(select_claus, tb, where_condition=where_claus),
self.__single_sql(select_claus, tb, group_condition=group_claus),
)
)
# return filter(None, sqls)
return list(filter(None, sqls))
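# filter(None, sqls) drops the None placeholders returned by __single_sql
# for rejected cross-table clauses, e.g.
# list(filter(None, ["select 1", None, "select 2"])) == ["select 1", "select 2"]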
def __get_type(self, col):
if tdSql.cursor.istype(col, "BOOL"):
return "BOOL"
if tdSql.cursor.istype(col, "INT"):
return "INT"
if tdSql.cursor.istype(col, "BIGINT"):
return "BIGINT"
if tdSql.cursor.istype(col, "TINYINT"):
return "TINYINT"
if tdSql.cursor.istype(col, "SMALLINT"):
return "SMALLINT"
if tdSql.cursor.istype(col, "FLOAT"):
return "FLOAT"
if tdSql.cursor.istype(col, "DOUBLE"):
return "DOUBLE"
if tdSql.cursor.istype(col, "BINARY"):
return "BINARY"
if tdSql.cursor.istype(col, "NCHAR"):
return "NCHAR"
if tdSql.cursor.istype(col, "TIMESTAMP"):
return "TIMESTAMP"
if tdSql.cursor.istype(col, "JSON"):
return "JSON"
if tdSql.cursor.istype(col, "TINYINT UNSIGNED"):
return "TINYINT UNSIGNED"
if tdSql.cursor.istype(col, "SMALLINT UNSIGNED"):
return "SMALLINT UNSIGNED"
if tdSql.cursor.istype(col, "INT UNSIGNED"):
return "INT UNSIGNED"
if tdSql.cursor.istype(col, "BIGINT UNSIGNED"):
return "BIGINT UNSIGNED"
def spread_check(self):
sqls = self.sql_list()
tdLog.printNoPrefix("===step 1: curent case, must return query OK")
for i in range(len(sqls)):
tdLog.info(f"sql: {sqls[i]}")
tdSql.query(sqls[i])
def __test_current(self):
tdSql.query("select spread(ts) from ct1")
tdSql.checkRows(1)
tdSql.query("select spread(c1) from ct2")
tdSql.checkRows(1)
tdSql.query("select spread(c1) from ct4 group by c1")
tdSql.checkRows(self.rows + 3)
tdSql.query("select spread(c1) from ct4 group by c7")
tdSql.checkRows(3)
tdSql.query("select spread(ct2.c1) from ct4 join ct2 on ct4.ts=ct2.ts")
tdSql.checkRows(1)
self.spread_check()
def __test_error(self):
tdLog.printNoPrefix("===step 0: err case, must return err")
tdSql.error( "select spread() from ct1" )
tdSql.error( "select spread(1, 2) from ct2" )
tdSql.error( f"select spread({NUM_COL[0]}, {NUM_COL[1]}) from ct4" )
tdSql.error( f"select spread({BOOLEAN_COL[0]}) from t1" )
tdSql.error( f"select spread({CHAR_COL[0]}) from stb1" )
# tdSql.error( ''' select spread(['c1 + c1', 'c1 + c2', 'c1 + c3', 'c1 + c4', 'c1 + c5', 'c1 + c6', 'c1 + c7', 'c1 + c8', 'c1 + c9', 'c1 + c10'])
# from ct1
# where ['c1 + c1', 'c1 + c2', 'c1 + c3', 'c1 + c4', 'c1 + c5', 'c1 + c6', 'c1 + c7', 'c1 + c8', 'c1 + c9', 'c1 + c10'] is not null
# group by ['c1 + c1', 'c1 + c2', 'c1 + c3', 'c1 + c4', 'c1 + c5', 'c1 + c6', 'c1 + c7', 'c1 + c8', 'c1 + c9', 'c1 + c10']
# having ['c1 + c1', 'c1 + c2', 'c1 + c3', 'c1 + c4', 'c1 + c5', 'c1 + c6', 'c1 + c7', 'c1 + c8', 'c1 + c9', 'c1 + c10'] is not null ''' )
# tdSql.error( "select c1 from ct1 union select c1 from ct2 union select c1 from ct4 ")
def all_test(self):
self.__test_error()
self.__test_current()
def __create_tb(self):
tdLog.printNoPrefix("==========step1:create table")
create_stb_sql = f'''create table stb1(
ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
) tags (t1 int)
'''
create_ntb_sql = f'''create table t1(
ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
)
'''
tdSql.execute(create_stb_sql)
tdSql.execute(create_ntb_sql)
for i in range(4):
tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
def __insert_data(self, rows):
now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000)
for i in range(rows):
tdSql.execute(
f"insert into ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
f"insert into ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
f"insert into ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
f'''insert into ct1 values
( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar_测试_0', { now_time + 8 } )
( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar_测试_9', { now_time + 9 } )
'''
)
tdSql.execute(
f'''insert into ct4 values
( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
(
{ now_time + 5184000000}, {pow(2,31)-pow(2,15)}, {pow(2,63)-pow(2,30)}, 32767, 127,
{ 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000}
)
(
{ now_time + 2592000000 }, {pow(2,31)-pow(2,16)}, {pow(2,63)-pow(2,31)}, 32766, 126,
{ 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000}
)
'''
)
tdSql.execute(
f'''insert into ct2 values
( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
(
{ now_time + 5184000000 }, { -1 * pow(2,31) + pow(2,15) }, { -1 * pow(2,63) + pow(2,30) }, -32766, -126,
{ -1 * 3.2 * pow(10,38) }, { -1.2 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 }
)
(
{ now_time + 2592000000 }, { -1 * pow(2,31) + pow(2,16) }, { -1 * pow(2,63) + pow(2,31) }, -32767, -127,
{ - 3.3 * pow(10,38) }, { -1.3 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 }
)
'''
)
for i in range(rows):
insert_data = f'''insert into t1 values
( { now_time - i * 3600000 }, {i}, {i * 11111}, { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2},
"binary_{i}", "nchar_测试_{i}", { now_time - 1000 * i } )
'''
tdSql.execute(insert_data)
tdSql.execute(
f'''insert into t1 values
( { now_time + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - (( rows // 2 ) * 60 + 30) * 60000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3600000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7200000 }, { pow(2,31) - pow(2,15) }, { pow(2,63) - pow(2,30) }, 32767, 127,
{ 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 },
"binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 }
)
(
{ now_time + 3600000 } , { pow(2,31) - pow(2,16) }, { pow(2,63) - pow(2,31) }, 32766, 126,
{ 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 },
"binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 }
)
'''
)
def run(self):
tdSql.prepare()
tdLog.printNoPrefix("==========step1:create table")
self.__create_tb()
tdLog.printNoPrefix("==========step2:insert data")
self.rows = 10
self.__insert_data(self.rows)
tdLog.printNoPrefix("==========step3:all check")
self.all_test()
tdDnodes.stop(1)
tdDnodes.start(1)
tdSql.execute("use db")
tdLog.printNoPrefix("==========step4:after wal, all check again ")
self.all_test()
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())

View File

@@ -218,13 +218,13 @@ class TDTestCase:
tdLog.printNoPrefix("==========step3:all check")
self.all_test()
# tdDnodes.stop(1)
# tdDnodes.start(1)
tdDnodes.stop(1)
tdDnodes.start(1)
# tdSql.execute("use db")
tdSql.execute("use db")
# tdLog.printNoPrefix("==========step4:after wal, all check again ")
# self.all_test()
tdLog.printNoPrefix("==========step4:after wal, all check again ")
self.all_test()
def stop(self):
tdSql.close()

View File

@@ -35,8 +35,6 @@ class TDTestCase:
for char_col in CHAR_COL:
query_condition.extend(
(
f"rtrim( {tbname}.{char_col} )",
f"substr( {tbname}.{char_col}, 1 )",
f"count( {tbname}.{char_col} )",
f"cast( {tbname}.{char_col} as nchar(3) )",
)
@@ -45,11 +43,7 @@ class TDTestCase:
for num_col in NUM_COL:
query_condition.extend(
(
f"{tbname}.{num_col}",
f"floor( {tbname}.{num_col} )",
f"log( {tbname}.{num_col}, {tbname}.{num_col})",
f"sin( {tbname}.{num_col} )",
f"sqrt( {tbname}.{num_col} )",
)
)
@@ -96,7 +90,6 @@ class TDTestCase:
return ""
def __group_condition(self, col, having = None):
if isinstance(col, str):
if col.startswith("count"):
@@ -114,15 +107,10 @@ class TDTestCase:
return
return f"select {select_clause} from {from_clause} {where_condition} {group_condition}"
@property
def __join_tblist(self):
return [
["ct1", "ct2"],
["ct1", "ct4"],
["ct1", "t1"],
["ct2", "ct4"],
["ct2", "t1"],
["ct4", "t1"],
# ["ct1", "ct2", "ct4"],
# ["ct1", "ct2", "t1"],
@@ -135,9 +123,7 @@ class TDTestCase:
def __tb_list(self):
return [
"ct1",
"ct2",
"ct4",
"t1",
]
def sql_list(self):
@@ -152,15 +138,7 @@ class TDTestCase:
having_claus = self.__group_condition( col=select_claus, having=f"{select_claus} is not null")
sqls.extend(
(
self.__single_sql(select_claus, join_tb, where_claus, group_claus),
self.__single_sql(select_claus, join_tb, where_claus, having_claus),
self.__single_sql(select_claus, self.__join_condition(join_tblist), where_claus, having_claus),
self.__single_sql(select_claus, self.__join_condition(join_tblist, INNER=True), where_claus, having_claus),
self.__single_sql(select_claus, join_tb, where_claus),
self.__single_sql(select_claus, join_tb, having_claus),
self.__single_sql(select_claus, join_tb, group_claus),
self.__single_sql(select_claus, join_tb),
)
)
__no_join_tblist = self.__tb_list
@@ -172,12 +150,7 @@ class TDTestCase:
having_claus = self.__group_condition(col=select_claus, having=f"{select_claus} is not null")
sqls.extend(
(
self.__single_sql(select_claus, join_tb, where_claus, group_claus),
self.__single_sql(select_claus, join_tb, where_claus, having_claus),
self.__single_sql(select_claus, join_tb, where_claus),
self.__single_sql(select_claus, join_tb, group_claus),
self.__single_sql(select_claus, join_tb, having_claus),
self.__single_sql(select_claus, join_tb),
self.__single_sql(select_claus, tb, where_claus, having_claus),
)
)
@@ -221,6 +194,8 @@ class TDTestCase:
for i in range(len(sqls)):
tdSql.query(sqls[i])
res1_type = self.__get_type(0)
# if i % 5 == 0:
# tdLog.success(f"{i} : sql is already executing!")
for j in range(len(sqls[i:])):
tdSql.query(sqls[j+i])
order_union_type = False
@@ -246,22 +221,12 @@ class TDTestCase:
rev_order_type = True
if all_union_type:
tdSql.query(f"{sqls[i]} union {sqls[j+i]}")
tdSql.query(f"{sqls[j+i]} union {sqls[i]}")
tdSql.checkCols(1)
tdSql.query(f"{sqls[i]} union all {sqls[j+i]}")
tdSql.query(f"{sqls[j+i]} union all {sqls[i]}")
tdSql.checkCols(1)
tdSql.execute(f"{sqls[i]} union {sqls[j+i]}")
tdSql.execute(f"{sqls[j+i]} union all {sqls[i]}")
elif order_union_type:
tdSql.query(f"{sqls[i]} union {sqls[j+i]}")
tdSql.checkCols(1)
tdSql.query(f"{sqls[i]} union all {sqls[j+i]}")
tdSql.checkCols(1)
tdSql.execute(f"{sqls[i]} union all {sqls[j+i]}")
elif rev_order_type:
tdSql.query(f"{sqls[j+i]} union {sqls[i]}")
tdSql.checkCols(1)
tdSql.query(f"{sqls[j+i]} union all {sqls[i]}")
tdSql.checkCols(1)
tdSql.execute(f"{sqls[j+i]} union {sqls[i]}")
else:
tdSql.error(f"{sqls[i]} union {sqls[j+i]}")
@@ -273,7 +238,7 @@ class TDTestCase:
tdSql.error( "select c1 from ct1 union all drop table ct3" )
tdSql.error( "select c1 from ct1 union all '' " )
tdSql.error( " '' union all select c1 from ct1 " )
tdSql.error( "select c1 from ct1 union select c1 from ct2 union select c1 from ct4 ")
# tdSql.error( "select c1 from ct1 union select c1 from ct2 union select c1 from ct4 ")
def all_test(self):
self.__test_error()

View File

@@ -0,0 +1,370 @@
import datetime
from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import *
PRIMARY_COL = "ts"
INT_COL = "c1"
BINT_COL = "c2"
SINT_COL = "c3"
TINT_COL = "c4"
FLOAT_COL = "c5"
DOUBLE_COL = "c6"
BOOL_COL = "c7"
BINARY_COL = "c8"
NCHAR_COL = "c9"
TS_COL = "c10"
NUM_COL = [ INT_COL, BINT_COL, SINT_COL, TINT_COL, FLOAT_COL, DOUBLE_COL, ]
CHAR_COL = [ BINARY_COL, NCHAR_COL, ]
BOOLEAN_COL = [ BOOL_COL, ]
TS_TYPE_COL = [ TS_COL, ]
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
def __query_condition(self,tbname):
query_condition = []
for char_col in CHAR_COL:
query_condition.extend(
(
f"count( {tbname}.{char_col} )",
f"cast( {tbname}.{char_col} as nchar(3) )",
)
)
for num_col in NUM_COL:
query_condition.extend(
(
f"log( {tbname}.{num_col}, {tbname}.{num_col})",
)
)
query_condition.extend(
(
''' "test12" ''',
# 1010,
)
)
return query_condition
def __join_condition(self, tb_list, filter=PRIMARY_COL, INNER=False):
table_reference = tb_list[0]
join_condition = table_reference
join = "inner join" if INNER else "join"
for i in range(len(tb_list[1:])):
join_condition += f" {join} {tb_list[i+1]} on {table_reference}.{filter}={tb_list[i+1]}.{filter}"
return join_condition
def __where_condition(self, col=None, tbname=None, query_conditon=None):
if query_conditon and isinstance(query_conditon, str):
if query_conditon.startswith("count"):
query_conditon = query_conditon[6:-1]
elif query_conditon.startswith("max"):
query_conditon = query_conditon[4:-1]
elif query_conditon.startswith("sum"):
query_conditon = query_conditon[4:-1]
elif query_conditon.startswith("min"):
query_conditon = query_conditon[4:-1]
if query_conditon:
return f" where {query_conditon} is not null"
if col in NUM_COL:
return f" where abs( {tbname}.{col} ) >= 0"
if col in CHAR_COL:
return f" where lower( {tbname}.{col} ) like 'bina%' or lower( {tbname}.{col} ) like '_cha%' "
if col in BOOLEAN_COL:
return f" where {tbname}.{col} in (false, true) "
if col in TS_TYPE_COL or col in PRIMARY_COL:
return f" where cast( {tbname}.{col} as binary(16) ) is not null "
return ""
def __group_condition(self, col, having = None):
if isinstance(col, str):
if col.startswith("count"):
col = col[6:-1]
elif col.startswith("max"):
col = col[4:-1]
elif col.startswith("sum"):
col = col[4:-1]
elif col.startswith("min"):
col = col[4:-1]
return f" group by {col} having {having}" if having else f" group by {col} "
def __single_sql(self, select_clause, from_clause, where_condition="", group_condition=""):
if isinstance(select_clause, str) and "on" not in from_clause and select_clause.split(".")[0] != from_clause.split(".")[0]:
return
return f"select {select_clause} from {from_clause} {where_condition} {group_condition}"
@property
def __join_tblist(self):
return [
["ct1", "ct2"],
# ["ct1", "ct2", "ct4"],
# ["ct1", "ct2", "t1"],
# ["ct1", "ct4", "t1"],
# ["ct2", "ct4", "t1"],
# ["ct1", "ct2", "ct4", "t1"],
]
@property
def __tb_list(self):
return [
"t1",
"stb1",
]
def sql_list(self):
sqls = []
__join_tblist = self.__join_tblist
for join_tblist in __join_tblist:
for join_tb in join_tblist:
select_claus_list = self.__query_condition(join_tb)
for select_claus in select_claus_list:
group_claus = self.__group_condition( col=select_claus)
where_claus = self.__where_condition(query_conditon=select_claus)
having_claus = self.__group_condition( col=select_claus, having=f"{select_claus} is not null")
sqls.extend(
(
self.__single_sql(select_claus, self.__join_condition(join_tblist, INNER=True), where_claus, having_claus),
)
)
__no_join_tblist = self.__tb_list
for tb in __no_join_tblist:
select_claus_list = self.__query_condition(tb)
for select_claus in select_claus_list:
group_claus = self.__group_condition(col=select_claus)
where_claus = self.__where_condition(query_conditon=select_claus)
having_claus = self.__group_condition(col=select_claus, having=f"{select_claus} is not null")
sqls.extend(
(
self.__single_sql(select_claus, tb, where_claus, having_claus),
)
)
# return filter(None, sqls)
return list(filter(None, sqls))
def __get_type(self, col):
if tdSql.cursor.istype(col, "BOOL"):
return "BOOL"
if tdSql.cursor.istype(col, "INT"):
return "INT"
if tdSql.cursor.istype(col, "BIGINT"):
return "BIGINT"
if tdSql.cursor.istype(col, "TINYINT"):
return "TINYINT"
if tdSql.cursor.istype(col, "SMALLINT"):
return "SMALLINT"
if tdSql.cursor.istype(col, "FLOAT"):
return "FLOAT"
if tdSql.cursor.istype(col, "DOUBLE"):
return "DOUBLE"
if tdSql.cursor.istype(col, "BINARY"):
return "BINARY"
if tdSql.cursor.istype(col, "NCHAR"):
return "NCHAR"
if tdSql.cursor.istype(col, "TIMESTAMP"):
return "TIMESTAMP"
if tdSql.cursor.istype(col, "JSON"):
return "JSON"
if tdSql.cursor.istype(col, "TINYINT UNSIGNED"):
return "TINYINT UNSIGNED"
if tdSql.cursor.istype(col, "SMALLINT UNSIGNED"):
return "SMALLINT UNSIGNED"
if tdSql.cursor.istype(col, "INT UNSIGNED"):
return "INT UNSIGNED"
if tdSql.cursor.istype(col, "BIGINT UNSIGNED"):
return "BIGINT UNSIGNED"
def union_check(self):
sqls = self.sql_list()
for i in range(len(sqls)):
tdSql.query(sqls[i])
res1_type = self.__get_type(0)
# if i % 5 == 0:
# tdLog.success(f"{i} : sql is already executing!")
for j in range(len(sqls[i:])):
tdSql.query(sqls[j+i])
order_union_type = False
rev_order_type = False
all_union_type = False
res2_type = self.__get_type(0)
if res2_type == res1_type:
all_union_type = True
elif res1_type in ( "BIGINT" , "NCHAR" ) and res2_type in ("BIGINT" , "NCHAR"):
all_union_type = True
elif res1_type in ("BIGINT", "NCHAR"):
order_union_type = True
elif res2_type in ("BIGINT", "NCHAR"):
rev_order_type = True
elif res1_type == "TIMESAMP" and res2_type not in ("BINARY", "NCHAR"):
order_union_type = True
elif res2_type == "TIMESAMP" and res1_type not in ("BINARY", "NCHAR"):
rev_order_type = True
elif res1_type == "BINARY" and res2_type != "NCHAR":
order_union_type = True
elif res2_type == "BINARY" and res1_type != "NCHAR":
rev_order_type = True
if all_union_type:
tdSql.execute(f"{sqls[i]} union {sqls[j+i]}")
tdSql.execute(f"{sqls[j+i]} union all {sqls[i]}")
elif order_union_type:
tdSql.execute(f"{sqls[i]} union all {sqls[j+i]}")
elif rev_order_type:
tdSql.execute(f"{sqls[j+i]} union {sqls[i]}")
else:
tdSql.error(f"{sqls[i]} union {sqls[j+i]}")
def __test_error(self):
tdSql.error( "show tables union show tables" )
tdSql.error( "create table errtb1 union all create table errtb2" )
tdSql.error( "drop table ct1 union all drop table ct3" )
tdSql.error( "select c1 from ct1 union all drop table ct3" )
tdSql.error( "select c1 from ct1 union all '' " )
tdSql.error( " '' union all select c1 from ct1 " )
# tdSql.error( "select c1 from ct1 union select c1 from ct2 union select c1 from ct4 ")
def all_test(self):
self.__test_error()
self.union_check()
def __create_tb(self):
tdLog.printNoPrefix("==========step1:create table")
create_stb_sql = f'''create table stb1(
ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
) tags (t1 int)
'''
create_ntb_sql = f'''create table t1(
ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
)
'''
tdSql.execute(create_stb_sql)
tdSql.execute(create_ntb_sql)
for i in range(4):
tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
def __insert_data(self, rows):
now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000)
for i in range(rows):
tdSql.execute(
f"insert into ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
f"insert into ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
f"insert into ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
f'''insert into ct1 values
( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar_测试_0', { now_time + 8 } )
( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar_测试_9', { now_time + 9 } )
'''
)
tdSql.execute(
f'''insert into ct4 values
( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
(
{ now_time + 5184000000}, {pow(2,31)-pow(2,15)}, {pow(2,63)-pow(2,30)}, 32767, 127,
{ 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000}
)
(
{ now_time + 2592000000 }, {pow(2,31)-pow(2,16)}, {pow(2,63)-pow(2,31)}, 32766, 126,
{ 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000}
)
'''
)
tdSql.execute(
f'''insert into ct2 values
( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
(
{ now_time + 5184000000 }, { -1 * pow(2,31) + pow(2,15) }, { -1 * pow(2,63) + pow(2,30) }, -32766, -126,
{ -1 * 3.2 * pow(10,38) }, { -1.2 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 }
)
(
{ now_time + 2592000000 }, { -1 * pow(2,31) + pow(2,16) }, { -1 * pow(2,63) + pow(2,31) }, -32767, -127,
{ - 3.3 * pow(10,38) }, { -1.3 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 }
)
'''
)
for i in range(rows):
insert_data = f'''insert into t1 values
( { now_time - i * 3600000 }, {i}, {i * 11111}, { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2},
"binary_{i}", "nchar_测试_{i}", { now_time - 1000 * i } )
'''
tdSql.execute(insert_data)
tdSql.execute(
f'''insert into t1 values
( { now_time + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - (( rows // 2 ) * 60 + 30) * 60000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3600000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7200000 }, { pow(2,31) - pow(2,15) }, { pow(2,63) - pow(2,30) }, 32767, 127,
{ 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 },
"binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 }
)
(
{ now_time + 3600000 } , { pow(2,31) - pow(2,16) }, { pow(2,63) - pow(2,31) }, 32766, 126,
{ 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 },
"binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 }
)
'''
)
    def run(self):
        tdSql.prepare()
        tdLog.printNoPrefix("==========step1:create table")
        self.__create_tb()
        tdLog.printNoPrefix("==========step2:insert data")
        self.rows = 10
        self.__insert_data(self.rows)
        tdLog.printNoPrefix("==========step3:all check")
        self.all_test()
        tdDnodes.stop(1)
        tdDnodes.start(1)
        tdSql.execute("use db")
        tdLog.printNoPrefix("==========step4:after wal, all check again ")
        self.all_test()

    def stop(self):
        tdSql.close()
        tdLog.success(f"{__file__} successfully executed")

tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
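For reference, the "limit" rows seeded above park each numeric column just inside its type's range. A minimal sketch of the arithmetic, assuming the usual signed 32-/64-bit and IEEE-754 bounds (the constant names below are illustrative, not TDengine identifiers):

    # Illustrative bounds, assumed to match TDengine's INT/BIGINT column types.
    INT_MAX = 2**31 - 1
    BIGINT_MAX = 2**63 - 1

    # The inserted values back off each maximum by a power of two, so they fit.
    for v in (2**31 - 2**15, 2**31 - 2**16):
        assert -INT_MAX <= v <= INT_MAX
    for v in (2**63 - 2**30, 2**63 - 2**31):
        assert -BIGINT_MAX <= v <= BIGINT_MAX

    # The FLOAT/DOUBLE limit rows use 3.3e38 and 1.3e308, just under the
    # IEEE-754 single/double maxima (~3.40e38 and ~1.80e308).
    assert 3.3e38 < 3.402823466e38
    assert 1.3e308 < 1.7976931348623157e308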

View File

@ -446,7 +446,7 @@ class TDTestCase:
        event.wait()
        tdLog.info("start consume processor")
        pollDelay = 5
        pollDelay = 10
        showMsg = 1
        showRow = 1
        self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow)
@ -622,7 +622,7 @@ class TDTestCase:
        for i in range(expectRows):
            totalConsumeRows += resultList[i]
        if totalConsumeRows != expectrowcnt:
        if totalConsumeRows < expectrowcnt:
            tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt))
            tdLog.exit("tmq consume rows error!")

View File

@ -291,10 +291,9 @@ class TDTestCase:
        for i in range(expectRows):
            totalConsumeRows += resultList[i]
        tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt))
        if totalConsumeRows != expectrowcnt:
            tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt))
            tdLog.exit("tmq consume rows error!")
        tdLog.info("again start consume processor")
        self.initConsumerTable()
@ -303,12 +302,13 @@ class TDTestCase:
        self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow)
        expectRows = 1
        resultList = self.selectConsumeResult(expectRows)
        totalConsumeRows = 0
        totalConsumeRows2 = 0
        for i in range(expectRows):
            totalConsumeRows += resultList[i]
        if totalConsumeRows != expectrowcnt/2:
            tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/2))
            totalConsumeRows2 += resultList[i]
        tdLog.info("firstly act consume rows: %d"%(totalConsumeRows))
        tdLog.info("secondly act consume rows: %d, expect consume rows: %d"%(totalConsumeRows2, expectrowcnt))
        if totalConsumeRows + totalConsumeRows2 != expectrowcnt:
            tdLog.exit("tmq consume rows error!")
        tdSql.query("drop topic %s"%topicName1)

View File

@ -407,9 +407,9 @@ class TDTestCase:
            totalConsumeRows += resultList[i]
        remaindrowcnt = parameterDict["rowsPerTbl"] * (parameterDict["ctbNum"] - dropTblNum)
        tdLog.info("act consume rows: %d, expect consume rows: between %d and %d"%(totalConsumeRows, remaindrowcnt, expectrowcnt))
        if not (totalConsumeRows <= expectrowcnt and totalConsumeRows >= remaindrowcnt):
            tdLog.info("act consume rows: %d, expect consume rows: between %d and %d"%(totalConsumeRows, remaindrowcnt, expectrowcnt))
            tdLog.exit("tmq consume rows error!")
        tdSql.query("drop topic %s"%topicFromStb1)

View File

@ -14,7 +14,7 @@ python3 ./test.py -f 0-others/udf_restart_taosd.py
python3 ./test.py -f 0-others/user_control.py
python3 ./test.py -f 0-others/fsync.py
#python3 ./test.py -f 2-query/between.py
python3 ./test.py -f 2-query/between.py
python3 ./test.py -f 2-query/distinct.py
python3 ./test.py -f 2-query/varchar.py
python3 ./test.py -f 2-query/ltrim.py
@ -23,15 +23,19 @@ python3 ./test.py -f 2-query/length.py
python3 ./test.py -f 2-query/char_length.py
python3 ./test.py -f 2-query/upper.py
python3 ./test.py -f 2-query/lower.py
#python3 ./test.py -f 2-query/join.py
python3 ./test.py -f 2-query/join.py
python3 ./test.py -f 2-query/join2.py
python3 ./test.py -f 2-query/cast.py
#python3 ./test.py -f 2-query/concat.py
#python3 ./test.py -f 2-query/concat_ws.py
python3 ./test.py -f 2-query/union.py
python3 ./test.py -f 2-query/union1.py
python3 ./test.py -f 2-query/concat.py
python3 ./test.py -f 2-query/concat2.py
python3 ./test.py -f 2-query/concat_ws.py
python3 ./test.py -f 2-query/concat_ws2.py
python3 ./test.py -f 2-query/check_tsdb.py
# python3 ./test.py -f 2-query/union.py
# python3 ./test.py -f 2-query/union2.py
# python3 ./test.py -f 2-query/union3.py
# python3 ./test.py -f 2-query/union4.py
python3 ./test.py -f 2-query/spread.py
python3 ./test.py -f 2-query/hyperloglog.py
python3 ./test.py -f 2-query/timezone.py
python3 ./test.py -f 2-query/Now.py
@ -47,7 +51,6 @@ python3 ./test.py -f 2-query/timetruncate.py
python3 ./test.py -f 2-query/diff.py
python3 ./test.py -f 2-query/Timediff.py
#python3 ./test.py -f 2-query/cast.py
python3 ./test.py -f 2-query/top.py
python3 ./test.py -f 2-query/bottom.py
@ -66,7 +69,7 @@ python3 ./test.py -f 2-query/arcsin.py
python3 ./test.py -f 2-query/arccos.py
python3 ./test.py -f 2-query/arctan.py
python3 ./test.py -f 2-query/query_cols_tags_and_or.py
#python3 ./test.py -f 2-query/nestedQuery.py
# python3 ./test.py -f 2-query/nestedQuery.py
python3 ./test.py -f 7-tmq/basic5.py
python3 ./test.py -f 7-tmq/subscribeDb.py
@ -75,4 +78,3 @@ python3 ./test.py -f 7-tmq/subscribeStb.py
python3 ./test.py -f 7-tmq/subscribeStb0.py
python3 ./test.py -f 7-tmq/subscribeStb1.py
python3 ./test.py -f 7-tmq/subscribeStb2.py