Merge branch 'main' into merge/mainto3.0

commit 3a739beaa5
@@ -38,7 +38,6 @@ typedef enum {
  STREAM_QUEUE,
  ARB_QUEUE,
  STREAM_CTRL_QUEUE,
  STREAM_LONG_EXEC_QUEUE,
  QUEUE_MAX,
} EQueueType;

@@ -183,7 +183,7 @@ void qCleanExecTaskBlockBuf(qTaskInfo_t tinfo);
 */
int32_t qAsyncKillTask(qTaskInfo_t tinfo, int32_t rspCode);

int32_t qKillTask(qTaskInfo_t tinfo, int32_t rspCode, int64_t waitDuration);
int32_t qKillTask(qTaskInfo_t tinfo, int32_t rspCode);

bool qTaskIsExecuting(qTaskInfo_t qinfo);

@@ -58,7 +58,6 @@ extern "C" {
#define STREAM_EXEC_T_STOP_ALL_TASKS (-5)
#define STREAM_EXEC_T_RESUME_TASK (-6)
#define STREAM_EXEC_T_ADD_FAILED_TASK (-7)
#define STREAM_EXEC_T_STOP_ONE_TASK (-8)

typedef struct SStreamTask SStreamTask;
typedef struct SStreamQueue SStreamQueue;

@@ -769,19 +768,15 @@ void streamMetaCleanup();
int32_t streamMetaOpen(const char* path, void* ahandle, FTaskBuild expandFunc, FTaskExpand expandTaskFn, int32_t vgId,
                       int64_t stage, startComplete_fn_t fn, SStreamMeta** pMeta);
void streamMetaClose(SStreamMeta* streamMeta);

int32_t streamMetaSaveTaskInMeta(SStreamMeta* pMeta, SStreamTask* pTask); // save to stream meta store
int32_t streamMetaRemoveTaskInMeta(SStreamMeta* pMeta, STaskId* pKey);

int32_t streamMetaSaveTask(SStreamMeta* pMeta, SStreamTask* pTask); // save to stream meta store
int32_t streamMetaRemoveTask(SStreamMeta* pMeta, STaskId* pKey);
int32_t streamMetaRegisterTask(SStreamMeta* pMeta, int64_t ver, SStreamTask* pTask, bool* pAdded);
int32_t streamMetaUnregisterTask(SStreamMeta* pMeta, int64_t streamId, int32_t taskId);
int32_t streamMetaGetNumOfTasks(SStreamMeta* pMeta);

int32_t streamMetaAcquireTaskNoLock(SStreamMeta* pMeta, int64_t streamId, int32_t taskId, SStreamTask** pTask);
int32_t streamMetaAcquireTaskUnsafe(SStreamMeta* pMeta, STaskId* pId, SStreamTask** pTask);
int32_t streamMetaAcquireTask(SStreamMeta* pMeta, int64_t streamId, int32_t taskId, SStreamTask** pTask);
void streamMetaReleaseTask(SStreamMeta* pMeta, SStreamTask* pTask);

void streamMetaClear(SStreamMeta* pMeta);
void streamMetaInitBackend(SStreamMeta* pMeta);
int32_t streamMetaCommit(SStreamMeta* pMeta);

@@ -815,7 +810,6 @@ void streamMetaLoadAllTasks(SStreamMeta* pMeta);
int32_t streamMetaStartAllTasks(SStreamMeta* pMeta);
int32_t streamMetaStopAllTasks(SStreamMeta* pMeta);
int32_t streamMetaStartOneTask(SStreamMeta* pMeta, int64_t streamId, int32_t taskId);
int32_t streamMetaStopOneTask(SStreamMeta* pMeta, int64_t streamId, int32_t taskId);
bool streamMetaAllTasksReady(const SStreamMeta* pMeta);
int32_t streamTaskSendNegotiateChkptIdMsg(SStreamTask* pTask);
int32_t streamTaskCheckIfReqConsenChkptId(SStreamTask* pTask, int64_t ts);

@@ -76,7 +76,7 @@ void tQWorkerFreeQueue(SQWorkerPool *pool, STaosQueue *queue);

int32_t tAutoQWorkerInit(SAutoQWorkerPool *pool);
void tAutoQWorkerCleanup(SAutoQWorkerPool *pool);
STaosQueue *tAutoQWorkerAllocQueue(SAutoQWorkerPool *pool, void *ahandle, FItem fp, int32_t minNum);
STaosQueue *tAutoQWorkerAllocQueue(SAutoQWorkerPool *pool, void *ahandle, FItem fp);
void tAutoQWorkerFreeQueue(SAutoQWorkerPool *pool, STaosQueue *queue);

int32_t tWWorkerInit(SWWorkerPool *pool);

@@ -32,7 +32,6 @@ typedef struct SVnodeMgmt {
  const char *name;
  SQueryAutoQWorkerPool queryPool;
  SAutoQWorkerPool streamPool;
  SAutoQWorkerPool streamLongExecPool;
  SWWorkerPool streamCtrlPool;
  SWWorkerPool fetchPool;
  SSingleWorker mgmtWorker;

@@ -76,7 +75,6 @@ typedef struct {
  STaosQueue *pQueryQ;
  STaosQueue *pStreamQ;
  STaosQueue *pStreamCtrlQ;
  STaosQueue *pStreamLongExecQ;
  STaosQueue *pFetchQ;
  STaosQueue *pMultiMgmQ;
} SVnodeObj;

@@ -139,8 +137,6 @@ int32_t vmPutMsgToQueryQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg);
int32_t vmPutMsgToFetchQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg);
int32_t vmPutMsgToStreamQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg);
int32_t vmPutMsgToStreamCtrlQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg);
int32_t vmPutMsgToStreamLongExecQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg);

int32_t vmPutMsgToMergeQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg);
int32_t vmPutMsgToMgmtQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg);
int32_t vmPutMsgToMultiMgmtQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg);
@@ -1008,29 +1008,27 @@ SArray *vmGetMsgHandles() {
  if (dmSetMgmtHandle(pArray, TDMT_STREAM_TASK_RUN, vmPutMsgToStreamQueue, 0) == NULL) goto _OVER;
  if (dmSetMgmtHandle(pArray, TDMT_STREAM_TASK_DISPATCH, vmPutMsgToStreamCtrlQueue, 0) == NULL) goto _OVER;
  if (dmSetMgmtHandle(pArray, TDMT_STREAM_TASK_DISPATCH_RSP, vmPutMsgToStreamCtrlQueue, 0) == NULL) goto _OVER;
  if (dmSetMgmtHandle(pArray, TDMT_VND_STREAM_TASK_CHECK, vmPutMsgToStreamCtrlQueue, 0) == NULL) goto _OVER;
  if (dmSetMgmtHandle(pArray, TDMT_VND_STREAM_TASK_CHECK_RSP, vmPutMsgToStreamCtrlQueue, 0) == NULL) goto _OVER;
  if (dmSetMgmtHandle(pArray, TDMT_STREAM_TASK_CHECKPOINT_READY, vmPutMsgToStreamCtrlQueue, 0) == NULL) goto _OVER;
  if (dmSetMgmtHandle(pArray, TDMT_STREAM_TASK_CHECKPOINT_READY_RSP, vmPutMsgToStreamCtrlQueue, 0) == NULL) goto _OVER;
  if (dmSetMgmtHandle(pArray, TDMT_STREAM_RETRIEVE_TRIGGER, vmPutMsgToStreamCtrlQueue, 0) == NULL) goto _OVER;
  if (dmSetMgmtHandle(pArray, TDMT_STREAM_RETRIEVE_TRIGGER_RSP, vmPutMsgToStreamCtrlQueue, 0) == NULL) goto _OVER;
  if (dmSetMgmtHandle(pArray, TDMT_MND_STREAM_HEARTBEAT_RSP, vmPutMsgToStreamCtrlQueue, 0) == NULL) goto _OVER;
  if (dmSetMgmtHandle(pArray, TDMT_MND_STREAM_REQ_CHKPT_RSP, vmPutMsgToStreamCtrlQueue, 0) == NULL) goto _OVER;
  if (dmSetMgmtHandle(pArray, TDMT_MND_STREAM_CHKPT_REPORT_RSP, vmPutMsgToStreamCtrlQueue, 0) == NULL) goto _OVER;
  if (dmSetMgmtHandle(pArray, TDMT_VND_STREAM_SCAN_HISTORY, vmPutMsgToStreamLongExecQueue, 0) == NULL) goto _OVER;

  if (dmSetMgmtHandle(pArray, TDMT_VND_GET_STREAM_PROGRESS, vmPutMsgToStreamQueue, 0) == NULL) goto _OVER;
  if (dmSetMgmtHandle(pArray, TDMT_STREAM_RETRIEVE, vmPutMsgToStreamQueue, 0) == NULL) goto _OVER;
  if (dmSetMgmtHandle(pArray, TDMT_STREAM_RETRIEVE_RSP, vmPutMsgToStreamQueue, 0) == NULL) goto _OVER;

  if (dmSetMgmtHandle(pArray, TDMT_STREAM_TASK_UPDATE_CHKPT, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
  if (dmSetMgmtHandle(pArray, TDMT_STREAM_CONSEN_CHKPT, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
  if (dmSetMgmtHandle(pArray, TDMT_VND_STREAM_TASK_CHECK, vmPutMsgToStreamCtrlQueue, 0) == NULL) goto _OVER;
  if (dmSetMgmtHandle(pArray, TDMT_VND_STREAM_TASK_CHECK_RSP, vmPutMsgToStreamCtrlQueue, 0) == NULL) goto _OVER;
  if (dmSetMgmtHandle(pArray, TDMT_STREAM_TASK_PAUSE, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
  if (dmSetMgmtHandle(pArray, TDMT_STREAM_TASK_RESUME, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
  if (dmSetMgmtHandle(pArray, TDMT_STREAM_TASK_STOP, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
  if (dmSetMgmtHandle(pArray, TDMT_VND_STREAM_CHECK_POINT_SOURCE, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
  if (dmSetMgmtHandle(pArray, TDMT_STREAM_TASK_CHECKPOINT_READY, vmPutMsgToStreamCtrlQueue, 0) == NULL) goto _OVER;
  if (dmSetMgmtHandle(pArray, TDMT_STREAM_TASK_CHECKPOINT_READY_RSP, vmPutMsgToStreamCtrlQueue, 0) == NULL) goto _OVER;
  if (dmSetMgmtHandle(pArray, TDMT_STREAM_RETRIEVE_TRIGGER, vmPutMsgToStreamCtrlQueue, 0) == NULL) goto _OVER;
  if (dmSetMgmtHandle(pArray, TDMT_STREAM_RETRIEVE_TRIGGER_RSP, vmPutMsgToStreamCtrlQueue, 0) == NULL) goto _OVER;
  if (dmSetMgmtHandle(pArray, TDMT_VND_STREAM_TASK_UPDATE, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
  if (dmSetMgmtHandle(pArray, TDMT_VND_STREAM_TASK_RESET, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
  if (dmSetMgmtHandle(pArray, TDMT_MND_STREAM_HEARTBEAT_RSP, vmPutMsgToStreamCtrlQueue, 0) == NULL) goto _OVER;
  if (dmSetMgmtHandle(pArray, TDMT_MND_STREAM_REQ_CHKPT_RSP, vmPutMsgToStreamCtrlQueue, 0) == NULL) goto _OVER;
  if (dmSetMgmtHandle(pArray, TDMT_MND_STREAM_CHKPT_REPORT_RSP, vmPutMsgToStreamCtrlQueue, 0) == NULL) goto _OVER;
  if (dmSetMgmtHandle(pArray, TDMT_VND_GET_STREAM_PROGRESS, vmPutMsgToStreamQueue, 0) == NULL) goto _OVER;

  if (dmSetMgmtHandle(pArray, TDMT_STREAM_TASK_UPDATE_CHKPT, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
  if (dmSetMgmtHandle(pArray, TDMT_STREAM_CONSEN_CHKPT, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;

  if (dmSetMgmtHandle(pArray, TDMT_VND_ALTER_REPLICA, vmPutMsgToMgmtQueue, 0) == NULL) goto _OVER;
  if (dmSetMgmtHandle(pArray, TDMT_VND_ALTER_CONFIG, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
@@ -398,14 +398,10 @@ void vmCloseVnode(SVnodeMgmt *pMgmt, SVnodeObj *pVnode, bool commitAndRemoveWal,

  dInfo("vgId:%d, wait for vnode stream queue:%p is empty, %d remains", pVnode->vgId,
        pVnode->pStreamQ, taosQueueItemSize(pVnode->pStreamQ));
  while (!taosQueueEmpty(pVnode->pStreamQ)) taosMsleep(50);
  while (!taosQueueEmpty(pVnode->pStreamQ)) taosMsleep(10);

  dInfo("vgId:%d, wait for vnode stream ctrl queue:%p is empty", pVnode->vgId, pVnode->pStreamCtrlQ);
  while (!taosQueueEmpty(pVnode->pStreamCtrlQ)) taosMsleep(50);

  dInfo("vgId:%d, wait for vnode stream long-exec queue:%p is empty, %d remains", pVnode->vgId,
        pVnode->pStreamLongExecQ, taosQueueItemSize(pVnode->pStreamLongExecQ));
  while (!taosQueueEmpty(pVnode->pStreamLongExecQ)) taosMsleep(50);
  while (!taosQueueEmpty(pVnode->pStreamCtrlQ)) taosMsleep(10);

  dInfo("vgId:%d, all vnode queues is empty", pVnode->vgId);

@@ -150,7 +150,7 @@ static void vmProcessStreamCtrlQueue(SQueueInfo *pInfo, STaosQall* pQall, int32_
    SRpcMsg *pMsg = pItem;
    const STraceId *trace = &pMsg->info.traceId;

    dGTrace("vgId:%d, msg:%p get from vnode-stream-ctrl queue", pVnode->vgId, pMsg);
    dGTrace("vgId:%d, msg:%p get from vnode-ctrl-stream queue", pVnode->vgId, pMsg);
    code = vnodeProcessStreamCtrlMsg(pVnode->pImpl, pMsg, pInfo);
    if (code != 0) {
      terrno = code;

@@ -165,26 +165,6 @@ static void vmProcessStreamCtrlQueue(SQueueInfo *pInfo, STaosQall* pQall, int32_
  }
}

static void vmProcessStreamLongExecQueue(SQueueInfo *pInfo, SRpcMsg *pMsg) {
  SVnodeObj *pVnode = pInfo->ahandle;
  const STraceId *trace = &pMsg->info.traceId;
  int32_t code = 0;

  dGTrace("vgId:%d, msg:%p get from vnode-stream long-exec queue", pVnode->vgId, pMsg);

  code = vnodeProcessStreamLongExecMsg(pVnode->pImpl, pMsg, pInfo);
  if (code != 0) {
    terrno = code;
    dGError("vgId:%d, msg:%p failed to process stream msg %s since %s", pVnode->vgId, pMsg, TMSG_INFO(pMsg->msgType),
            tstrerror(code));
    vmSendRsp(pMsg, code);
  }

  dGTrace("vgId:%d, msg:%p is freed, code:0x%x", pVnode->vgId, pMsg, code);
  rpcFreeCont(pMsg->pCont);
  taosFreeQitem(pMsg);
}

static void vmProcessFetchQueue(SQueueInfo *pInfo, STaosQall *qall, int32_t numOfMsgs) {
  SVnodeObj *pVnode = pInfo->ahandle;
  SRpcMsg *pMsg = NULL;

@@ -294,13 +274,9 @@ static int32_t vmPutMsgToQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg, EQueueType qtyp
      code = taosWriteQitem(pVnode->pStreamQ, pMsg);
      break;
    case STREAM_CTRL_QUEUE:
      dGTrace("vgId:%d, msg:%p put into vnode-stream-ctrl queue", pVnode->vgId, pMsg);
      dGTrace("vgId:%d, msg:%p put into vnode-ctrl-stream queue", pVnode->vgId, pMsg);
      code = taosWriteQitem(pVnode->pStreamCtrlQ, pMsg);
      break;
    case STREAM_LONG_EXEC_QUEUE:
      dGTrace("vgId:%d, msg:%p put into vnode-stream-long-exec queue", pVnode->vgId, pMsg);
      code = taosWriteQitem(pVnode->pStreamLongExecQ, pMsg);
      break;
    case FETCH_QUEUE:
      dGTrace("vgId:%d, msg:%p put into vnode-fetch queue", pVnode->vgId, pMsg);
      code = taosWriteQitem(pVnode->pFetchQ, pMsg);

@@ -359,8 +335,6 @@ int32_t vmPutMsgToStreamQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) { return vmPutMs

int32_t vmPutMsgToStreamCtrlQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) { return vmPutMsgToQueue(pMgmt, pMsg, STREAM_CTRL_QUEUE); }

int32_t vmPutMsgToStreamLongExecQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) { return vmPutMsgToQueue(pMgmt, pMsg, STREAM_LONG_EXEC_QUEUE); }

int32_t vmPutMsgToMultiMgmtQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
  const STraceId *trace = &pMsg->info.traceId;
  dGTrace("msg:%p, put into vnode-multi-mgmt queue", pMsg);

@@ -435,10 +409,6 @@ int32_t vmGetQueueSize(SVnodeMgmt *pMgmt, int32_t vgId, EQueueType qtype) {
      break;
    case STREAM_CTRL_QUEUE:
      size = taosQueueItemSize(pVnode->pStreamCtrlQ);
      break;
    case STREAM_LONG_EXEC_QUEUE:
      size = taosQueueItemSize(pVnode->pStreamLongExecQ);
      break;
    default:
      break;
  }

@@ -481,16 +451,13 @@ int32_t vmAllocQueue(SVnodeMgmt *pMgmt, SVnodeObj *pVnode) {
  }

  pVnode->pQueryQ = tQueryAutoQWorkerAllocQueue(&pMgmt->queryPool, pVnode, (FItem)vmProcessQueryQueue);
  pVnode->pStreamQ = tAutoQWorkerAllocQueue(&pMgmt->streamPool, pVnode, (FItem)vmProcessStreamQueue);
  pVnode->pFetchQ = tWWorkerAllocQueue(&pMgmt->fetchPool, pVnode, (FItems)vmProcessFetchQueue);

  // init stream msg processing queue family
  pVnode->pStreamQ = tAutoQWorkerAllocQueue(&pMgmt->streamPool, pVnode, (FItem)vmProcessStreamQueue, 2);
  pVnode->pStreamCtrlQ = tWWorkerAllocQueue(&pMgmt->streamCtrlPool, pVnode, (FItems)vmProcessStreamCtrlQueue);
  pVnode->pStreamLongExecQ = tAutoQWorkerAllocQueue(&pMgmt->streamLongExecPool, pVnode, (FItem)vmProcessStreamLongExecQueue, 1);

  if (pVnode->pWriteW.queue == NULL || pVnode->pSyncW.queue == NULL || pVnode->pSyncRdW.queue == NULL ||
      pVnode->pApplyW.queue == NULL || pVnode->pQueryQ == NULL || pVnode->pStreamQ == NULL || pVnode->pFetchQ == NULL
      || pVnode->pStreamCtrlQ == NULL || pVnode->pStreamLongExecQ == NULL) {
      || pVnode->pStreamCtrlQ == NULL) {
    return TSDB_CODE_OUT_OF_MEMORY;
  }

@@ -506,7 +473,6 @@ int32_t vmAllocQueue(SVnodeMgmt *pMgmt, SVnodeObj *pVnode) {
  dInfo("vgId:%d, fetch-queue:%p is alloced, thread:%08" PRId64, pVnode->vgId, pVnode->pFetchQ,
        taosQueueGetThreadId(pVnode->pFetchQ));
  dInfo("vgId:%d, stream-queue:%p is alloced", pVnode->vgId, pVnode->pStreamQ);
  dInfo("vgId:%d, stream-long-exec-queue:%p is alloced", pVnode->vgId, pVnode->pStreamLongExecQ);
  dInfo("vgId:%d, stream-ctrl-queue:%p is alloced, thread:%08" PRId64, pVnode->vgId, pVnode->pStreamCtrlQ,
        taosQueueGetThreadId(pVnode->pStreamCtrlQ));
  return 0;

@@ -515,22 +481,17 @@ int32_t vmAllocQueue(SVnodeMgmt *pMgmt, SVnodeObj *pVnode) {
void vmFreeQueue(SVnodeMgmt *pMgmt, SVnodeObj *pVnode) {
  tQueryAutoQWorkerFreeQueue(&pMgmt->queryPool, pVnode->pQueryQ);
  tAutoQWorkerFreeQueue(&pMgmt->streamPool, pVnode->pStreamQ);
  tAutoQWorkerFreeQueue(&pMgmt->streamLongExecPool, pVnode->pStreamLongExecQ);
  tWWorkerFreeQueue(&pMgmt->streamCtrlPool, pVnode->pStreamCtrlQ);
  tWWorkerFreeQueue(&pMgmt->fetchPool, pVnode->pFetchQ);
  pVnode->pQueryQ = NULL;
  pVnode->pFetchQ = NULL;

  pVnode->pStreamQ = NULL;
  pVnode->pStreamCtrlQ = NULL;
  pVnode->pStreamLongExecQ = NULL;

  pVnode->pFetchQ = NULL;
  dDebug("vgId:%d, queue is freed", pVnode->vgId);
}

int32_t vmStartWorker(SVnodeMgmt *pMgmt) {
  int32_t code = 0;

  int32_t code = 0;
  SQueryAutoQWorkerPool *pQPool = &pMgmt->queryPool;
  pQPool->name = "vnode-query";
  pQPool->min = tsNumOfVnodeQueryThreads;

@@ -544,13 +505,8 @@ int32_t vmStartWorker(SVnodeMgmt *pMgmt) {
  pStreamPool->ratio = tsRatioOfVnodeStreamThreads;
  if ((code = tAutoQWorkerInit(pStreamPool)) != 0) return code;

  SAutoQWorkerPool *pLongExecPool = &pMgmt->streamLongExecPool;
  pLongExecPool->name = "vnode-stream-long-exec";
  pLongExecPool->ratio = tsRatioOfVnodeStreamThreads/3;
  if ((code = tAutoQWorkerInit(pLongExecPool)) != 0) return code;

  SWWorkerPool *pStreamCtrlPool = &pMgmt->streamCtrlPool;
  pStreamCtrlPool->name = "vnode-stream-ctrl";
  pStreamCtrlPool->name = "vnode-ctrl-stream";
  pStreamCtrlPool->max = 1;
  if ((code = tWWorkerInit(pStreamCtrlPool)) != 0) return code;

@@ -585,7 +541,6 @@ int32_t vmStartWorker(SVnodeMgmt *pMgmt) {
void vmStopWorker(SVnodeMgmt *pMgmt) {
  tQueryAutoQWorkerCleanup(&pMgmt->queryPool);
  tAutoQWorkerCleanup(&pMgmt->streamPool);
  tAutoQWorkerCleanup(&pMgmt->streamLongExecPool);
  tWWorkerCleanup(&pMgmt->streamCtrlPool);
  tWWorkerCleanup(&pMgmt->fetchPool);
  dDebug("vnode workers are closed");
@@ -113,7 +113,6 @@ int32_t vnodeProcessQueryMsg(SVnode *pVnode, SRpcMsg *pMsg, SQueueInfo *pInfo);
int32_t vnodeProcessFetchMsg(SVnode *pVnode, SRpcMsg *pMsg, SQueueInfo *pInfo);
int32_t vnodeProcessStreamMsg(SVnode *pVnode, SRpcMsg *pMsg, SQueueInfo *pInfo);
int32_t vnodeProcessStreamCtrlMsg(SVnode *pVnode, SRpcMsg *pMsg, SQueueInfo *pInfo);
int32_t vnodeProcessStreamLongExecMsg(SVnode *pVnode, SRpcMsg *pMsg, SQueueInfo *pInfo);
void vnodeProposeWriteMsg(SQueueInfo *pInfo, STaosQall *qall, int32_t numOfMsgs);
void vnodeApplyWriteMsg(SQueueInfo *pInfo, STaosQall *qall, int32_t numOfMsgs);
void vnodeProposeCommitOnNeed(SVnode *pVnode, bool atExit);

@@ -1302,7 +1302,7 @@ _checkpoint:
  }

  streamMetaWLock(pMeta);
  if ((code = streamMetaSaveTaskInMeta(pMeta, pTask)) != 0) {
  if ((code = streamMetaSaveTask(pMeta, pTask)) != 0) {
    streamMetaWUnLock(pMeta);
    taosHashCancelIterate(pInfoHash, infoHash);
    TSDB_CHECK_CODE(code, lino, _exit);

@@ -962,6 +962,7 @@ int32_t tqProcessTaskScanHistory(STQ* pTq, SRpcMsg* pMsg) {
  int32_t code = TSDB_CODE_SUCCESS;
  SStreamTask* pTask = NULL;
  SStreamTask* pStreamTask = NULL;
  char* pStatus = NULL;

  code = streamMetaAcquireTask(pMeta, pReq->streamId, pReq->taskId, &pTask);
  if (pTask == NULL) {

@@ -972,7 +973,29 @@ int32_t tqProcessTaskScanHistory(STQ* pTq, SRpcMsg* pMsg) {

  // do recovery step1
  const char* id = pTask->id.idStr;
  char* pStatus = streamTaskGetStatus(pTask).name;
  streamMutexLock(&pTask->lock);

  SStreamTaskState s = streamTaskGetStatus(pTask);
  pStatus = s.name;

  if ((s.state != TASK_STATUS__SCAN_HISTORY) || (pTask->status.downstreamReady == 0)) {
    tqError("s-task:%s vgId:%d status:%s downstreamReady:%d not allowed/ready for scan-history data, quit", id,
            pMeta->vgId, s.name, pTask->status.downstreamReady);

    streamMutexUnlock(&pTask->lock);
    streamMetaReleaseTask(pMeta, pTask);
    return 0;
  }

  if (pTask->exec.pExecutor == NULL) {
    tqError("s-task:%s vgId:%d executor is null, not executor scan history", id, pMeta->vgId);

    streamMutexUnlock(&pTask->lock);
    streamMetaReleaseTask(pMeta, pTask);
    return 0;
  }

  streamMutexUnlock(&pTask->lock);

  // avoid multi-thread exec
  while (1) {
@@ -268,13 +268,13 @@ int32_t tqStreamTaskProcessUpdateReq(SStreamMeta* pMeta, SMsgCb* cb, SRpcMsg* pM
  // stream do update the nodeEp info, write it into stream meta.
  if (updated) {
    tqDebug("s-task:%s vgId:%d save task after update epset, and stop task", idstr, vgId);
    code = streamMetaSaveTaskInMeta(pMeta, pTask);
    code = streamMetaSaveTask(pMeta, pTask);
    if (code) {
      tqError("s-task:%s vgId:%d failed to save task, code:%s", idstr, vgId, tstrerror(code));
    }

    if (pHTask != NULL) {
      code = streamMetaSaveTaskInMeta(pMeta, pHTask);
      code = streamMetaSaveTask(pMeta, pHTask);
      if (code) {
        tqError("s-task:%s vgId:%d failed to save related history task, code:%s", idstr, vgId, tstrerror(code));
      }

@@ -751,8 +751,6 @@ int32_t tqStreamTaskProcessDropReq(SStreamMeta* pMeta, char* msg, int32_t msgLen
  }

  streamMetaWUnLock(pMeta);
  tqDebug("vgId:%d process drop task:0x%x completed", vgId, pReq->taskId);

  return 0; // always return success
}

@@ -867,9 +865,6 @@ int32_t tqStreamTaskProcessRunReq(SStreamMeta* pMeta, SRpcMsg* pMsg, bool isLead
  } else if (type == STREAM_EXEC_T_ADD_FAILED_TASK) {
    code = streamMetaAddFailedTask(pMeta, req.streamId, req.taskId);
    return code;
  } else if (type == STREAM_EXEC_T_STOP_ONE_TASK) {
    code = streamMetaStopOneTask(pMeta, req.streamId, req.taskId);
    return code;
  } else if (type == STREAM_EXEC_T_RESUME_TASK) {  // task resume to run after idle for a while
    SStreamTask* pTask = NULL;
    code = streamMetaAcquireTask(pMeta, req.streamId, req.taskId, &pTask);
@@ -934,7 +934,9 @@ int32_t vnodeProcessFetchMsg(SVnode *pVnode, SRpcMsg *pMsg, SQueueInfo *pInfo) {

int32_t vnodeProcessStreamMsg(SVnode *pVnode, SRpcMsg *pMsg, SQueueInfo *pInfo) {
  vTrace("vgId:%d, msg:%p in stream queue is processing", pVnode->config.vgId, pMsg);
  if (!syncIsReadyForRead(pVnode->sync)) {
  if ((pMsg->msgType == TDMT_SCH_FETCH || pMsg->msgType == TDMT_VND_TABLE_META || pMsg->msgType == TDMT_VND_TABLE_CFG ||
       pMsg->msgType == TDMT_VND_BATCH_META) &&
      !syncIsReadyForRead(pVnode->sync)) {
    vnodeRedirectRpcMsg(pVnode, pMsg, terrno);
    return 0;
  }

@@ -946,6 +948,8 @@ int32_t vnodeProcessStreamMsg(SVnode *pVnode, SRpcMsg *pMsg, SQueueInfo *pInfo)
      return tqProcessTaskRetrieveReq(pVnode->pTq, pMsg);
    case TDMT_STREAM_RETRIEVE_RSP:
      return tqProcessTaskRetrieveRsp(pVnode->pTq, pMsg);
    case TDMT_VND_STREAM_SCAN_HISTORY:
      return tqProcessTaskScanHistory(pVnode->pTq, pMsg);
    case TDMT_VND_GET_STREAM_PROGRESS:
      return tqStreamProgressRetrieveReq(pVnode->pTq, pMsg);
    default:

@@ -992,22 +996,6 @@ int32_t vnodeProcessStreamCtrlMsg(SVnode *pVnode, SRpcMsg *pMsg, SQueueInfo *pIn
  }
}

int32_t vnodeProcessStreamLongExecMsg(SVnode *pVnode, SRpcMsg *pMsg, SQueueInfo *pInfo) {
  vTrace("vgId:%d, msg:%p in stream long exec queue is processing", pVnode->config.vgId, pMsg);
  if (!syncIsReadyForRead(pVnode->sync)) {
    vnodeRedirectRpcMsg(pVnode, pMsg, terrno);
    return 0;
  }

  switch (pMsg->msgType) {
    case TDMT_VND_STREAM_SCAN_HISTORY:
      return tqProcessTaskScanHistory(pVnode->pTq, pMsg);
    default:
      vError("unknown msg type:%d in stream long exec queue", pMsg->msgType);
      return TSDB_CODE_APP_ERROR;
  }
}

void smaHandleRes(void *pVnode, int64_t smaId, const SArray *data) {
  int32_t code = tdProcessTSmaInsert(((SVnode *)pVnode)->pSma, smaId, (const char *)data);
  if (code) {
@@ -995,43 +995,26 @@ int32_t qAsyncKillTask(qTaskInfo_t qinfo, int32_t rspCode) {
  return TSDB_CODE_SUCCESS;
}

int32_t qKillTask(qTaskInfo_t tinfo, int32_t rspCode, int64_t waitDuration) {
  int64_t st = taosGetTimestampMs();
int32_t qKillTask(qTaskInfo_t tinfo, int32_t rspCode) {
  SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;
  if (pTaskInfo == NULL) {
    return TSDB_CODE_QRY_INVALID_QHANDLE;
  }

  if (waitDuration > 0) {
    qDebug("%s sync killed execTask, and waiting for %.2fs", GET_TASKID(pTaskInfo), waitDuration/1000.0);
  } else {
    qDebug("%s async killed execTask", GET_TASKID(pTaskInfo));
  }

  qDebug("%s sync killed execTask", GET_TASKID(pTaskInfo));
  setTaskKilled(pTaskInfo, TSDB_CODE_TSC_QUERY_KILLED);

  if (waitDuration > 0) {
    while (1) {
      taosWLockLatch(&pTaskInfo->lock);
      if (qTaskIsExecuting(pTaskInfo)) {  // let's wait for 100 ms and try again
        taosWUnLockLatch(&pTaskInfo->lock);

        taosMsleep(200);

        int64_t d = taosGetTimestampMs() - st;
        if (d >= waitDuration && waitDuration >= 0) {
          qWarn("%s waiting more than %.2fs, not wait anymore", GET_TASKID(pTaskInfo), waitDuration / 1000.0);
          return TSDB_CODE_SUCCESS;
        }
      } else {  // not running now
        pTaskInfo->code = rspCode;
        taosWUnLockLatch(&pTaskInfo->lock);
        return TSDB_CODE_SUCCESS;
      }
  while (1) {
    taosWLockLatch(&pTaskInfo->lock);
    if (qTaskIsExecuting(pTaskInfo)) {  // let's wait for 100 ms and try again
      taosWUnLockLatch(&pTaskInfo->lock);
      taosMsleep(100);
    } else {  // not running now
      pTaskInfo->code = rspCode;
      taosWUnLockLatch(&pTaskInfo->lock);
      return TSDB_CODE_SUCCESS;
    }
  }

  return TSDB_CODE_SUCCESS;
}

bool qTaskIsExecuting(qTaskInfo_t qinfo) {

@@ -42,7 +42,9 @@ typedef struct SIndefOperatorInfo {
} SIndefOperatorInfo;

static int32_t doGenerateSourceData(SOperatorInfo* pOperator);
static SSDataBlock* doProjectOperation1(SOperatorInfo* pOperator);
static int32_t doProjectOperation(SOperatorInfo* pOperator, SSDataBlock** pResBlock);
static SSDataBlock* doApplyIndefinitFunction1(SOperatorInfo* pOperator);
static int32_t doApplyIndefinitFunction(SOperatorInfo* pOperator, SSDataBlock** pResBlock);
static int32_t setRowTsColumnOutputInfo(SqlFunctionCtx* pCtx, int32_t numOfCols, SArray** pResList);
static int32_t setFunctionResultOutput(SOperatorInfo* pOperator, SOptrBasicInfo* pInfo, SAggSupporter* pSup,

@@ -555,6 +557,12 @@ static void doHandleDataBlock(SOperatorInfo* pOperator, SSDataBlock* pBlock, SOp
  }
}

SSDataBlock* doApplyIndefinitFunction1(SOperatorInfo* pOperator) {
  SSDataBlock* pResBlock = NULL;
  pOperator->pTaskInfo->code = doApplyIndefinitFunction(pOperator, &pResBlock);
  return pResBlock;
}

int32_t doApplyIndefinitFunction(SOperatorInfo* pOperator, SSDataBlock** pResBlock) {
  QRY_PARAM_CHECK(pResBlock);
  SIndefOperatorInfo* pIndefInfo = pOperator->info;
@@ -595,68 +595,71 @@ void streamTaskClearCheckInfo(SStreamTask* pTask, bool clearChkpReadyMsg) {
            pTask->id.idStr, pInfo->failedId, pTask->chkInfo.checkpointId);
}

int32_t streamTaskUpdateTaskCheckpointInfo(SStreamTask* pTask, bool restored, SVUpdateCheckpointInfoReq* pReq) {
// The checkpointInfo can be updated in the following three cases:
// 1. follower tasks; 2. leader task with status of TASK_STATUS__CK; 3. restore not completed
static int32_t doUpdateCheckpointInfoCheck(SStreamTask* pTask, bool restored, SVUpdateCheckpointInfoReq* pReq,
                                           bool* pContinue) {
  SStreamMeta* pMeta = pTask->pMeta;
  int32_t vgId = pMeta->vgId;
  int32_t code = 0;
  const char* id = pTask->id.idStr;
  SCheckpointInfo* pInfo = &pTask->chkInfo;

  streamMutexLock(&pTask->lock);
  *pContinue = true;

  // not update the checkpoint info if the checkpointId is less than the failed checkpointId
  if (pReq->checkpointId < pInfo->pActiveInfo->failedId) {
    stWarn("s-task:%s vgId:%d not update the checkpoint-info, since update checkpointId:%" PRId64
           " is less than the failed checkpointId:%" PRId64 ", discard the update info",
           " is less than the failed checkpointId:%" PRId64 ", discard",
           id, vgId, pReq->checkpointId, pInfo->pActiveInfo->failedId);
    streamMutexUnlock(&pTask->lock);

    // always return true
    *pContinue = false;
    return TSDB_CODE_SUCCESS;
  }

  // it's an expired checkpointInfo update msg, we still try to drop the required drop fill-history task.
  if (pReq->checkpointId <= pInfo->checkpointId) {
    stDebug("s-task:%s vgId:%d latest checkpointId:%" PRId64 " Ver:%" PRId64
            " no need to update checkpoint info, updated checkpointId:%" PRId64 " Ver:%" PRId64 " transId:%d ignored",
            id, vgId, pInfo->checkpointId, pInfo->checkpointVer, pReq->checkpointId, pReq->checkpointVer,
            pReq->transId);
    streamMutexUnlock(&pTask->lock);

    {  // destroy the related fill-history tasks
      // drop task should not in the meta-lock, and drop the related fill-history task now
      if (pReq->dropRelHTask) {
        code = streamMetaUnregisterTask(pMeta, pReq->hStreamId, pReq->hTaskId);
        int32_t numOfTasks = streamMetaGetNumOfTasks(pMeta);
        stDebug("s-task:%s vgId:%d related fill-history task:0x%x dropped in update checkpointInfo, remain tasks:%d",
                id, vgId, pReq->taskId, numOfTasks);
      }
    {  // destroy the related fill-history tasks
      if (pReq->dropRelHTask) {
        code = streamMetaUnregisterTask(pMeta, pReq->hStreamId, pReq->hTaskId);

      if (pReq->dropRelHTask) {
        code = streamMetaCommit(pMeta);
      }
    }
        int32_t numOfTasks = streamMetaGetNumOfTasks(pMeta);
        stDebug("s-task:%s vgId:%d related fill-history task:0x%x dropped in update checkpointInfo, remain tasks:%d",
                id, vgId, pReq->taskId, numOfTasks);

        //todo: task may not exist, commit anyway, optimize this later
        code = streamMetaCommit(pMeta);
      }
    }

    *pContinue = false;
    // always return true
    return TSDB_CODE_SUCCESS;
  }

  SStreamTaskState pStatus = streamTaskGetStatus(pTask);
  SStreamTaskState status = streamTaskGetStatus(pTask);

  if (!restored) {  // during restore procedure, do update checkpoint-info
    stDebug("s-task:%s vgId:%d status:%s update the checkpoint-info during restore, checkpointId:%" PRId64 "->%" PRId64
            " checkpointVer:%" PRId64 "->%" PRId64 " checkpointTs:%" PRId64 "->%" PRId64,
            id, vgId, pStatus.name, pInfo->checkpointId, pReq->checkpointId, pInfo->checkpointVer, pReq->checkpointVer,
            id, vgId, status.name, pInfo->checkpointId, pReq->checkpointId, pInfo->checkpointVer, pReq->checkpointVer,
            pInfo->checkpointTime, pReq->checkpointTs);
  } else {  // not in restore status, must be in checkpoint status
    if ((pStatus.state == TASK_STATUS__CK) || (pMeta->role == NODE_ROLE_FOLLOWER)) {
      stDebug("s-task:%s vgId:%d status:%s role:%d start to update the checkpoint-info, checkpointId:%" PRId64 "->%" PRId64
              " checkpointVer:%" PRId64 "->%" PRId64 " checkpointTs:%" PRId64 "->%" PRId64,
              id, vgId, pStatus.name, pMeta->role, pInfo->checkpointId, pReq->checkpointId, pInfo->checkpointVer,
    if (((status.state == TASK_STATUS__CK) && (pMeta->role == NODE_ROLE_LEADER)) ||
        (pMeta->role == NODE_ROLE_FOLLOWER)) {
      stDebug("s-task:%s vgId:%d status:%s role:%d start to update the checkpoint-info, checkpointId:%" PRId64
              "->%" PRId64 " checkpointVer:%" PRId64 "->%" PRId64 " checkpointTs:%" PRId64 "->%" PRId64,
              id, vgId, status.name, pMeta->role, pInfo->checkpointId, pReq->checkpointId, pInfo->checkpointVer,
              pReq->checkpointVer, pInfo->checkpointTime, pReq->checkpointTs);
    } else {
      stDebug("s-task:%s vgId:%d status:%s NOT update the checkpoint-info, checkpointId:%" PRId64 "->%" PRId64
              " checkpointVer:%" PRId64 "->%" PRId64,
              id, vgId, pStatus.name, pInfo->checkpointId, pReq->checkpointId, pInfo->checkpointVer,
              id, vgId, status.name, pInfo->checkpointId, pReq->checkpointId, pInfo->checkpointVer,
              pReq->checkpointVer);
    }
  }

@@ -665,14 +668,48 @@ int32_t streamTaskUpdateTaskCheckpointInfo(SStreamTask* pTask, bool restored, SV
                pInfo->processedVer <= pReq->checkpointVer);

  if (!valid) {
    stFatal("s-task:%s invalid checkpointId update info recv, current checkpointId:%" PRId64 " checkpointVer:%" PRId64
            " processedVer:%" PRId64 " req checkpointId:%" PRId64 " checkpointVer:%" PRId64 " discard it",
            id, pInfo->checkpointId, pInfo->checkpointVer, pInfo->processedVer, pReq->checkpointId,
            pReq->checkpointVer);
    streamMutexUnlock(&pTask->lock);
    return TSDB_CODE_STREAM_INTERNAL_ERROR;
    // invalid update checkpoint info for leader, since the processedVer is greater than the checkpointVer
    // It is possible for follower tasks that the processedVer is greater than the checkpointVer, and the processed info
    // in follower tasks will be discarded, since the leader/follower switch happens before the checkpoint of the
    // processedVer being generated.
    if (pMeta->role == NODE_ROLE_LEADER) {

      stFatal("s-task:%s checkpointId update info recv, current checkpointId:%" PRId64 " checkpointVer:%" PRId64
              " processedVer:%" PRId64 " req checkpointId:%" PRId64 " checkpointVer:%" PRId64 " discard it",
              id, pInfo->checkpointId, pInfo->checkpointVer, pInfo->processedVer, pReq->checkpointId,
              pReq->checkpointVer);

      *pContinue = false;
      return TSDB_CODE_STREAM_INTERNAL_ERROR;
    } else {
      stInfo("s-task:%s vgId:%d follower recv checkpointId update info, current checkpointId:%" PRId64
             " checkpointVer:%" PRId64 " processedVer:%" PRId64 " req checkpointId:%" PRId64 " checkpointVer:%" PRId64,
             id, pMeta->vgId, pInfo->checkpointId, pInfo->checkpointVer, pInfo->processedVer, pReq->checkpointId,
             pReq->checkpointVer);
    }
  }

  return TSDB_CODE_SUCCESS;
}

int32_t streamTaskUpdateTaskCheckpointInfo(SStreamTask* pTask, bool restored, SVUpdateCheckpointInfoReq* pReq) {
  SStreamMeta* pMeta = pTask->pMeta;
  int32_t vgId = pMeta->vgId;
  int32_t code = 0;
  const char* id = pTask->id.idStr;
  SCheckpointInfo* pInfo = &pTask->chkInfo;
  bool continueUpdate = true;

  streamMutexLock(&pTask->lock);
  code = doUpdateCheckpointInfoCheck(pTask, restored, pReq, &continueUpdate);

  if (!continueUpdate) {
    streamMutexUnlock(&pTask->lock);
    return code;
  }

  SStreamTaskState pStatus = streamTaskGetStatus(pTask);

  // update only it is in checkpoint status, or during restore procedure.
  if ((pStatus.state == TASK_STATUS__CK) || (!restored) || (pMeta->role == NODE_ROLE_FOLLOWER)) {
    pInfo->checkpointId = pReq->checkpointId;

@@ -697,7 +734,7 @@ int32_t streamTaskUpdateTaskCheckpointInfo(SStreamTask* pTask, bool restored, SV

  pTask->status.taskStatus = TASK_STATUS__READY;

  code = streamMetaSaveTaskInMeta(pMeta, pTask);
  code = streamMetaSaveTask(pMeta, pTask);
  streamMutexUnlock(&pTask->lock);

  if (code != TSDB_CODE_SUCCESS) {

@@ -1537,14 +1574,6 @@ int32_t deleteCheckpointFile(const char* id, const char* name) {
int32_t streamTaskSendNegotiateChkptIdMsg(SStreamTask* pTask) {
  streamMutexLock(&pTask->lock);
  ETaskStatus p = streamTaskGetStatus(pTask).state;
  // if (pInfo->alreadySendChkptId == true) {
  //   stDebug("s-task:%s already start to consensus-checkpointId, not start again before it completed", id);
  //   streamMutexUnlock(&pTask->lock);
  //   return TSDB_CODE_SUCCESS;
  // } else {
  //   pInfo->alreadySendChkptId = true;
  // }
  //
  streamTaskSetReqConsenChkptId(pTask, taosGetTimestampMs());
  streamMutexUnlock(&pTask->lock);
@@ -875,7 +875,7 @@ static int32_t doStreamExecTask(SStreamTask* pTask) {
    }

    double el = (taosGetTimestampMs() - st) / 1000.0;
    if (el > 2.0) {  // elapsed more than 5 sec, not occupy the CPU anymore
    if (el > 5.0) {  // elapsed more than 5 sec, not occupy the CPU anymore
      stDebug("s-task:%s occupy more than 5.0s, release the exec threads and idle for 500ms", id);
      streamTaskSetIdleInfo(pTask, 500);
      return code;
@@ -633,7 +633,7 @@ void streamMetaCloseImpl(void* arg) {
}

// todo let's check the status for each task
int32_t streamMetaSaveTaskInMeta(SStreamMeta* pMeta, SStreamTask* pTask) {
int32_t streamMetaSaveTask(SStreamMeta* pMeta, SStreamTask* pTask) {
  int32_t vgId = pTask->pMeta->vgId;
  void* buf = NULL;
  int32_t len;

@@ -683,7 +683,7 @@ int32_t streamMetaSaveTaskInMeta(SStreamMeta* pMeta, SStreamTask* pTask) {
  return code;
}

int32_t streamMetaRemoveTaskInMeta(SStreamMeta* pMeta, STaskId* pTaskId) {
int32_t streamMetaRemoveTask(SStreamMeta* pMeta, STaskId* pTaskId) {
  int64_t key[2] = {pTaskId->streamId, pTaskId->taskId};
  int32_t code = tdbTbDelete(pMeta->pTaskDb, key, STREAM_TASK_KEY_LEN, pMeta->txn);
  if (code != 0) {

@@ -706,7 +706,7 @@ int32_t streamMetaRegisterTask(SStreamMeta* pMeta, int64_t ver, SStreamTask* pTa
  void* p = taosHashGet(pMeta->pTasksMap, &id, sizeof(id));

  if (p != NULL) {
    stDebug("s-task:0x%" PRIx64 " already exist in meta, no need to register", id.taskId);
    stDebug("s-task:%" PRIx64 " already exist in meta, no need to register", id.taskId);
    tFreeStreamTask(pTask);
    return code;
  }

@@ -736,7 +736,7 @@ int32_t streamMetaRegisterTask(SStreamMeta* pMeta, int64_t ver, SStreamTask* pTa
    return code;
  }

  if ((code = streamMetaSaveTaskInMeta(pMeta, pTask)) != 0) {
  if ((code = streamMetaSaveTask(pMeta, pTask)) != 0) {
    int32_t unused = taosHashRemove(pMeta->pTasksMap, &id, sizeof(id));
    void* pUnused = taosArrayPop(pMeta->pTaskList);

@@ -886,8 +886,6 @@ static void doRemoveIdFromList(SArray* pTaskList, int32_t num, SStreamTaskId* id

static int32_t streamTaskSendTransSuccessMsg(SStreamTask* pTask, void* param) {
  int32_t code = 0;
  int32_t waitingDuration = 5000;

  if (pTask->info.taskLevel == TASK_LEVEL__SOURCE) {
    code = streamTaskSendCheckpointSourceRsp(pTask);
    if (code) {

@@ -898,7 +896,7 @@ static int32_t streamTaskSendTransSuccessMsg(SStreamTask* pTask, void* param) {

  // let's kill the query procedure within stream, to end it ASAP.
  if (pTask->info.taskLevel != TASK_LEVEL__SINK && pTask->exec.pExecutor != NULL) {
    code = qKillTask(pTask->exec.pExecutor, TSDB_CODE_SUCCESS, -1);
    code = qKillTask(pTask->exec.pExecutor, TSDB_CODE_SUCCESS);
    if (code != TSDB_CODE_SUCCESS) {
      stError("s-task:%s failed to kill task related query handle, code:%s", pTask->id.idStr, tstrerror(code));
    }

@@ -935,7 +933,7 @@ int32_t streamMetaUnregisterTask(SStreamMeta* pMeta, int64_t streamId, int32_t t

  code = taosHashRemove(pMeta->pTasksMap, &id, sizeof(id));
  doRemoveIdFromList(pMeta->pTaskList, (int32_t)taosArrayGetSize(pMeta->pTaskList), &pTask->id);
  code = streamMetaRemoveTaskInMeta(pMeta, &id);
  code = streamMetaRemoveTask(pMeta, &id);
  if (code) {
    stError("vgId:%d failed to remove task:0x%" PRIx64 ", code:%s", pMeta->vgId, id.taskId, tstrerror(code));
  }

@@ -966,32 +964,6 @@ int32_t streamMetaUnregisterTask(SStreamMeta* pMeta, int64_t streamId, int32_t t
  return 0;
}

int32_t streamMetaStopOneTask(SStreamMeta* pMeta, int64_t streamId, int32_t taskId) {
  SStreamTask* pTask = NULL;
  int32_t code = 0;
  int32_t vgId = pMeta->vgId;
  int32_t numOfTasks = 0;

  streamMetaWLock(pMeta);

  //  code = streamMetaUnregisterTask(pMeta, streamId, taskId);
  //  numOfTasks = streamMetaGetNumOfTasks(pMeta);
  //  if (code) {
  //    stError("vgId:%d failed to drop task:0x%x, code:%s", vgId, taskId, tstrerror(code));
  //  }
  //
  //  code = streamMetaCommit(pMeta);
  //  if (code) {
  //    stError("vgId:%d failed to commit after drop task:0x%x, code:%s", vgId, taskId, tstrerror(code));
  //  } else {
  //    stDebug("s-task:0x%"PRIx64"-0x%x vgId:%d dropped, remain tasks:%d", streamId, taskId, pMeta->vgId, numOfTasks);
  //  }

  streamMetaWUnLock(pMeta);

  return code;
}

int32_t streamMetaBegin(SStreamMeta* pMeta) {
  streamMetaWLock(pMeta);
  int32_t code = tdbBegin(pMeta->db, &pMeta->txn, tdbDefaultMalloc, tdbDefaultFree, NULL,

@@ -1215,7 +1187,7 @@ void streamMetaLoadAllTasks(SStreamMeta* pMeta) {
  if (taosArrayGetSize(pRecycleList) > 0) {
    for (int32_t i = 0; i < taosArrayGetSize(pRecycleList); ++i) {
      STaskId* pId = taosArrayGet(pRecycleList, i);
      code = streamMetaRemoveTaskInMeta(pMeta, pId);
      code = streamMetaRemoveTask(pMeta, pId);
      if (code) {
        stError("s-task:0x%" PRIx64 " failed to remove task, code:%s", pId->taskId, tstrerror(code));
      }
@@ -76,7 +76,7 @@ int32_t streamStartScanHistoryAsync(SStreamTask* pTask, int8_t igUntreated) {
  memcpy(serializedReq, &req, len);

  SRpcMsg rpcMsg = {.contLen = len, .pCont = serializedReq, .msgType = TDMT_VND_STREAM_SCAN_HISTORY};
  return tmsgPutToQueue(pTask->pMsgCb, STREAM_LONG_EXEC_QUEUE, &rpcMsg);
  return tmsgPutToQueue(pTask->pMsgCb, STREAM_QUEUE, &rpcMsg);
}

void streamExecScanHistoryInFuture(SStreamTask* pTask, int32_t idleDuration) {
@@ -45,6 +45,10 @@ int32_t streamMetaStartAllTasks(SStreamMeta* pMeta) {

  if (numOfTasks == 0) {
    stInfo("vgId:%d no tasks exist, quit from consensus checkpointId", pMeta->vgId);

    streamMetaWLock(pMeta);
    streamMetaResetStartInfo(&pMeta->startInfo, vgId);
    streamMetaWUnLock(pMeta);
    return TSDB_CODE_SUCCESS;
  }

@@ -447,6 +451,7 @@ int32_t streamMetaStopAllTasks(SStreamMeta* pMeta) {
      continue;
    }

    int64_t refId = pTask->id.refId;
    int32_t ret = streamTaskStop(pTask);
    if (ret) {
      stError("s-task:0x%x failed to stop task, code:%s", pTaskId->taskId, tstrerror(ret));
@@ -710,7 +710,7 @@ int32_t streamTaskStop(SStreamTask* pTask) {
  }

  if (pTask->info.taskLevel != TASK_LEVEL__SINK && pTask->exec.pExecutor != NULL) {
    code = qKillTask(pTask->exec.pExecutor, TSDB_CODE_SUCCESS, 5000);
    code = qKillTask(pTask->exec.pExecutor, TSDB_CODE_SUCCESS);
    if (code != TSDB_CODE_SUCCESS) {
      stError("s-task:%s failed to kill task related query handle, code:%s", id, tstrerror(code));
    }

@@ -869,7 +869,7 @@ int32_t streamTaskClearHTaskAttr(SStreamTask* pTask, int32_t resetRelHalt) {
    pStreamTask->status.taskStatus = TASK_STATUS__READY;
  }

  code = streamMetaSaveTaskInMeta(pMeta, pStreamTask);
  code = streamMetaSaveTask(pMeta, pStreamTask);
  streamMutexUnlock(&(pStreamTask->lock));

  streamMetaReleaseTask(pMeta, pStreamTask);

@@ -1034,7 +1034,7 @@ static int32_t taskPauseCallback(SStreamTask* pTask, void* param) {
  // in case of fill-history task, stop the tsdb file scan operation.
  if (pTask->info.fillHistory == 1) {
    void* pExecutor = pTask->exec.pExecutor;
    code = qKillTask(pExecutor, TSDB_CODE_SUCCESS, 10000);
    code = qKillTask(pExecutor, TSDB_CODE_SUCCESS);
  }

  stDebug("vgId:%d s-task:%s set pause flag and pause task", pMeta->vgId, pTask->id.idStr);

@@ -1296,8 +1296,6 @@ const char* streamTaskGetExecType(int32_t type) {
      return "resume-task-from-idle";
    case STREAM_EXEC_T_ADD_FAILED_TASK:
      return "record-start-failed-task";
    case STREAM_EXEC_T_STOP_ONE_TASK:
      return "stop-one-task";
    case 0:
      return "exec-all-tasks";
    default:
@@ -256,7 +256,7 @@ static void *tAutoQWorkerThreadFp(SQueueWorker *worker) {
  return NULL;
}

STaosQueue *tAutoQWorkerAllocQueue(SAutoQWorkerPool *pool, void *ahandle, FItem fp, int32_t minNum) {
STaosQueue *tAutoQWorkerAllocQueue(SAutoQWorkerPool *pool, void *ahandle, FItem fp) {
  int32_t code;
  STaosQueue *queue;

@@ -280,10 +280,7 @@ STaosQueue *tAutoQWorkerAllocQueue(SAutoQWorkerPool *pool, void *ahandle, FItem
  int32_t queueNum = taosGetQueueNumber(pool->qset);
  int32_t curWorkerNum = taosArrayGetSize(pool->workers);
  int32_t dstWorkerNum = ceilf(queueNum * pool->ratio);

  if (dstWorkerNum < minNum) {
    dstWorkerNum = minNum;
  }
  if (dstWorkerNum < 2) dstWorkerNum = 2;

  // spawn a thread to process queue
  while (curWorkerNum < dstWorkerNum) {
@@ -79,7 +79,7 @@
(void)streamMetaAddFailedTask
(void)streamMetaAddTaskLaunchResult
(void)streamMetaCommit
(void)streamMetaRemoveTaskInMeta
(void)streamMetaRemoveTask
(void)streamMetaSendHbHelper
(void)streamMetaStartAllTasks
(void)streamMetaStartOneTask
@@ -71,7 +71,11 @@ class TDCases:
        runNum = 0
        for tmp in self.linuxCases:
            if tmp.name.find(fileName) != -1:
                case = testModule.TDTestCase()
                # get the last class name as the test case class name
                class_names = [name for name, obj in inspect.getmembers(testModule, inspect.isclass)]
                class_names = class_names[-1]
                case_class = getattr(testModule, class_names)
                case = case_class()
                case.init(conn, self._logSql, replicaVar)
                try:
                    case.run()

@@ -103,7 +107,11 @@ class TDCases:
        runNum = 0
        for tmp in self.windowsCases:
            if tmp.name.find(fileName) != -1:
                case = testModule.TDTestCase()
                # get the last class name as the test case class name
                class_names = [name for name, obj in inspect.getmembers(testModule, inspect.isclass)]
                class_names = class_names[-1]
                case_class = getattr(testModule, class_names)
                case = case_class()
                case.init(conn, self._logSql,replicaVar)
                try:
                    case.run()

@@ -117,12 +125,17 @@ class TDCases:

    def runAllCluster(self):
        # TODO: load all cluster case module here
        testModule = self.__dynamicLoadModule(fileName)

        runNum = 0
        for tmp in self.clusterCases:
            if tmp.name.find(fileName) != -1:
                tdLog.notice("run cases like %s" % (fileName))
                case = testModule.TDTestCase()
                # get the last class name as the test case class name
                class_names = [name for name, obj in inspect.getmembers(testModule, inspect.isclass)]
                class_names = class_names[-1]
                case_class = getattr(testModule, class_names)
                case = case_class()
                case.init()
                case.run()
                case.stop()

@@ -138,7 +151,11 @@ class TDCases:
        for tmp in self.clusterCases:
            if tmp.name.find(fileName) != -1:
                tdLog.notice("run cases like %s" % (fileName))
                case = testModule.TDTestCase()
                # get the last class name as the test case class name
                class_names = [name for name, obj in inspect.getmembers(testModule, inspect.isclass)]
                class_names = class_names[-1]
                case_class = getattr(testModule, class_names)
                case = case_class()
                case.init()
                case.run()
                case.stop()
@@ -1,3 +1,4 @@
## \brief Test create view and drop view functions
sql connect
sql use testa;

@@ -0,0 +1 @@
::: data_write.stmt.test_stmt_set_tbname_tag

@@ -0,0 +1 @@
::: high_availability.2_replica.test_replica2
@@ -0,0 +1,205 @@
# Test Case Writing Specification

## 1. Background

Two problems have existed in TDengine testing for a long time:

### Problem 1: Test cases lack organization

TDengine's CI runs 1,600+ test cases. As the product iterates, however, there is no documented overview of which features these cases cover and which they miss. Building and maintaining a thorough catalog of test case descriptions would help us identify missing cases and fill the gaps in a targeted way. But maintaining test cases the traditional way, in documents or a test case management system, has two drawbacks:

1. High maintenance cost
2. Documentation and code are separated, so the documentation easily goes stale

### Problem 2: Multiple test frameworks coexist

For historical reasons, TDengine's CI test cases were written in different languages and frameworks (including TSIM, System Test, ARMY, etc.) and are scattered across different subdirectories under TDengine/tests, so case management is somewhat chaotic. Running a "full test" requires several frameworks, each executing the cases in its own subdirectories. Cases written for each framework differ in how they are written and executed, and it is hard to produce test reports in a unified style.

## 2. Change History

| Date | Version | Owner | Main changes |
|:-----------|:-----------|:-------------| :--- |
| 2025/2/12 | 0.1 | @Feng Chao @Huo Hong @Wang Xu | Initial document |
| 2025/2/17 | 0.2 | @Huo Hong @Wang Xu | Revised the case writing conventions |

## 3. Solution

To solve the problems above, the platform team discussed with the R&D leads and agreed that R&D will make the following adjustments:

1. The platform team will consolidate and optimize the existing test frameworks; from now on all cases will be written in a single, unified language and framework. The main changes are:
   - Functional cases are written in Python across the board
   - pytest is introduced as the test runner; pytest is very widely used in the Python ecosystem and makes it easy to control execution granularity (run one case, a group of cases, cases by marker, and so on — see the sketch after this list); it also offers rich plugins, for example allure-pytest for test reporting
   - The core libraries of existing frameworks such as system-test and TestNG will be consolidated to simplify case writing
2. When writing a case, product developers must add metadata such as a description in a unified format; the platform team will then use tooling to generate the test case documentation from the case code automatically, which also matches the company-wide "everything as code" policy.
3. The platform team will use mkdocs + mkdocstrings to extract this information, generate the case documentation, and deploy it to GitHub Pages or the intranet.
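As an illustration of the execution-granularity point in the list above, here is a minimal, hypothetical sketch; the marker name and paths are examples only and are not mandated by this specification:

```Python
# Hypothetical sketch of pytest-style case selection; marker and paths are examples.
import pytest

@pytest.mark.stable          # assumed marker; it would be declared in pytest.ini
def test_insert_basic():
    assert 1 + 1 == 2

# Selection granularity (shell commands shown as comments):
#   pytest tests/test_new/query/join/test_join.py    # one case file
#   pytest tests/test_new/query/                     # one group of cases
#   pytest -m stable                                 # all cases with a marker
```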
## 4. Goals and Scope

This specification applies to the following scope:

- Repositories
  - TDengine: /tests/test_new (tentative name, to distinguish these from the existing cases under /tests)
  - taosX: /tests/e2e
- Test type: end-to-end functional tests
- Programming language: Python

Other repositories or other types of tests are built on different frameworks/languages; for now this specification is not mandatory for them and may serve only as a reference.

## 5. Case Writing Conventions

### 5.1 Case directories

Requirements for case directories:

1. New cases all go under tests/test_new
2. Cases are organized in a two-level directory structure; see Appendix 1 for details

### 5.2 Case files

Requirements for case files:

1. Case Python file names should start with test_, for example: test_join.py

### 5.3 Case conventions

Requirements for case file content:

1. A case Python file must define a class whose name starts with Test, preferably matching the file name, for example: TestJoin
2. One case Python file may contain multiple cases, present as test methods of the test class; test method names must start with test_, for example: test_join()
3. The class usually defines methods such as init(), run(), and stop() for initializing case data, executing the case, and cleaning up the environment. This is mainly for compatibility with the current system-test framework, so that cases written under the new conventions can run on it; once the new framework consolidation is complete this requirement will be dropped and the platform team will perform the replacement uniformly.
4. The run() method must call all test methods defined in the class and contain no other logic
5. In each test method, add the case description as a standard Python docstring consisting of several fields, with a blank line between fields. The fields and their requirements are:

| Field | Description | Required |
|:--------| ---- | :---: |
| | One-line summary of the case; a single line only | Yes |
| | Detailed description of the case; multiple lines allowed | Yes |
| Since | The TDengine version that first supports the case; mandatory for new features | Yes |
| Labels | Case labels, separated by commas; labels use snake case, i.e. all lowercase with words joined by _ | No |
| Jira | Jira IDs, separated by commas | No |
| History | Change history of the case | No |

### 5.4 Other requirements and suggestions

1. Cases that share the same environment/data setup should be placed in the same file to improve run efficiency
2. Debug output in a case should use the logger provided by the framework, for example util.log.tdLog, rather than the print() function (see the sketch after this list)
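A minimal sketch of the logging recommendation above; the messages are illustrative only, and tdLog is the logger from the framework's util.log module referenced in item 2:

```Python
# Minimal sketch: prefer the framework logger over print(); messages are examples.
from util.log import tdLog

def test_log_usage():
    tdLog.debug("start to execute %s" % __file__)   # debug-level detail
    tdLog.notice("created super table stb1")        # hypothetical progress message
    tdLog.success("case finished successfully")     # final status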
### 5.5 Case template

```Python
# tests/test_new/xxx/xxx/test_xxx.py
import ...


class TestXxxx:
    def init(self, args=value ...):
        tdLog.debug("start to execute %s" % __file__)
        ...


    def user_defined_function(self, args=value ...):
        ...


    def test_template(self):
        """Case goal, required, one short summary line
        <blank line>
        Detailed case description, required, multiple lines allowed
        <blank line>
        Since: the TDengine version that first supports the case; required for new cases
        <blank line>
        Labels: filter labels, optional, multiple labels separated by commas
        <blank line>
        Jira: related Jira task IDs, optional
        <blank line>
        History: case change history, optional, one change per line
            - date1 author1 reason1
            - date2 author2 reason2
        """
        ...  # test case code


    def test_demo(self):
        """Insert various data types into a super table

        Create a super table with a variety of data types and insert data into it,
        including regular values, null values, and boundary values; all inserts succeed

        Since: v3.3.0.0

        Labels: stable, data_type

        Jira: TD-12345, TS-1234

        History:
            - 2024-2-6 Feng Chao Created
            - 2024-2-7 Huo Hong updated for feature TD-23456
        """
        ...  # test case code


    def run(self):
        self.test_template()
        self.test_demo()
        ...  # case function list


    def stop(self):
        ...
        tdLog.success("%s successfully executed" % __file__)
```

Notes:

- In the docstring, each section must be separated by a blank line, otherwise the format will be broken
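For the compatibility requirement in 5.3 item 3, a case file typically also registers an instance at module level so the current runner can pick it up. The following is a hypothetical sketch; tdCases comes from the framework's util.cases module, and the loader changed in this commit resolves the test class dynamically via inspect.getmembers() (taking the last class defined in the module), so the exact instance passed here matters less than it used to:

```Python
# Hypothetical sketch: registering the case with the current framework runner.
from util.cases import tdCases

tdCases.addLinux(__file__, TestXxxx())    # TestXxxx is the template class above
tdCases.addWindows(__file__, TestXxxx())
```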
## 6. Appendix

### 6.1 Appendix 1: Case directory structure

New cases live under the TDengine/tests/test_new directory, organized as two levels of subdirectories. The following is only a suggestion, and comments are welcome:

```
test_new/
├── data_write/
│   ├── csv/
│   ├── sql_statement/
│   ├── stmt/
│   ├── stmt2/
│   └── schemaless/
├── metadata/
│   ├── child_table/
│   ├── data_type/
│   ├── db/
│   ├── dnode/
│   ├── mnode/
│   ├── naming_rule/
│   ├── qnode/
│   ├── reqular_table/
│   ├── snode/
│   ├── super_table/
│   ├── system_table/
│   ├── tag_index/
│   └── time_precision/
├── high_availability/
│   ├── 2_replica/
│   ├── 3_replica/
│   ├── active_active/
│   ├── cluster_expansion_reduction/
│   └── replica_change/
├── operation/
│   ├── balance/
│   ├── configuration/
│   ├── redistribute/
│   ├── restore/
│   ├── slow_query/
│   ├── split/
│   ├── transaction/
│   └── upgrade/
├── query/
│   ├── case_when/
│   ├── escape_character/
│   ├── function/
│   ├── having/
│   ├── hint/
│   ├── index/
│   ├── join/
│   ├── nested/
│   ├── operator/
│   ├── pseudo_column/
│   ├── sql_syntax/
│   ├── union/
│   ├── view/
│   └── window/
├── security/
│   ├── audit/
│   ├── authorization/
│   ├── permission/
│   └── encryption/
├── storage/
│   ├── compress/
│   ├── multilevel/
│   ├── sma/
│   ├── tsma/
│   └── s3/
├── stream/
├── tdgpt/
├── tmq/
└── udf/
```
@@ -0,0 +1 @@
::: metadata.time_precision.test_precision_ns

@@ -0,0 +1 @@
::: query.function.test_sin

@@ -0,0 +1 @@
::: query.hint.test_hint

@@ -0,0 +1 @@
::: query.join.test_join
@ -0,0 +1,223 @@
|
|||
import sys
|
||||
|
||||
sys.path.append("../tests/pytest")
|
||||
from util.log import *
|
||||
from util.cases import *
|
||||
from util.sql import *
|
||||
from util.dnodes import tdDnodes
|
||||
from math import inf
|
||||
import taos
|
||||
|
||||
|
||||
class TestInsertStb:
|
||||
def caseDescription(self):
|
||||
'''
|
||||
case1<shenglian zhou>: [TS-3932] insert into stb
|
||||
'''
|
||||
return
|
||||
|
||||
def init(self, conn, logSql, replicaVer=1):
|
||||
tdLog.debug("start to execute %s" % __file__)
|
||||
tdSql.init(conn.cursor(), True)
|
||||
self.conn = conn
|
||||
|
||||
|
||||
def restartTaosd(self, index=1, dbname="db"):
|
||||
tdDnodes.stop(index)
|
||||
tdDnodes.startWithoutSleep(index)
|
||||
tdSql.execute(f"use insert_stb")
|
||||
|
||||
|
||||
def test_normal(self):
|
||||
"""测试超级表基本插入操作
|
||||
|
||||
插入语句使用using、dbname.stable、tbname等关键字插入数据,插入单表数据,单表多条数据,多表多条数据,带标签插入等语句,执行成功
|
||||
|
||||
Since: v3.3.0.0
|
||||
|
||||
Labels: stable
|
||||
|
||||
Jira: TD-12345,TS-1234
|
||||
|
||||
History:
|
||||
- 2024-2-6 Feng Chao Created
|
||||
|
||||
"""
|
||||
|
||||
print("running {}".format("normal"))
|
||||
tdSql.execute("drop database if exists insert_stb")
|
||||
tdSql.execute("create database if not exists insert_stb")
|
||||
tdSql.execute('use insert_stb')
|
||||
tdSql.execute('create database d1')
|
||||
|
||||
tdSql.execute('create database d2')
|
||||
|
||||
tdSql.execute('use d1;')
|
||||
|
||||
tdSql.execute('create table st(ts timestamp, f int) tags(t int);')
|
||||
|
||||
tdSql.execute("insert into ct1 using st tags(1) values('2021-04-19 00:00:00', 1);")
|
||||
|
||||
tdSql.execute("insert into ct2 using st tags(2) values('2021-04-19 00:00:01', 2);")
|
||||
|
||||
tdSql.execute("insert into ct1 values('2021-04-19 00:00:02', 2);")
|
||||
|
||||
tdSql.execute('use d2;')
|
||||
|
||||
tdSql.execute('create table st(ts timestamp, f int) tags(t int);')
|
||||
|
||||
tdSql.execute("insert into ct1 using st tags(1) values('2021-04-19 00:00:00', 1);")
|
||||
|
||||
tdSql.execute("insert into ct2 using st tags(2) values('2021-04-19 00:00:01', 2);")
|
||||
|
||||
tdSql.execute('create database db1 vgroups 1;')
|
||||
|
||||
tdSql.execute('create table db1.stb (ts timestamp, c1 int, c2 int) tags(t1 int, t2 int);')
|
||||
|
||||
tdSql.execute('use d1;')
|
||||
|
||||
tdSql.execute("insert into st (tbname, ts, f, t) values('ct3', '2021-04-19 08:00:03', 3, 3);")
|
||||
|
||||
tdSql.execute("insert into d1.st (tbname, ts, f) values('ct6', '2021-04-19 08:00:04', 6);")
|
||||
|
||||
tdSql.execute("insert into d1.st (tbname, ts, f) values('ct6', '2021-04-19 08:00:05', 7)('ct8', '2021-04-19 08:00:06', 8);")
|
||||
|
||||
tdSql.execute("insert into d1.st (tbname, ts, f, t) values('ct6', '2021-04-19 08:00:07', 9, 9)('ct8', '2021-04-19 08:00:08', 10, 10);")
|
||||
|
||||
tdSql.execute("insert into d1.st (tbname, ts, f, t) values('ct6', '2021-04-19 08:00:09', 9, 9)('ct8', '2021-04-19 08:00:10', 10, 10) d2.st (tbname, ts, f, t) values('ct6', '2021-04-19 08:00:11', 9, 9)('ct8', '2021-04-19 08:00:12', 10, 10);")
|
||||
|
||||
tdSql.query('select * from d1.st order by ts;')
|
||||
tdSql.checkRows(11)
|
||||
tdSql.checkData(0, 0, datetime.datetime(2021, 4, 19, 0, 0))
|
||||
|
||||
|
||||
def test_insert_stb(self):
|
||||
"""测试超级表插入各种数据类型
|
||||
|
||||
使用多种数据类型创建超级表,向超级表插入数据,包括常规数据,空数据,边界值等,插入均执行成功
|
||||
|
||||
Since: v3.3.0.0
|
||||
|
||||
Labels: stable, dataType
|
||||
|
||||
Jira: TD-12345,TS-1234
|
||||
|
||||
History:
|
||||
2024-2-6 Feng Chao Created
|
||||
|
||||
"""
|
||||
|
||||
print("running {}".format('insert_stb'))
|
||||
self.conn.select_db('insert_stb')
|
||||
tdSql.execute('create table stb1 (ts timestamp, c1 bool, c2 tinyint, c3 smallint, c4 int, c5 bigint, c6 float, c7 double, c8 binary(10), c9 nchar(10), c10 tinyint unsigned, c11 smallint unsigned, c12 int unsigned, c13 bigint unsigned) TAGS(t1 int, t2 binary(10), t3 double);')
|
||||
|
||||
tdSql.execute('insert into stb1(ts,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,t1,t2,t3,tbname) values (\'2021-11-11 09:00:00\',true,1,1,1,1,1,1,"123","1234",1,1,1,1, 1, \'1\', 1.0, \'tb1\');')
|
||||
|
||||
tdSql.execute("insert into stb1(ts,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,t1,t2,t3,tbname) values ('2021-11-11 09:00:01',true,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL, 2, '2', 2.0, 'tb1');")
|
||||
|
||||
tdSql.execute('insert into stb1(ts,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,t1,t2,t3,tbname) values (\'2021-11-11 09:00:02\',true,2,NULL,2,NULL,2,NULL,"234",NULL,2,NULL,2,NULL, 2, \'2\', 2.0, \'tb2\');')
|
||||
|
||||
tdSql.execute('insert into stb1(ts,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,t1,t2,t3,tbname) values (\'2021-11-11 09:00:03\',false,NULL,3,NULL,3,NULL,3,NULL,"3456",NULL,3,NULL,3, 3, \'3\', 3.0, \'tb3\');')
|
||||
|
||||
tdSql.execute('insert into stb1(ts,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,t1,t2,t3,tbname) values (\'2021-11-11 09:00:04\',true,4,4,4,4,4,4,"456","4567",4,4,4,4, 4, \'4.0\', 4.0, \'tb4\');')
|
||||
|
||||
tdSql.execute('insert into stb1(ts,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,t1,t2,t3,tbname) values (\'2021-11-11 09:00:05\',true,127,32767,2147483647,9223372036854775807,3.402823466e+38,1.79769e+308,"567","5678",254,65534,4294967294,9223372036854775807, 5, \'5\', 5, \'max\' );')
|
||||
|
||||
tdSql.execute('insert into stb1(ts,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,t1,t2,t3,tbname) values (\'2021-11-11 09:00:06\',true,-127,-32767,-2147483647,-9223372036854775807,-3.402823466e+38,-1.79769e+308,"678","6789",0,0,0,0, 6, \'6\', 6, \'min\');')
|
||||
|
||||
tdSql.execute('insert into stb1(ts,c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,tbname,t1,t2,t3) values (\'2021-11-11 09:00:07\',true,-127,-32767,-2147483647,-9223372036854775807,-3.402823466e+38,-1.79769e+308,"678","6789",0,0,0,0, \'min\', 6, \'6\', 6);')
|
||||
|
||||
tdSql.query('select tbname,* from stb1 order by ts;')
|
||||
tdSql.checkRows(8)
|
||||
tdSql.checkData(0, 0, 'tb1')
|
||||
tdSql.checkData(0, 1, datetime.datetime(2021, 11, 11, 9, 0))
|
||||
tdSql.checkData(0, 2, True)
|
||||
|
||||
def test_stmt_error(self):
|
||||
"""测试超级表插入stmt数据失败
|
||||
|
||||
创建参数绑定对象,tag设置为填充对象,绑定参数后插入预期失败
|
||||
|
||||
Since: v3.3.0.0
|
||||
|
||||
Labels: stable, negative
|
||||
|
||||
Jira: TD-12345,TS-1234
|
||||
|
||||
History:
|
||||
- 2024-2-6 Feng Chao Created
|
||||
|
||||
"""
|
||||
conn = self.conn
|
||||
conn.select_db('insert_stb')
|
||||
conn.execute('create table stb9(ts timestamp, f int) tags (t int)')
|
||||
try:
|
||||
stmt = conn.statement("insert into stb9(tbname, f, t) values('ctb91', 1, ?)")
|
||||
params = taos.new_bind_params(1)
|
||||
params[0].int(1)
|
||||
stmt.bind_param(params)
|
||||
stmt.execute()
|
||||
result = stmt.use_result()
|
||||
except Exception as err:
|
||||
print(str(err))
|
||||
|
||||
def test_consecutive_seq(self):
|
||||
"""测试超级表连续插入
|
||||
|
||||
向超级表连续插入多条数据,插入均执行成功
|
||||
|
||||
Since: v3.3.0.0
|
||||
|
||||
Labels: stable
|
||||
|
||||
Jira: TD-12345,TS-1234
|
||||
|
||||
History:
|
||||
- 2024-2-6 Feng Chao Created
|
||||
|
||||
"""
|
||||
print("running {}".format("consecutive_seq"))
|
||||
tdSql.execute("drop database if exists insert_stb3")
|
||||
tdSql.execute("create database if not exists insert_stb3")
|
||||
tdSql.execute('use insert_stb3')
|
||||
tdSql.execute('create table st (ts timestamp, ti tinyint, si smallint, i int, bi bigint, f float, d double, b binary(10)) tags(t1 int, t2 float, t3 binary(10))')
|
||||
|
||||
tdSql.execute("insert into st(tbname, t1, t2, t3, ts, ti, si, i, bi, f, d, b) values ('ct0', 0, 0.000000, 'childtable', 1546300800000, 0, 0, 0, 0, 0.000000, 0.000000, 'hello') ('ct0', 0, 0.000000, 'childtable', 1546300800001, 1, 1, 1, 1, 1.000000, 2.000000, 'hello')")
|
||||
|
||||
tdSql.execute("insert into st(tbname, t1, t2, t3, ts, ti, si, i, bi, f, d, b) values ('ct1', 1, 1.000000, 'childtable', 1546301800000, 64, 16960, 1000000, 1000000, 1000000.000000, 2000000.000000, 'hello') ('ct1', 1, 1.000000, 'childtable', 1546301800001, 65, 16961, 1000001, 1000001, 1000001.000000, 2000002.000000, 'hello')")
|
||||
|
||||
tdSql.execute("insert into st(tbname, t1, t2, t3, ts, ti, si, i, bi, f, d, b) values ('ct2', 2, 2.000000, 'childtable', 1546302800000, -128, -31616, 2000000, 2000000, 2000000.000000, 4000000.000000, 'hello') ('ct2', 2, 2.000000, 'childtable', 1546302800001, -127, -31615, 2000001, 2000001, 2000001.000000, 4000002.000000, 'hello')")
|
||||
|
||||
tdSql.execute("insert into st(tbname, t1, t2, t3, ts, ti, si, i, bi, f, d, b) values ('ct3', 3, 3.000000, 'childtable', 1546303800000, -64, -14656, 3000000, 3000000, 3000000.000000, 6000000.000000, 'hello') ('ct3', 3, 3.000000, 'childtable', 1546303800001, -63, -14655, 3000001, 3000001, 3000001.000000, 6000002.000000, 'hello')")
|
||||
|
||||
tdSql.execute("insert into st(tbname, t1, t2, t3, ts, ti, si, i, bi, f, d, b) values ('ct4', 4, 4.000000, 'childtable', 1546304800000, 0, 2304, 4000000, 4000000, 4000000.000000, 8000000.000000, 'hello') ('ct4', 4, 4.000000, 'childtable', 1546304800001, 1, 2305, 4000001, 4000001, 4000001.000000, 8000002.000000, 'hello')")
|
||||
|
||||
tdSql.execute("insert into st(tbname, t1, t2, t3, ts, ti, si, i, bi, f, d, b) values ('ct5', 5, 5.000000, 'childtable', 1546305800000, 64, 19264, 5000000, 5000000, 5000000.000000, 10000000.000000, 'hello') ('ct5', 5, 5.000000, 'childtable', 1546305800001, 65, 19265, 5000001, 5000001, 5000001.000000, 10000002.000000, 'hello')")
|
||||
|
||||
tdSql.execute("insert into st(tbname, t1, t2, t3, ts, ti, si, i, bi, f, d, b) values ('ct6', 6, 6.000000, 'childtable', 1546306800000, -128, -29312, 6000000, 6000000, 6000000.000000, 12000000.000000, 'hello') ('ct6', 6, 6.000000, 'childtable', 1546306800001, -127, -29311, 6000001, 6000001, 6000001.000000, 12000002.000000, 'hello')")
|
||||
|
||||
tdSql.execute("insert into st(tbname, t1, t2, t3, ts, ti, si, i, bi, f, d, b) values ('ct7', 7, 7.000000, 'childtable', 1546307800000, -64, -12352, 7000000, 7000000, 7000000.000000, 14000000.000000, 'hello') ('ct7', 7, 7.000000, 'childtable', 1546307800001, -63, -12351, 7000001, 7000001, 7000001.000000, 14000002.000000, 'hello')")
|
||||
|
||||
tdSql.execute("insert into st(tbname, t1, t2, t3, ts, ti, si, i, bi, f, d, b) values ('ct8', 8, 8.000000, 'childtable', 1546308800000, 0, 4608, 8000000, 8000000, 8000000.000000, 16000000.000000, 'hello') ('ct8', 8, 8.000000, 'childtable', 1546308800001, 1, 4609, 8000001, 8000001, 8000001.000000, 16000002.000000, 'hello')")
|
||||
|
||||
tdSql.execute("insert into st(tbname, t1, t2, t3, ts, ti, si, i, bi, f, d, b) values ('ct9', 9, 9.000000, 'childtable', 1546309800000, 64, 21568, 9000000, 9000000, 9000000.000000, 18000000.000000, 'hello') ('ct9', 9, 9.000000, 'childtable', 1546309800001, 65, 21569, 9000001, 9000001, 9000001.000000, 18000002.000000, 'hello')")
|
||||
|
||||
tdSql.query('select * from st order by ts')
|
||||
tdSql.checkRows(20)
|
||||
tdSql.checkData(0, 0, datetime.datetime(2019, 1, 1, 8, 0))
|
||||
|
||||
tdSql.execute('drop database insert_stb3')
|
||||
|
||||
def run(self):
|
||||
self.test_normal()
|
||||
self.test_insert_stb()
|
||||
self.test_stmt_error()
|
||||
self.test_consecutive_seq()
|
||||
|
||||
|
||||
def stop(self):
|
||||
tdSql.close()
|
||||
tdLog.success("%s successfully executed" % __file__)
|
||||
|
||||
tdCases.addWindows(__file__, TestInsertStb())
|
||||
tdCases.addLinux(__file__, TestInsertStb())
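
The key syntax exercised by the case above is inserting through the supertable while selecting (and auto-creating) the child table via the tbname pseudo column. A minimal standalone sketch, assuming a local taosd and the `taos` Python connector; database and table names are illustrative:

```python
import taos

# Minimal sketch, assuming a local taosd instance; all names are illustrative.
conn = taos.connect(host="localhost", user="root", password="taosdata")
cursor = conn.cursor()
cursor.execute("create database if not exists demo_insert_stb")
cursor.execute("use demo_insert_stb")
cursor.execute("create table if not exists st(ts timestamp, f int) tags(t int)")
# Insert through the supertable: tbname picks (and auto-creates) the child
# table, and the tag column t can be supplied in the same row.
cursor.execute("insert into st (tbname, ts, f, t) values('ct1', '2021-04-19 08:00:00', 1, 1)")
cursor.close()
conn.close()
```
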
|
|
@@ -0,0 +1,276 @@
|
|||
###################################################################
|
||||
# Copyright (c) 2016 by TAOS Technologies, Inc.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This file is proprietary and confidential to TAOS Technologies.
|
||||
# No part of this file may be reproduced, stored, transmitted,
|
||||
# disclosed or used in any form or by any means other than as
|
||||
# expressly provided by the written permission from Jianhui Tao
|
||||
#
|
||||
###################################################################
|
||||
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
import sys
|
||||
import os
|
||||
import threading as thd
|
||||
import multiprocessing as mp
|
||||
|
||||
import taos
|
||||
from taos import *
|
||||
from util.log import *
|
||||
from util.cases import *
|
||||
from util.sql import *
|
||||
import numpy as np
|
||||
import datetime as dt
|
||||
from datetime import datetime
|
||||
from ctypes import *
|
||||
import time
|
||||
# constant define
|
||||
WAITS = 5 # wait seconds
|
||||
|
||||
class TestStmtSetTbnameTag:
|
||||
#
|
||||
# --------------- main frame -------------------
|
||||
def caseDescription(self):
|
||||
'''
|
||||
stmt set tbname and tags function test cases;
case1: bind tbname and tags of many data types, then insert
case2: query with bound parameters returns valid results
|
||||
'''
|
||||
return
|
||||
|
||||
def getBuildPath(self):
|
||||
selfPath = os.path.dirname(os.path.realpath(__file__))
|
||||
|
||||
if ("community" in selfPath):
|
||||
projPath = selfPath[:selfPath.find("community")]
|
||||
else:
|
||||
projPath = selfPath[:selfPath.find("tests")]
|
||||
|
||||
for root, dirs, files in os.walk(projPath):
|
||||
if ("taosd" in files or "taosd.exe" in files):
|
||||
rootRealPath = os.path.dirname(os.path.realpath(root))
|
||||
if ("packaging" not in rootRealPath):
|
||||
buildPath = root[:len(root)-len("/build/bin")]
|
||||
break
|
||||
return buildPath
|
||||
|
||||
# init
|
||||
def init(self, conn, logSql, replicaVar=1):
|
||||
self.replicaVar = int(replicaVar)
|
||||
tdLog.debug("start to execute %s" % __file__)
|
||||
tdSql.init(conn.cursor())
|
||||
# tdSql.prepare()
|
||||
# self.create_tables();
|
||||
self.ts = 1500000000000
|
||||
|
||||
# stop
|
||||
def stop(self):
|
||||
tdSql.close()
|
||||
tdLog.success("%s successfully executed" % __file__)
|
||||
|
||||
|
||||
# --------------- case -------------------
|
||||
|
||||
|
||||
def newcon(self,host,cfg):
|
||||
user = "root"
|
||||
password = "taosdata"
|
||||
port =6030
|
||||
con=taos.connect(host=host, user=user, password=password, config=cfg ,port=port)
|
||||
tdLog.debug(con)
|
||||
return con
|
||||
|
||||
def stmtExe(self,conn,sql,bindStat):
|
||||
queryStat=conn.statement("%s"%sql)
|
||||
queryStat.bind_param(bindStat)
|
||||
queryStat.execute()
|
||||
result=queryStat.use_result()
|
||||
rows=result.fetch_all()
|
||||
return rows
|
||||
|
||||
def test_stmt_set_tbname_tag(self,conn):
|
||||
"""测试参数绑定tbname和tag
|
||||
|
||||
创建参数绑定对象,绑定tbname和多种数据类型tag,插入数据成功
|
||||
|
||||
Since: v3.3.0.0
|
||||
|
||||
Labels: stmt,
|
||||
|
||||
Jira: TD-12345,TS-1234
|
||||
|
||||
History:
|
||||
- 2024-2-6 Feng Chao Created
|
||||
|
||||
"""
|
||||
dbname = "stmt_tag"
|
||||
stablename = 'log'
|
||||
try:
|
||||
conn.execute("drop database if exists %s" % dbname)
|
||||
conn.execute("create database if not exists %s PRECISION 'us' " % dbname)
|
||||
conn.select_db(dbname)
|
||||
conn.execute("create table if not exists %s(ts timestamp, bo bool, nil tinyint, ti tinyint, si smallint, ii int,\
|
||||
bi bigint, tu tinyint unsigned, su smallint unsigned, iu int unsigned, bu bigint unsigned, \
|
||||
ff float, dd double, bb binary(100), nn nchar(100), tt timestamp , vc varchar(100)) tags (t1 timestamp, t2 bool,\
|
||||
t3 tinyint, t4 tinyint, t5 smallint, t6 int, t7 bigint, t8 tinyint unsigned, t9 smallint unsigned, \
|
||||
t10 int unsigned, t11 bigint unsigned, t12 float, t13 double, t14 binary(100), t15 nchar(100), t16 timestamp)"%stablename)
|
||||
|
||||
stmt = conn.statement("insert into ? using log tags (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?) \
|
||||
values (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)")
|
||||
tags = new_bind_params(16)
|
||||
tags[0].timestamp(1626861392589123, PrecisionEnum.Microseconds)
|
||||
tags[1].bool(True)
|
||||
tags[2].bool(False)
|
||||
tags[3].tinyint(2)
|
||||
tags[4].smallint(3)
|
||||
tags[5].int(4)
|
||||
tags[6].bigint(5)
|
||||
tags[7].tinyint_unsigned(6)
|
||||
tags[8].smallint_unsigned(7)
|
||||
tags[9].int_unsigned(8)
|
||||
tags[10].bigint_unsigned(9)
|
||||
tags[11].float(10.1)
|
||||
tags[12].double(10.11)
|
||||
tags[13].binary("hello")
|
||||
tags[14].nchar("stmt")
|
||||
tags[15].timestamp(1626861392589, PrecisionEnum.Milliseconds)
|
||||
stmt.set_tbname_tags("tb1", tags)
|
||||
params = new_multi_binds(17)
|
||||
params[0].timestamp((1626861392589111, 1626861392590111, 1626861392591111))
|
||||
params[1].bool((True, None, False))
|
||||
params[2].tinyint([-128, -128, None]) # -128 is tinyint null
|
||||
params[3].tinyint([0, 127, None])
|
||||
params[4].smallint([3, None, 2])
|
||||
params[5].int([3, 4, None])
|
||||
params[6].bigint([3, 4, None])
|
||||
params[7].tinyint_unsigned([3, 4, None])
|
||||
params[8].smallint_unsigned([3, 4, None])
|
||||
params[9].int_unsigned([3, 4, None])
|
||||
params[10].bigint_unsigned([3, 4, 5])
|
||||
params[11].float([3, None, 1])
|
||||
params[12].double([3, None, 1.2])
|
||||
params[13].binary(["abc", "dddafadfadfadfadfa", None])
|
||||
params[14].nchar(["涛思数据", None, "a long string with 中文?字符"])
|
||||
params[15].timestamp([None, None, 1626861392591])
|
||||
params[16].binary(["涛思数据16", None, None])
|
||||
|
||||
stmt.bind_param_batch(params)
|
||||
stmt.execute()
|
||||
|
||||
assert stmt.affected_rows == 3
|
||||
|
||||
#query all
|
||||
queryparam=new_bind_params(1)
|
||||
queryparam[0].int(10)
|
||||
rows=self.stmtExe(conn,"select * from log where bu < ?",queryparam)
|
||||
tdLog.debug("assert 1st case %s"%rows)
|
||||
assert str(rows[0][0]) == "2021-07-21 17:56:32.589111"
|
||||
assert rows[0][10] == 3 , '1st case is failed'
|
||||
assert rows[1][10] == 4 , '1st case is failed'
|
||||
|
||||
#query: Numeric Functions
|
||||
queryparam=new_bind_params(2)
|
||||
queryparam[0].int(5)
|
||||
queryparam[1].int(5)
|
||||
rows=self.stmtExe(conn,"select abs(?) from log where bu < ?",queryparam)
|
||||
tdLog.debug("assert 2nd case %s"%rows)
|
||||
assert rows[0][0] == 5 , '2nd case is failed'
|
||||
assert rows[1][0] == 5 , '2nd case is failed'
|
||||
|
||||
|
||||
#query: Numeric Functions and escapes
|
||||
queryparam=new_bind_params(1)
|
||||
queryparam[0].int(5)
|
||||
rows=self.stmtExe(conn,"select abs(?) from log where nn= 'a? long string with 中文字符'",queryparam)
|
||||
tdLog.debug("assert 3rd case %s"%rows)
|
||||
assert rows == [] , '3rd case is failed'
|
||||
|
||||
#query: string Functions
|
||||
queryparam=new_bind_params(1)
|
||||
queryparam[0].binary('中文字符')
|
||||
rows=self.stmtExe(conn,"select CHAR_LENGTH(?) from log ",queryparam)
|
||||
tdLog.debug("assert 4th case %s"%rows)
|
||||
assert rows[0][0] == 4, '4th case is failed'
|
||||
assert rows[1][0] == 4, '4th case is failed'
|
||||
|
||||
queryparam=new_bind_params(1)
|
||||
queryparam[0].binary('123')
|
||||
rows=self.stmtExe(conn,"select CHAR_LENGTH(?) from log ",queryparam)
|
||||
tdLog.debug("assert 4th case %s"%rows)
|
||||
assert rows[0][0] == 3, '4th.1 case is failed'
|
||||
assert rows[1][0] == 3, '4th.1 case is failed'
|
||||
|
||||
#query: conversion Functions
|
||||
queryparam=new_bind_params(1)
|
||||
queryparam[0].binary('1232a')
|
||||
rows=self.stmtExe(conn,"select cast( ? as bigint) from log",queryparam)
|
||||
tdLog.debug("assert 5th case %s"%rows)
|
||||
assert rows[0][0] == 1232, '5th.1 case is failed'
|
||||
assert rows[1][0] == 1232, '5th.1 case is failed'
|
||||
|
||||
querystmt4=conn.statement("select cast( ? as binary(10)) from log ")
|
||||
queryparam=new_bind_params(1)
|
||||
queryparam[0].int(123)
|
||||
rows=self.stmtExe(conn,"select cast( ? as bigint) from log",queryparam)
|
||||
tdLog.debug("assert 6th case %s"%rows)
|
||||
assert rows[0][0] == 123, '6th.1 case is failed'
|
||||
assert rows[1][0] == 123, '6th.1 case is failed'
|
||||
|
||||
#query: datatime Functions
|
||||
queryparam=new_bind_params(1)
|
||||
queryparam[0].timestamp(1626861392591112)
|
||||
rows=self.stmtExe(conn,"select timediff('2021-07-21 17:56:32.590111',?,1a) from log",queryparam)
|
||||
tdLog.debug("assert 7th case %s"%rows)
|
||||
assert rows[0][0] == -1, '7th case is failed'
|
||||
assert rows[1][0] == -1, '7th case is failed'
|
||||
|
||||
#query: aggregate Functions
|
||||
queryparam=new_bind_params(1)
|
||||
queryparam[0].int(123)
|
||||
rows=self.stmtExe(conn,"select count(?) from log ",queryparam)
|
||||
tdLog.debug("assert 8th case %s"%rows)
|
||||
assert rows[0][0] == 3, ' 8th case is failed'
|
||||
|
||||
#query: selector Functions 9
|
||||
queryparam=new_bind_params(1)
|
||||
queryparam[0].int(2)
|
||||
rows=self.stmtExe(conn,"select bottom(bu,?) from log group by bu order by bu desc ; ",queryparam)
|
||||
tdLog.debug("assert 9th case %s"%rows)
|
||||
assert rows[1][0] == 4, ' 9 case is failed'
|
||||
assert rows[2][0] == 3, ' 9 case is failed'
|
||||
|
||||
# #query: time-series specific Functions 10
|
||||
|
||||
querystmt=conn.statement(" select twa(?) from log; ")
|
||||
queryparam=new_bind_params(1)
|
||||
queryparam[0].int(15)
|
||||
rows=self.stmtExe(conn," select twa(?) from log; ",queryparam)
|
||||
tdLog.debug("assert 10th case %s"%rows)
|
||||
assert rows[0][0] == 15, ' 10th case is failed'
|
||||
|
||||
|
||||
# conn.execute("drop database if exists %s" % dbname)
|
||||
conn.close()
|
||||
tdLog.success("%s successfully executed" % __file__)
|
||||
|
||||
except Exception as err:
|
||||
# conn.execute("drop database if exists %s" % dbname)
|
||||
conn.close()
|
||||
raise err
|
||||
|
||||
def run(self):
|
||||
buildPath = self.getBuildPath()
|
||||
config = buildPath+ "../sim/dnode1/cfg/"
|
||||
host="localhost"
|
||||
connectstmt=self.newcon(host,config)
|
||||
self.test_stmt_set_tbname_tag(connectstmt)
|
||||
|
||||
return
|
||||
|
||||
|
||||
# add case with filename
|
||||
#
|
||||
tdCases.addWindows(__file__, TestStmtSetTbnameTag())
tdCases.addLinux(__file__, TestStmtSetTbnameTag())
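
A condensed sketch of the set_tbname_tags flow exercised above, assuming a local taosd and the `taos` Python connector; database, table, and values are illustrative:

```python
import taos
from taos import new_bind_params, new_multi_binds

# Minimal sketch, assuming a local taosd instance; all names are illustrative.
conn = taos.connect(host="localhost", user="root", password="taosdata")
conn.execute("create database if not exists demo_stmt")
conn.select_db("demo_stmt")
conn.execute("create table if not exists log(ts timestamp, v int) tags(t1 int)")

stmt = conn.statement("insert into ? using log tags (?) values (?,?)")
tags = new_bind_params(1)
tags[0].int(7)
stmt.set_tbname_tags("tb1", tags)       # bind the child-table name and its tags

params = new_multi_binds(2)
params[0].timestamp([1626861392589, 1626861392590])
params[1].int([1, None])                # None is bound as NULL
stmt.bind_param_batch(params)
stmt.execute()
assert stmt.affected_rows == 2
conn.close()
```
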
|
|
@@ -0,0 +1,66 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
import time

from util.cases import *
from util.sql import *
from util.dnodes import *
from util.log import *

class TestReplica2:

def init(self, conn, logSql, replicaVar=1):
tdLog.debug(f"start to init {__file__}")
self.replicaVar = int(replicaVar)
tdSql.init(conn.cursor(), logSql)

def test_replica2(self):
"""Two-replica test

Create a two-replica database, inspect its vgroups, stop dnode 2 or 3 depending on the leader position, and verify that the remaining vnode is marked assigned.

Since: v3.3.0.0

Labels: 3nodes, replica

Jira: TD-12345,TS-1234

History:
- 2024-2-6 Feng Chao Created

"""
self.run()

def run(self):
tdSql.execute('CREATE DATABASE db vgroups 1 replica 2;')

time.sleep(1)

tdSql.query("show db.vgroups;")

if (tdSql.queryResult[0][4] == "follower") and (tdSql.queryResult[0][6] == "leader"):
tdLog.info("stop dnode2")
sc.dnodeStop(2)

if (tdSql.queryResult[0][6] == "follower") and (tdSql.queryResult[0][4] == "leader"):
tdLog.info("stop dnode 3")
sc.dnodeStop(3)

tdLog.info("wait 10 seconds")
time.sleep(10)

tdSql.query("show db.vgroups;")

if (tdSql.queryResult[0][4] != "assigned") and (tdSql.queryResult[0][6] != "assigned"):
tdLog.exit("failed to set assigned")

def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")

tdCases.addLinux(__file__, TestReplica2())
tdCases.addWindows(__file__, TestReplica2())
|
|
@@ -0,0 +1,381 @@
|
|||
###################################################################
|
||||
# Copyright (c) 2016 by TAOS Technologies, Inc.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This file is proprietary and confidential to TAOS Technologies.
|
||||
# No part of this file may be reproduced, stored, transmitted,
|
||||
# disclosed or used in any form or by any means other than as
|
||||
# expressly provided by the written permission from Jianhui Tao
|
||||
#
|
||||
###################################################################
|
||||
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
import sys
|
||||
import random
|
||||
import time
|
||||
import platform
|
||||
import taos
|
||||
from util.log import *
|
||||
from util.cases import *
|
||||
from util.sql import *
|
||||
|
||||
class TestPrecisionNs:
|
||||
|
||||
# get col value and total max min ...
|
||||
def getColsValue(self, i, j):
|
||||
# c1 value
|
||||
if random.randint(1, 10) == 5:
|
||||
c1 = None
|
||||
else:
|
||||
c1 = 1
|
||||
|
||||
# c2 value
|
||||
if j % 3200 == 0:
|
||||
c2 = 8764231
|
||||
elif random.randint(1, 10) == 5:
|
||||
c2 = None
|
||||
else:
|
||||
c2 = random.randint(-87654297, 98765321)
|
||||
|
||||
|
||||
value = f"({self.ts}, "
|
||||
|
||||
# c1
|
||||
if c1 is None:
|
||||
value += "null,"
|
||||
else:
|
||||
self.c1Cnt += 1
|
||||
value += f"{c1},"
|
||||
# c2
|
||||
if c2 is None:
|
||||
value += "null,"
|
||||
else:
|
||||
value += f"{c2},"
|
||||
# total count
|
||||
self.c2Cnt += 1
|
||||
# max
|
||||
if self.c2Max is None:
|
||||
self.c2Max = c2
|
||||
else:
|
||||
if c2 > self.c2Max:
|
||||
self.c2Max = c2
|
||||
# min
|
||||
if self.c2Min is None:
|
||||
self.c2Min = c2
|
||||
else:
|
||||
if c2 < self.c2Min:
|
||||
self.c2Min = c2
|
||||
# sum
|
||||
if self.c2Sum is None:
|
||||
self.c2Sum = c2
|
||||
else:
|
||||
self.c2Sum += c2
|
||||
|
||||
# c3 same with ts
|
||||
value += f"{self.ts})"
|
||||
|
||||
# move next
|
||||
self.ts += 1
|
||||
|
||||
return value
|
||||
|
||||
# insert data
|
||||
def insertData(self):
|
||||
tdLog.info("insert data ....")
|
||||
sqls = ""
|
||||
for i in range(self.childCnt):
|
||||
# insert child table
|
||||
values = ""
|
||||
pre_insert = f"insert into t{i} values "
|
||||
for j in range(self.childRow):
|
||||
if values == "":
|
||||
values = self.getColsValue(i, j)
|
||||
else:
|
||||
values += "," + self.getColsValue(i, j)
|
||||
|
||||
# batch insert
|
||||
if j % self.batchSize == 0 and values != "":
|
||||
sql = pre_insert + values
|
||||
tdSql.execute(sql)
|
||||
values = ""
|
||||
# append last
|
||||
if values != "":
|
||||
sql = pre_insert + values
|
||||
tdSql.execute(sql)
|
||||
values = ""
|
||||
|
||||
sql = "flush database db;"
|
||||
tdLog.info(sql)
|
||||
tdSql.execute(sql)
|
||||
# insert finished
|
||||
tdLog.info(f"insert data successfully.\n"
|
||||
f" inserted child table = {self.childCnt}\n"
|
||||
f" inserted child rows = {self.childRow}\n"
|
||||
f" total inserted rows = {self.childCnt*self.childRow}\n")
|
||||
return
|
||||
|
||||
|
||||
# prepareEnv
|
||||
def prepareEnv(self):
|
||||
# init
|
||||
self.ts = 1680000000000*1000*1000
|
||||
self.childCnt = 5
|
||||
self.childRow = 10000
|
||||
self.batchSize = 5000
|
||||
|
||||
# total
|
||||
self.c1Cnt = 0
|
||||
self.c2Cnt = 0
|
||||
self.c2Max = None
|
||||
self.c2Min = None
|
||||
self.c2Sum = None
|
||||
|
||||
# create database db
|
||||
sql = f"create database db vgroups 2 precision 'ns' "
|
||||
tdLog.info(sql)
|
||||
tdSql.execute(sql)
|
||||
sql = f"use db"
|
||||
tdSql.execute(sql)
|
||||
|
||||
# create super table st
|
||||
sql = f"create table st(ts timestamp, c1 int, c2 bigint, ts1 timestamp) tags(area int)"
|
||||
tdLog.info(sql)
|
||||
tdSql.execute(sql)
|
||||
|
||||
# create child table
|
||||
for i in range(self.childCnt):
|
||||
sql = f"create table t{i} using st tags({i}) "
|
||||
tdSql.execute(sql)
|
||||
|
||||
# create stream
|
||||
if platform.system().lower() != 'windows':
|
||||
sql = "create stream ma into sta as select count(ts) from st interval(100b)"
|
||||
tdLog.info(sql)
|
||||
tdSql.execute(sql)
|
||||
|
||||
# insert data
|
||||
self.insertData()
|
||||
|
||||
# check data correct
|
||||
def checkExpect(self, sql, expectVal):
|
||||
tdSql.query(sql)
|
||||
rowCnt = tdSql.getRows()
|
||||
for i in range(rowCnt):
|
||||
val = tdSql.getData(i,0)
|
||||
if val != expectVal:
|
||||
tdLog.exit(f"Not expect . query={val} expect={expectVal} i={i} sql={sql}")
|
||||
return False
|
||||
|
||||
tdLog.info(f"check expect ok. sql={sql} expect ={expectVal} rowCnt={rowCnt}")
|
||||
return True
|
||||
|
||||
|
||||
def test_time_macro(self):
|
||||
"""测试time函数使用ns时间戳
|
||||
|
||||
创建数据库指定precision为ns,查询语句使用ns时间戳查询、timetruncate函数使用ns时间戳查询、timediff函数使用ns时间戳查询均返回正确结果
|
||||
|
||||
Since: v3.3.0.0
|
||||
|
||||
Labels: precision
|
||||
|
||||
Jira: TD-12345,TS-1234
|
||||
|
||||
History:
|
||||
- 2024-2-6 Feng Chao Created
|
||||
|
||||
"""
|
||||
self.checkTimeMacro()
|
||||
|
||||
# check time macro
|
||||
def checkTimeMacro(self):
|
||||
# 2 week
|
||||
val = 2
|
||||
nsval = -val*7*24*60*60*1000*1000*1000
|
||||
expectVal = self.childCnt * self.childRow
|
||||
sql = f"select count(ts) from st where timediff(ts - {val}w, ts1) = {nsval} "
|
||||
self.checkExpect(sql, expectVal)
|
||||
|
||||
# 20 day
|
||||
val = 20
|
||||
nsval = -val*24*60*60*1000*1000*1000
|
||||
uint = "d"
|
||||
sql = f"select count(ts) from st where timediff(ts - {val}{uint}, ts1) = {nsval} "
|
||||
self.checkExpect(sql, expectVal)
|
||||
|
||||
# 30 hour
|
||||
val = 30
|
||||
nsval = -val*60*60*1000*1000*1000
|
||||
uint = "h"
|
||||
sql = f"select count(ts) from st where timediff(ts - {val}{uint}, ts1) = {nsval} "
|
||||
self.checkExpect(sql, expectVal)
|
||||
|
||||
# 90 minutes
|
||||
val = 90
|
||||
nsval = -val*60*1000*1000*1000
|
||||
uint = "m"
|
||||
sql = f"select count(ts) from st where timediff(ts - {val}{uint}, ts1) = {nsval} "
|
||||
self.checkExpect(sql, expectVal)
|
||||
# 2s
|
||||
val = 2
|
||||
nsval = -val*1000*1000*1000
|
||||
uint = "s"
|
||||
sql = f"select count(ts) from st where timediff(ts - {val}{uint}, ts1) = {nsval} "
|
||||
self.checkExpect(sql, expectVal)
|
||||
# 20a
|
||||
val = 5
|
||||
nsval = -val*1000*1000
|
||||
uint = "a"
|
||||
sql = f"select count(ts) from st where timediff(ts - {val}{uint}, ts1) = {nsval} "
|
||||
self.checkExpect(sql, expectVal)
|
||||
# 300u
|
||||
val = 300
|
||||
nsval = -val*1000
|
||||
uint = "u"
|
||||
sql = f"select count(ts) from st where timediff(ts - {val}{uint}, ts1) = {nsval} "
|
||||
self.checkExpect(sql, expectVal)
|
||||
# 8b
|
||||
val = 8
|
||||
sql = f"select timediff(ts1, ts - {val}b) from st "
|
||||
self.checkExpect(sql, val)
|
||||
|
||||
# timetruncate check
|
||||
sql = '''select ts,timetruncate(ts,1u),
|
||||
timetruncate(ts,1b),
|
||||
timetruncate(ts,1m),
|
||||
timetruncate(ts,1h),
|
||||
timetruncate(ts,1w)
|
||||
from t0 order by ts desc limit 1;'''
|
||||
tdSql.query(sql)
|
||||
tdSql.checkData(0,1, "2023-03-28 18:40:00.000009000")
|
||||
tdSql.checkData(0,2, "2023-03-28 18:40:00.000009999")
|
||||
tdSql.checkData(0,3, "2023-03-28 18:40:00.000000000")
|
||||
tdSql.checkData(0,4, "2023-03-28 18:00:00.000000000")
|
||||
tdSql.checkData(0,5, "2023-03-23 00:00:00.000000000")
|
||||
|
||||
# timediff
|
||||
sql = '''select ts,timediff(ts,ts+1b,1b),
|
||||
timediff(ts,ts+1u,1u),
|
||||
timediff(ts,ts+1a,1a),
|
||||
timediff(ts,ts+1s,1s),
|
||||
timediff(ts,ts+1m,1m),
|
||||
timediff(ts,ts+1h,1h),
|
||||
timediff(ts,ts+1d,1d),
|
||||
timediff(ts,ts+1w,1w)
|
||||
from t0 order by ts desc limit 1;'''
|
||||
tdSql.query(sql)
|
||||
tdSql.checkData(0,1, -1)
|
||||
tdSql.checkData(0,2, -1)
|
||||
tdSql.checkData(0,3, -1)
|
||||
tdSql.checkData(0,4, -1)
|
||||
tdSql.checkData(0,5, -1)
|
||||
tdSql.checkData(0,6, -1)
|
||||
tdSql.checkData(0,7, -1)
|
||||
tdSql.checkData(0,8, -1)
|
||||
|
||||
# init
|
||||
def init(self, conn, logSql, replicaVar=1):
|
||||
seed = time.time() % 10000
|
||||
random.seed(seed)
|
||||
self.replicaVar = int(replicaVar)
|
||||
tdLog.debug(f"start to excute {__file__}")
|
||||
tdSql.init(conn.cursor(), True)
|
||||
|
||||
|
||||
def test_where(self):
|
||||
"""测试where使用ns时间戳
|
||||
|
||||
创建数据库指定precision为ns,查询语句中where使用ns时间戳查询均返回正确结果
|
||||
|
||||
Since: v3.3.0.0
|
||||
|
||||
Labels: precision
|
||||
|
||||
Jira: TD-12345,TS-1234
|
||||
|
||||
History:
|
||||
- 2024-2-6 Feng Chao Created
|
||||
|
||||
"""
|
||||
self.checkWhere()
|
||||
|
||||
|
||||
# where
|
||||
def checkWhere(self):
|
||||
cnt = 300
|
||||
start = self.ts - cnt
|
||||
sql = f"select count(ts) from st where ts >= {start} and ts <= {self.ts}"
|
||||
self.checkExpect(sql, cnt)
|
||||
|
||||
for i in range(50):
|
||||
cnt = random.randint(1,40000)
|
||||
base = 2000
|
||||
start = self.ts - cnt - base
|
||||
end = self.ts - base
|
||||
sql = f"select count(ts) from st where ts >= {start} and ts < {end}"
|
||||
self.checkExpect(sql, cnt)
|
||||
|
||||
|
||||
def test_stream(self):
|
||||
"""测试steam使用ns时间戳
|
||||
|
||||
创建数据库指定precision为ns,创建stream,查询流计算表的字段、时间戳返回正确结果
|
||||
|
||||
Since: v3.3.0.0
|
||||
|
||||
Labels: precision
|
||||
|
||||
Jira: TD-12345,TS-1234
|
||||
|
||||
History:
|
||||
- 2024-2-6 Feng Chao Created
|
||||
|
||||
"""
|
||||
self.checkStream()
|
||||
|
||||
# stream
|
||||
def checkStream(self):
|
||||
allRows = self.childCnt * self.childRow
|
||||
# ensure write data is expected
|
||||
sql = "select count(*) from (select diff(ts) as a from (select ts from st order by ts asc)) where a=1;"
|
||||
self.checkExpect(sql, allRows - 1)
|
||||
|
||||
# stream count is ok
|
||||
sql =f"select count(*) from sta"
|
||||
cnt = int(allRows / 100) - 1 # the last window is not closed yet, so subtract one
|
||||
self.checkExpect(sql, cnt)
|
||||
|
||||
# check fields
|
||||
sql =f"select count(*) from sta where `count(ts)` != 100"
|
||||
self.checkExpect(sql, 0)
|
||||
|
||||
# check timestamp
|
||||
sql =f"select count(*) from (select diff(`_wstart`) from sta)"
|
||||
self.checkExpect(sql, cnt - 1)
|
||||
sql =f"select count(*) from (select diff(`_wstart`) as a from sta) where a != 100"
|
||||
self.checkExpect(sql, 0)
|
||||
|
||||
# run
|
||||
def run(self):
|
||||
# prepare env
|
||||
self.prepareEnv()
|
||||
|
||||
# time macro like 1w 1d 1h 1m 1s 1a 1u 1b
|
||||
self.checkTimeMacro()
|
||||
|
||||
# check where
|
||||
self.checkWhere()
|
||||
|
||||
# check stream
|
||||
if platform.system().lower() != 'windows':
|
||||
self.checkStream()
|
||||
|
||||
# stop
|
||||
def stop(self):
|
||||
tdSql.close()
|
||||
tdLog.success(f"{__file__} successfully executed")
|
||||
|
||||
|
||||
tdCases.addLinux(__file__, TestPrecisionNs())
tdCases.addWindows(__file__, TestPrecisionNs())
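
The unit checks in checkTimeMacro above boil down to converting each SQL interval unit (1b, 1u, 1a, 1s, 1m, 1h, 1d, 1w) into nanoseconds before comparing against timediff. A small sketch of that arithmetic:

```python
# Nanoseconds per SQL time unit, matching the constants used in checkTimeMacro.
NS_PER_UNIT = {
    "b": 1,                                       # nanosecond
    "u": 1000,                                    # microsecond
    "a": 1000 * 1000,                             # millisecond
    "s": 1000 * 1000 * 1000,                      # second
    "m": 60 * 1000 * 1000 * 1000,                 # minute
    "h": 60 * 60 * 1000 * 1000 * 1000,            # hour
    "d": 24 * 60 * 60 * 1000 * 1000 * 1000,       # day
    "w": 7 * 24 * 60 * 60 * 1000 * 1000 * 1000,   # week
}

def to_ns(val, unit):
    """Convert an interval such as 2w or 90m to nanoseconds."""
    return val * NS_PER_UNIT[unit]

assert to_ns(2, "w") == 2 * 7 * 24 * 60 * 60 * 1000 * 1000 * 1000
assert to_ns(90, "m") == 90 * 60 * 1000 * 1000 * 1000
```
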
|
|
@@ -0,0 +1,25 @@
site_name: TDengine Case List
docs_dir: case_list_docs
theme:
  name: "material"

markdown_extensions:
  - attr_list
  - toc:
      permalink: true
      separator: "-"
      baselevel: 1
      toc_depth: 4
plugins:
  - search
  - mkdocstrings:
      custom_templates: templates
      handlers:
        python:
          options:
            heading_level: 2
            show_root_heading: true
            show_bases: false
            show_source: false
            show_object_full_path: false
            filters: [ "^test_", "^Test" ]
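
With this configuration, mkdocstrings renders only members whose names match `^test_` or `^Test`, so a directive such as `::: query.function.test_sin` picks up the `TestSin` class and its `test_*` methods while ignoring helpers like `init` or `checkExpect`. A docstring skeleton that renders cleanly under these options (names are illustrative):

```python
class TestExample:
    def test_something(self):
        """One-line summary rendered as the case heading.

        Longer description of the behaviour being verified.

        Since: v3.3.0.0

        Labels: example

        History:
            - 2024-2-6 Author Created
        """
```
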
|
|
@@ -0,0 +1,481 @@
|
|||
###################################################################
|
||||
# Copyright (c) 2016 by TAOS Technologies, Inc.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This file is proprietary and confidential to TAOS Technologies.
|
||||
# No part of this file may be reproduced, stored, transmitted,
|
||||
# disclosed or used in any form or by any means other than as
|
||||
# expressly provided by the written permission from Jianhui Tao
|
||||
#
|
||||
###################################################################
|
||||
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
import sys
|
||||
import random
|
||||
import time
|
||||
import copy
|
||||
import string
|
||||
import platform
|
||||
import taos
|
||||
from util.log import *
|
||||
from util.cases import *
|
||||
from util.sql import *
|
||||
|
||||
class TestSplitVGroup:
|
||||
|
||||
# random string
|
||||
def random_string(self, count):
|
||||
letters = string.ascii_letters
|
||||
return ''.join(random.choice(letters) for i in range(count))
|
||||
|
||||
# get col value and total max min ...
|
||||
def getColsValue(self, i, j):
|
||||
# c1 value
|
||||
if random.randint(1, 10) == 5:
|
||||
c1 = None
|
||||
else:
|
||||
c1 = 1
|
||||
|
||||
# c2 value
|
||||
if j % 3200 == 0:
|
||||
c2 = 8764231
|
||||
elif random.randint(1, 10) == 5:
|
||||
c2 = None
|
||||
else:
|
||||
c2 = random.randint(-87654297, 98765321)
|
||||
|
||||
|
||||
value = f"({self.ts}, "
|
||||
|
||||
# c1
|
||||
if c1 is None:
|
||||
value += "null,"
|
||||
else:
|
||||
self.c1Cnt += 1
|
||||
value += f"{c1},"
|
||||
# c2
|
||||
if c2 is None:
|
||||
value += "null,"
|
||||
else:
|
||||
value += f"{c2},"
|
||||
# total count
|
||||
self.c2Cnt += 1
|
||||
# max
|
||||
if self.c2Max is None:
|
||||
self.c2Max = c2
|
||||
else:
|
||||
if c2 > self.c2Max:
|
||||
self.c2Max = c2
|
||||
# min
|
||||
if self.c2Min is None:
|
||||
self.c2Min = c2
|
||||
else:
|
||||
if c2 < self.c2Min:
|
||||
self.c2Min = c2
|
||||
# sum
|
||||
if self.c2Sum is None:
|
||||
self.c2Sum = c2
|
||||
else:
|
||||
self.c2Sum += c2
|
||||
|
||||
# c3 same with ts
|
||||
value += f"{self.ts})"
|
||||
|
||||
# move next
|
||||
self.ts += 1
|
||||
|
||||
return value
|
||||
|
||||
# insert data
|
||||
def insertData(self):
|
||||
tdLog.info("insert data ....")
|
||||
sqls = ""
|
||||
for i in range(self.childCnt):
|
||||
# insert child table
|
||||
values = ""
|
||||
pre_insert = f"insert into @db_name.t{i} values "
|
||||
for j in range(self.childRow):
|
||||
if values == "":
|
||||
values = self.getColsValue(i, j)
|
||||
else:
|
||||
values += "," + self.getColsValue(i, j)
|
||||
|
||||
# batch insert
|
||||
if j % self.batchSize == 0 and values != "":
|
||||
sql = pre_insert + values
|
||||
self.exeDouble(sql)
|
||||
values = ""
|
||||
# append last
|
||||
if values != "":
|
||||
sql = pre_insert + values
|
||||
self.exeDouble(sql)
|
||||
values = ""
|
||||
|
||||
# insert normal table
|
||||
for i in range(20):
|
||||
self.ts += 1000
|
||||
name = self.random_string(20)
|
||||
sql = f"insert into @db_name.ta values({self.ts}, {i}, {self.ts%100000}, '{name}', false)"
|
||||
self.exeDouble(sql)
|
||||
|
||||
# insert finished
|
||||
tdLog.info(f"insert data successfully.\n"
|
||||
f" inserted child table = {self.childCnt}\n"
|
||||
f" inserted child rows = {self.childRow}\n"
|
||||
f" total inserted rows = {self.childCnt*self.childRow}\n")
|
||||
return
|
||||
|
||||
def exeDouble(self, sql):
|
||||
# dbname replace
|
||||
sql1 = sql.replace("@db_name", self.db1)
|
||||
|
||||
if len(sql1) > 100:
|
||||
tdLog.info(sql1[:100])
|
||||
else:
|
||||
tdLog.info(sql1)
|
||||
tdSql.execute(sql1)
|
||||
|
||||
sql2 = sql.replace("@db_name", self.db2)
|
||||
if len(sql2) > 100:
|
||||
tdLog.info(sql2[:100])
|
||||
else:
|
||||
tdLog.info(sql2)
|
||||
tdSql.execute(sql2)
|
||||
|
||||
|
||||
# prepareEnv
|
||||
def prepareEnv(self):
|
||||
# init
|
||||
self.ts = 1680000000000
|
||||
self.childCnt = 4
|
||||
self.childRow = 10000
|
||||
self.batchSize = 50000
|
||||
self.vgroups1 = 1
|
||||
self.vgroups2 = 1
|
||||
self.db1 = "db1"
|
||||
self.db2 = "db2"
|
||||
|
||||
# total
|
||||
self.c1Cnt = 0
|
||||
self.c2Cnt = 0
|
||||
self.c2Max = None
|
||||
self.c2Min = None
|
||||
self.c2Sum = None
|
||||
|
||||
# create database db wal_retention_period 0
|
||||
sql = f"create database @db_name vgroups {self.vgroups1} replica {self.replicaVar} wal_retention_period 0 wal_retention_size 1"
|
||||
self.exeDouble(sql)
|
||||
|
||||
# create super table st
|
||||
sql = f"create table @db_name.st(ts timestamp, c1 int, c2 bigint, ts1 timestamp) tags(area int)"
|
||||
self.exeDouble(sql)
|
||||
|
||||
# create child table
|
||||
for i in range(self.childCnt):
|
||||
sql = f"create table @db_name.t{i} using @db_name.st tags({i}) "
|
||||
self.exeDouble(sql)
|
||||
|
||||
# create normal table
|
||||
sql = f"create table @db_name.ta(ts timestamp, c1 int, c2 bigint, c3 binary(32), c4 bool)"
|
||||
self.exeDouble(sql)
|
||||
|
||||
# insert data
|
||||
self.insertData()
|
||||
|
||||
# update
|
||||
self.ts = 1680000000000 + 20000
|
||||
self.childRow = 1000
|
||||
|
||||
|
||||
# delete data
|
||||
sql = "delete from @db_name.st where ts > 1680000019000 and ts < 1680000062000"
|
||||
self.exeDouble(sql)
|
||||
sql = "delete from @db_name.st where ts > 1680000099000 and ts < 1680000170000"
|
||||
self.exeDouble(sql)
|
||||
|
||||
# check data correct
|
||||
def checkExpect(self, sql, expectVal):
|
||||
tdSql.query(sql)
|
||||
rowCnt = tdSql.getRows()
|
||||
for i in range(rowCnt):
|
||||
val = tdSql.getData(i,0)
|
||||
if val != expectVal:
|
||||
tdLog.exit(f"Not expect . query={val} expect={expectVal} i={i} sql={sql}")
|
||||
return False
|
||||
|
||||
tdLog.info(f"check expect ok. sql={sql} expect ={expectVal} rowCnt={rowCnt}")
|
||||
return True
|
||||
|
||||
# init
|
||||
def init(self, conn, logSql, replicaVar=1):
|
||||
seed = time.time() % 10000
|
||||
random.seed(seed)
|
||||
self.replicaVar = int(replicaVar)
|
||||
tdLog.debug(f"start to excute {__file__}")
|
||||
tdSql.init(conn.cursor(), True)
|
||||
|
||||
# check query result same
|
||||
def queryDouble(self, sql):
|
||||
# sql
|
||||
sql1 = sql.replace('@db_name', self.db1)
|
||||
tdLog.info(sql1)
|
||||
start1 = time.time()
|
||||
rows1 = tdSql.query(sql1,queryTimes=2)
|
||||
spend1 = time.time() - start1
|
||||
res1 = copy.deepcopy(tdSql.queryResult)
|
||||
|
||||
sql2 = sql.replace('@db_name', self.db2)
|
||||
tdLog.info(sql2)
|
||||
start2 = time.time()
|
||||
tdSql.query(sql2,queryTimes=2)
|
||||
spend2 = time.time() - start2
|
||||
res2 = tdSql.queryResult
|
||||
|
||||
rowlen1 = len(res1)
|
||||
rowlen2 = len(res2)
|
||||
errCnt = 0
|
||||
|
||||
if rowlen1 != rowlen2:
|
||||
tdLog.exit(f"both row count not equal. rowlen1={rowlen1} rowlen2={rowlen2} ")
|
||||
return False
|
||||
|
||||
for i in range(rowlen1):
|
||||
row1 = res1[i]
|
||||
row2 = res2[i]
|
||||
collen1 = len(row1)
|
||||
collen2 = len(row2)
|
||||
if collen1 != collen2:
|
||||
tdLog.exit(f"both col count not equal. collen1={collen1} collen2={collen2}")
|
||||
return False
|
||||
for j in range(collen1):
|
||||
if row1[j] != row2[j]:
|
||||
tdLog.info(f"error both column value not equal. row={i} col={j} col1={row1[j]} col2={row2[j]} .")
|
||||
errCnt += 1
|
||||
|
||||
if errCnt > 0:
|
||||
tdLog.exit(f" db2 column value different with db2. different count ={errCnt} ")
|
||||
|
||||
# warning performance
|
||||
diff = (spend2 - spend1)*100/spend1
|
||||
tdLog.info("spend1=%.6fs spend2=%.6fs diff=%.1f%%"%(spend1, spend2, diff))
|
||||
if spend2 > spend1 and diff > 20:
|
||||
tdLog.info("warning: the diff for performance after spliting is over 20%")
|
||||
|
||||
return True
|
||||
|
||||
|
||||
# check result
|
||||
def checkResult(self):
|
||||
# check vgroupid
|
||||
sql = f"select vgroup_id from information_schema.ins_vgroups where db_name='{self.db2}'"
|
||||
tdSql.query(sql,queryTimes=2)
|
||||
tdSql.checkRows(self.vgroups2)
|
||||
|
||||
# check child table count same
|
||||
sql = "select table_name from information_schema.ins_tables where db_name='@db_name' order by table_name"
|
||||
self.queryDouble(sql)
|
||||
|
||||
# check row value is ok
|
||||
sql = "select * from @db_name.st order by ts, tbname"
|
||||
self.queryDouble(sql)
|
||||
|
||||
# where
|
||||
sql = "select *,tbname from @db_name.st where c1 < 1000 order by ts, tbname"
|
||||
self.queryDouble(sql)
|
||||
|
||||
# max
|
||||
sql = "select max(c1) from @db_name.st"
|
||||
self.queryDouble(sql)
|
||||
|
||||
# min
|
||||
sql = "select min(c2) from @db_name.st"
|
||||
self.queryDouble(sql)
|
||||
|
||||
# sum
|
||||
sql = "select sum(c1) from @db_name.st"
|
||||
self.queryDouble(sql)
|
||||
|
||||
# normal table
|
||||
|
||||
# count
|
||||
sql = "select count(*) from @db_name.ta"
|
||||
self.queryDouble(sql)
|
||||
|
||||
# all rows
|
||||
sql = "select * from @db_name.ta"
|
||||
self.queryDouble(sql)
|
||||
|
||||
# sum
|
||||
sql = "select sum(c1) from @db_name.ta"
|
||||
self.queryDouble(sql)
|
||||
|
||||
|
||||
# get vgroup list
|
||||
def getVGroup(self, db_name):
|
||||
vgidList = []
|
||||
sql = f"select vgroup_id from information_schema.ins_vgroups where db_name='{db_name}'"
|
||||
res = tdSql.getResult(sql)
|
||||
rows = len(res)
|
||||
for i in range(rows):
|
||||
vgidList.append(res[i][0])
|
||||
|
||||
return vgidList
|
||||
|
||||
# split vgroup on db2
|
||||
def splitVGroup(self, db_name):
|
||||
vgids = self.getVGroup(db_name)
|
||||
selid = random.choice(vgids)
|
||||
sql = f"split vgroup {selid}"
|
||||
tdLog.info(sql)
|
||||
tdSql.execute(sql)
|
||||
|
||||
# wait end
|
||||
seconds = 300
|
||||
for i in range(seconds):
|
||||
sql ="show transactions;"
|
||||
rows = tdSql.query(sql)
|
||||
if rows == 0:
|
||||
tdLog.info("split vgroup finished.")
|
||||
return True
|
||||
#tdLog.info(f"i={i} wait split vgroup ...")
|
||||
time.sleep(1)
|
||||
|
||||
tdLog.exit(f"split vgroup transaction is not finished after executing {seconds}s")
|
||||
return False
|
||||
|
||||
# split error
|
||||
def expectSplitError(self, dbName):
|
||||
vgids = self.getVGroup(dbName)
|
||||
selid = random.choice(vgids)
|
||||
sql = f"split vgroup {selid}"
|
||||
tdLog.info(sql)
|
||||
tdSql.error(sql)
|
||||
|
||||
# expect split ok
|
||||
def expectSplitOk(self, dbName):
|
||||
# split vgroup
|
||||
vgList1 = self.getVGroup(dbName)
|
||||
self.splitVGroup(dbName)
|
||||
vgList2 = self.getVGroup(dbName)
|
||||
vgNum1 = len(vgList1) + 1
|
||||
vgNum2 = len(vgList2)
|
||||
if vgNum1 != vgNum2:
|
||||
tdLog.exit(f" vglist len={vgNum1} is not same for expect {vgNum2}")
|
||||
return
|
||||
|
||||
# split empty database
|
||||
def splitEmptyDB(self):
|
||||
dbName = "emptydb"
|
||||
vgNum = 2
|
||||
# create database
|
||||
sql = f"create database {dbName} vgroups {vgNum} replica {self.replicaVar }"
|
||||
tdLog.info(sql)
|
||||
tdSql.execute(sql)
|
||||
|
||||
# split vgroup
|
||||
self.expectSplitOk(dbName)
|
||||
|
||||
|
||||
# forbid
|
||||
def checkForbid(self):
|
||||
# stream
|
||||
if platform.system().lower() != 'windows':
|
||||
tdLog.info("check forbid split having stream...")
|
||||
tdSql.execute("create database streamdb;")
|
||||
tdSql.execute("use streamdb;")
|
||||
tdSql.execute("create table ta(ts timestamp, age int);")
|
||||
tdSql.execute("create stream ma into sta as select count(*) from ta interval(1s);")
|
||||
self.expectSplitError("streamdb")
|
||||
tdSql.execute("drop stream ma;")
|
||||
self.expectSplitOk("streamdb")
|
||||
|
||||
# topic
|
||||
tdLog.info("check forbid split having topic...")
|
||||
tdSql.execute("create database topicdb wal_retention_period 10;")
|
||||
tdSql.execute("use topicdb;")
|
||||
tdSql.execute("create table ta(ts timestamp, age int);")
|
||||
tdSql.execute("create topic toa as select * from ta;")
|
||||
|
||||
#self.expectSplitError("topicdb")
|
||||
tdSql.execute("drop topic toa;")
|
||||
self.expectSplitOk("topicdb")
|
||||
|
||||
# compact and check db2
|
||||
def compactAndCheck(self):
|
||||
tdLog.info("compact db2 and check result ...")
|
||||
# compact
|
||||
tdSql.execute(f"compact database {self.db2};")
|
||||
# check result
|
||||
self.checkResult()
|
||||
|
||||
|
||||
def test_run(self):
|
||||
"""测试split vgroup
|
||||
|
||||
创建两个数据库,写入多条同样的数据并落盘内存中的数据,在db2上执行split vgroup,相同数据重写写入,比较两个数据库数据一致
|
||||
|
||||
Since: v3.0.6.0
|
||||
|
||||
Labels: vgroup
|
||||
|
||||
Jira: TD-12345,TS-1234
|
||||
|
||||
History:
|
||||
- 2024-2-6 Feng Chao Created
|
||||
|
||||
"""
|
||||
# run
|
||||
def run(self):
|
||||
# prepare env
|
||||
self.prepareEnv()
|
||||
tdLog.info("generate at least two stt files of the same fileset (e.g. v4f1944) for db2 and db1 ")
|
||||
for dbname in [self.db2, self.db1]:
|
||||
tdSql.execute(f'insert into {dbname}.t1 values("2023-03-28 10:40:00.010",103,103,"2023-03-28 18:41:39.999") ;')
|
||||
tdSql.execute(f'flush database {dbname}')
|
||||
tdSql.execute(f'insert into {dbname}.t1 values("2023-03-28 10:40:00.100",103,103,"2023-03-28 18:41:39.999") ;')
|
||||
tdSql.execute(f'flush database {dbname}')
|
||||
tdSql.execute(f'insert into {dbname}.t1 values("2023-03-28 10:40:00.100",103,103,"2023-03-28 18:41:39.999") ;')
|
||||
tdSql.execute(f'flush database {dbname}')
|
||||
tdLog.info("check db1 and db2 same after creating ...")
|
||||
|
||||
self.checkResult()
|
||||
|
||||
for i in range(3):
|
||||
# split vgroup on db2
|
||||
start = time.time()
|
||||
self.splitVGroup(self.db2)
|
||||
end = time.time()
|
||||
self.vgroups2 += 1
|
||||
|
||||
# insert the same data per table into the split vgroups
tdLog.info("insert the same data per table into the split vgroups(3,4)")
|
||||
for dbname in [self.db2, self.db1]:
|
||||
for tableid in range(self.childCnt):
|
||||
tdSql.execute(f'insert into {dbname}.t{tableid} values("2023-03-28 10:40:00.100",103,103,"2023-03-28 18:41:39.999") ;')
|
||||
tdSql.execute(f'flush database {dbname}')
|
||||
tdSql.execute(f'insert into {dbname}.ta values("2023-03-28 10:40:00.100",103,103,"2023-03-28 18:41:39.999",0);')
|
||||
tdSql.execute(f'flush database {dbname}')
|
||||
|
||||
# check two db query result same
|
||||
self.checkResult()
|
||||
spend = "%.3f"%(end-start)
|
||||
tdLog.info(f"split vgroup i={i} passed. spend = {spend}s")
|
||||
|
||||
# split empty db
|
||||
self.splitEmptyDB()
|
||||
|
||||
# check that topics and streams forbid split
|
||||
self.checkForbid()
|
||||
|
||||
# compact database
|
||||
self.compactAndCheck()
|
||||
|
||||
# stop
|
||||
def stop(self):
|
||||
tdSql.close()
|
||||
tdLog.success(f"{__file__} successfully executed")
|
||||
|
||||
|
||||
tdCases.addLinux(__file__, TestSplitVGroup())
tdCases.addWindows(__file__, TestSplitVGroup())
|
|
@@ -0,0 +1,114 @@
|
|||
#!/bin/bash
|
||||
|
||||
##################################################
|
||||
#
|
||||
# Do simulation test
|
||||
#
|
||||
##################################################
|
||||
|
||||
set +e
|
||||
#set -x
|
||||
if [[ "$OSTYPE" == "darwin"* ]]; then
|
||||
TD_OS="Darwin"
|
||||
else
|
||||
OS=$(cat /etc/*-release | grep "^NAME=" | cut -d= -f2)
|
||||
len=$(echo ${#OS})
|
||||
len=$((len - 2))
|
||||
TD_OS=$(echo -ne ${OS:1:${len}} | cut -d" " -f1)
|
||||
fi
|
||||
|
||||
UNAME_BIN=$(which uname)
|
||||
OS_TYPE=$($UNAME_BIN)
|
||||
|
||||
cd .
|
||||
|
||||
# Get the relevant directories
|
||||
CODE_DIR=$(dirname $0)
|
||||
CODE_DIR=$(pwd)
|
||||
|
||||
IN_TDINTERNAL="community"
|
||||
if [[ "$CODE_DIR" == *"$IN_TDINTERNAL"* ]]; then
|
||||
cd ../../..
|
||||
else
|
||||
cd ../../
|
||||
fi
|
||||
|
||||
TOP_DIR=$(pwd)
|
||||
TAOSD_DIR=$(find . -name "taosd" | grep bin | head -n1)
|
||||
|
||||
cut_opt="-f "
|
||||
|
||||
if [[ "$TAOSD_DIR" == *"$IN_TDINTERNAL"* ]]; then
|
||||
BIN_DIR=$(find . -name "taosd" | grep bin | head -n1 | cut -d '/' ${cut_opt}2,3)
|
||||
else
|
||||
BIN_DIR=$(find . -name "taosd" | grep bin | head -n1 | cut -d '/' ${cut_opt}2)
|
||||
fi
|
||||
|
||||
declare -x BUILD_DIR=$TOP_DIR/$BIN_DIR
|
||||
declare -x SIM_DIR=$TOP_DIR/sim
|
||||
PROGRAM=$BUILD_DIR/build/bin/tsim
|
||||
PRG_DIR=$SIM_DIR/tsim
|
||||
ASAN_DIR=$SIM_DIR/asan
|
||||
|
||||
chmod -R 777 $PRG_DIR
|
||||
echo "------------------------------------------------------------------------"
|
||||
echo "Start TDengine Testing Case ..."
|
||||
echo "BUILD_DIR: $BUILD_DIR"
|
||||
echo "SIM_DIR : $SIM_DIR"
|
||||
echo "CODE_DIR : $CODE_DIR"
|
||||
echo "ASAN_DIR : $ASAN_DIR"
|
||||
|
||||
# prevent delete / folder or /usr/bin
|
||||
if [ ${#SIM_DIR} -lt 10 ]; then
|
||||
echo "len(SIM_DIR) < 10 , danger so exit. SIM_DIR=$SIM_DIR"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
rm -rf "${SIM_DIR:?}"/*
|
||||
|
||||
mkdir -p $PRG_DIR
|
||||
mkdir -p $ASAN_DIR
|
||||
|
||||
cd "$CODE_DIR" || exit
|
||||
ulimit -n 600000
|
||||
ulimit -c unlimited
|
||||
|
||||
#sudo sysctl -w kernel.core_pattern=$TOP_DIR/core.%p.%e
|
||||
|
||||
echo "ExcuteCmd:" $*
|
||||
|
||||
if [[ "$TD_OS" == "Alpine" ]]; then
|
||||
"$@"
|
||||
else
|
||||
AsanFile=$ASAN_DIR/psim.info
|
||||
echo "AsanFile:" "$AsanFile"
|
||||
|
||||
unset LD_PRELOAD
|
||||
#export LD_PRELOAD=libasan.so.5
|
||||
#export LD_PRELOAD=$(gcc -print-file-name=libasan.so)
|
||||
export LD_PRELOAD="$(realpath "$(gcc -print-file-name=libasan.so)") $(realpath "$(gcc -print-file-name=libstdc++.so)")"
|
||||
echo "Preload AsanSo:" $?
|
||||
|
||||
$* -a 2> $AsanFile
|
||||
cat $AsanFile
|
||||
unset LD_PRELOAD
|
||||
for ((i = 1; i <= 20; i++)); do
|
||||
AsanFileLen=$(cat $AsanFile | wc -l)
|
||||
echo "AsanFileLen:" $AsanFileLen
|
||||
if [ $AsanFileLen -gt 10 ]; then
|
||||
break
|
||||
fi
|
||||
sleep 1
|
||||
done
|
||||
# check case successful
|
||||
AsanFileSuccessLen=$(grep -w "successfully executed" $AsanFile | wc -l)
|
||||
echo "AsanFileSuccessLen:" $AsanFileSuccessLen
|
||||
|
||||
if [ $AsanFileSuccessLen -gt 0 ]; then
|
||||
echo "Execute script successfully and check asan"
|
||||
$CODE_DIR/../script/sh/checkAsan.sh
|
||||
else
|
||||
echo "Execute script failure"
|
||||
exit 1
|
||||
fi
|
||||
fi
|
|
@@ -0,0 +1,628 @@
|
|||
import taos
|
||||
import sys
|
||||
import datetime
|
||||
import inspect
|
||||
import math
|
||||
from util.log import *
|
||||
from util.sql import *
|
||||
from util.cases import *
|
||||
|
||||
|
||||
class TestSin:
|
||||
|
||||
def init(self, conn, logSql, replicaVar=1):
|
||||
self.replicaVar = int(replicaVar)
|
||||
tdLog.debug(f"start to excute {__file__}")
|
||||
tdSql.init(conn.cursor())
|
||||
|
||||
def prepare_datas(self, dbname="db"):
|
||||
tdSql.execute(
|
||||
f'''create table {dbname}.stb1
|
||||
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
|
||||
tags (t1 int)
|
||||
'''
|
||||
)
|
||||
|
||||
tdSql.execute(
|
||||
f'''
|
||||
create table {dbname}.t1
|
||||
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
|
||||
'''
|
||||
)
|
||||
for i in range(4):
|
||||
tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )')
|
||||
|
||||
for i in range(9):
|
||||
tdSql.execute(
|
||||
f"insert into {dbname}.ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
|
||||
)
|
||||
tdSql.execute(
|
||||
f"insert into {dbname}.ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
|
||||
)
|
||||
tdSql.execute(f"insert into {dbname}.ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
|
||||
tdSql.execute(f"insert into {dbname}.ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
|
||||
tdSql.execute(f"insert into {dbname}.ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
|
||||
tdSql.execute(f"insert into {dbname}.ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
|
||||
|
||||
tdSql.execute(f"insert into {dbname}.ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
|
||||
tdSql.execute(f"insert into {dbname}.ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
|
||||
tdSql.execute(f"insert into {dbname}.ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
|
||||
|
||||
tdSql.execute(
|
||||
f'''insert into {dbname}.t1 values
|
||||
( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
|
||||
( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a )
|
||||
( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a )
|
||||
( '2021-01-01 01:01:06.000', 3, 33333, 333, 33, 3.33, 33.33, 0, "binary3", "nchar3", now()+3a )
|
||||
( '2021-05-07 01:01:10.000', 4, 44444, 444, 44, 4.44, 44.44, 1, "binary4", "nchar4", now()+4a )
|
||||
( '2021-07-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
|
||||
( '2021-09-30 01:01:16.000', 5, 55555, 555, 55, 5.55, 55.55, 0, "binary5", "nchar5", now()+5a )
|
||||
( '2022-02-01 01:01:20.000', 6, 66666, 666, 66, 6.66, 66.66, 1, "binary6", "nchar6", now()+6a )
|
||||
( '2022-10-28 01:01:26.000', 7, 00000, 000, 00, 0.00, 00.00, 1, "binary7", "nchar7", "1970-01-01 08:00:00.000" )
|
||||
( '2022-12-01 01:01:30.000', 8, -88888, -888, -88, -8.88, -88.88, 0, "binary8", "nchar8", "1969-01-01 01:00:00.000" )
|
||||
( '2022-12-31 01:01:36.000', 9, -99999999999999999, -999, -99, -9.99, -999999999999999999999.99, 1, "binary9", "nchar9", "1900-01-01 00:00:00.000" )
|
||||
( '2023-02-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
|
||||
'''
|
||||
)
|
||||
|
||||
def check_result_auto_sin(self ,origin_query , pow_query):
|
||||
|
||||
pow_result = tdSql.getResult(pow_query)
|
||||
|
||||
origin_result = tdSql.getResult(origin_query)
|
||||
|
||||
auto_result =[]
|
||||
|
||||
for row in origin_result:
|
||||
row_check = []
|
||||
for elem in row:
|
||||
if elem is not None:
elem = math.sin(elem)
|
||||
row_check.append(elem)
|
||||
auto_result.append(row_check)
|
||||
tdSql.query(pow_query)
|
||||
for row_index , row in enumerate(pow_result):
|
||||
for col_index , elem in enumerate(row):
|
||||
tdSql.checkData(row_index ,col_index ,auto_result[row_index][col_index])
|
||||
|
||||
|
||||
def test_errors(self, dbname="db"):
|
||||
"""测试sin()函数error
|
||||
|
||||
执行非法select语句包含sin()函数,返回预期错误
|
||||
|
||||
Since: v3.3.0.0
|
||||
|
||||
Labels: sin, negative
|
||||
|
||||
Jira: TD-12345,TS-1234
|
||||
|
||||
History:
|
||||
- 2024-2-6 Feng Chao Created
|
||||
|
||||
"""
|
||||
error_sql_lists = [
|
||||
f"select sin from {dbname}.t1",
|
||||
# f"select sin(-+--+c1 ) from {dbname}.t1",
|
||||
# f"select +-sin(c1) from {dbname}.t1",
|
||||
# f"select ++-sin(c1) from {dbname}.t1",
|
||||
# f"select ++--sin(c1) from {dbname}.t1",
|
||||
# f"select - -sin(c1)*0 from {dbname}.t1",
|
||||
# f"select sin(tbname+1) from {dbname}.t1 ",
|
||||
f"select sin(123--123)==1 from {dbname}.t1",
|
||||
f"select sin(c1) as 'd1' from {dbname}.t1",
|
||||
f"select sin(c1 ,c2) from {dbname}.t1",
|
||||
f"select sin(c1 ,NULL ) from {dbname}.t1",
|
||||
f"select sin(,) from {dbname}.t1;",
|
||||
f"select sin(sin(c1) ab from {dbname}.t1)",
|
||||
f"select sin(c1 ) as int from {dbname}.t1",
|
||||
f"select sin from {dbname}.stb1",
|
||||
# f"select sin(-+--+c1) from {dbname}.stb1",
|
||||
# f"select +-sin(c1) from {dbname}.stb1",
|
||||
# f"select ++-sin(c1) from {dbname}.stb1",
|
||||
# f"select ++--sin(c1) from {dbname}.stb1",
|
||||
# f"select - -sin(c1)*0 from {dbname}.stb1",
|
||||
# f"select sin(tbname+1) from {dbname}.stb1 ",
|
||||
f"select sin(123--123)==1 from {dbname}.stb1",
|
||||
f"select sin(c1) as 'd1' from {dbname}.stb1",
|
||||
f"select sin(c1 ,c2 ) from {dbname}.stb1",
|
||||
f"select sin(c1 ,NULL) from {dbname}.stb1",
|
||||
f"select sin(,) from {dbname}.stb1;",
|
||||
f"select sin(sin(c1) ab from {dbname}.stb1)",
|
||||
f"select sin(c1) as int from {dbname}.stb1"
|
||||
]
|
||||
for error_sql in error_sql_lists:
|
||||
tdSql.error(error_sql)
|
||||
|
||||
|
||||
    def test_types(self):
        """Test data types accepted by sin()

        Select statements pass different data types into sin(); unsupported
        types return the expected errors and supported types execute successfully.

        Since: v3.3.0.0

        Labels: sin, dataType

        Jira: TD-12345,TS-1234

        History:
            - 2024-2-6 Feng Chao Created
            - 2024-2-7 Hong updated for feature TD-23456

        """
        self.support_types()

def support_types(self, dbname="db"):
|
||||
type_error_sql_lists = [
|
||||
f"select sin(ts) from {dbname}.t1" ,
|
||||
f"select sin(c7) from {dbname}.t1",
|
||||
f"select sin(c8) from {dbname}.t1",
|
||||
f"select sin(c9) from {dbname}.t1",
|
||||
f"select sin(ts) from {dbname}.ct1" ,
|
||||
f"select sin(c7) from {dbname}.ct1",
|
||||
f"select sin(c8) from {dbname}.ct1",
|
||||
f"select sin(c9) from {dbname}.ct1",
|
||||
f"select sin(ts) from {dbname}.ct3" ,
|
||||
f"select sin(c7) from {dbname}.ct3",
|
||||
f"select sin(c8) from {dbname}.ct3",
|
||||
f"select sin(c9) from {dbname}.ct3",
|
||||
f"select sin(ts) from {dbname}.ct4" ,
|
||||
f"select sin(c7) from {dbname}.ct4",
|
||||
f"select sin(c8) from {dbname}.ct4",
|
||||
f"select sin(c9) from {dbname}.ct4",
|
||||
f"select sin(ts) from {dbname}.stb1" ,
|
||||
f"select sin(c7) from {dbname}.stb1",
|
||||
f"select sin(c8) from {dbname}.stb1",
|
||||
f"select sin(c9) from {dbname}.stb1" ,
|
||||
|
||||
f"select sin(ts) from {dbname}.stbbb1" ,
|
||||
f"select sin(c7) from {dbname}.stbbb1",
|
||||
|
||||
f"select sin(ts) from {dbname}.tbname",
|
||||
f"select sin(c9) from {dbname}.tbname"
|
||||
|
||||
]
|
||||
|
||||
for type_sql in type_error_sql_lists:
|
||||
tdSql.error(type_sql)
|
||||
|
||||
|
||||
type_sql_lists = [
|
||||
f"select sin(c1) from {dbname}.t1",
|
||||
f"select sin(c2) from {dbname}.t1",
|
||||
f"select sin(c3) from {dbname}.t1",
|
||||
f"select sin(c4) from {dbname}.t1",
|
||||
f"select sin(c5) from {dbname}.t1",
|
||||
f"select sin(c6) from {dbname}.t1",
|
||||
|
||||
f"select sin(c1) from {dbname}.ct1",
|
||||
f"select sin(c2) from {dbname}.ct1",
|
||||
f"select sin(c3) from {dbname}.ct1",
|
||||
f"select sin(c4) from {dbname}.ct1",
|
||||
f"select sin(c5) from {dbname}.ct1",
|
||||
f"select sin(c6) from {dbname}.ct1",
|
||||
|
||||
f"select sin(c1) from {dbname}.ct3",
|
||||
f"select sin(c2) from {dbname}.ct3",
|
||||
f"select sin(c3) from {dbname}.ct3",
|
||||
f"select sin(c4) from {dbname}.ct3",
|
||||
f"select sin(c5) from {dbname}.ct3",
|
||||
f"select sin(c6) from {dbname}.ct3",
|
||||
|
||||
f"select sin(c1) from {dbname}.stb1",
|
||||
f"select sin(c2) from {dbname}.stb1",
|
||||
f"select sin(c3) from {dbname}.stb1",
|
||||
f"select sin(c4) from {dbname}.stb1",
|
||||
f"select sin(c5) from {dbname}.stb1",
|
||||
f"select sin(c6) from {dbname}.stb1",
|
||||
|
||||
f"select sin(c6) as alisb from {dbname}.stb1",
|
||||
f"select sin(c6) alisb from {dbname}.stb1",
|
||||
]
|
||||
|
||||
for type_sql in type_sql_lists:
|
||||
tdSql.query(type_sql)
|
||||
|
||||
    def test_basic_sin_function(self):
        """Test basic sin() functionality

        Select statements containing sin() query an empty table, a child table,
        a regular table and a non-existent table; nested sin() queries all
        succeed, while mixing sin() with aggregate functions fails as expected.

        Since: v3.3.0.0

        Labels: sin

        Jira: TD-12345,TS-1234

        History:
            - 2024-2-6 Feng Chao Created

        """
        self.basic_sin_function()

def basic_sin_function(self, dbname="db"):
|
||||
|
||||
# basic query
|
||||
tdSql.query(f"select c1 from {dbname}.ct3")
|
||||
tdSql.checkRows(0)
|
||||
tdSql.query(f"select c1 from {dbname}.t1")
|
||||
tdSql.checkRows(12)
|
||||
tdSql.query(f"select c1 from {dbname}.stb1")
|
||||
tdSql.checkRows(25)
|
||||
|
||||
# used for empty table , ct3 is empty
|
||||
tdSql.query(f"select sin(c1) from {dbname}.ct3")
|
||||
tdSql.checkRows(0)
|
||||
tdSql.query(f"select sin(c2) from {dbname}.ct3")
|
||||
tdSql.checkRows(0)
|
||||
tdSql.query(f"select sin(c3) from {dbname}.ct3")
|
||||
tdSql.checkRows(0)
|
||||
tdSql.query(f"select sin(c4) from {dbname}.ct3")
|
||||
tdSql.checkRows(0)
|
||||
tdSql.query(f"select sin(c5) from {dbname}.ct3")
|
||||
tdSql.checkRows(0)
|
||||
tdSql.query(f"select sin(c6) from {dbname}.ct3")
|
||||
tdSql.checkRows(0)
|
||||
|
||||
|
||||
# # used for regular table
|
||||
tdSql.query(f"select sin(c1) from {dbname}.t1")
|
||||
tdSql.checkData(0, 0, None)
|
||||
tdSql.checkData(1 , 0, 0.841470985)
|
||||
tdSql.checkData(3 , 0, 0.141120008)
|
||||
tdSql.checkData(5 , 0, None)
|
||||
|
||||
tdSql.query(f"select c1, c2, c3 , c4, c5 from {dbname}.t1")
|
||||
tdSql.checkData(1, 4, 1.11000)
|
||||
tdSql.checkData(3, 3, 33)
|
||||
tdSql.checkData(5, 4, None)
|
||||
|
||||
tdSql.query(f"select ts,c1, c2, c3 , c4, c5 from {dbname}.t1")
|
||||
tdSql.checkData(1, 5, 1.11000)
|
||||
tdSql.checkData(3, 4, 33)
|
||||
tdSql.checkData(5, 5, None)
|
||||
|
||||
self.check_result_auto_sin( f"select abs(c1), abs(c2), abs(c3) , abs(c4), abs(c5) from {dbname}.t1", f"select sin(abs(c1)), sin(abs(c2)) ,sin(abs(c3)), sin(abs(c4)), sin(abs(c5)) from {dbname}.t1")
|
||||
|
||||
# used for sub table
|
||||
tdSql.query(f"select c2 ,sin(c2) from {dbname}.ct1")
|
||||
tdSql.checkData(0, 1, -0.220708349)
|
||||
tdSql.checkData(1 , 1, -0.556921845)
|
||||
tdSql.checkData(3 , 1, -0.798311364)
|
||||
tdSql.checkData(4 , 1, 0.000000000)
|
||||
|
||||
tdSql.query(f"select c1, c5 ,sin(c5) from {dbname}.ct4")
|
||||
tdSql.checkData(0 , 2, None)
|
||||
tdSql.checkData(1 , 2, 0.518228108)
|
||||
tdSql.checkData(2 , 2, 0.996475613)
|
||||
tdSql.checkData(3 , 2, 0.367960369)
|
||||
tdSql.checkData(5 , 2, None)
|
||||
|
||||
self.check_result_auto_sin( f"select c1, c2, c3 , c4, c5 from {dbname}.ct1", f"select sin(c1), sin(c2) ,sin(c3), sin(c4), sin(c5) from {dbname}.ct1")
|
||||
|
||||
# nest query for sin functions
|
||||
tdSql.query(f"select c4 , sin(c4) ,sin(sin(c4)) , sin(sin(sin(c4))) from {dbname}.ct1;")
|
||||
tdSql.checkData(0 , 0 , 88)
|
||||
tdSql.checkData(0 , 1 , 0.035398303)
|
||||
tdSql.checkData(0 , 2 , 0.035390911)
|
||||
tdSql.checkData(0 , 3 , 0.035383523)
|
||||
|
||||
tdSql.checkData(1 , 0 , 77)
|
||||
tdSql.checkData(1 , 1 , 0.999520159)
|
||||
tdSql.checkData(1 , 2 , 0.841211629)
|
||||
tdSql.checkData(1 , 3 , 0.745451290)
|
||||
|
||||
tdSql.checkData(11 , 0 , -99)
|
||||
tdSql.checkData(11 , 1 , 0.999206834)
|
||||
tdSql.checkData(11 , 2 , 0.841042171)
|
||||
tdSql.checkData(11 , 3 , 0.745338326)
|
||||
|
||||
# used for stable table
|
||||
|
||||
tdSql.query(f"select sin(c1) from {dbname}.stb1")
|
||||
tdSql.checkRows(25)
|
||||
|
||||
|
||||
# used for not exists table
|
||||
tdSql.error(f"select sin(c1) from {dbname}.stbbb1")
|
||||
tdSql.error(f"select sin(c1) from {dbname}.tbname")
|
||||
tdSql.error(f"select sin(c1) from {dbname}.ct5")
|
||||
|
||||
# mix with common col
|
||||
tdSql.query(f"select c1, sin(c1) from {dbname}.ct1")
|
||||
tdSql.query(f"select c2, sin(c2) from {dbname}.ct4")
|
||||
|
||||
|
||||
# mix with common functions
|
||||
tdSql.query(f"select c1, sin(c1),sin(c1), sin(sin(c1)) from {dbname}.ct4 ")
|
||||
tdSql.checkData(0 , 0 ,None)
|
||||
tdSql.checkData(0 , 1 ,None)
|
||||
tdSql.checkData(0 , 2 ,None)
|
||||
tdSql.checkData(0 , 3 ,None)
|
||||
|
||||
tdSql.checkData(3 , 0 , 6)
|
||||
tdSql.checkData(3 , 1 ,-0.279415498)
|
||||
tdSql.checkData(3 , 2 ,-0.279415498)
|
||||
tdSql.checkData(3 , 3 ,-0.275793863)
|
||||
|
||||
tdSql.query(f"select c1, sin(c1),c5, floor(c5) from {dbname}.stb1 ")
|
||||
|
||||
# # mix with agg functions , not support
|
||||
tdSql.error(f"select c1, sin(c1),c5, count(c5) from {dbname}.stb1 ")
|
||||
tdSql.error(f"select c1, sin(c1),c5, count(c5) from {dbname}.ct1 ")
|
||||
tdSql.error(f"select sin(c1), count(c5) from {dbname}.stb1 ")
|
||||
tdSql.error(f"select sin(c1), count(c5) from {dbname}.ct1 ")
|
||||
tdSql.error(f"select c1, count(c5) from {dbname}.ct1 ")
|
||||
tdSql.error(f"select c1, count(c5) from {dbname}.stb1 ")
|
||||
|
||||
# agg functions mix with agg functions
|
||||
|
||||
tdSql.query(f"select max(c5), count(c5) from {dbname}.stb1")
|
||||
tdSql.query(f"select max(c5), count(c5) from {dbname}.ct1")
|
||||
|
||||
|
||||
# # bug fix for compute
|
||||
tdSql.query(f"select c1, sin(c1) -0 ,sin(c1-4)-0 from {dbname}.ct4 ")
|
||||
tdSql.checkData(0, 0, None)
|
||||
tdSql.checkData(0, 1, None)
|
||||
tdSql.checkData(0, 2, None)
|
||||
tdSql.checkData(1, 0, 8)
|
||||
tdSql.checkData(1, 1, 0.989358247)
|
||||
tdSql.checkData(1, 2, -0.756802495)
|
||||
|
||||
tdSql.query(f"select c1, sin(c1) -0 ,sin(c1-0.1)-0.1 from {dbname}.ct4")
|
||||
tdSql.checkData(0, 0, None)
|
||||
tdSql.checkData(0, 1, None)
|
||||
tdSql.checkData(0, 2, None)
|
||||
tdSql.checkData(1, 0, 8)
|
||||
tdSql.checkData(1, 1, 0.989358247)
|
||||
tdSql.checkData(1, 2, 0.898941342)
|
||||
|
||||
tdSql.query(f"select c1, sin(c1), c2, sin(c2), c3, sin(c3) from {dbname}.ct1")
|
||||
|
||||
def test_big_number(self, dbname="db"):
|
||||
"""测试sin()函数大数参数
|
||||
|
||||
使用包含sin()函数的select语句查询,参数为几十位的double参数,返回结果正确
|
||||
|
||||
Since: v3.3.0.0
|
||||
|
||||
Labels: sin
|
||||
|
||||
Jira: TD-12345,TS-1234
|
||||
|
||||
History:
|
||||
- 2024-2-6 Feng Chao Created
|
||||
|
||||
"""
|
||||
|
||||
tdSql.query(f"select c1, sin(100000000) from {dbname}.ct1") # bigint to double data overflow
|
||||
tdSql.checkData(4, 1, math.sin(100000000))
|
||||
|
||||
|
||||
tdSql.query(f"select c1, sin(10000000000000) from {dbname}.ct1") # bigint to double data overflow
|
||||
tdSql.checkData(4, 1, math.sin(10000000000000))
|
||||
|
||||
tdSql.query(f"select c1, sin(10000000000000000000000000) from {dbname}.ct1") # bigint to double data overflow
|
||||
tdSql.query(f"select c1, sin(10000000000000000000000000.0) from {dbname}.ct1") # 10000000000000000000000000.0 is a double value
|
||||
tdSql.checkData(1, 1, math.sin(10000000000000000000000000.0))
|
||||
|
||||
tdSql.query(f"select c1, sin(10000000000000000000000000000000000) from {dbname}.ct1") # bigint to double data overflow
|
||||
tdSql.query(f"select c1, sin(10000000000000000000000000000000000.0) from {dbname}.ct1") # 10000000000000000000000000.0 is a double value
|
||||
tdSql.checkData(4, 1, math.sin(10000000000000000000000000000000000.0))
|
||||
|
||||
tdSql.query(f"select c1, sin(10000000000000000000000000000000000000000) from {dbname}.ct1") # bigint to double data overflow
|
||||
tdSql.query(f"select c1, sin(10000000000000000000000000000000000000000.0) from {dbname}.ct1") # 10000000000000000000000000.0 is a double value
|
||||
|
||||
tdSql.checkData(4, 1, math.sin(10000000000000000000000000000000000000000.0))
|
||||
|
||||
tdSql.query(f"select c1, sin(10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000) from {dbname}.ct1") # bigint to double data overflow
    def test_func_filter(self):
        """Test sin() in filters

        Queries whose where clause contains sin() return the correct results.

        Since: v3.3.0.0

        Labels: sin

        Jira: TD-12345,TS-1234

        History:
            - 2024-2-6 Feng Chao Created

        """
        self.abs_func_filter()

def abs_func_filter(self, dbname="db"):
|
||||
tdSql.query(f"select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(sin(c1)-0.5) from {dbname}.ct4 where c1>5 ")
|
||||
tdSql.checkRows(3)
|
||||
tdSql.checkData(0,0,8)
|
||||
tdSql.checkData(0,1,8.000000000)
|
||||
tdSql.checkData(0,2,8.000000000)
|
||||
tdSql.checkData(0,3,7.900000000)
|
||||
tdSql.checkData(0,4,1.000000000)
|
||||
|
||||
tdSql.query(f"select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(sin(c1)-0.5) from {dbname}.ct4 where c1=5 ")
|
||||
tdSql.checkRows(1)
|
||||
tdSql.checkData(0,0,5)
|
||||
tdSql.checkData(0,1,5.000000000)
|
||||
tdSql.checkData(0,2,5.000000000)
|
||||
tdSql.checkData(0,3,4.900000000)
|
||||
tdSql.checkData(0,4,-1.000000000)
|
||||
|
||||
tdSql.query(f"select c1,c2 , abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(sin(c1)-0.5) from {dbname}.ct4 where c1=sin(c1) limit 1 ")
|
||||
tdSql.checkRows(1)
|
||||
tdSql.checkData(0,0,0)
|
||||
tdSql.checkData(0,1,0)
|
||||
tdSql.checkData(0,2,0.000000000)
|
||||
tdSql.checkData(0,3,0.000000000)
|
||||
tdSql.checkData(0,4,-0.100000000)
|
||||
tdSql.checkData(0,5,0.000000000)
|
||||
|
||||
|
||||
    def test_sin_boundary(self):
        """Test sin() with boundary values

        Select statements containing sin() pass in boundary values of the
        different numeric types; all queries return successfully.

        Since: v3.3.0.0

        Labels: sin

        Jira: TD-12345,TS-1234

        History:
            - 2024-2-6 Feng Chao Created

        """
        self.check_boundary_values()

def check_boundary_values(self, dbname="testdb"):
|
||||
|
||||
PI=3.1415926
|
||||
|
||||
tdSql.execute(f"drop database if exists {dbname}")
|
||||
tdSql.execute(f"create database if not exists {dbname}")
|
||||
tdSql.execute(
|
||||
f"create table {dbname}.stb_bound (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) tags (t1 int);"
|
||||
)
|
||||
tdSql.execute(f'create table {dbname}.sub1_bound using {dbname}.stb_bound tags ( 1 )')
|
||||
tdSql.execute(
|
||||
f"insert into {dbname}.sub1_bound values ( now()-10s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
)
|
||||
tdSql.execute(
|
||||
f"insert into {dbname}.sub1_bound values ( now()-5s, -2147483647, -9223372036854775807, -32767, -127, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
)
|
||||
tdSql.execute(
|
||||
f"insert into {dbname}.sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
)
|
||||
tdSql.execute(
|
||||
f"insert into {dbname}.sub1_bound values ( now()+5s, -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
)
|
||||
tdSql.error(
|
||||
f"insert into {dbname}.sub1_bound values ( now()+10s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
)
|
||||
self.check_result_auto_sin( f"select abs(c1), abs(c2), abs(c3) , abs(c4) from {dbname}.sub1_bound ", f"select sin(abs(c1)), sin(abs(c2)) ,sin(abs(c3)), sin(abs(c4)) from {dbname}.sub1_bound")
|
||||
|
||||
self.check_result_auto_sin( f"select c1, c2, c3 , c3, c2 ,c1 from {dbname}.sub1_bound ", f"select sin(c1), sin(c2) ,sin(c3), sin(c3), sin(c2) ,sin(c1) from {dbname}.sub1_bound")
|
||||
|
||||
self.check_result_auto_sin(f"select abs(abs(abs(abs(abs(abs(abs(abs(abs(c1))))))))) nest_col_func from {dbname}.sub1_bound" , f"select sin(abs(c1)) from {dbname}.sub1_bound" )
|
||||
|
||||
# check basic elem for table per row
|
||||
tdSql.query(f"select sin(abs(c1)) ,sin(abs(c2)) , sin(abs(c3)) , sin(abs(c4)), sin(abs(c5)), sin(abs(c6)) from {dbname}.sub1_bound ")
|
||||
tdSql.checkData(0,0,math.sin(2147483647))
|
||||
tdSql.checkData(0,1,math.sin(9223372036854775807))
|
||||
tdSql.checkData(0,2,math.sin(32767))
|
||||
tdSql.checkData(0,3,math.sin(127))
|
||||
tdSql.checkData(0,4,math.sin(339999995214436424907732413799364296704.00000))
|
||||
tdSql.checkData(1,0,math.sin(2147483647))
|
||||
tdSql.checkData(1,1,math.sin(9223372036854775807))
|
||||
tdSql.checkData(1,2,math.sin(32767))
|
||||
tdSql.checkData(1,3,math.sin(127))
|
||||
tdSql.checkData(1,4,math.sin(339999995214436424907732413799364296704.00000))
|
||||
tdSql.checkData(3,0,math.sin(2147483646))
|
||||
tdSql.checkData(3,1,math.sin(9223372036854775806))
|
||||
tdSql.checkData(3,2,math.sin(32766))
|
||||
tdSql.checkData(3,3,math.sin(126))
|
||||
tdSql.checkData(3,4,math.sin(339999995214436424907732413799364296704.00000))
|
||||
|
||||
# check + - * / in functions
|
||||
tdSql.query(f"select sin(abs(c1+1)) ,sin(abs(c2)) , sin(abs(c3*1)) , sin(abs(c4/2)), sin(abs(c5))/2, sin(abs(c6)) from {dbname}.sub1_bound ")
|
||||
tdSql.checkData(0,0,math.sin(2147483648.000000000))
|
||||
tdSql.checkData(0,1,math.sin(9223372036854775807))
|
||||
tdSql.checkData(0,2,math.sin(32767.000000000))
|
||||
tdSql.checkData(0,3,math.sin(63.500000000))
|
||||
|
||||
tdSql.execute(f"create stable {dbname}.st (ts timestamp, num1 float, num2 double) tags (t1 int);")
|
||||
tdSql.execute(f'create table {dbname}.tb1 using {dbname}.st tags (1)')
|
||||
tdSql.execute(f'create table {dbname}.tb2 using {dbname}.st tags (2)')
|
||||
tdSql.execute(f'create table {dbname}.tb3 using {dbname}.st tags (3)')
|
||||
tdSql.execute(f'insert into {dbname}.tb1 values (now()-40s, {PI/2}, {PI/2})')
|
||||
tdSql.execute(f'insert into {dbname}.tb1 values (now()-30s, {PI}, {PI})')
|
||||
tdSql.execute(f'insert into {dbname}.tb1 values (now()-20s, {PI*1.5}, {PI*1.5})')
|
||||
tdSql.execute(f'insert into {dbname}.tb1 values (now()-10s, {PI*2}, {PI*2})')
|
||||
tdSql.execute(f'insert into {dbname}.tb1 values (now(), {PI*2.5}, {PI*2.5})')
|
||||
|
||||
tdSql.execute(f'insert into {dbname}.tb2 values (now()-40s, {PI/2}, {PI/2})')
|
||||
tdSql.execute(f'insert into {dbname}.tb2 values (now()-30s, {PI}, {PI})')
|
||||
tdSql.execute(f'insert into {dbname}.tb2 values (now()-20s, {PI*1.5}, {PI*1.5})')
|
||||
tdSql.execute(f'insert into {dbname}.tb2 values (now()-10s, {PI*2}, {PI*2})')
|
||||
tdSql.execute(f'insert into {dbname}.tb2 values (now(), {PI*2.5}, {PI*2.5})')
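
        # With PI truncated to 3.1415926 the expected values are approximately
        # sin(PI/2) = 1.0, sin(PI) = 5.4e-8 (not exactly 0), sin(PI*1.5) = -1.0
        # and sin(PI*2) = -1.1e-7; check_result_auto_sin recomputes them with
        # math.sin, so the truncated constant does not affect the comparison.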

        self.check_result_auto_sin(f"select num1,num2 from {dbname}.tb3;", f"select sin(num1),sin(num2) from {dbname}.tb3")

    def test_super_table(self):
        """Test sin() on super tables

        Select statements containing sin() query a super table successfully.

        Since: v3.3.0.0

        Labels: sin

        Jira: TD-12345,TS-1234

        History:
            - 2024-2-6 Feng Chao Created

        """
        self.support_super_table_test()

def support_super_table_test(self, dbname="db"):
|
||||
self.check_result_auto_sin( f"select c5 from {dbname}.stb1 order by ts " , f"select sin(c5) from {dbname}.stb1 order by ts" )
|
||||
self.check_result_auto_sin( f"select c5 from {dbname}.stb1 order by tbname " , f"select sin(c5) from {dbname}.stb1 order by tbname" )
|
||||
self.check_result_auto_sin( f"select c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select sin(c5) from {dbname}.stb1 where c1 > 0 order by tbname" )
|
||||
self.check_result_auto_sin( f"select c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select sin(c5) from {dbname}.stb1 where c1 > 0 order by tbname" )
|
||||
|
||||
self.check_result_auto_sin( f"select t1,c5 from {dbname}.stb1 order by ts " , f"select sin(t1), sin(c5) from {dbname}.stb1 order by ts" )
|
||||
self.check_result_auto_sin( f"select t1,c5 from {dbname}.stb1 order by tbname " , f"select sin(t1) ,sin(c5) from {dbname}.stb1 order by tbname" )
|
||||
self.check_result_auto_sin( f"select t1,c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select sin(t1) ,sin(c5) from {dbname}.stb1 where c1 > 0 order by tbname" )
|
||||
self.check_result_auto_sin( f"select t1,c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select sin(t1) , sin(c5) from {dbname}.stb1 where c1 > 0 order by tbname" )
|
||||
|
||||
    def run(self):  # sourcery skip: extract-duplicate-method, remove-redundant-fstring
        tdSql.prepare()

        tdLog.printNoPrefix("==========step1:create table ==============")
        self.prepare_datas()

        tdLog.printNoPrefix("==========step2:test errors ==============")
        self.test_errors()

        tdLog.printNoPrefix("==========step3:support types ============")
        self.support_types()

        tdLog.printNoPrefix("==========step4: sin basic query ============")
        self.basic_sin_function()

        tdLog.printNoPrefix("==========step5: sin filter query ============")
        self.abs_func_filter()

        tdLog.printNoPrefix("==========step6: big number sin query ============")
        self.test_big_number()

        tdLog.printNoPrefix("==========step7: sin boundary query ============")
        self.check_boundary_values()

        tdLog.printNoPrefix("==========step8: check sin result of stable query ============")
        self.support_super_table_test()

    def stop(self):
        tdSql.close()
        tdLog.success(f"{__file__} successfully executed")

tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
@ -0,0 +1,104 @@
from util.log import *
from util.cases import *
from util.sql import *
import numpy as np


class TestHint:
    def init(self, conn, logSql, replicaVar=1):
        self.replicaVar = int(replicaVar)
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor())

        self.rowNum = 10
        self.batchNum = 5
        self.ts = 1537146000000

    def test_hint(self):
        """Test hint queries

        Execute queries with the supported hints; the correct results are returned.

        Since: v3.3.0.0

        Labels: stable

        Jira: TD-12345,TS-1234

        History:
            - 2024-2-6 Feng Chao Created

        """
        self.run()

    def run(self):
        dbname = "db"
        tdSql.prepare()

        tdSql.execute(f'''create table sta(ts timestamp, col1 int, col2 bigint) tags(tg1 int, tg2 binary(20))''')
        tdSql.execute(f"create table sta1 using sta tags(1, 'a')")
        tdSql.execute(f"create table sta2 using sta tags(2, 'b')")
        tdSql.execute(f"create table sta3 using sta tags(3, 'c')")
        tdSql.execute(f"create table sta4 using sta tags(4, 'a')")
        tdSql.execute(f"insert into sta1 values(1537146000001, 11, 110)")
        tdSql.execute(f"insert into sta1 values(1537146000002, 12, 120)")
        tdSql.execute(f"insert into sta1 values(1537146000003, 13, 130)")
        tdSql.execute(f"insert into sta2 values(1537146000001, 21, 210)")
        tdSql.execute(f"insert into sta2 values(1537146000002, 22, 220)")
        tdSql.execute(f"insert into sta2 values(1537146000003, 23, 230)")
        tdSql.execute(f"insert into sta3 values(1537146000001, 31, 310)")
        tdSql.execute(f"insert into sta3 values(1537146000002, 32, 320)")
        tdSql.execute(f"insert into sta3 values(1537146000003, 33, 330)")
        tdSql.execute(f"insert into sta4 values(1537146000001, 41, 410)")
        tdSql.execute(f"insert into sta4 values(1537146000002, 42, 420)")
        tdSql.execute(f"insert into sta4 values(1537146000003, 43, 430)")

        tdSql.execute(f'''create table stb(ts timestamp, col1 int, col2 bigint) tags(tg1 int, tg2 binary(20))''')
        tdSql.execute(f"create table stb1 using stb tags(1, 'a')")
        tdSql.execute(f"create table stb2 using stb tags(2, 'b')")
        tdSql.execute(f"create table stb3 using stb tags(3, 'c')")
        tdSql.execute(f"create table stb4 using stb tags(4, 'a')")
        tdSql.execute(f"insert into stb1 values(1537146000001, 911, 9110)")
        tdSql.execute(f"insert into stb1 values(1537146000002, 912, 9120)")
        tdSql.execute(f"insert into stb1 values(1537146000003, 913, 9130)")
        tdSql.execute(f"insert into stb2 values(1537146000001, 921, 9210)")
        tdSql.execute(f"insert into stb2 values(1537146000002, 922, 9220)")
        tdSql.execute(f"insert into stb2 values(1537146000003, 923, 9230)")
        tdSql.execute(f"insert into stb3 values(1537146000001, 931, 9310)")
        tdSql.execute(f"insert into stb3 values(1537146000002, 932, 9320)")
        tdSql.execute(f"insert into stb3 values(1537146000003, 933, 9330)")
        tdSql.execute(f"insert into stb4 values(1537146000001, 941, 9410)")
        tdSql.execute(f"insert into stb4 values(1537146000002, 942, 9420)")
        tdSql.execute(f"insert into stb4 values(1537146000003, 943, 9430)")

        tdSql.query(f"select /*+ batch_scan() */ count(*) from sta a, stb b where a.tg1=b.tg1 and a.ts=b.ts and b.tg2 > 'a' interval(1a);")
        tdSql.checkRows(3)

        tdSql.query(f"select /*+ no_batch_scan() */ count(*) from sta a, stb b where a.tg1=b.tg1 and a.ts=b.ts and b.tg2 > 'a' interval(1a);")
        tdSql.checkRows(3)

        tdSql.query(f"select /*+ batch_scan(a) */ count(*) from sta a, stb b where a.tg1=b.tg1 and a.ts=b.ts and b.tg2 > 'a' interval(1a);")
        tdSql.checkRows(3)

        tdSql.query(f"select /*+ batch_scan(a,) */ count(*) from sta a, stb b where a.tg1=b.tg1 and a.ts=b.ts and b.tg2 > 'a' interval(1a);")
        tdSql.checkRows(3)

        tdSql.query(f"select /*+ a,a */ count(*) from sta a, stb b where a.tg1=b.tg1 and a.ts=b.ts and b.tg2 > 'a' interval(1a);")
        tdSql.checkRows(3)

        tdSql.query(f"select /*+*/ count(*) from sta a, stb b where a.tg1=b.tg1 and a.ts=b.ts and b.tg2 > 'a' interval(1a);")
        tdSql.checkRows(3)

        tdSql.query(f"select /*+ batch_scan(),no_batch_scan() */ count(*) from sta a, stb b where a.tg1=b.tg1 and a.ts=b.ts and b.tg2 > 'a' interval(1a);")
        tdSql.checkRows(3)

        tdSql.query(f"select /*+ no_batch_scan() batch_scan() */ count(*) from sta a, stb b where a.tg1=b.tg1 and a.ts=b.ts and b.tg2 > 'a' interval(1a);")
        tdSql.checkRows(3)

    def stop(self):
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)

tdCases.addWindows(__file__, TestHint())
tdCases.addLinux(__file__, TestHint())
@ -0,0 +1,550 @@
import datetime

from dataclasses import dataclass, field
from typing import List, Any, Tuple
from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import *

PRIMARY_COL = "ts"

INT_COL = "c_int"
BINT_COL = "c_bint"
SINT_COL = "c_sint"
TINT_COL = "c_tint"
FLOAT_COL = "c_float"
DOUBLE_COL = "c_double"
BOOL_COL = "c_bool"
TINT_UN_COL = "c_utint"
SINT_UN_COL = "c_usint"
BINT_UN_COL = "c_ubint"
INT_UN_COL = "c_uint"
BINARY_COL = "c_binary"
NCHAR_COL = "c_nchar"
TS_COL = "c_ts"

NUM_COL = [INT_COL, BINT_COL, SINT_COL, TINT_COL, FLOAT_COL, DOUBLE_COL, ]
CHAR_COL = [BINARY_COL, NCHAR_COL, ]
BOOLEAN_COL = [BOOL_COL, ]
TS_TYPE_COL = [TS_COL, ]

INT_TAG = "t_int"

ALL_COL = [PRIMARY_COL, INT_COL, BINT_COL, SINT_COL, TINT_COL, FLOAT_COL, DOUBLE_COL, BINARY_COL, NCHAR_COL, BOOL_COL, TS_COL]
TAG_COL = [INT_TAG]
# insert data args:
TIME_STEP = 10000
NOW = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000)

# init db/table
DBNAME = "db"
STBNAME = f"{DBNAME}.stb1"
CTBNAME = f"{DBNAME}.ct1"
NTBNAME = f"{DBNAME}.nt1"

@dataclass
class DataSet:
    ts_data     : List[int] = field(default_factory=list)
    int_data    : List[int] = field(default_factory=list)
    bint_data   : List[int] = field(default_factory=list)
    sint_data   : List[int] = field(default_factory=list)
    tint_data   : List[int] = field(default_factory=list)
    int_un_data : List[int] = field(default_factory=list)
    bint_un_data: List[int] = field(default_factory=list)
    sint_un_data: List[int] = field(default_factory=list)
    tint_un_data: List[int] = field(default_factory=list)
    float_data  : List[float] = field(default_factory=list)
    double_data : List[float] = field(default_factory=list)
    bool_data   : List[int] = field(default_factory=list)
    binary_data : List[str] = field(default_factory=list)
    nchar_data  : List[str] = field(default_factory=list)

class TestJoin:

    def init(self, conn, logSql, replicaVar=1):
        self.replicaVar = int(replicaVar)
        tdLog.debug(f"start to execute {__file__}")
        tdSql.init(conn.cursor(), True)

    def __query_condition(self, tbname):
        query_condition = []
        for char_col in CHAR_COL:
            query_condition.extend(
                (
                    f"{tbname}.{char_col}",
                    # f"upper( {tbname}.{char_col} )",
                )
            )
        query_condition.extend(f"cast( {tbname}.{un_char_col} as binary(16) ) " for un_char_col in NUM_COL)
        for num_col in NUM_COL:
            query_condition.extend(
                (
                    f"sin( {tbname}.{num_col} )",
                )
            )
            query_condition.extend(f"{tbname}.{num_col} + {tbname}.{num_col_1} " for num_col_1 in NUM_COL)

        query_condition.append(''' "test1234!@#$%^&*():'><?/.,][}{" ''')

        return query_condition

    def __join_condition(self, tb_list, filter=PRIMARY_COL, INNER=False, alias_tb1="tb1", alias_tb2="tb2"):
        table_reference = tb_list[0]
        join_condition = table_reference
        join = "inner join" if INNER else "join"
        for i in range(len(tb_list[1:])):
            join_condition += f" as {alias_tb1} {join} {tb_list[i+1]} as {alias_tb2} on {alias_tb1}.{filter}={alias_tb2}.{filter}"

        return join_condition
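
    # For example (hypothetical call), with the default filter "ts":
    #
    #   self.__join_condition([f"{DBNAME}.ct1", f"{DBNAME}.ct4"])
    #   -> "db.ct1 as tb1 join db.ct4 as tb2 on tb1.ts=tb2.ts"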

    def __where_condition(self, col=None, tbname=None, query_conditon=None):
        if query_conditon and isinstance(query_conditon, str):
            if query_conditon.startswith("count"):
                query_conditon = query_conditon[6:-1]
            elif query_conditon.startswith("max"):
                query_conditon = query_conditon[4:-1]
            elif query_conditon.startswith("sum"):
                query_conditon = query_conditon[4:-1]
            elif query_conditon.startswith("min"):
                query_conditon = query_conditon[4:-1]

        if query_conditon:
            return f" where {query_conditon} is not null"
        if col in NUM_COL:
            return f" where abs( {tbname}.{col} ) >= 0"
        if col in CHAR_COL:
            return f" where lower( {tbname}.{col} ) like 'bina%' or lower( {tbname}.{col} ) like '_cha%' "
        if col in BOOLEAN_COL:
            return f" where {tbname}.{col} in (false, true) "
        if col in TS_TYPE_COL or col in PRIMARY_COL:
            return f" where cast( {tbname}.{col} as binary(16) ) is not null "

        return ""

    def __group_condition(self, col, having=None):
        if isinstance(col, str):
            if col.startswith("count"):
                col = col[6:-1]
            elif col.startswith("max"):
                col = col[4:-1]
            elif col.startswith("sum"):
                col = col[4:-1]
            elif col.startswith("min"):
                col = col[4:-1]
        return f" group by {col} having {having}" if having else f" group by {col} "

    def __gen_sql(self, select_clause, from_clause, where_condition="", group_condition=""):
        if isinstance(select_clause, str) and "on" not in from_clause and select_clause.split(".")[0] != from_clause.split(".")[0]:
            return
        return f"select {select_clause} from {from_clause} {where_condition} {group_condition}"

    @property
    def __join_tblist(self, dbname=DBNAME):
        return [
            # ["ct1", "ct2"],
            [f"{dbname}.ct1", f"{dbname}.ct4"],
            [f"{dbname}.ct1", f"{dbname}.nt1"],
            # ["ct2", "ct4"],
            # ["ct2", "nt1"],
            # ["ct4", "nt1"],
            # ["ct1", "ct2", "ct4"],
            # ["ct1", "ct2", "nt1"],
            # ["ct1", "ct4", "nt1"],
            # ["ct2", "ct4", "nt1"],
            # ["ct1", "ct2", "ct4", "nt1"],
        ]

    @property
    def __sqls_list(self):
        sqls = []
        __join_tblist = self.__join_tblist
        for join_tblist in __join_tblist:
            alias_tb = "tb1"
            # for join_tb in join_tblist:
            select_claus_list = self.__query_condition(alias_tb)
            for select_claus in select_claus_list:
                group_claus = self.__group_condition(col=select_claus)
                where_claus = self.__where_condition(query_conditon=select_claus)
                having_claus = self.__group_condition(col=select_claus, having=f"{select_claus} is not null")
                sqls.extend(
                    (
                        # self.__gen_sql(select_claus, self.__join_condition(join_tblist, alias_tb1=alias_tb), where_claus, group_claus),
                        self.__gen_sql(select_claus, self.__join_condition(join_tblist, alias_tb1=alias_tb), where_claus, having_claus),
                        # self.__gen_sql(select_claus, self.__join_condition(join_tblist, alias_tb1=alias_tb), where_claus),
                        # self.__gen_sql(select_claus, self.__join_condition(join_tblist, alias_tb1=alias_tb), group_claus),
                        # self.__gen_sql(select_claus, self.__join_condition(join_tblist, alias_tb1=alias_tb), having_claus),
                        # self.__gen_sql(select_claus, self.__join_condition(join_tblist, alias_tb1=alias_tb)),
                        # self.__gen_sql(select_claus, self.__join_condition(join_tblist, INNER=True, alias_tb1=alias_tb), where_claus, group_claus),
                        self.__gen_sql(select_claus, self.__join_condition(join_tblist, INNER=True, alias_tb1=alias_tb), where_claus, having_claus),
                        # self.__gen_sql(select_claus, self.__join_condition(join_tblist, INNER=True, alias_tb1=alias_tb), where_claus, ),
                        # self.__gen_sql(select_claus, self.__join_condition(join_tblist, INNER=True, alias_tb1=alias_tb), having_claus ),
                        # self.__gen_sql(select_claus, self.__join_condition(join_tblist, INNER=True, alias_tb1=alias_tb), group_claus ),
                        # self.__gen_sql(select_claus, self.__join_condition(join_tblist, INNER=True, alias_tb1=alias_tb) ),
                    )
                )
        return list(filter(None, sqls))

    def test_join_check(self):
        """Test legal join queries

        Join select statements combined with various where, having and group by
        clauses return the correct results.

        Since: v3.3.0.0

        Labels: join

        Jira: TD-12345,TS-1234

        History:
            - 2024-2-6 Feng Chao Created

        """
        self.__join_check()

    def __join_check(self,):
        tdLog.printNoPrefix("==========current sql condition check , must return query ok==========")
        for i in range(len(self.__sqls_list)):
            tdSql.query(self.__sqls_list[i])
            # if i % 10 == 0 :
            #     tdLog.success(f"{i} sql is already executed success !")

    def __join_check_old(self, tblist, checkrows, join_flag=True):
        query_conditions = self.__query_condition(tblist[0])
        join_condition = self.__join_condition(tb_list=tblist) if join_flag else " "
        for condition in query_conditions:
            where_condition = self.__where_condition(col=condition, tbname=tblist[0])
            group_having = self.__group_condition(col=condition, having=f"{condition} is not null ")
            group_no_having = self.__group_condition(col=condition)
            groups = ["", group_having, group_no_having]
            for group_condition in groups:
                if where_condition:
                    sql = f" select {condition} from {tblist[0]},{tblist[1]} where {join_condition} and {where_condition} {group_condition} "
                else:
                    sql = f" select {condition} from {tblist[0]},{tblist[1]} where {join_condition} {group_condition} "

                if not join_flag:
                    tdSql.error(sql=sql)
                    break
                if len(tblist) == 2:
                    if "ct1" in tblist or "nt1" in tblist:
                        self.__join_current(sql, checkrows)
                    elif where_condition or "not null" in group_condition:
                        self.__join_current(sql, checkrows + 2)
                    elif group_condition:
                        self.__join_current(sql, checkrows + 3)
                    else:
                        self.__join_current(sql, checkrows + 5)
                if len(tblist) > 2 or len(tblist) < 1:
                    tdSql.error(sql=sql)

    def __join_current(self, sql, checkrows):
        tdSql.query(sql=sql)
        # tdSql.checkRows(checkrows)

    def test_error(self):
        """Test illegal join queries

        Join select statements combined with unsupported where, having and
        group by clauses return the expected errors.

        Since: v3.3.0.0

        Labels: join, negative

        Jira: TD-12345,TS-1234

        History:
            - 2024-2-6 Feng Chao Created

        """
        self.__test_error()

    def __test_error(self, dbname=DBNAME):
        # sourcery skip: extract-duplicate-method, move-assign-in-block
        tdLog.printNoPrefix("==========err sql condition check , must return error==========")
        err_list_1 = [f"{dbname}.ct1", f"{dbname}.ct2", f"{dbname}.ct4"]
        err_list_2 = [f"{dbname}.ct1", f"{dbname}.ct2", f"{dbname}.nt1"]
        err_list_3 = [f"{dbname}.ct1", f"{dbname}.ct4", f"{dbname}.nt1"]
        err_list_4 = [f"{dbname}.ct2", f"{dbname}.ct4", f"{dbname}.nt1"]
        err_list_5 = [f"{dbname}.ct1", f"{dbname}.ct2", f"{dbname}.ct4", f"{dbname}.nt1"]
        self.__join_check_old(err_list_1, -1)
        tdLog.printNoPrefix(f"==========err sql condition check in {err_list_1} over==========")
        self.__join_check_old(err_list_2, -1)
        tdLog.printNoPrefix(f"==========err sql condition check in {err_list_2} over==========")
        self.__join_check_old(err_list_3, -1)
        tdLog.printNoPrefix(f"==========err sql condition check in {err_list_3} over==========")
        self.__join_check_old(err_list_4, -1)
        tdLog.printNoPrefix(f"==========err sql condition check in {err_list_4} over==========")
        self.__join_check_old(err_list_5, -1)
        tdLog.printNoPrefix(f"==========err sql condition check in {err_list_5} over==========")
        self.__join_check_old(["ct2", "ct4"], -1, join_flag=False)
        tdLog.printNoPrefix("==========err sql condition check in has no join condition over==========")

        tdSql.error(f"select c1, c2 from {dbname}.ct2, {dbname}.ct4 where ct2.{PRIMARY_COL}=ct4.{PRIMARY_COL}")
        tdSql.error(f"select ct2.c1, ct2.c2 from {dbname}.ct2 as ct2, {dbname}.ct4 as ct4 where ct2.{INT_COL}=ct4.{INT_COL}")
        tdSql.error(f"select ct2.c1, ct2.c2 from {dbname}.ct2 as ct2, {dbname}.ct4 as ct4 where ct2.{TS_COL}=ct4.{TS_COL}")
        tdSql.error(f"select ct2.c1, ct2.c2 from {dbname}.ct2 as ct2, {dbname}.ct4 as ct4 where ct2.{PRIMARY_COL}=ct4.{TS_COL}")
        tdSql.error(f"select ct2.c1, ct1.c2 from {dbname}.ct2 as ct2, {dbname}.ct4 as ct4 where ct2.{PRIMARY_COL}=ct4.{PRIMARY_COL}")
        tdSql.error(f"select ct2.c1, ct4.c2 from {dbname}.ct2 as ct2, {dbname}.ct4 as ct4 where ct2.{PRIMARY_COL}=ct4.{PRIMARY_COL} and c1 is not null ")
        tdSql.error(f"select ct2.c1, ct4.c2 from {dbname}.ct2 as ct2, {dbname}.ct4 as ct4 where ct2.{PRIMARY_COL}=ct4.{PRIMARY_COL} and ct1.c1 is not null ")

        tbname = [f"{dbname}.ct1", f"{dbname}.ct2", f"{dbname}.ct4", f"{dbname}.nt1"]

        # for tb in tbname:
        #     for errsql in self.__join_err_check(tb):
        #         tdSql.error(sql=errsql)
        #     tdLog.printNoPrefix(f"==========err sql condition check in {tb} over==========")

    def all_test(self):
        self.__join_check()
        self.__test_error()

def __create_tb(self, stb="stb1", ctb_num=20, ntbnum=1, dbname=DBNAME):
|
||||
create_stb_sql = f'''create table {dbname}.{stb}(
|
||||
ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
|
||||
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
|
||||
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp,
|
||||
{TINT_UN_COL} tinyint unsigned, {SINT_UN_COL} smallint unsigned,
|
||||
{INT_UN_COL} int unsigned, {BINT_UN_COL} bigint unsigned
|
||||
) tags ({INT_TAG} int)
|
||||
'''
|
||||
for i in range(ntbnum):
|
||||
|
||||
create_ntb_sql = f'''create table {dbname}.nt{i+1}(
|
||||
ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
|
||||
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
|
||||
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp,
|
||||
{TINT_UN_COL} tinyint unsigned, {SINT_UN_COL} smallint unsigned,
|
||||
{INT_UN_COL} int unsigned, {BINT_UN_COL} bigint unsigned
|
||||
)
|
||||
'''
|
||||
tdSql.execute(create_stb_sql)
|
||||
tdSql.execute(create_ntb_sql)
|
||||
|
||||
for i in range(ctb_num):
|
||||
tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.{stb} tags ( {i+1} )')
|
||||
|
||||
    def __data_set(self, rows):
        data_set = DataSet()

        for i in range(rows):
            data_set.ts_data.append(NOW + 1 * (rows - i))
            data_set.int_data.append(rows - i)
            data_set.bint_data.append(11111 * (rows - i))
            data_set.sint_data.append(111 * (rows - i) % 32767)
            data_set.tint_data.append(11 * (rows - i) % 127)
            data_set.int_un_data.append(rows - i)
            data_set.bint_un_data.append(11111 * (rows - i))
            data_set.sint_un_data.append(111 * (rows - i) % 32767)
            data_set.tint_un_data.append(11 * (rows - i) % 127)
            data_set.float_data.append(1.11 * (rows - i))
            data_set.double_data.append(1100.0011 * (rows - i))
            data_set.bool_data.append((rows - i) % 2)
            data_set.binary_data.append(f'binary{(rows - i)}')
            data_set.nchar_data.append(f'nchar_测试_{(rows - i)}')

        return data_set

    def __insert_data(self, dbname=DBNAME):
        tdLog.printNoPrefix("==========step: start to insert data into tables now.....")
        data = self.__data_set(rows=self.rows)

        # now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000)
        null_data = '''null, null, null, null, null, null, null, null, null, null, null, null, null, null'''
        zero_data = "0, 0, 0, 0, 0, 0, 0, 'binary_0', 'nchar_0', 0, 0, 0, 0, 0"

        for i in range(self.rows):
            row_data = f'''
                {data.int_data[i]}, {data.bint_data[i]}, {data.sint_data[i]}, {data.tint_data[i]}, {data.float_data[i]}, {data.double_data[i]},
                {data.bool_data[i]}, '{data.binary_data[i]}', '{data.nchar_data[i]}', {data.ts_data[i]}, {data.tint_un_data[i]},
                {data.sint_un_data[i]}, {data.int_un_data[i]}, {data.bint_un_data[i]}
            '''
            neg_row_data = f'''
                {-1 * data.int_data[i]}, {-1 * data.bint_data[i]}, {-1 * data.sint_data[i]}, {-1 * data.tint_data[i]}, {-1 * data.float_data[i]}, {-1 * data.double_data[i]},
                {data.bool_data[i]}, '{data.binary_data[i]}', '{data.nchar_data[i]}', {data.ts_data[i]}, {1 * data.tint_un_data[i]},
                {1 * data.sint_un_data[i]}, {1 * data.int_un_data[i]}, {1 * data.bint_un_data[i]}
            '''

            tdSql.execute(f"insert into {dbname}.ct1 values ( {NOW - i * TIME_STEP}, {row_data} )")
            tdSql.execute(f"insert into {dbname}.ct2 values ( {NOW - i * int(TIME_STEP * 0.6)}, {neg_row_data} )")
            tdSql.execute(f"insert into {dbname}.ct4 values ( {NOW - i * int(TIME_STEP * 0.8)}, {row_data} )")
            tdSql.execute(f"insert into {dbname}.nt1 values ( {NOW - i * int(TIME_STEP * 1.2)}, {row_data} )")

        tdSql.execute(f"insert into {dbname}.ct2 values ( {NOW + int(TIME_STEP * 0.6)}, {null_data} )")
        tdSql.execute(f"insert into {dbname}.ct2 values ( {NOW - (self.rows + 1) * int(TIME_STEP * 0.6)}, {null_data} )")
        tdSql.execute(f"insert into {dbname}.ct2 values ( {NOW - self.rows * int(TIME_STEP * 0.29)}, {null_data} )")

        tdSql.execute(f"insert into {dbname}.ct4 values ( {NOW + int(TIME_STEP * 0.8)}, {null_data} )")
        tdSql.execute(f"insert into {dbname}.ct4 values ( {NOW - (self.rows + 1) * int(TIME_STEP * 0.8)}, {null_data} )")
        tdSql.execute(f"insert into {dbname}.ct4 values ( {NOW - self.rows * int(TIME_STEP * 0.39)}, {null_data} )")

        tdSql.execute(f"insert into {dbname}.nt1 values ( {NOW + int(TIME_STEP * 1.2)}, {null_data} )")
        tdSql.execute(f"insert into {dbname}.nt1 values ( {NOW - (self.rows + 1) * int(TIME_STEP * 1.2)}, {null_data} )")
        tdSql.execute(f"insert into {dbname}.nt1 values ( {NOW - self.rows * int(TIME_STEP * 0.59)}, {null_data} )")

    def ts5863(self, dbname=DBNAME):
        tdSql.execute(f"CREATE STABLE {dbname}.`st_quality` (`ts` TIMESTAMP, `quality` INT, `val` NCHAR(64), `rts` TIMESTAMP) \
            TAGS (`cx` VARCHAR(10), `gyd` VARCHAR(10), `gx` VARCHAR(10), `lx` VARCHAR(10)) SMA(`ts`,`quality`,`val`)")

        tdSql.execute(f"create table {dbname}.st_q1 using {dbname}.st_quality tags ('cx', 'gyd', 'gx1', 'lx1')")

        sql1 = f"select t.val as batch_no, a.tbname as sample_point_code, min(cast(a.val as double)) as `min`, \
            max(cast(a.val as double)) as `max`, avg(cast(a.val as double)) as `avg` from {dbname}.st_quality t \
            left join {dbname}.st_quality a on a.ts=t.ts and a.cx=t.cx and a.gyd=t.gyd \
            where t.ts >= 1734574900000 and t.ts <= 1734575000000 \
            and t.tbname = 'st_q1' \
            and a.tbname in ('st_q2', 'st_q3') \
            group by t.val, a.tbname"
        tdSql.query(sql1)
        tdSql.checkRows(0)

        tdSql.execute(f"create table {dbname}.st_q2 using {dbname}.st_quality tags ('cx2', 'gyd2', 'gx2', 'lx2')")
        tdSql.execute(f"create table {dbname}.st_q3 using {dbname}.st_quality tags ('cx', 'gyd', 'gx3', 'lx3')")
        tdSql.execute(f"create table {dbname}.st_q4 using {dbname}.st_quality tags ('cx', 'gyd', 'gx4', 'lx4')")

        tdSql.query(sql1)
        tdSql.checkRows(0)

        tdSql.execute(f"insert into {dbname}.st_q1 values (1734574900000, 1, '1', 1734574900000)")
        tdSql.query(sql1)
        tdSql.checkRows(0)
        tdSql.execute(f"insert into {dbname}.st_q2 values (1734574900000, 1, '1', 1734574900000)")
        tdSql.query(sql1)
        tdSql.checkRows(0)
        tdSql.execute(f"insert into {dbname}.st_q3 values (1734574900000, 1, '1', 1734574900000)")
        tdSql.query(sql1)
        tdSql.checkRows(1)
        tdSql.checkData(0, 0, 1)
        tdSql.checkData(0, 1, 'st_q3')
        tdSql.checkData(0, 2, 1)
        tdSql.checkData(0, 3, 1)
        tdSql.checkData(0, 4, 1)

        tdSql.execute(f"insert into {dbname}.st_q1 values (1734574900001, 2, '2', 1734574900000)")
        tdSql.execute(f"insert into {dbname}.st_q3 values (1734574900001, 2, '2', 1734574900000)")
        sql2 = f"select t.val as batch_no, a.tbname as sample_point_code, min(cast(a.val as double)) as `min`, \
            max(cast(a.val as double)) as `max`, avg(cast(a.val as double)) as `avg` from {dbname}.st_quality t \
            left join {dbname}.st_quality a on a.ts=t.ts and a.cx=t.cx and a.gyd=t.gyd \
            where t.ts >= 1734574900000 and t.ts <= 1734575000000 \
            and t.tbname = 'st_q1' \
            and a.tbname in ('st_q2', 'st_q3') \
            group by t.val, a.tbname order by batch_no"
        tdSql.query(sql2)
        tdSql.checkRows(2)
        tdSql.checkData(0, 0, 1)
        tdSql.checkData(0, 1, 'st_q3')
        tdSql.checkData(0, 2, 1)
        tdSql.checkData(0, 3, 1)
        tdSql.checkData(0, 4, 1)
        tdSql.checkData(1, 0, 2)
        tdSql.checkData(1, 1, 'st_q3')
        tdSql.checkData(1, 2, 2)
        tdSql.checkData(1, 3, 2)
        tdSql.checkData(1, 4, 2)
        sql3 = f"select min(cast(a.val as double)) as `min` from {dbname}.st_quality t left join {dbname}.st_quality \
            a on a.ts=t.ts and a.cx=t.cx where t.tbname = 'st_q3' and a.tbname in ('st_q3', 'st_q2')"
        tdSql.execute(f"insert into {dbname}.st_q1 values (1734574900002, 2, '2', 1734574900000)")
        tdSql.execute(f"insert into {dbname}.st_q4 values (1734574900002, 2, '2', 1734574900000)")
        tdSql.execute(f"insert into {dbname}.st_q1 values (1734574900003, 3, '3', 1734574900000)")
        tdSql.execute(f"insert into {dbname}.st_q3 values (1734574900003, 3, '3', 1734574900000)")
        tdSql.query(sql3)
        tdSql.checkRows(1)
        tdSql.checkData(0, 0, 1)
        sql3 = f"select min(cast(a.val as double)) as `min`, max(cast(a.val as double)) as `max`, avg(cast(a.val as double)) as `avg` \
            from {dbname}.st_quality t left join {dbname}.st_quality a \
            on a.ts=t.ts and a.cx=t.cx where t.tbname = 'st_q3' and a.tbname in ('st_q3', 'st_q2')"
        tdSql.query(sql3)
        tdSql.checkRows(1)
        tdSql.checkData(0, 0, 1)
        tdSql.checkData(0, 1, 3)
        tdSql.checkData(0, 2, 2)
        tdSql.query(sql1)
        tdSql.checkRows(3)
        tdSql.query(sql2)
        tdSql.checkRows(3)
        tdSql.checkData(0, 0, 1)
        tdSql.checkData(0, 1, 'st_q3')
        tdSql.checkData(0, 2, 1)
        tdSql.checkData(0, 3, 1)
        tdSql.checkData(0, 4, 1)
        tdSql.checkData(1, 0, 2)
        tdSql.checkData(1, 1, 'st_q3')
        tdSql.checkData(1, 2, 2)
        tdSql.checkData(1, 3, 2)
        tdSql.checkData(1, 4, 2)
        tdSql.checkData(2, 0, 3)
        tdSql.checkData(2, 1, 'st_q3')
        tdSql.checkData(2, 2, 3)
        tdSql.checkData(2, 3, 3)
        tdSql.checkData(2, 4, 3)

    def run(self):
        tdSql.prepare()

        tdLog.printNoPrefix("==========step1:create table")
        self.__create_tb(dbname=DBNAME)

        tdLog.printNoPrefix("==========step2:insert data")
        self.rows = 10
        self.__insert_data(dbname=DBNAME)

        tdLog.printNoPrefix("==========step3:all check")
        tdSql.query(f"select count(*) from {DBNAME}.ct1")
        tdSql.checkData(0, 0, self.rows)
        self.all_test()

        tdLog.printNoPrefix("==========step4:cross db check")
        dbname1 = "db1"
        tdSql.execute(f"create database {dbname1} duration 172800m")
        tdSql.execute(f"use {dbname1}")
        self.__create_tb(dbname=dbname1)
        self.__insert_data(dbname=dbname1)

        tdSql.query("select ct1.c_int from db.ct1 as ct1 join db1.ct1 as cy1 on ct1.ts=cy1.ts")
        tdSql.checkRows(self.rows)
        tdSql.query("select ct1.c_int from db.stb1 as ct1 join db1.ct1 as cy1 on ct1.ts=cy1.ts")
        tdSql.checkRows(self.rows + int(self.rows * 0.6 // 3) + int(self.rows * 0.8 // 4))
        tdSql.query("select ct1.c_int from db.nt1 as ct1 join db1.nt1 as cy1 on ct1.ts=cy1.ts")
        tdSql.checkRows(self.rows + 3)
        tdSql.query("select ct1.c_int from db.stb1 as ct1 join db1.stb1 as cy1 on ct1.ts=cy1.ts")
        tdSql.checkRows(50)

        tdSql.query("select count(*) from db.ct1")
        tdSql.checkData(0, 0, self.rows)
        tdSql.query("select count(*) from db1.ct1")
        tdSql.checkData(0, 0, self.rows)

        self.all_test()
        tdSql.query("select count(*) from db.ct1")
        tdSql.checkData(0, 0, self.rows)
        tdSql.query("select count(*) from db1.ct1")
        tdSql.checkData(0, 0, self.rows)

        tdSql.execute(f"flush database {DBNAME}")
        tdSql.execute(f"flush database {dbname1}")
        # tdDnodes.stop(1)
        # tdDnodes.start(1)

        tdSql.execute("use db")
        tdSql.query("select count(*) from db.ct1")
        tdSql.checkData(0, 0, self.rows)
        tdSql.query("select count(*) from db1.ct1")
        tdSql.checkData(0, 0, self.rows)

        tdLog.printNoPrefix("==========step5:after wal, all check again")
        self.all_test()
        tdSql.query("select count(*) from db.ct1")
        tdSql.checkData(0, 0, self.rows)
        self.ts5863(dbname=dbname1)

    def stop(self):
        tdSql.close()
        tdLog.success(f"{__file__} successfully executed")

tdCases.addLinux(__file__, TestJoin())
tdCases.addWindows(__file__, TestJoin())
@ -0,0 +1,474 @@
import datetime

from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import *

PRIMARY_COL = "ts"

INT_COL = "c1"
BINT_COL = "c2"
SINT_COL = "c3"
TINT_COL = "c4"
FLOAT_COL = "c5"
DOUBLE_COL = "c6"
BOOL_COL = "c7"

BINARY_COL = "c8"
NCHAR_COL = "c9"
TS_COL = "c10"

NUM_COL = [INT_COL, BINT_COL, SINT_COL, TINT_COL, FLOAT_COL, DOUBLE_COL, ]
CHAR_COL = [BINARY_COL, NCHAR_COL, ]
BOOLEAN_COL = [BOOL_COL, ]
TS_TYPE_COL = [TS_COL, ]

class TDTestCase:

    def init(self, conn, logSql, replicaVar=1):
        self.replicaVar = int(replicaVar)
        tdLog.debug(f"start to execute {__file__}")
        tdSql.init(conn.cursor())

    def __query_condition(self, tbname):
        query_condition = []
        for char_col in CHAR_COL:
            query_condition.extend(
                (
                    f"count( {tbname}.{char_col} )",
                    f"cast( {tbname}.{char_col} as nchar(3) )",
                )
            )

        for num_col in NUM_COL:
            query_condition.extend(
                (
                    f"log( {tbname}.{num_col}, {tbname}.{num_col})",
                )
            )

        query_condition.extend(
            (
                ''' "test12" ''',
                # 1010,
            )
        )

        return query_condition

    def __join_condition(self, tb_list, filter=PRIMARY_COL, INNER=False):
        table_reference = tb_list[0]
        join_condition = f'{table_reference} {table_reference.split(".")[-1]}'
        join = "inner join" if INNER else "join"
        for i in range(len(tb_list[1:])):
            join_condition += f" {join} {tb_list[i+1]} {tb_list[i+1].split('.')[-1]} on {table_reference.split('.')[-1]}.{filter}={tb_list[i+1].split('.')[-1]}.{filter}"

        return join_condition

    def __where_condition(self, col=None, tbname=None, query_conditon=None):
        if query_conditon and isinstance(query_conditon, str):
            if query_conditon.startswith("count"):
                query_conditon = query_conditon[6:-1]
            elif query_conditon.startswith("max"):
                query_conditon = query_conditon[4:-1]
            elif query_conditon.startswith("sum"):
                query_conditon = query_conditon[4:-1]
            elif query_conditon.startswith("min"):
                query_conditon = query_conditon[4:-1]

        if query_conditon:
            return f" where {query_conditon} is not null"
        if col in NUM_COL:
            return f" where abs( {tbname}.{col} ) >= 0"
        if col in CHAR_COL:
            return f" where lower( {tbname}.{col} ) like 'bina%' or lower( {tbname}.{col} ) like '_cha%' "
        if col in BOOLEAN_COL:
            return f" where {tbname}.{col} in (false, true) "
        if col in TS_TYPE_COL or col in PRIMARY_COL:
            return f" where cast( {tbname}.{col} as binary(16) ) is not null "

        return ""

    def __group_condition(self, col, having=None):
        if isinstance(col, str):
            if col.startswith("count"):
                col = col[6:-1]
            elif col.startswith("max"):
                col = col[4:-1]
            elif col.startswith("sum"):
                col = col[4:-1]
            elif col.startswith("min"):
                col = col[4:-1]
        return f" group by {col} having {having}" if having else f" group by {col} "

    def __single_sql(self, select_clause, from_clause, where_condition="", group_condition=""):
        if isinstance(select_clause, str) and "on" not in from_clause and select_clause.split(".")[0] != from_clause.split(".")[0]:
            return
        return f"select {select_clause} from {from_clause} {where_condition} {group_condition}"

    @property
    def __join_tblist(self, dbname="db"):
        return [
            [f"{dbname}.ct1", f"{dbname}.t1"],
            [f"{dbname}.ct4", f"{dbname}.t1"],
            # ["ct1", "ct2", "ct4"],
            # ["ct1", "ct2", "t1"],
            # ["ct1", "ct4", "t1"],
            # ["ct2", "ct4", "t1"],
            # ["ct1", "ct2", "ct4", "t1"],
        ]

    @property
    def __tb_list(self, dbname="db"):
        return [
            f"{dbname}.ct1",
            f"{dbname}.ct4",
        ]

    def sql_list(self):
        sqls = []
        __join_tblist = self.__join_tblist
        for join_tblist in __join_tblist:
            for join_tb in join_tblist:
                join_tb_name = join_tb.split(".")[-1]
                select_claus_list = self.__query_condition(join_tb_name)
                for select_claus in select_claus_list:
                    group_claus = self.__group_condition(col=select_claus)
                    where_claus = self.__where_condition(query_conditon=select_claus)
                    having_claus = self.__group_condition(col=select_claus, having=f"{select_claus} is not null")
                    sqls.extend(
                        (
                            self.__single_sql(select_claus, self.__join_condition(join_tblist, INNER=True), where_claus, having_claus),
                        )
                    )
        __no_join_tblist = self.__tb_list
        for tb in __no_join_tblist:
            tb_name = tb.split(".")[-1]
            select_claus_list = self.__query_condition(tb_name)
            for select_claus in select_claus_list:
                group_claus = self.__group_condition(col=select_claus)
                where_claus = self.__where_condition(query_conditon=select_claus)
                having_claus = self.__group_condition(col=select_claus, having=f"{select_claus} is not null")
                sqls.extend(
                    (
                        self.__single_sql(select_claus, tb, where_claus, having_claus),
                    )
                )

        # return filter(None, sqls)
        return list(filter(None, sqls))

    def __get_type(self, col):
        if tdSql.cursor.istype(col, "BOOL"):
            return "BOOL"
        if tdSql.cursor.istype(col, "INT"):
            return "INT"
        if tdSql.cursor.istype(col, "BIGINT"):
            return "BIGINT"
        if tdSql.cursor.istype(col, "TINYINT"):
            return "TINYINT"
        if tdSql.cursor.istype(col, "SMALLINT"):
            return "SMALLINT"
        if tdSql.cursor.istype(col, "FLOAT"):
            return "FLOAT"
        if tdSql.cursor.istype(col, "DOUBLE"):
            return "DOUBLE"
        if tdSql.cursor.istype(col, "BINARY"):
            return "BINARY"
        if tdSql.cursor.istype(col, "NCHAR"):
            return "NCHAR"
        if tdSql.cursor.istype(col, "TIMESTAMP"):
            return "TIMESTAMP"
        if tdSql.cursor.istype(col, "JSON"):
            return "JSON"
        if tdSql.cursor.istype(col, "TINYINT UNSIGNED"):
            return "TINYINT UNSIGNED"
        if tdSql.cursor.istype(col, "SMALLINT UNSIGNED"):
            return "SMALLINT UNSIGNED"
        if tdSql.cursor.istype(col, "INT UNSIGNED"):
            return "INT UNSIGNED"
        if tdSql.cursor.istype(col, "BIGINT UNSIGNED"):
            return "BIGINT UNSIGNED"

    def union_check(self, dbname="db"):
        sqls = self.sql_list()
        for i in range(len(sqls)):
            tdSql.query(sqls[i])
            res1_type = self.__get_type(0)
            # if i % 5 == 0:
            #     tdLog.success(f"{i} : sql is already executing!")
            for j in range(len(sqls[i:])):
                tdSql.query(sqls[j+i])
                order_union_type = False
                rev_order_type = False
                all_union_type = False
                res2_type = self.__get_type(0)

                if res2_type == res1_type:
                    all_union_type = True
                elif res1_type in ("BIGINT", "NCHAR") and res2_type in ("BIGINT", "NCHAR"):
                    all_union_type = True
                elif res1_type in ("BIGINT", "NCHAR"):
                    order_union_type = True
                elif res2_type in ("BIGINT", "NCHAR"):
                    rev_order_type = True
                elif res1_type == "TIMESTAMP" and res2_type not in ("BINARY", "NCHAR"):
                    order_union_type = True
                elif res2_type == "TIMESTAMP" and res1_type not in ("BINARY", "NCHAR"):
                    rev_order_type = True
                elif res1_type == "BINARY" and res2_type != "NCHAR":
                    order_union_type = True
                elif res2_type == "BINARY" and res1_type != "NCHAR":
                    rev_order_type = True

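                # The cascade above encodes the expected implicit type resolution
                # for UNION: identical result types (or any mix of BIGINT/NCHAR)
                # are unionable in both orders, TIMESTAMP/BINARY operands are only
                # accepted in one direction, and anything else is expected to fail.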
                if all_union_type:
                    tdSql.execute(f"{sqls[i]} union {sqls[j+i]}")
                    tdSql.execute(f"{sqls[j+i]} union all {sqls[i]}")
                elif order_union_type:
                    tdSql.execute(f"{sqls[i]} union all {sqls[j+i]}")
                elif rev_order_type:
                    tdSql.execute(f"{sqls[j+i]} union {sqls[i]}")
                else:
                    tdSql.error(f"{sqls[i]} union {sqls[j+i]}")

        # check union with timeline function
        tdSql.query(f"select first(c1) from (select * from {dbname}.t1 union select * from {dbname}.t1 order by ts)")
        tdSql.checkRows(1)
        tdSql.checkData(0, 0, 9)
        tdSql.query(f"select last(c1) from (select * from {dbname}.t1 union select * from {dbname}.t1 order by ts desc)")
        tdSql.checkRows(1)
        tdSql.checkData(0, 0, 2147450880)
        tdSql.query(f"select irate(c1) from (select * from {dbname}.t1 union select * from {dbname}.t1 order by ts)")
        tdSql.checkRows(1)
        tdSql.checkData(0, 0, 9.102222222222222)
        tdSql.query(f"select elapsed(ts) from (select * from {dbname}.t1 union select * from {dbname}.t1 order by ts)")
        tdSql.checkRows(1)
        tdSql.checkData(0, 0, 46800000.000000000000000)
        tdSql.query(f"select diff(c1) from (select * from {dbname}.t1 union select * from {dbname}.t1 order by ts)")
        tdSql.checkRows(14)
        tdSql.query(f"select derivative(c1, 1s, 0) from (select * from {dbname}.t1 union select * from {dbname}.t1 order by ts)")
        tdSql.checkRows(11)
        tdSql.query(f"select count(*) from {dbname}.t1 as a join {dbname}.t1 as b on a.ts = b.ts and a.ts is null")
        tdSql.checkRows(1)
        tdSql.checkData(0, 0, 0)

        tdSql.query(f"select first(c1) from (select * from {dbname}.t1 union select * from {dbname}.t1)")
        tdSql.query(f"select last(c1) from (select * from {dbname}.t1 union select * from {dbname}.t1)")
        tdSql.error(f"select irate(c1) from (select * from {dbname}.t1 union select * from {dbname}.t1)")
        tdSql.error(f"select elapsed(ts) from (select * from {dbname}.t1 union select * from {dbname}.t1)")
        tdSql.error(f"select diff(c1) from (select * from {dbname}.t1 union select * from {dbname}.t1)")
        tdSql.error(f"select derivative(c1, 1s, 0) from (select * from {dbname}.t1 union select * from {dbname}.t1)")


    def __test_error(self, dbname="db"):

        tdSql.error(f"show {dbname}.tables union show {dbname}.tables")
        tdSql.error(f"create table {dbname}.errtb1 union all create table {dbname}.errtb2")
        tdSql.error(f"drop table {dbname}.ct1 union all drop table {dbname}.ct3")
        tdSql.error(f"select c1 from {dbname}.ct1 union all drop table {dbname}.ct3")
        tdSql.error(f"select c1 from {dbname}.ct1 union all '' ")
        tdSql.error(f" '' union all select c1 from {dbname}.ct1 ")

    def all_test(self):
        self.__test_error()
        self.union_check()

    def __create_tb(self, dbname="db"):

        tdLog.printNoPrefix("==========step1:create table")
        create_stb_sql = f'''create table {dbname}.stb1(
                ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
                {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
                {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
            ) tags (tag1 int)
            '''
        create_ntb_sql = f'''create table {dbname}.t1(
                ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
                {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
                {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
            )
            '''
        tdSql.execute(create_stb_sql)
        tdSql.execute(create_ntb_sql)

        for i in range(4):
            tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )')

    def __insert_data(self, rows, dbname="db"):
        now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000)
        for i in range(rows):
            tdSql.execute(
                f"insert into {dbname}.ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
            )
            tdSql.execute(
                f"insert into {dbname}.ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
            )
            tdSql.execute(
                f"insert into {dbname}.ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
            )
        tdSql.execute(
            f'''insert into {dbname}.ct1 values
            ( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar_测试_0', { now_time + 8 } )
            ( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar_测试_9', { now_time + 9 } )
            '''
        )

        tdSql.execute(
            f'''insert into {dbname}.ct4 values
            ( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            ( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            (
                { now_time + 5184000000}, {pow(2,31)-pow(2,15)}, {pow(2,63)-pow(2,30)}, 32767, 127,
                { 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000}
            )
            (
                { now_time + 2592000000 }, {pow(2,31)-pow(2,16)}, {pow(2,63)-pow(2,31)}, 32766, 126,
                { 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000}
            )
            '''
        )

        tdSql.execute(
            f'''insert into {dbname}.ct2 values
            ( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            ( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            (
                { now_time + 5184000000 }, { -1 * pow(2,31) + pow(2,15) }, { -1 * pow(2,63) + pow(2,30) }, -32766, -126,
                { -1 * 3.2 * pow(10,38) }, { -1.2 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 }
            )
            (
                { now_time + 2592000000 }, { -1 * pow(2,31) + pow(2,16) }, { -1 * pow(2,63) + pow(2,31) }, -32767, -127,
                { - 3.3 * pow(10,38) }, { -1.3 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 }
            )
            '''
        )

        for i in range(rows):
            insert_data = f'''insert into {dbname}.t1 values
                ( { now_time - i * 3600000 }, {i}, {i * 11111}, { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2},
                "binary_{i}", "nchar_测试_{i}", { now_time - 1000 * i } )
                '''
            tdSql.execute(insert_data)
        tdSql.execute(
            f'''insert into {dbname}.t1 values
            ( { now_time + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            ( { now_time - (( rows // 2 ) * 60 + 30) * 60000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            ( { now_time - rows * 3600000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            ( { now_time + 7200000 }, { pow(2,31) - pow(2,15) }, { pow(2,63) - pow(2,30) }, 32767, 127,
                { 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 },
                "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 }
            )
            (
                { now_time + 3600000 } , { pow(2,31) - pow(2,16) }, { pow(2,63) - pow(2,31) }, 32766, 126,
                { 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 },
                "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 }
            )
            '''
        )

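    # Note: the tail insert batches above intentionally write values near each
    # column type's numeric limits (e.g. pow(2,31)-pow(2,15) for INT and
    # 3.3 * pow(10,38) for FLOAT), so the union checks also cover boundary rows.
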
    def test_TS_5630(self):
        sql = "CREATE DATABASE `ep_iot` BUFFER 256 CACHESIZE 20 CACHEMODEL 'both' COMP 2 DURATION 14400m WAL_FSYNC_PERIOD 3000 MAXROWS 4096 MINROWS 100 STT_TRIGGER 2 KEEP 5256000m,5256000m,5256000m PAGES 256 PAGESIZE 4 PRECISION 'ms' REPLICA 1 WAL_LEVEL 1 VGROUPS 3 SINGLE_STABLE 0 TABLE_PREFIX 0 TABLE_SUFFIX 0 TSDB_PAGESIZE 4 WAL_RETENTION_PERIOD 3600 WAL_RETENTION_SIZE 0"
        tdSql.execute(sql, queryTimes=1)
        tdLog.info("database ep_iot created")
sql = "CREATE STABLE `ep_iot`.`sldc_dp` (`ts` TIMESTAMP, `data_write_time` TIMESTAMP, `jz1fdgl` DOUBLE, `jz1ssfdfh` DOUBLE, `jz1fdmh` DOUBLE, `jz1gdmh` DOUBLE, `jz1qjrhl` DOUBLE, `jz1zhcydl` DOUBLE, `jz1zkby` DOUBLE, `jz1zzqyl` DOUBLE, `jz1zzqwda` DOUBLE, `jz1zzqwdb` DOUBLE, `jz1zzqll` DOUBLE, `jz1gswd` DOUBLE, `jz1gsll` DOUBLE, `jz1glxl` DOUBLE, `jz1qjrh` DOUBLE, `jz1zhrxl` DOUBLE, `jz1gmjassllfk` DOUBLE, `jz1gmjasslllj` DOUBLE, `jz1gmjbssllfk` DOUBLE, `jz1gmjbsslllj` DOUBLE, `jz1gmjcssllfk` DOUBLE, `jz1gmjcsslllj` DOUBLE, `jz1gmjdssllfk` DOUBLE, `jz1gmjdsslllj` DOUBLE, `jz1gmjessllfk` DOUBLE, `jz1gmjesslllj` DOUBLE, `jz1gmjfssllfk` DOUBLE, `jz1gmjfsslllj` DOUBLE, `jz1zrqwda` DOUBLE, `jz1zrqwdb` DOUBLE, `jz1zrzqyl` DOUBLE, `jz1mmjadl` DOUBLE, `jz1mmjbdl` DOUBLE, `jz1mmjcdl` DOUBLE, `jz1mmjddl` DOUBLE, `jz1mmjedl` DOUBLE, `jz1mmjfdl` DOUBLE, `jz1cyqckwda` DOUBLE, `jz1cyqckwdb` DOUBLE, `jz1njswd` DOUBLE, `jz1nqqxhsckawd` DOUBLE, `jz1nqqxhsckbwd` DOUBLE, `jz1nqqxhsrkawd` DOUBLE, `jz1nqqxhsrkbwd` DOUBLE, `jz1kyqackyqwdsel` DOUBLE, `jz1kyqbckyqwdsel` DOUBLE, `jz1yfjackyqwd` DOUBLE, `jz1yfjbckyqwd` DOUBLE, `jz1trkyqwd` DOUBLE, `jz1trkyqwd1` DOUBLE, `jz1trkyqwd2` DOUBLE, `jz1trkyqwd3` DOUBLE, `jz1tckjyqwd1` DOUBLE, `jz1tckjyqwd2` DOUBLE, `jz1tckyqwd1` DOUBLE, `jz1bya` DOUBLE, `jz1byb` DOUBLE, `jz1pqwda` DOUBLE, `jz1pqwdb` DOUBLE, `jz1gmjadl` DOUBLE, `jz1gmjbdl` DOUBLE, `jz1gmjcdl` DOUBLE, `jz1gmjddl` DOUBLE, `jz1gmjedl` DOUBLE, `jz1gmjfdl` DOUBLE, `jz1yfjadl` DOUBLE, `jz1yfjbdl` DOUBLE, `jz1ycfjadl` DOUBLE, `jz1ycfjbdl` DOUBLE, `jz1sfjadl` DOUBLE, `jz1sfjbdl` DOUBLE, `jz1fdjyggl` DOUBLE, `jz1fdjwggl` DOUBLE, `jz1sjzs` DOUBLE, `jz1zfl` DOUBLE, `jz1ltyl` DOUBLE, `jz1smb` DOUBLE, `jz1rll` DOUBLE, `jz1grd` DOUBLE, `jz1zjwd` DOUBLE, `jz1yl` DOUBLE, `jz1kyqckwd` DOUBLE, `jz1abmfsybrkcy` DOUBLE, `jz1bbmfsybrkcy` DOUBLE, `jz1abjcsdmfytwdzdz` DOUBLE, `jz1bbjcsdmfytwdzdz` DOUBLE, `jz2fdgl` DOUBLE, `jz2ssfdfh` DOUBLE, `jz2fdmh` DOUBLE, `jz2gdmh` DOUBLE, `jz2qjrhl` DOUBLE, `jz2zhcydl` DOUBLE, `jz2zkby` DOUBLE, `jz2zzqyl` DOUBLE, `jz2zzqwda` DOUBLE, `jz2zzqwdb` DOUBLE, `jz2zzqll` DOUBLE, `jz2gswd` DOUBLE, `jz2gsll` DOUBLE, `jz2glxl` DOUBLE, `jz2qjrh` DOUBLE, `jz2zhrxl` DOUBLE, `jz2gmjassllfk` DOUBLE, `jz2gmjasslllj` DOUBLE, `jz2gmjbssllfk` DOUBLE, `jz2gmjbsslllj` DOUBLE, `jz2gmjcssllfk` DOUBLE, `jz2gmjcsslllj` DOUBLE, `jz2gmjdssllfk` DOUBLE, `jz2gmjdsslllj` DOUBLE, `jz2gmjessllfk` DOUBLE, `jz2gmjesslllj` DOUBLE, `jz2gmjfssllfk` DOUBLE, `jz2gmjfsslllj` DOUBLE, `jz2zrqwda` DOUBLE, `jz2zrqwdb` DOUBLE, `jz2zrzqyl` DOUBLE, `jz2mmjadl` DOUBLE, `jz2mmjbdl` DOUBLE, `jz2mmjcdl` DOUBLE, `jz2mmjddl` DOUBLE, `jz2mmjedl` DOUBLE, `jz2mmjfdl` DOUBLE, `jz2cyqckwda` DOUBLE, `jz2cyqckwdb` DOUBLE, `jz2njswd` DOUBLE, `jz2nqqxhsckawd` DOUBLE, `jz2nqqxhsckbwd` DOUBLE, `jz2nqqxhsrkawd` DOUBLE, `jz2nqqxhsrkbwd` DOUBLE, `jz2kyqackyqwdsel` DOUBLE, `jz2kyqbckyqwdsel` DOUBLE, `jz2yfjackyqwd` DOUBLE, `jz2yfjbckyqwd` DOUBLE, `jz2trkyqwd` DOUBLE, `jz2trkyqwd1` DOUBLE, `jz2trkyqwd2` DOUBLE, `jz2trkyqwd3` DOUBLE, `jz2tckjyqwd1` DOUBLE, `jz2tckjyqwd2` DOUBLE, `jz2tckyqwd1` DOUBLE, `jz2bya` DOUBLE, `jz2byb` DOUBLE, `jz2pqwda` DOUBLE, `jz2pqwdb` DOUBLE, `jz2gmjadl` DOUBLE, `jz2gmjbdl` DOUBLE, `jz2gmjcdl` DOUBLE, `jz2gmjddl` DOUBLE, `jz2gmjedl` DOUBLE, `jz2gmjfdl` DOUBLE, `jz2yfjadl` DOUBLE, `jz2yfjbdl` DOUBLE, `jz2ycfjadl` DOUBLE, `jz2ycfjbdl` DOUBLE, `jz2sfjadl` DOUBLE, `jz2sfjbdl` DOUBLE, `jz2fdjyggl` DOUBLE, `jz2fdjwggl` DOUBLE, `jz2sjzs` DOUBLE, `jz2zfl` DOUBLE, `jz2ltyl` DOUBLE, `jz2smb` DOUBLE, `jz2rll` DOUBLE, `jz2grd` DOUBLE, `jz2zjwd` 
DOUBLE, `jz2yl` DOUBLE, `jz2kyqckwd` DOUBLE, `jz2abmfsybrkcy` DOUBLE, `jz2bbmfsybrkcy` DOUBLE, `jz2abjcsdmfytwdzdz` DOUBLE, `jz2bbjcsdmfytwdzdz` DOUBLE) TAGS (`iot_hub_id` VARCHAR(100), `device_group_code` VARCHAR(100), `device_code` VARCHAR(100))"
        tdLog.info("stable ep_iot.sldc_dp created")
        tdSql.execute(sql, queryTimes=1)
sql = "insert into ep_iot.sldc_dp_t1 using ep_iot.sldc_dp tags('a','a','a') values(now, now, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9,0,1);"
        tdSql.execute(sql, queryTimes=1)
sql = "insert into ep_iot.sldc_dp_t1 using ep_iot.sldc_dp tags('b','b','b') values(now, now, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9,0,1);"
        tdSql.execute(sql, queryTimes=1)
sql = "insert into ep_iot.sldc_dp_t1 using ep_iot.sldc_dp tags('c','c','c') values(now, now, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9,0,1);"
        tdSql.execute(sql, queryTimes=1)
sql = "insert into ep_iot.sldc_dp_t1 using ep_iot.sldc_dp tags('d','d','d') values(now, now, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9,0,1);"
        tdSql.execute(sql, queryTimes=1)
sql = "insert into ep_iot.sldc_dp_t1 using ep_iot.sldc_dp tags('e','e','e') values(now, now, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9, 0,1,2,3,4,5,6,7,8,9,0,1);"
        tdSql.execute(sql, queryTimes=1)
sql = "select scdw_code, scdw_name, jzmc, fdgl, jzzt from ((select '01072016' as scdw_code, '盛鲁电厂' as scdw_name, '机组1' as jzmc, last(jz1fdjyggl) as fdgl, '填报' as jzzt, last(ts) as ts from ep_iot.sldc_dp) union all ( select '01072016' as scdw_code, '盛鲁电厂' as scdw_name, '机组2' as jzmc, last(jz2fdjyggl) as fdgl, '填报' as jzzt, last(ts) as ts from ep_iot.sldc_dp) union all ( select '00103673' as scdw_code, '鲁西电厂' as scdw_name, '机组1'as jzmc, last(jz1fdjyggl) as fdgl, '填报' as jzzt, last(ts) as ts from ep_iot.sldc_dp) union all ( select '00103673' as scdw_code, '鲁西电厂' as scdw_name, '机组2'as jzmc, last(jz2fdjyggl) as fdgl, '填报' as jzzt, last(ts) as ts from ep_iot.sldc_dp) union all ( select '01061584' as scdw_code, '富源热电' as scdw_name, '机组1'as jzmc, last(jz1fdjyggl) as fdgl, '填报' as jzzt ,last(ts) as ts from ep_iot.sldc_dp) union all ( select '01061584' as scdw_code, '富源热电' as scdw_name, '机组2'as jzmc, last(jz2fdjyggl) as fdgl, '填报' as jzzt ,last(ts) as ts from ep_iot.sldc_dp)) where scdw_code like '%%';"
        tdSql.query(sql, queryTimes=1)
        tdSql.checkCols(5)
        tdSql.checkRows(6)

sql = "select scdw_name, scdw_code, jzmc, fdgl, jzzt from ((select '01072016' as scdw_code, '盛鲁电厂' as scdw_name, '机组1' as jzmc, last(jz1fdjyggl) as fdgl, '填报' as jzzt, last(ts) as ts from ep_iot.sldc_dp) union all ( select '01072016' as scdw_code, '盛鲁电厂' as scdw_name, '机组2' as jzmc, last(jz2fdjyggl) as fdgl, '填报' as jzzt, last(ts) as ts from ep_iot.sldc_dp) union all ( select '00103673' as scdw_code, '鲁西电厂' as scdw_name, '机组1'as jzmc, last(jz1fdjyggl) as fdgl, '填报' as jzzt, last(ts) as ts from ep_iot.sldc_dp) union all ( select '00103673' as scdw_code, '鲁西电厂' as scdw_name, '机组2'as jzmc, last(jz2fdjyggl) as fdgl, '填报' as jzzt, last(ts) as ts from ep_iot.sldc_dp) union all ( select '01061584' as scdw_code, '富源热电' as scdw_name, '机组1'as jzmc, last(jz1fdjyggl) as fdgl, '填报' as jzzt ,last(ts) as ts from ep_iot.sldc_dp) union all ( select '01061584' as scdw_code, '富源热电' as scdw_name, '机组2'as jzmc, last(jz2fdjyggl) as fdgl, '填报' as jzzt ,last(ts) as ts from ep_iot.sldc_dp)) where scdw_code like '%%';"
        tdSql.query(sql, queryTimes=1)
        tdSql.checkCols(5)
        tdSql.checkRows(6)
sql = "select scdw_name, scdw_code, jzzt from ((select '01072016' as scdw_code, '盛鲁电厂' as scdw_name, '机组1' as jzmc, last(jz1fdjyggl) as fdgl, '填报' as jzzt, last(ts) as ts from ep_iot.sldc_dp) union all ( select '01072016' as scdw_code, '盛鲁电厂' as scdw_name, '机组2' as jzmc, last(jz2fdjyggl) as fdgl, '填报' as jzzt, last(ts) as ts from ep_iot.sldc_dp) union all ( select '00103673' as scdw_code, '鲁西电厂' as scdw_name, '机组1'as jzmc, last(jz1fdjyggl) as fdgl, '填报' as jzzt, last(ts) as ts from ep_iot.sldc_dp) union all ( select '00103673' as scdw_code, '鲁西电厂' as scdw_name, '机组2'as jzmc, last(jz2fdjyggl) as fdgl, '填报' as jzzt, last(ts) as ts from ep_iot.sldc_dp) union all ( select '01061584' as scdw_code, '富源热电' as scdw_name, '机组1'as jzmc, last(jz1fdjyggl) as fdgl, '填报' as jzzt ,last(ts) as ts from ep_iot.sldc_dp) union all ( select '01061584' as scdw_code, '富源热电' as scdw_name, '机组2'as jzmc, last(jz2fdjyggl) as fdgl, '填报' as jzzt ,last(ts) as ts from ep_iot.sldc_dp)) where scdw_code like '%%';"
        tdSql.query(sql, queryTimes=1)
        tdSql.checkRows(6)
        tdSql.checkCols(3)

sql = "select scdw_code, scdw_name, jzmc, fdgl, jzzt,ts from ((select '01072016' as scdw_code, '盛鲁电厂' as scdw_name, '机组1' as jzmc, last(jz1fdjyggl) as fdgl, '填报' as jzzt, last(ts) as ts from ep_iot.sldc_dp) union all ( select '01072016' as scdw_code, '盛鲁电厂' as scdw_name, '机组2' as jzmc, last(jz2fdjyggl) as fdgl, '填报' as jzzt, last(ts) as ts from ep_iot.sldc_dp) union all ( select '00103673' as scdw_code, '鲁西电厂' as scdw_name, '机组1'as jzmc, last(jz1fdjyggl) as fdgl, '填报' as jzzt, last(ts) as ts from ep_iot.sldc_dp) union all ( select '00103673' as scdw_code, '鲁西电厂' as scdw_name, '机组2'as jzmc, last(jz2fdjyggl) as fdgl, '填报' as jzzt, last(ts) as ts from ep_iot.sldc_dp) union all ( select '01061584' as scdw_code, '富源热电' as scdw_name, '机组1'as jzmc, last(jz1fdjyggl) as fdgl, '填报' as jzzt ,last(ts) as ts from ep_iot.sldc_dp) union all ( select '01061584' as scdw_code, '富源热电' as scdw_name, '机组2'as jzmc, last(jz2fdjyggl) as fdgl, '填报' as jzzt ,last(ts) as ts from ep_iot.sldc_dp)) where scdw_code like '%%';"
        tdSql.query(sql, queryTimes=1)
        tdSql.checkCols(6)
        tdSql.checkRows(6)
        ##tdSql.execute("drop database ep_iot")

    def test_case_for_nodes_match_node(self):
        sql = "create table db.nt (ts timestamp, c1 int primary key, c2 int)"
        tdSql.execute(sql, queryTimes=1)
        sql = 'select diff (ts) from (select * from db.tt union select * from db.tt order by c1, case when ts < now - 1h then ts + 1h else ts end) partition by c1, case when ts < now - 1h then ts + 1h else ts end'
        tdSql.error(sql, -2147473917)

    def run(self):
        tdSql.prepare()
        self.test_TS_5630()

        tdLog.printNoPrefix("==========step1:create table")
        self.__create_tb()

        tdLog.printNoPrefix("==========step2:insert data")
        self.rows = 10
        self.__insert_data(self.rows)

        tdLog.printNoPrefix("==========step3:all check")
        self.all_test()

        tdSql.execute("flush database db")

        tdSql.execute("use db")

        tdLog.printNoPrefix("==========step4:after wal, all check again")
        self.all_test()
        self.test_TD_33137()
        self.test_case_for_nodes_match_node()

    def test_TD_33137(self):
        sql = "select 'asd' union all select 'asdasd'"
        tdSql.query(sql, queryTimes=1)
        tdSql.checkRows(2)
        sql = "select db_name `TABLE_CAT`, '' `TABLE_SCHEM`, stable_name `TABLE_NAME`, 'TABLE' `TABLE_TYPE`, table_comment `REMARKS` from information_schema.ins_stables union all select db_name `TABLE_CAT`, '' `TABLE_SCHEM`, table_name `TABLE_NAME`, case when `type`='SYSTEM_TABLE' then 'TABLE' when `type`='NORMAL_TABLE' then 'TABLE' when `type`='CHILD_TABLE' then 'TABLE' else 'UNKNOWN' end `TABLE_TYPE`, table_comment `REMARKS` from information_schema.ins_tables union all select db_name `TABLE_CAT`, '' `TABLE_SCHEM`, view_name `TABLE_NAME`, 'VIEW' `TABLE_TYPE`, NULL `REMARKS` from information_schema.ins_views"
        tdSql.query(sql, queryTimes=1)
        tdSql.checkRows(49)

        sql = "select null union select null"
        tdSql.query(sql, queryTimes=1)
        tdSql.checkRows(1)
        tdSql.checkData(0, 0, None)

        sql = "select null union all select null"
        tdSql.query(sql, queryTimes=1)
        tdSql.checkRows(2)
        tdSql.checkData(0, 0, None)
        tdSql.checkData(1, 0, None)

        sql = "select null union select 1"
        tdSql.query(sql, queryTimes=1)
        tdSql.checkRows(2)
        tdSql.checkData(0, 0, None)
        tdSql.checkData(1, 0, 1)

        sql = "select null union select 'asd'"
        tdSql.query(sql, queryTimes=1)
        tdSql.checkRows(2)
        tdSql.checkData(0, 0, None)
        tdSql.checkData(1, 0, 'asd')

    def stop(self):
        tdSql.close()
        tdLog.success(f"{__file__} successfully executed")


tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())

@ -0,0 +1,602 @@
import taos
import os
import sys
import time
from pathlib import Path
sys.path.append(os.path.dirname(Path(__file__).resolve().parent.parent.parent) + "/7-tmq")

from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import *
from util.common import *
from util.sqlset import *
from tmqCommon import *

class TDTestCase:
    """This test case is used to verify that tmq can consume data from a non-materialized view
    """
    def init(self, conn, logSql, replicaVar=1):
        self.replicaVar = int(replicaVar)
        tdLog.debug(f"start to execute {__file__}")
        tdSql.init(conn.cursor())
        self.setsql = TDSetSql()

        # db info
        self.dbname = "view_db"
        self.stbname = 'stb'
        self.ctbname_list = ["ct1", "ct2"]
        self.stable_column_dict = {
            'ts': 'timestamp',
            'col1': 'float',
            'col2': 'int',
        }
        self.tag_dict = {
            'ctbname': 'binary(10)'
        }

    def prepare_data(self, conn=None):
        """Create the db and data for test
        """
        tdLog.debug("Start to prepare the data")
        if not conn:
            conn = tdSql
        # create database
        conn.execute(f"create database {self.dbname}")
        conn.execute(f"use {self.dbname}")
        time.sleep(2)

        # create stable
        conn.execute(self.setsql.set_create_stable_sql(self.stbname, self.stable_column_dict, self.tag_dict))
        tdLog.debug("Create stable {} successfully".format(self.stbname))

        # create child tables
        for ctname in self.ctbname_list:
            conn.execute(f"create table {ctname} using {self.stbname} tags('{ctname}');")
            tdLog.debug("Create child table {} successfully".format(ctname))

            # insert data into child tables
            conn.execute(f"insert into {ctname} values(now, 1.1, 1)(now+1s, 2.2, 2)(now+2s, 3.3, 3)(now+3s, 4.4, 4)(now+4s, 5.5, 5)(now+5s, 6.6, 6)(now+6s, 7.7, 7)(now+7s, 8.8, 8)(now+8s, 9.9, 9)(now+9s, 10.1, 10);")
            tdLog.debug(f"Insert data into {ctname} successfully")

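    # Net effect of prepare_data: 2 child tables x 10 rows each, i.e. 20 rows in
    # the super table; the asserts below (rows == 20, rows == 10) rely on this.
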
    def prepare_tmq_data(self, para_dic):
        tdLog.debug("Start to prepare the tmq data")
        tmqCom.initConsumerTable()
        tdCom.create_database(tdSql, para_dic["dbName"], para_dic["dropFlag"], vgroups=para_dic["vgroups"], replica=1)
        tdLog.info("create stb")
        tdCom.create_stable(tdSql, dbname=para_dic["dbName"], stbname=para_dic["stbName"], column_elm_list=para_dic['colSchema'], tag_elm_list=para_dic['tagSchema'])
        tdLog.info("create ctb")
        tdCom.create_ctable(tdSql, dbname=para_dic["dbName"], stbname=para_dic["stbName"], tag_elm_list=para_dic['tagSchema'], count=para_dic["ctbNum"], default_ctbname_prefix=para_dic['ctbPrefix'])
        tdLog.info("insert data")
        tmqCom.insert_data(tdSql, para_dic["dbName"], para_dic["ctbPrefix"], para_dic["ctbNum"], para_dic["rowsPerTbl"], para_dic["batchNum"], para_dic["startTs"])
        tdLog.debug("Finish to prepare the tmq data")

    def check_view_num(self, num):
        tdSql.query("show views;")
        rows = tdSql.queryRows
        assert(rows == num)
        tdLog.debug(f"Verify the view number successfully")

    def create_user(self, username, password):
        tdSql.execute(f"create user {username} pass '{password}';")
        tdSql.execute(f"alter user {username} createdb 1;")
        tdLog.debug("Create user {} with password {} successfully".format(username, password))

    def check_permissions(self, username, db_name, permission_dict, view_name=None):
        """
        :param permission_dict: {'db': ["read", "write"], 'view': ["read", "write", "alter"]}
        """
        tdSql.query("select * from information_schema.ins_user_privileges;")
        for item in permission_dict.keys():
            if item == "db":
                for permission in permission_dict[item]:
                    assert((username, permission, db_name, "", "", "") in tdSql.queryResult)
                    tdLog.debug(f"Verify the {item} {db_name} {permission} permission successfully")
            elif item == "view":
                for permission in permission_dict[item]:
                    assert((username, permission, db_name, view_name, "", "view") in tdSql.queryResult)
                    tdLog.debug(f"Verify the {item} {db_name} {view_name} {permission} permission successfully")
            else:
                raise Exception(f"Invalid permission type: {item}")

    def test_create_view_from_one_database(self):
        """This test case is used to verify the create view from one database
        """
        self.prepare_data()
        tdSql.execute(f"create view v1 as select * from {self.stbname};")
        self.check_view_num(1)
        tdSql.error(f'create view v1 as select * from {self.stbname};', expectErrInfo='view already exists in db')
        tdSql.error(f'create view db2.v2 as select * from {self.stbname};', expectErrInfo='Fail to get table info, error: Database not exist')
        tdSql.error(f'create view v2 as select c2 from {self.stbname};', expectErrInfo='Invalid column name: c2')
        tdSql.error(f'create view v2 as select ts, col1 from tt1;', expectErrInfo='Fail to get table info, error: Table does not exist')

        tdSql.execute(f"drop database {self.dbname}")
        tdLog.debug("Finish test case 'test_create_view_from_one_database'")

    def test_create_view_from_multi_database(self):
        """This test case is used to verify the create view from multi database
        """
        self.prepare_data()
        tdSql.execute(f"create view v1 as select * from view_db.{self.stbname};")
        self.check_view_num(1)

        self.dbname = "view_db2"
        self.prepare_data()
        tdSql.execute(f"create view v1 as select * from view_db2.{self.stbname};")
        tdSql.execute(f"create view v2 as select * from view_db.v1;")
        self.check_view_num(2)

        self.dbname = "view_db"
        tdSql.execute(f"drop database view_db;")
        tdSql.execute(f"drop database view_db2;")
        tdLog.debug("Finish test case 'test_create_view_from_multi_database'")

    def test_create_view_name_params(self):
        """This test case is used to verify the create view with different view name params
        """
        self.prepare_data()
        tdSql.execute(f"create view v1 as select * from {self.stbname};")
        self.check_view_num(1)
        tdSql.error(f"create view v/2 as select * from {self.stbname};", expectErrInfo='syntax error near "/2 as select * from stb;"')
        tdSql.execute(f"create view v2 as select ts, col1 from {self.stbname};")
        self.check_view_num(2)
        view_name_192_characters = "rzuoxoIXilAGgzNjYActiQwgzZK7PZYpDuaOe1lSJMFMVYXaexh1OfMmk3LvJcQbTeXXW7uGJY8IHuweHF73VHgoZgf0waO33YpZiTKfDQbdWtN4YmR2eWjL84ZtkfjM4huCP6lCysbDMj8YNwWksTdUq70LIyNhHp2V8HhhxyYSkREYFLJ1kOE78v61MQT6"
        tdSql.execute(f"create view {view_name_192_characters} as select * from {self.stbname};")
        self.check_view_num(3)
        tdSql.error(f"create view {view_name_192_characters}1 as select * from {self.stbname};", expectErrInfo='Invalid identifier name: rzuoxoixilaggznjyactiqwgzzk7pzypduaoe1lsjmfmvyxaexh1ofmmk3lvjcqbtexxw7ugjy8ihuwehf73vhgozgf0wao33ypzitkfdqbdwtn4ymr2ewjl84ztkfjm4hucp6lcysbdmj8ynwwkstduq70liynhhp2v8hhhxyyskreyflj1koe78v61mqt61 as select * from stb;')
        tdSql.execute(f"drop database {self.dbname}")
        tdLog.debug("Finish test case 'test_create_view_name_params'")

    def test_create_view_query(self):
        """This test case is used to verify the create view with different data type in query
        """
        self.prepare_data()
        # add a table covering the different data types
        tdSql.execute(f"create table tb (ts timestamp, c1 int, c2 int unsigned, c3 bigint, c4 bigint unsigned, c5 float, c6 double, c7 binary(16), c8 smallint, c9 smallint unsigned, c10 tinyint, c11 tinyint unsigned, c12 bool, c13 varchar(16), c14 nchar(8), c15 geometry(21), c16 varbinary(16));")
        tdSql.execute(f"create view v1 as select ts, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, c13, c14, c15, c16 from tb;")
        # check data type in create view sql
        tdSql.query("desc v1;")
        res = tdSql.queryResult
        data_type_list = [res[index][1] for index in range(len(res))]
        tdLog.debug(data_type_list)
        assert('TIMESTAMP' in data_type_list and 'INT' in data_type_list and 'INT UNSIGNED' in data_type_list and 'BIGINT' in data_type_list and 'BIGINT UNSIGNED' in data_type_list and 'FLOAT' in data_type_list and 'DOUBLE' in data_type_list and 'VARCHAR' in data_type_list and 'SMALLINT' in data_type_list and 'SMALLINT UNSIGNED' in data_type_list and 'TINYINT' in data_type_list and 'TINYINT UNSIGNED' in data_type_list and 'BOOL' in data_type_list and 'NCHAR' in data_type_list and 'GEOMETRY' in data_type_list and 'VARBINARY' in data_type_list)
        tdSql.execute("create view v2 as select * from tb where c1 >5 and c7 like '%ab%';")
        self.check_view_num(2)
        tdSql.error("create view v3 as select * from tb where c1 like '%ab%';", expectErrInfo='Invalid operation')
        tdSql.execute("create view v3 as select first(ts), sum(c1) from tb group by c2 having avg(c4) > 0;")
        tdSql.execute("create view v4 as select _wstart,sum(c6) from tb interval(10s);")
        tdSql.execute("create view v5 as select * from tb join v2 on tb.ts = v2.ts;")
        tdSql.execute("create view v6 as select * from (select ts, c1, c2 from (select * from v2));")
        self.check_view_num(6)
        for v in ['v1', 'v2', 'v3', 'v4', 'v5', 'v6']:
            tdSql.execute(f"drop view {v};")
        tdSql.execute(f"drop database {self.dbname}")
        tdLog.debug("Finish test case 'test_create_view_query'")

    def test_show_view(self):
        """This test case is used to verify the show view
        """
        self.prepare_data()
        tdSql.execute(f"create view v1 as select * from {self.ctbname_list[0]};")

        # query from show sql
        tdSql.query("show views;")
        res = tdSql.queryResult
        assert(res[0][0] == 'v1' and res[0][1] == 'view_db' and res[0][2] == 'root' and res[0][4] == 'NORMAL' and res[0][5] == 'select * from ct1;')

        # show create sql
        tdSql.query("show create view v1;")
        res = tdSql.queryResult
        assert(res[0][1] == 'CREATE VIEW `view_db`.`v1` AS select * from ct1;')

        # query from desc results
        tdSql.query("desc view_db.v1;")
        res = tdSql.queryResult
        assert(res[0][1] == 'TIMESTAMP' and res[1][1] == 'FLOAT' and res[2][1] == 'INT')

        # query from system table
        tdSql.query("select * from information_schema.ins_views;")
        res = tdSql.queryResult
        assert(res[0][0] == 'v1' and res[0][1] == 'view_db' and res[0][2] == 'root' and res[0][4] == 'NORMAL' and res[0][5] == 'select * from ct1;')
        tdSql.error("show db3.views;", expectErrInfo='Database not exist')
        tdSql.error("desc viewx;", expectErrInfo='Table does not exist')
        tdSql.error(f"show create view {self.dbname}.viewx;", expectErrInfo='view not exists in db')
        tdSql.execute(f"drop database {self.dbname}")
        tdSql.error("show views;", expectErrInfo='Database not exist')
        tdLog.debug("Finish test case 'test_show_view'")

    def test_drop_view(self):
        """This test case is used to verify the drop view
        """
        self.prepare_data()
        self.dbname = "view_db2"
        self.prepare_data()
        tdSql.execute("create view view_db.v1 as select * from view_db.stb;")
        tdSql.execute("create view view_db2.v1 as select * from view_db2.stb;")
        # delete view without database name
        tdSql.execute("drop view v1;")
        # delete view with database name
        tdSql.execute("drop view view_db.v1;")
        # delete a non-existent view
        tdSql.error("drop view view_db.v11;", expectErrInfo='view not exists in db')
        tdSql.execute("drop database view_db")
        tdSql.execute("drop database view_db2;")
        self.dbname = "view_db"
        tdLog.debug("Finish test case 'test_drop_view'")

    def test_view_permission_db_all_view_all(self):
        """This test case is used to verify the view permission with db all and view all;
        the time.sleep calls wait for the permission changes to take effect
        """
        self.prepare_data()
        username = "view_test"
        password = "test123@#$"
        self.create_user(username, password)
        # grant all db permission to user
        tdSql.execute("grant all on view_db.* to view_test;")

        conn = taos.connect(user=username, password=password)
        conn.execute(f"use {self.dbname};")
        conn.execute("create view v1 as select * from stb;")
        res = conn.query("show views;")
        assert(len(res.fetch_all()) == 1)
        tdLog.debug(f"Verify the show view permission of user '{username}' with db all and view all successfully")
        self.check_permissions("view_test", "view_db", {"db": ["read", "write"], "view": ["read", "write", "alter"]}, "v1")
        tdLog.debug(f"Verify the view permission from system table successfully")
        time.sleep(2)
        conn.execute("drop view v1;")
        tdSql.execute("revoke all on view_db.* from view_test;")
        tdSql.execute(f"drop database {self.dbname};")
        time.sleep(1)

        # prepare data by user 'view_test'
        self.prepare_data(conn)

        conn.execute("create view v1 as select * from stb;")
        res = conn.query("show views;")
        assert(len(res.fetch_all()) == 1)
        tdLog.debug(f"Verify the view permission of user '{username}' with db all and view all successfully")
        self.check_permissions("view_test", "view_db", {"db": ["read", "write"], "view": ["read", "write", "alter"]}, "v1")
        tdLog.debug(f"Verify the view permission from system table successfully")
        time.sleep(2)
        conn.execute("drop view v1;")
        tdSql.execute("revoke all on view_db.* from view_test;")
        tdSql.execute("revoke all on view_db.v1 from view_test;")
        tdSql.execute(f"drop database {self.dbname}")
        tdSql.execute("drop user view_test;")
        tdLog.debug("Finish test case 'test_view_permission_db_all_view_all'")

    def test_view_permission_db_write_view_all(self):
        """This test case is used to verify the view permission with db write and view all
        """
        username = "view_test"
        password = "test123@#$"
        self.create_user(username, password)
        conn = taos.connect(user=username, password=password)
        self.prepare_data(conn)
        conn.execute("create view v1 as select * from stb;")
        tdSql.execute("revoke read on view_db.* from view_test;")
        self.check_permissions("view_test", "view_db", {"db": ["write"], "view": ["read", "write", "alter"]}, "v1")
        # create view permission error
        try:
            conn.execute("create view v2 as select * from v1;")
        except Exception as ex:
            assert("[0x2644]: Permission denied or target object not exist" in str(ex))
        # query from view permission error
        try:
            conn.query("select * from v1;")
        except Exception as ex:
            assert("[0x2644]: Permission denied or target object not exist" in str(ex))
        # view query permission
        res = conn.query("show views;")
        assert(len(res.fetch_all()) == 1)
        time.sleep(2)
        conn.execute("drop view v1;")
        tdSql.execute("revoke write on view_db.* from view_test;")
        tdSql.execute(f"drop database {self.dbname}")
        tdSql.execute("drop user view_test;")
        tdLog.debug("Finish test case 'test_view_permission_db_write_view_all'")

    def test_view_permission_db_write_view_read(self):
        """This test case is used to verify the view permission with db write and view read
        """
        username = "view_test"
        password = "test123@#$"
        self.create_user(username, password)
        conn = taos.connect(user=username, password=password)
        self.prepare_data()

        tdSql.execute("create view v1 as select * from stb;")
        tdSql.execute("grant write on view_db.* to view_test;")
        tdSql.execute("grant read on view_db.v1 to view_test;")

        conn.execute(f"use {self.dbname};")
        time.sleep(2)
        res = conn.query("select * from v1;")
        assert(len(res.fetch_all()) == 20)

        conn.execute("create view v2 as select * from v1;")
        # create view from super table of database
        try:
            conn.execute("create view v3 as select * from stb;")
        except Exception as ex:
            assert("[0x2644]: Permission denied or target object not exist" in str(ex))
        time.sleep(2)
        conn.execute("drop view v2;")
        try:
            conn.execute("drop view v1;")
        except Exception as ex:
            assert("[0x2644]: Permission denied or target object not exist" in str(ex))
        tdSql.execute("revoke read on view_db.v1 from view_test;")
        tdSql.execute("revoke write on view_db.* from view_test;")
        tdSql.execute(f"drop database {self.dbname}")
        tdSql.execute("drop user view_test;")
        tdLog.debug("Finish test case 'test_view_permission_db_write_view_read'")

    def test_view_permission_db_write_view_alter(self):
        """This test case is used to verify the view permission with db write and view alter
        """
        username = "view_test"
        password = "test123@#$"
        self.create_user(username, password)
        conn = taos.connect(user=username, password=password)
        self.prepare_data()

        tdSql.execute("create view v1 as select * from stb;")
        tdSql.execute("grant write on view_db.* to view_test;")
        tdSql.execute("grant alter on view_db.v1 to view_test;")
        try:
            conn.execute(f"use {self.dbname};")
            conn.execute("select * from v1;")
        except Exception as ex:
            assert("[0x2644]: Permission denied or target object not exist" in str(ex))
        time.sleep(2)
        conn.execute("drop view v1;")
        tdSql.execute("revoke write on view_db.* from view_test;")
        tdSql.execute(f"drop database {self.dbname}")
        tdSql.execute("drop user view_test;")
        tdLog.debug("Finish test case 'test_view_permission_db_write_view_alter'")

    def test_view_permission_db_read_view_all(self):
        """This test case is used to verify the view permission with db read and view all
        """
        username = "view_test"
        password = "test123@#$"
        self.create_user(username, password)
        conn = taos.connect(user=username, password=password)
        self.prepare_data()

        tdSql.execute("create view v1 as select * from stb;")
        tdSql.execute("grant read on view_db.* to view_test;")
        tdSql.execute("grant all on view_db.v1 to view_test;")
        try:
            conn.execute(f"use {self.dbname};")
            conn.execute("create view v2 as select * from v1;")
        except Exception as ex:
            assert("[0x2644]: Permission denied or target object not exist" in str(ex))
        time.sleep(2)
        res = conn.query("select * from v1;")
        assert(len(res.fetch_all()) == 20)
        conn.execute("drop view v1;")
        tdSql.execute("revoke read on view_db.* from view_test;")
        tdSql.execute(f"drop database {self.dbname}")
        tdSql.execute("drop user view_test;")
        tdLog.debug("Finish test case 'test_view_permission_db_read_view_all'")

    def test_view_permission_db_read_view_alter(self):
        """This test case is used to verify the view permission with db read and view alter
        """
        username = "view_test"
        password = "test123@#$"
        self.create_user(username, password)
        conn = taos.connect(user=username, password=password)
        self.prepare_data()

        tdSql.execute("create view v1 as select * from stb;")
        tdSql.execute("grant read on view_db.* to view_test;")
        tdSql.execute("grant alter on view_db.v1 to view_test;")
        try:
            conn.execute(f"use {self.dbname};")
            conn.execute("select * from v1;")
        except Exception as ex:
            assert("[0x2644]: Permission denied or target object not exist" in str(ex))

        time.sleep(2)
        conn.execute("drop view v1;")
        tdSql.execute("revoke read on view_db.* from view_test;")
        tdSql.execute(f"drop database {self.dbname}")
        tdSql.execute("drop user view_test;")
        tdLog.debug("Finish test case 'test_view_permission_db_read_view_alter'")

    def test_view_permission_db_read_view_read(self):
        """This test case is used to verify the view permission with db read and view read
        """
        username = "view_test"
        password = "test123@#$"
        self.create_user(username, password)
        conn = taos.connect(user=username, password=password)
        self.prepare_data()

        tdSql.execute("create view v1 as select * from stb;")
        tdSql.execute("grant read on view_db.* to view_test;")
        tdSql.execute("grant read on view_db.v1 to view_test;")
        conn.execute(f"use {self.dbname};")
        time.sleep(2)
        res = conn.query("select * from v1;")
        assert(len(res.fetch_all()) == 20)
        try:
            conn.execute("drop view v1;")
        except Exception as ex:
            assert("[0x2644]: Permission denied or target object not exist" in str(ex))
        tdSql.execute("revoke read on view_db.* from view_test;")
        tdSql.execute("revoke read on view_db.v1 from view_test;")
        tdSql.execute(f"drop database {self.dbname}")
        tdSql.execute("drop user view_test;")
        tdLog.debug("Finish test case 'test_view_permission_db_read_view_read'")

    def test_query_from_view(self):
        """This test case is used to verify the query from view
        """
        self.prepare_data()
        view_name_list = []

        # common query from super table
        tdSql.execute(f"create view v1 as select * from {self.stbname};")
        tdSql.query(f"select * from v1;")
        rows = tdSql.queryRows
        assert(rows == 20)
        view_name_list.append("v1")
        tdLog.debug("Verify the query from super table successfully")

        # common query from child table
        tdSql.execute(f"create view v2 as select * from {self.ctbname_list[0]};")
        tdSql.query(f"select * from v2;")
        rows = tdSql.queryRows
        assert(rows == 10)
        view_name_list.append("v2")
        tdLog.debug("Verify the query from child table successfully")

        # join query
        tdSql.execute(f"create view v3 as select * from {self.stbname} join {self.ctbname_list[1]} on {self.ctbname_list[1]}.ts = {self.stbname}.ts;")
        tdSql.query(f"select * from v3;")
        rows = tdSql.queryRows
        assert(rows == 10)
        view_name_list.append("v3")
        tdLog.debug("Verify the join query successfully")

        # group by query
        tdSql.execute(f"create view v4 as select count(*) from {self.stbname} group by tbname;")
        tdSql.query(f"select * from v4;")
        rows = tdSql.queryRows
        assert(rows == 2)
        res = tdSql.queryResult
        assert(res[0][0] == 10)
        view_name_list.append("v4")
        tdLog.debug("Verify the group by query successfully")

        # partition by query
        tdSql.execute(f"create view v5 as select sum(col1) from {self.stbname} where col2 > 4 partition by tbname interval(3s);")
        tdSql.query(f"select * from v5;")
        rows = tdSql.queryRows
        assert(rows >= 4)
        view_name_list.append("v5")
        tdLog.debug("Verify the partition by query successfully")

        # query from nested view
        tdSql.execute(f"create view v6 as select * from v5;")
        tdSql.query(f"select * from v6;")
        rows = tdSql.queryRows
        assert(rows >= 4)
        view_name_list.append("v6")
        tdLog.debug("Verify the query from nested view successfully")

        # delete view
        for view in view_name_list:
            tdSql.execute(f"drop view {view};")
            tdLog.debug(f"Drop view {view} successfully")
        tdSql.execute(f"drop database {self.dbname}")
        tdLog.debug("Finish test case 'test_query_from_view'")

    def test_tmq_from_view(self):
        """This test case is used to verify the tmq consume data from view
        """
        # params for db
        paraDict = {'dbName': 'view_db',
                    'dropFlag': 1,
                    'event': '',
                    'vgroups': 4,
                    'stbName': 'stb',
                    'colPrefix': 'c',
                    'tagPrefix': 't',
                    'colSchema': [{'type': 'INT', 'count': 1}, {'type': 'binary', 'len': 20, 'count': 1}],
                    'tagSchema': [{'type': 'INT', 'count': 1}, {'type': 'binary', 'len': 20, 'count': 1}],
                    'ctbPrefix': 'ctb',
                    'ctbNum': 1,
                    'rowsPerTbl': 10000,
                    'batchNum': 10,
                    'startTs': 1640966400000,  # 2022-01-01 00:00:00.000
                    'pollDelay': 10,
                    'showMsg': 1,
                    'showRow': 1}
        # topic info
        topic_name_list = ['topic1']
        view_name_list = ['view1']
        expectRowsList = []

        self.prepare_tmq_data(paraDict)

        # init consume info, and start tmq_sim, then check consume result
        tmqCom.initConsumerTable()
        queryString = "select * from %s.%s" % (paraDict['dbName'], paraDict['stbName'])
        tdSql.execute(f"create view {view_name_list[0]} as {queryString}")
        sqlString = "create topic %s as %s" % (topic_name_list[0], "select * from %s" % view_name_list[0])
        tdLog.info("create topic sql: %s" % sqlString)
        tdSql.execute(sqlString)
        tdSql.query(queryString)
        expectRowsList.append(tdSql.getRows())

        consumerId = 1
        topicList = topic_name_list[0]
        expectrowcnt = paraDict["rowsPerTbl"] * paraDict["ctbNum"]
        keyList = 'group.id:cgrp1, enable.auto.commit:false, auto.commit.interval.ms:6000, auto.offset.reset:earliest'
        ifcheckdata = 1
        ifManualCommit = 1
        tmqCom.insertConsumerInfo(consumerId, expectrowcnt, topicList, keyList, ifcheckdata, ifManualCommit)

        tdLog.info("start consume processor")
        tmqCom.startTmqSimProcess(paraDict['pollDelay'], paraDict["dbName"], paraDict['showMsg'], paraDict['showRow'])

        tdLog.info("wait the consume result")
        expectRows = 1
        resultList = tmqCom.selectConsumeResult(expectRows)
        if expectRowsList[0] != resultList[0]:
            tdLog.info("expect consume rows: %d, act consume rows: %d" % (expectRowsList[0], resultList[0]))
            tdLog.exit("1 tmq consume rows error!")

        tmqCom.checkFileContent(consumerId, queryString)

        time.sleep(10)
        for i in range(len(topic_name_list)):
            tdSql.query("drop topic %s" % topic_name_list[i])
        for i in range(len(view_name_list)):
            tdSql.query("drop view %s" % view_name_list[i])

        # drop database
        tdSql.execute(f"drop database {paraDict['dbName']}")
        tdSql.execute("drop database cdb;")
        tdLog.debug("Finish test case 'test_tmq_from_view'")

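    # Note: the topic in test_tmq_from_view is defined on the view rather than
    # on the super table, so the consumer is expected to receive exactly the
    # rows returned by the view's defining query (expectRowsList[0]).
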
    def test_TD_33390(self):
        tdSql.execute('create database test')
        tdSql.execute('create table test.nt(ts timestamp, c1 int)')
        for i in range(0, 200):
            tdSql.execute(f'create view test.view{i} as select * from test.nt')
        tdSql.query("show test.views")

        for i in range(0, 200):
            tdSql.execute(f'drop view test.view{i}')

    def run(self):
        self.test_TD_33390()
        self.test_create_view_from_one_database()
        self.test_create_view_from_multi_database()
        self.test_create_view_name_params()
        self.test_create_view_query()
        self.test_show_view()
        self.test_drop_view()
        self.test_view_permission_db_all_view_all()
        self.test_view_permission_db_write_view_all()
        self.test_view_permission_db_write_view_read()
        self.test_view_permission_db_write_view_alter()
        self.test_view_permission_db_read_view_all()
        self.test_view_permission_db_read_view_alter()
        self.test_view_permission_db_read_view_read()
        self.test_query_from_view()
        self.test_tmq_from_view()

    def stop(self):
        tdSql.close()
        tdLog.success(f"{__file__} successfully executed")


tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())

@ -0,0 +1,155 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################

# -*- coding: utf-8 -*-

import inspect  # needed for inspect.getframeinfo in the privilege checks below

import taos
from taos.tmq import *
from util.cases import *
from util.common import *
from util.log import *
from util.sql import *
from util.sqlset import *


class TDTestCase:
    def init(self, conn, logSql, replicaVar=1):
        self.replicaVar = int(replicaVar)
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor())
        self.setsql = TDSetSql()
        self.stbname = 'stb'
        self.user_name = 'test'
        self.binary_length = 20  # the length of binary for column_dict
        self.nchar_length = 20  # the length of nchar for column_dict
        self.dbnames = ['db1', 'db2']
        self.column_dict = {
            'ts': 'timestamp',
            'col1': 'float',
            'col2': 'int',
            'col3': 'float',
        }

        self.tag_dict = {
            't1': 'int',
            't2': f'binary({self.binary_length})'
        }

        self.tag_list = [
            f'1, "Beijing"',
            f'2, "Shanghai"',
            f'3, "Guangzhou"',
            f'4, "Shenzhen"'
        ]

        self.values_list = [
            f'now, 9.1, 200, 0.3'
        ]

        self.tbnum = 4
        self.stbnum_grant = 200

    def create_user(self):
        tdSql.execute(f'create user {self.user_name} pass "test123@#$"')
        tdSql.execute(f'grant read on {self.dbnames[0]}.{self.stbname} with t2 = "Beijing" to {self.user_name}')
        tdSql.execute(f'grant write on {self.dbnames[1]}.{self.stbname} with t1 = 2 to {self.user_name}')

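    # The grants above are tag-scoped: user 'test' may only read db1.stb rows
    # whose t2 tag is "Beijing" and only write to db2.stb subtables tagged
    # t1 = 2; the privilege checks below depend on exactly this split.
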
    def prepare_data(self):
        for db in self.dbnames:
            tdSql.execute(f"create database {db}")
            tdSql.execute(f"use {db}")
            tdSql.execute(self.setsql.set_create_stable_sql(self.stbname, self.column_dict, self.tag_dict))
            for i in range(self.tbnum):
                tdSql.execute(f'create table {self.stbname}_{i} using {self.stbname} tags({self.tag_list[i]})')
                for j in self.values_list:
                    tdSql.execute(f'insert into {self.stbname}_{i} values({j})')
            for i in range(self.stbnum_grant):
                tdSql.execute(f'create table {self.stbname}_grant_{i} (ts timestamp, c0 int) tags(t0 int)')

    def user_read_privilege_check(self, dbname):
        testconn = taos.connect(user='test', password='test123@#$')
        expectErrNotOccured = False

        try:
            sql = f"select count(*) from {dbname}.stb where t2 = 'Beijing'"
            res = testconn.query(sql)
            data = res.fetch_all()
            count = data[0][0]
        except BaseException:
            expectErrNotOccured = True

        if expectErrNotOccured:
            caller = inspect.getframeinfo(inspect.stack()[1][0])
            tdLog.exit(f"{caller.filename}({caller.lineno}) failed: sql:{sql}, unexpected error occurred")
        elif count != 1:
            tdLog.exit(f"{sql}, expected result doesn't match")

def user_write_privilege_check(self, dbname):
|
||||
testconn = taos.connect(user='test', password='test123@#$')
|
||||
expectErrNotOccured = False
|
||||
|
||||
try:
|
||||
sql = f"insert into {dbname}.stb_1 values(now, 1.1, 200, 0.3)"
|
||||
testconn.execute(sql)
|
||||
except BaseException:
|
||||
expectErrNotOccured = True
|
||||
|
||||
if expectErrNotOccured:
|
||||
caller = inspect.getframeinfo(inspect.stack()[1][0])
|
||||
tdLog.exit(f"{caller.filename}({caller.lineno}) failed: sql:{sql}, expect error not occured")
|
||||
else:
|
||||
pass
|
||||
|
||||
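    # Note (added commentary): every statement in sql_list below is expected to
    # fail for the `test` user, since db1 carries only a read grant (alter,
    # insert and drop must be rejected) and db2 carries only a write grant
    # (select must be rejected).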
    def user_privilege_error_check(self):
        testconn = taos.connect(user='test', password='test123@#$')
        expectErrNotOccurred = False

        sql_list = [f"alter talbe {self.dbnames[0]}.stb_1 set t2 = 'Wuhan'",
                    f"insert into {self.dbnames[0]}.stb_1 values(now, 1.1, 200, 0.3)",
                    f"drop table {self.dbnames[0]}.stb_1",
                    f"select count(*) from {self.dbnames[1]}.stb"]

        for sql in sql_list:
            try:
                res = testconn.execute(sql)
            except BaseException:
                expectErrNotOccurred = True

        if not expectErrNotOccurred:
            caller = inspect.getframeinfo(inspect.stack()[1][0])
            tdLog.exit(f"{caller.filename}({caller.lineno}) failed: sql:{sql}, expect error not occurred")

    def user_privilege_grant_check(self):
        for db in self.dbnames:
            tdSql.execute(f"use {db}")
            for i in range(self.stbnum_grant):
                tdSql.execute(f'grant read on {db}.{self.stbname}_grant_{i} to {self.user_name}')
                tdSql.execute(f'grant write on {db}.{self.stbname}_grant_{i} to {self.user_name}')

    def run(self):
        self.prepare_data()
        self.create_user()
        self.user_read_privilege_check(self.dbnames[0])
        self.user_write_privilege_check(self.dbnames[1])
        self.user_privilege_error_check()
        self.user_privilege_grant_check()

    def stop(self):
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)


tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())

@@ -0,0 +1,388 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################

# -*- coding: utf-8 -*-

import sys
import time
import random

import taos
import frame
import frame.etool


from frame.log import *
from frame.cases import *
from frame.sql import *
from frame.caseBase import *
from frame import *
from frame.autogen import *


class TDTestCase(TBase):
    updatecfgDict = {
        "compressMsgSize" : "100",
    }
    # compress
    compresses = ["lz4", "zlib", "zstd", "disabled", "xz"]

    compressDefaultDict = {}
    compressDefaultDict["BOOL"] = "zstd"
    compressDefaultDict["TINYINT"] = "zlib"
    compressDefaultDict["SMALLINT"] = "zlib"
    compressDefaultDict["INT"] = "lz4"
    compressDefaultDict["BIGINT"] = "lz4"
    compressDefaultDict["FLOAT"] = "lz4"
    compressDefaultDict["DOUBLE"] = "lz4"
    compressDefaultDict["VARCHAR"] = "zstd"
    compressDefaultDict["TIMESTAMP"] = "lz4"
    compressDefaultDict["NCHAR"] = "zstd"
    compressDefaultDict["TINYINT UNSIGNED"] = "zlib"
    compressDefaultDict["SMALLINT UNSIGNED"] = "zlib"
    compressDefaultDict["INT UNSIGNED"] = "lz4"
    compressDefaultDict["BIGINT UNSIGNED"] = "lz4"
    compressDefaultDict["BLOB"] = "lz4"
    compressDefaultDict["VARBINARY"] = "zstd"

    # level
    levels = ["high", "medium", "low"]

    # default compress
    defCompress = "lz4"
    # default level
    defLevel = "medium"

    # datatype 17
    dtypes = ["tinyint", "tinyint unsigned", "smallint", "smallint unsigned", "int", "int unsigned",
              "bigint", "bigint unsigned", "timestamp", "bool", "float", "double", "binary(16)", "nchar(16)",
              "varchar(16)", "varbinary(16)"]

    # encode
    encodes = [
        [["tinyint", "tinyint unsigned", "smallint", "smallint unsigned", "int", "int unsigned", "bigint", "bigint unsigned"], ["simple8B"]],
        [["timestamp", "bigint", "bigint unsigned"], ["Delta-i"]],
        [["bool"], ["Bit-packing"]],
        [["float", "double"], ["Delta-d"]]
    ]

    def combineValid(self, datatype, encode, compress):
        if datatype != "float" and datatype != "double":
            if compress == "tsz":
                return False
        return True
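    # Note (added commentary): genAllSqls walks the datatype x encode x compress
    # x level matrix built from self.encodes, self.compresses and self.levels,
    # packing valid combinations into CREATE TABLE statements of at most `max`
    # columns and starting a new table (st0, st1, ...) whenever the cap is hit.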
    def genAllSqls(self, stbName, max):

        c = 0  # column number
        t = 0  # table number

        sqls = []
        sql = ""

        # loop append sqls
        for lines in self.encodes:
            for datatype in lines[0]:
                for encode in lines[1]:
                    for compress in self.compresses:
                        for level in self.levels:
                            if sql == "":
                                # first
                                sql = f"create table {self.db}.st{t} (ts timestamp"
                            else:
                                if self.combineValid(datatype, encode, compress):
                                    sql += f", c{c} {datatype} ENCODE '{encode}' COMPRESS '{compress}' LEVEL '{level}'"
                                    c += 1

                            if c >= max:
                                # append sqls
                                sql += f") tags(groupid int) "
                                sqls.append(sql)
                                # reset
                                sql = ""
                                c = 0
                                t += 1

        # break loop
        if c > 0:
            # append sqls
            sql += f") tags(groupid int) "
            sqls.append(sql)

        return sqls

    # check error create
    def errorCreate(self):
        sqls = [
            f"create table terr(ts timestamp, c0 int ENCODE 'simple8B' COMPRESS 'tsz' LEVEL 'high') ",
            f"create table terr(ts timestamp, bi bigint encode 'bit-packing') tags (area int);",
            f"create table terr(ts timestamp, ic int encode 'delta-d') tags (area int);"
        ]
        tdSql.errors(sqls)

        for dtype in self.dtypes:
            # encode
            sql = f"create table terr(ts timestamp, c0 {dtype} ENCODE 'abc') "
            tdSql.error(sql)
            # compress
            sql = f"create table terr(ts timestamp, c0 {dtype} COMPRESS 'def') "
            tdSql.error(sql)
            # level
            sql = f"create table terr(ts timestamp, c0 {dtype} LEVEL 'hig') "
            tdSql.error(sql)

            # tsz check
            if dtype != "float" and dtype != "double":
                sql = f"create table terr(ts timestamp, c0 {dtype} COMPRESS 'tsz') "
                tdSql.error(sql)
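    # Note (added commentary): defaultCorrect assumes the `describe` layout used
    # by this feature branch: column 1 = type, column 3 = note ("TAG" marks the
    # start of tag rows), column 4 = encode, column 5 = compress and column 6 =
    # level, which is where the checkData indexes below come from.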
    # default value correct
    def defaultCorrect(self):
        # get default encode compress level
        sql = f"describe {self.db}.{self.stb}"
        tdSql.query(sql)

        # see AutoGen.types
        defEncodes = ["delta-i", "delta-i", "simple8b", "simple8b", "simple8b", "simple8b", "simple8b", "simple8b",
                      "simple8b", "simple8b", "delta-d", "delta-d", "bit-packing",
                      "disabled", "disabled", "disabled", "disabled"]

        count = tdSql.getRows()
        for i in range(count):
            node = tdSql.getData(i, 3)
            if node == "TAG":
                break
            # check
            tdLog.info(f"check default encode {tdSql.getData(i, 1)}")
            # tdLog.info(f"check default encode compressDefaultDict[tdSql.getData(i, 2)]")
            defaultValue = self.compressDefaultDict.get(tdSql.getData(i, 1))
            if defaultValue is None:
                defaultValue = self.defCompress
            tdLog.info(f"check default compress {tdSql.getData(i, 1)} {defaultValue}")
            tdSql.checkData(i, 5, defaultValue)
            tdSql.checkData(i, 6, self.defLevel)

        # geometry encode is disabled
        sql = f"create table {self.db}.ta(ts timestamp, pos geometry(64)) "
        tdSql.execute(sql)
        sql = f"describe {self.db}.ta"
        tdSql.query(sql)
        tdSql.checkData(1, 4, "disabled")

        tdLog.info("check default encode compress and level successfully.")

    def checkDataDesc(self, tbname, row, col, value):
        sql = f"describe {tbname}"
        tdSql.query(sql)
        tdSql.checkData(row, col, value)

    def writeData(self, count):
        self.autoGen.insert_data(count, True)
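    # Note (added commentary): checkAlter exercises ENCODE, COMPRESS and LEVEL
    # one clause at a time and then in two- and three-way combinations, writing
    # a batch of rows after each change so freshly ingested data is encoded
    # with the new settings.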
    # alter encode compress level
    def checkAlter(self):
        tbname = f"{self.db}.{self.stb}"
        # alter encode 4
        comp = "delta-i"
        sql = f"alter table {tbname} modify column c7 ENCODE '{comp}';"
        tdSql.execute(sql, show=True)
        self.checkDataDesc(tbname, 8, 4, comp)
        self.writeData(1000)
        sql = f"alter table {tbname} modify column c8 ENCODE '{comp}';"
        tdSql.execute(sql, show=True)
        self.checkDataDesc(tbname, 9, 4, comp)
        self.writeData(1000)

        # alter compress 5
        comps = self.compresses[2:]
        comps.append(self.compresses[0])  # add lz4
        for comp in comps:
            for i in range(self.colCnt - 1):
                self.writeData(1000)

        # alter float(c9) double(c10) to tsz
        comp = "tsz"
        sql = f"alter table {tbname} modify column c9 COMPRESS '{comp}';"
        tdSql.execute(sql, show=True)
        self.checkDataDesc(tbname, 10, 5, comp)
        self.writeData(10000)
        sql = f"alter table {tbname} modify column c10 COMPRESS '{comp}';"
        tdSql.execute(sql, show=True)
        self.checkDataDesc(tbname, 11, 5, comp)
        self.writeData(10000)

        # alter level 6
        for level in self.levels:
            for i in range(self.colCnt - 1):
                col = f"c{i}"
                sql = f"alter table {tbname} modify column {col} LEVEL '{level}';"
                tdSql.execute(sql, show=True)
                self.checkDataDesc(tbname, i + 1, 6, level)
                self.writeData(1000)

        # modify two combine

        i = 9
        encode = "delta-d"
        compress = "zlib"
        sql = f"alter table {tbname} modify column c{i} ENCODE '{encode}' COMPRESS '{compress}';"
        tdSql.execute(sql, show=True)
        self.checkDataDesc(tbname, i + 1, 4, encode)
        self.checkDataDesc(tbname, i + 1, 5, compress)

        i = 10
        encode = "delta-d"
        level = "high"
        sql = f"alter table {tbname} modify column c{i} ENCODE '{encode}' LEVEL '{level}';"
        tdSql.execute(sql, show=True)
        self.checkDataDesc(tbname, i + 1, 4, encode)
        self.checkDataDesc(tbname, i + 1, 6, level)

        i = 2
        compress = "zlib"
        level = "high"
        sql = f"alter table {tbname} modify column c{i} COMPRESS '{compress}' LEVEL '{level}';"
        tdSql.execute(sql, show=True)
        self.checkDataDesc(tbname, i + 1, 5, compress)
        self.checkDataDesc(tbname, i + 1, 6, level)

        # modify three combine
        i = 7
        encode = "simple8b"
        compress = "zstd"
        level = "medium"
        sql = f"alter table {tbname} modify column c{i} ENCODE '{encode}' COMPRESS '{compress}' LEVEL '{level}';"
        tdSql.execute(sql, show=True)
        self.checkDataDesc(tbname, i + 1, 4, encode)
        self.checkDataDesc(tbname, i + 1, 5, compress)
        self.checkDataDesc(tbname, i + 1, 6, level)

        # alter error
        sqls = [
            "alter table nodb.nostb modify column ts LEVEL 'high';",
            "alter table db.stb modify column ts encode 'simple8b';",
            "alter table db.stb modify column c1 compress 'errorcompress';",
            "alter table db.stb modify column c2 level 'errlevel';",
            "alter table db.errstb modify column c3 compress 'xz';"
        ]
        tdSql.errors(sqls)

    # add column
    def checkAddColumn(self):
        c = 0
        tbname = f"{self.db}.tbadd"
        sql = f"create table {tbname}(ts timestamp, c0 int) tags(area int);"
        tdSql.execute(sql)

        # loop append sqls
        for lines in self.encodes:
            for datatype in lines[0]:
                for encode in lines[1]:
                    for compress in self.compresses:
                        for level in self.levels:
                            if self.combineValid(datatype, encode, compress):
                                sql = f"alter table {tbname} add column col{c} {datatype} ENCODE '{encode}' COMPRESS '{compress}' LEVEL '{level}';"
                                tdSql.execute(sql, 3, True)
                                c += 1

        # alter error
        sqls = [
            f"alter table {tbname} add column a1 int ONLYOPTION",
            f"alter table {tbname} add column a1 int 'simple8b';",
            f"alter table {tbname} add column a1 int WRONG 'simple8b';",
            f"alter table {tbname} add column a1 int 123456789 'simple8b';",
            f"alter table {tbname} add column a1 int WRONGANDVERYLONG 'simple8b';",
            f"alter table {tbname} add column a1 int ENCODE 'veryveryveryveryveryverylong';",
            f"alter table {tbname} add column a1 int ENCODE 'simple8bAA';",
            f"alter table {tbname} add column a2 int COMPRESS 'AABB';",
            f"alter table {tbname} add column a3 bigint LEVEL 'high1';",
            f"alter table {tbname} add column a4 BINARY(12) ENCODE 'simple8b' LEVEL 'high2';",
            f"alter table {tbname} add column a5 VARCHAR(16) ENCODE 'simple8b' COMPRESS 'gzip' LEVEL 'high3';"
        ]
        tdSql.errors(sqls)

    def validCreate(self):
        sqls = self.genAllSqls(self.stb, 50)
        tdSql.executes(sqls, show=True)

    # sql syntax
    def checkSqlSyntax(self):

        # create tables positive
        self.validCreate()

        # create table negative
        self.errorCreate()

        # check default value correct
        self.defaultCorrect()

        # check alter and write
        self.checkAlter()

        # check add column
        self.checkAddColumn()

    def checkCorrect(self):
        # check data correct
        tbname = f"{self.db}.{self.stb}"
        # count
        sql = f"select count(*) from {tbname}"
        count = tdSql.getFirstValue(sql)
        step = 100000
        offset = 0

        while offset < count:
            sql = f"select * from {tbname} limit {step} offset {offset}"
            tdLog.info(sql)
            tdSql.query(sql)
            self.autoGen.dataCorrect(tdSql.res, tdSql.getRows(), step)
            offset += step
            tdLog.info(f"check data correct rows={offset}")

        tdLog.info(f"check {tbname} rows {count} data correct successfully.")

    # run
    def run(self):
        tdLog.debug(f"start to execute {__file__}")

        # create db and stable
        self.autoGen = AutoGen(step=10, genDataMode="fillts")
        self.autoGen.create_db(self.db, 2, 3)
        tdSql.execute(f"use {self.db}")
        self.colCnt = 17
        self.autoGen.create_stable(self.stb, 5, self.colCnt, 32, 32)
        self.childCnt = 4
        self.autoGen.create_child(self.stb, "d", self.childCnt)
        self.autoGen.insert_data(1000)

        # sql syntax
        self.checkSqlSyntax()

        # operator
        self.writeData(1000)
        self.flushDb()
        self.writeData(1000)

        # check correct
        self.checkCorrect()

        tdLog.success(f"{__file__} successfully executed")


tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())

@@ -0,0 +1,235 @@
import sys
import time
import random
import threading
from util.log import *
from util.sql import *
from util.cases import *
from util.common import *

class TDTestCase:
    updatecfgDict = {'debugFlag': 135, 'asynclog': 0}
    def init(self, conn, logSql, replicaVar=1):
        self.replicaVar = int(replicaVar)
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor(), logSql)
        self.tdCom = tdCom
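    # Note (added commentary): at_once_interval creates an at_once-trigger stream
    # on a super table, a child table and a normal table. `partition` picks the
    # partition-by expression (tbname, a column, an expression, a tag or None),
    # `fill_value`/`fill_history_value` drive the FILL clause and history
    # backfill, and `delete` removes half of the inserted rows to verify the
    # stream output after deletion.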
    def at_once_interval(self, interval, partition="tbname", delete=False, fill_value=None, fill_history_value=None, case_when=None):
        tdLog.info(f"*** testing stream at_once+interval: interval: {interval}, partition: {partition}, fill_history: {fill_history_value}, fill: {fill_value}, delete: {delete}, case_when: {case_when} ***")
        col_value_type = "Incremental" if partition == "c1" else "random"
        custom_col_index = 1 if partition == "c1" else None
        self.tdCom.custom_col_val = 0
        self.delete = delete
        self.tdCom.case_name = sys._getframe().f_code.co_name
        self.tdCom.prepare_data(interval=interval, fill_history_value=fill_history_value, custom_col_index=custom_col_index, col_value_type=col_value_type)
        self.stb_name = self.tdCom.stb_name.replace(f"{self.tdCom.dbname}.", "")
        self.ctb_name = self.tdCom.ctb_name.replace(f"{self.tdCom.dbname}.", "")
        self.tb_name = self.tdCom.tb_name.replace(f"{self.tdCom.dbname}.", "")
        self.stb_stream_des_table = f'{self.stb_name}{self.tdCom.des_table_suffix}'
        self.ctb_stream_des_table = f'{self.ctb_name}{self.tdCom.des_table_suffix}'
        self.tb_stream_des_table = f'{self.tb_name}{self.tdCom.des_table_suffix}'
        if partition == "tbname":
            if case_when:
                stream_case_when_partition = case_when
            else:
                stream_case_when_partition = self.tdCom.partition_tbname_alias

            partition_elm_alias = self.tdCom.partition_tbname_alias
        elif partition == "c1":
            if case_when:
                stream_case_when_partition = case_when
            else:
                stream_case_when_partition = self.tdCom.partition_col_alias
            partition_elm_alias = self.tdCom.partition_col_alias
        elif partition == "abs(c1)":
            partition_elm_alias = self.tdCom.partition_expression_alias
        elif partition is None:
            partition_elm_alias = '"no_partition"'
        else:
            partition_elm_alias = self.tdCom.partition_tag_alias
        if partition == "tbname" or partition is None:
            if case_when:
                stb_subtable_value = f'concat(concat("{self.stb_name}_{self.tdCom.subtable_prefix}", {stream_case_when_partition}), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
                ctb_subtable_value = f'concat(concat("{self.ctb_name}_{self.tdCom.subtable_prefix}", {stream_case_when_partition}), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
                tb_subtable_value = f'concat(concat("{self.tb_name}_{self.tdCom.subtable_prefix}", {stream_case_when_partition}), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
            else:
                stb_subtable_value = f'concat(concat("{self.stb_name}_{self.tdCom.subtable_prefix}", {partition_elm_alias}), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
                ctb_subtable_value = f'concat(concat("{self.ctb_name}_{self.tdCom.subtable_prefix}", {partition_elm_alias}), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
                tb_subtable_value = f'concat(concat("{self.tb_name}_{self.tdCom.subtable_prefix}", {partition_elm_alias}), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
        else:
            stb_subtable_value = f'concat(concat("{self.stb_name}_{self.tdCom.subtable_prefix}", cast(cast(abs(cast({partition_elm_alias} as int)) as bigint) as varchar(100))), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
            ctb_subtable_value = f'concat(concat("{self.ctb_name}_{self.tdCom.subtable_prefix}", cast(cast(abs(cast({partition_elm_alias} as int)) as bigint) as varchar(100))), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
            tb_subtable_value = f'concat(concat("{self.tb_name}_{self.tdCom.subtable_prefix}", cast(cast(abs(cast({partition_elm_alias} as int)) as bigint) as varchar(100))), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
        if partition:
            partition_elm = f'partition by {partition} {partition_elm_alias}'
        else:
            partition_elm = ""
        if fill_value:
            if "value" in fill_value.lower():
                fill_value = 'VALUE,1,2,3,4,5,6,7,8,9,10,11,1,2,3,4,5,6,7,8,9,10,11'
        self.tdCom.create_stream(stream_name=f'{self.stb_name}{self.tdCom.stream_suffix}', des_table=self.stb_stream_des_table, source_sql=f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {self.stb_name} {partition_elm} interval({self.tdCom.dataDict["interval"]}s)', trigger_mode="at_once", subtable_value=stb_subtable_value, fill_value=fill_value, fill_history_value=fill_history_value)
        self.tdCom.create_stream(stream_name=f'{self.ctb_name}{self.tdCom.stream_suffix}', des_table=self.ctb_stream_des_table, source_sql=f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {self.ctb_name} {partition_elm} interval({self.tdCom.dataDict["interval"]}s)', trigger_mode="at_once", subtable_value=ctb_subtable_value, fill_value=fill_value, fill_history_value=fill_history_value)
        if fill_value:
            if "value" in fill_value.lower():
                fill_value = 'VALUE,1,2,3,4,5,6,7,8,9,10,11'
        self.tdCom.create_stream(stream_name=f'{self.tb_name}{self.tdCom.stream_suffix}', des_table=self.tb_stream_des_table, source_sql=f'select _wstart AS wstart, {self.tdCom.tb_source_select_str} from {self.tb_name} {partition_elm} interval({self.tdCom.dataDict["interval"]}s)', trigger_mode="at_once", subtable_value=tb_subtable_value, fill_value=fill_value, fill_history_value=fill_history_value)
        start_time = self.tdCom.date_time

        time.sleep(1)

        for i in range(self.tdCom.range_count):
            ts_value = str(self.tdCom.date_time + self.tdCom.dataDict["interval"]) + f'+{i*10}s'
            ts_cast_delete_value = self.tdCom.time_cast(ts_value)
            self.tdCom.sinsert_rows(tbname=self.tdCom.ctb_name, ts_value=ts_value, custom_col_index=custom_col_index, col_value_type=col_value_type)
            if i % 2 == 0:
                self.tdCom.sinsert_rows(tbname=self.tdCom.ctb_name, ts_value=ts_value, custom_col_index=custom_col_index, col_value_type=col_value_type)
            if self.delete and i % 2 != 0:
                self.tdCom.sdelete_rows(tbname=self.tdCom.ctb_name, start_ts=ts_cast_delete_value)
            self.tdCom.date_time += 1
            self.tdCom.sinsert_rows(tbname=self.tdCom.tb_name, ts_value=ts_value, custom_col_index=custom_col_index, col_value_type=col_value_type)
            if i % 2 == 0:
                self.tdCom.sinsert_rows(tbname=self.tdCom.tb_name, ts_value=ts_value, custom_col_index=custom_col_index, col_value_type=col_value_type)
            if self.delete and i % 2 != 0:
                self.tdCom.sdelete_rows(tbname=self.tdCom.tb_name, start_ts=ts_cast_delete_value)
            self.tdCom.date_time += 1
        if partition:
            partition_elm = f'partition by {partition}'
        else:
            partition_elm = ""

        if not fill_value:
            for tbname in [self.stb_name, self.ctb_name, self.tb_name]:
                if tbname != self.tb_name:
                    self.tdCom.check_query_data(f'select wstart, {self.tdCom.stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} order by wstart', f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {tbname} {partition_elm} interval({self.tdCom.dataDict["interval"]}s) order by wstart', sorted=True)
                else:
                    self.tdCom.check_query_data(f'select wstart, {self.tdCom.tb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} order by wstart', f'select _wstart AS wstart, {self.tdCom.tb_source_select_str} from {tbname} {partition_elm} interval({self.tdCom.dataDict["interval"]}s) order by wstart', sorted=True)

        if self.tdCom.subtable:
            for tname in [self.stb_name, self.ctb_name]:
                group_id = self.tdCom.get_group_id_from_stb(f'{tname}_output')
                tdSql.query(f'select * from {self.ctb_name}')
                ptn_counter = 0
                for c1_value in tdSql.queryResult:
                    if partition == "c1":
                        tbname = self.tdCom.get_subtable_wait(f'{tname}_{self.tdCom.subtable_prefix}{abs(c1_value[1])}{self.tdCom.subtable_suffix}')
                        tdSql.query(f'select count(*) from `{tbname}`')
                    elif partition is None:
                        tbname = self.tdCom.get_subtable_wait(f'{tname}_{self.tdCom.subtable_prefix}no_partition{self.tdCom.subtable_suffix}')
                        tdSql.query(f'select count(*) from `{tbname}`')
                    elif partition == "abs(c1)":
                        abs_c1_value = abs(c1_value[1])
                        tbname = self.tdCom.get_subtable_wait(f'{tname}_{self.tdCom.subtable_prefix}{abs_c1_value}{self.tdCom.subtable_suffix}')
                        tdSql.query(f'select count(*) from `{tbname}`')
                    elif partition == "tbname" and ptn_counter == 0:
                        tbname = self.tdCom.get_subtable_wait(f'{tname}_{self.tdCom.subtable_prefix}{self.ctb_name}{self.tdCom.subtable_suffix}_{tname}_output_{group_id}')
                        tdSql.query(f'select count(*) from `{tbname}`')
                        ptn_counter += 1
                    tdSql.checkEqual(tdSql.queryResult[0][0] > 0, True)
            group_id = self.tdCom.get_group_id_from_stb(f'{self.tb_name}_output')
            tdSql.query(f'select * from {self.tb_name}')
            ptn_counter = 0
            for c1_value in tdSql.queryResult:
                if partition == "c1":
                    tbname = self.tdCom.get_subtable_wait(f'{self.tb_name}_{self.tdCom.subtable_prefix}{abs(c1_value[1])}{self.tdCom.subtable_suffix}')
                    tdSql.query(f'select count(*) from `{tbname}`')
                elif partition is None:
                    tbname = self.tdCom.get_subtable_wait(f'{self.tb_name}_{self.tdCom.subtable_prefix}no_partition{self.tdCom.subtable_suffix}')
                    tdSql.query(f'select count(*) from `{tbname}`')
                elif partition == "abs(c1)":
                    abs_c1_value = abs(c1_value[1])
                    tbname = self.tdCom.get_subtable_wait(f'{self.tb_name}_{self.tdCom.subtable_prefix}{abs_c1_value}{self.tdCom.subtable_suffix}')
                    tdSql.query(f'select count(*) from `{tbname}`')
                elif partition == "tbname" and ptn_counter == 0:
                    tbname = self.tdCom.get_subtable_wait(f'{self.tb_name}_{self.tdCom.subtable_prefix}{self.tb_name}{self.tdCom.subtable_suffix}_{self.tb_name}_output_{group_id}')
                    tdSql.query(f'select count(*) from `{tbname}`')
                    ptn_counter += 1

                tdSql.checkEqual(tdSql.queryResult[0][0] > 0, True)
        if fill_value:
            end_date_time = self.tdCom.date_time
            final_range_count = self.tdCom.range_count
            history_ts = str(start_time) + f'-{self.tdCom.dataDict["interval"]*(final_range_count+2)}s'
            start_ts = self.tdCom.time_cast(history_ts, "-")
            future_ts = str(end_date_time) + f'+{self.tdCom.dataDict["interval"]*(final_range_count+2)}s'
            end_ts = self.tdCom.time_cast(future_ts)
            self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=history_ts)
            self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=history_ts)
            self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=future_ts)
            self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=future_ts)
            self.tdCom.date_time = start_time
            # update
            history_ts = str(start_time) + f'-{self.tdCom.dataDict["interval"]*(final_range_count+2)}s'
            start_ts = self.tdCom.time_cast(history_ts, "-")
            future_ts = str(end_date_time) + f'+{self.tdCom.dataDict["interval"]*(final_range_count+2)}s'
            end_ts = self.tdCom.time_cast(future_ts)
            self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=history_ts)
            self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=history_ts)
            self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=future_ts)
            self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=future_ts)
            self.tdCom.date_time = start_time
            for i in range(self.tdCom.range_count):
                ts_value = str(self.tdCom.date_time + self.tdCom.dataDict["interval"]) + f'+{i*10}s'
                ts_cast_delete_value = self.tdCom.time_cast(ts_value)
                self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=ts_value)
                self.tdCom.date_time += 1
                self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=ts_value)
                self.tdCom.date_time += 1
            if self.delete:
                self.tdCom.sdelete_rows(tbname=self.ctb_name, start_ts=self.tdCom.time_cast(start_time), end_ts=ts_cast_delete_value)
                self.tdCom.sdelete_rows(tbname=self.tb_name, start_ts=self.tdCom.time_cast(start_time), end_ts=ts_cast_delete_value)
            for tbname in [self.stb_name, self.ctb_name, self.tb_name]:
                if tbname != self.tb_name:
                    if "value" in fill_value.lower():
                        fill_value = 'VALUE,1,2,3,6,7,8,9,10,11,1,2,3,4,5,6,7,8,9,10,11'
                    if partition == "tbname":
                        self.tdCom.check_query_data(f'select wstart, {self.tdCom.fill_stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} order by wstart', f'select _wstart AS wstart, {self.tdCom.fill_stb_source_select_str} from {tbname} where ts >= {start_ts} and ts <= {end_ts} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) fill ({fill_value}) order by wstart', fill_value=fill_value)
                    else:
                        self.tdCom.check_query_data(f'select wstart, {self.tdCom.fill_stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} where `min(c1)` is not Null order by wstart,`min(c1)`', f'select * from (select _wstart AS wstart, {self.tdCom.fill_stb_source_select_str} from {tbname} where ts >= {start_ts} and ts <= {end_ts} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) fill ({fill_value}) order by wstart) where `min(c1)` is not Null order by wstart,`min(c1)`', fill_value=fill_value)
                else:
                    if "value" in fill_value.lower():
                        fill_value = 'VALUE,1,2,3,6,7,8,9,10,11'
                    if partition == "tbname":
                        self.tdCom.check_query_data(f'select wstart, {self.tdCom.fill_tb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} order by wstart', f'select _wstart AS wstart, {self.tdCom.fill_tb_source_select_str} from {tbname} where ts >= {start_ts} and ts <= {end_ts} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) fill ({fill_value}) order by wstart', fill_value=fill_value)
                    else:
                        self.tdCom.check_query_data(f'select wstart, {self.tdCom.fill_tb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} where `min(c1)` is not Null order by wstart,`min(c1)`', f'select * from (select _wstart AS wstart, {self.tdCom.fill_tb_source_select_str} from {tbname} where ts >= {start_ts} and ts <= {end_ts} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) fill ({fill_value}) order by wstart) where `min(c1)` is not Null order by wstart,`min(c1)`', fill_value=fill_value)

            if self.delete:
                self.tdCom.sdelete_rows(tbname=self.ctb_name, start_ts=start_ts, end_ts=ts_cast_delete_value)
                self.tdCom.sdelete_rows(tbname=self.tb_name, start_ts=start_ts, end_ts=ts_cast_delete_value)
                for tbname in [self.stb_name, self.ctb_name, self.tb_name]:
                    if tbname != self.tb_name:
                        if "value" in fill_value.lower():
                            fill_value = 'VALUE,1,2,3,6,7,8,9,10,11,1,2,3,4,5,6,7,8,9,10,11'
                        if partition == "tbname":
                            self.tdCom.check_query_data(f'select wstart, {self.tdCom.fill_stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} order by wstart', f'select _wstart AS wstart, {self.tdCom.fill_stb_source_select_str} from {tbname} where ts >= {start_ts.replace("-", "+")} and ts <= {end_ts} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) fill ({fill_value}) order by wstart', fill_value=fill_value)
                        else:
                            self.tdCom.check_query_data(f'select wstart, {self.tdCom.fill_stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} order by wstart,`min(c1)`', f'select * from (select _wstart AS wstart, {self.tdCom.fill_stb_source_select_str} from {tbname} where ts >= {start_ts} and ts <= {end_ts} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) fill ({fill_value}) order by wstart) where `min(c1)` is not Null order by wstart,`min(c1)`', fill_value=fill_value)

                    else:
                        if "value" in fill_value.lower():
                            fill_value = 'VALUE,1,2,3,6,7,8,9,10,11'
                        if partition == "tbname":
                            self.tdCom.check_query_data(f'select wstart, {self.tdCom.fill_tb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} order by wstart', f'select _wstart AS wstart, {self.tdCom.fill_tb_source_select_str} from {tbname} where ts >= {start_ts.replace("-", "+")} and ts <= {end_ts} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) fill ({fill_value}) order by wstart', fill_value=fill_value)
                        else:
                            self.tdCom.check_query_data(f'select wstart, {self.tdCom.fill_tb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} order by wstart,`min(c1)`', f'select * from (select _wstart AS wstart, {self.tdCom.fill_tb_source_select_str} from {tbname} where ts >= {start_ts} and ts <= {end_ts} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) fill ({fill_value}) order by wstart) where `min(c1)` is not Null order by wstart,`min(c1)`', fill_value=fill_value)
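    # Note (added commentary): run() sweeps the partition variants with deletes
    # enabled, one case-when partition, one fill_history case, and then every
    # supported fill mode both with and without deletes.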
    def run(self):
        self.at_once_interval(interval=random.randint(10, 15), partition="tbname", delete=True)
        self.at_once_interval(interval=random.randint(10, 15), partition="c1", delete=True)
        self.at_once_interval(interval=random.randint(10, 15), partition="abs(c1)", delete=True)
        self.at_once_interval(interval=random.randint(10, 15), partition=None, delete=True)
        self.at_once_interval(interval=random.randint(10, 15), partition=self.tdCom.stream_case_when_tbname, case_when=f'case when {self.tdCom.stream_case_when_tbname} = tbname then {self.tdCom.partition_tbname_alias} else tbname end')
        self.at_once_interval(interval=random.randint(10, 15), partition="tbname", fill_history_value=1, fill_value="NULL")
        for fill_value in ["NULL", "PREV", "NEXT", "LINEAR", "VALUE,1,2,3,4,5,6,7,8,9,10,11,1,2,3,4,5,6,7,8,9,10,11"]:
            # for fill_value in ["PREV", "NEXT", "LINEAR", "VALUE,1,2,3,4,5,6,7,8,9,10,11,1,2,3,4,5,6,7,8,9,10,11"]:
            self.at_once_interval(interval=random.randint(10, 15), partition="tbname", fill_value=fill_value)
            self.at_once_interval(interval=random.randint(10, 15), partition="tbname", fill_value=fill_value, delete=True)

    def stop(self):
        tdSql.close()
        tdLog.success(f"{__file__} successfully executed")

event = threading.Event()

tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())

@@ -0,0 +1,67 @@
from util.log import *
from util.cases import *
from util.sql import *
from util.common import *
import taos


class TDTestCase:
    clientCfgDict = {'debugFlag': 135}
    updatecfgDict = {
        "debugFlag" : "135",
        "queryBufferSize" : 10240,
        'clientCfg' : clientCfgDict
    }

    def init(self, conn, logSql, replicaVar=1):
        self.replicaVar = int(replicaVar)
        tdLog.debug(f"start to execute {__file__}")
        self.conn = conn
        tdSql.init(conn.cursor(), False)
        self.passwd = {'root': 'taosdata',
                       'test': 'test'}
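    # Note (added commentary): this case assumes a TDengine analytics node
    # (anode) is reachable at 127.0.0.1:6090; the FORECAST and ANOMALY_WINDOW
    # queries below are served by that anode, so the case fails when the
    # service is not running.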
    def prepare_anode_data(self):
        tdSql.execute(f"create anode '127.0.0.1:6090'")
        tdSql.execute(f"create database db_gpt")
        tdSql.execute(f"create table if not exists db_gpt.stb (ts timestamp, c1 int, c2 float, c3 double) tags (t1 int unsigned);")
        tdSql.execute(f"create table db_gpt.ct1 using db_gpt.stb tags(1000);")
        tdSql.execute(f"insert into db_gpt.ct1(ts, c1) values(now-1a, 5)(now+1a, 14)(now+2a, 15)(now+3a, 15)(now+4a, 14);")
        tdSql.execute(f"insert into db_gpt.ct1(ts, c1) values(now+5a, 19)(now+6a, 17)(now+7a, 16)(now+8a, 20)(now+9a, 22);")
        tdSql.execute(f"insert into db_gpt.ct1(ts, c1) values(now+10a, 8)(now+11a, 21)(now+12a, 28)(now+13a, 11)(now+14a, 9);")
        tdSql.execute(f"insert into db_gpt.ct1(ts, c1) values(now+15a, 29)(now+16a, 40);")

    def test_forecast(self):
        """
        Test forecast
        """
        tdLog.info(f"Test forecast")
        tdSql.query(f"SELECT _frowts, FORECAST(c1, \"algo=arima,alpha=95,period=10,start_p=1,max_p=5,start_q=1,max_q=5,d=1\") from db_gpt.ct1 ;")
        tdSql.checkRows(10)

    def test_anomaly_window(self):
        """
        Test anomaly window
        """
        tdLog.info(f"Test anomaly window")
        tdSql.query(f"SELECT _wstart, _wend, SUM(c1) FROM db_gpt.ct1 ANOMALY_WINDOW(c1, \"algo=iqr\");")
        tdSql.checkData(0, 2, 40)

    def run(self):
        self.prepare_anode_data()
        self.test_forecast()
        self.test_anomaly_window()

    def stop(self):
        tdSql.close()
        tdLog.success(f"{__file__} successfully executed")


tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())

@@ -0,0 +1,11 @@
{% extends "function.html.jinja" %}

{% block logs scoped %}
  {{ super() }}
  {# TODO: Switch to a warning after some time. #}
  {{ log.info(
    "DeprecationWarning: Extending '_base/function.html' is deprecated, extend '_base/function.html.jinja' instead. " ~
    "After some time, this message will be logged as a warning, causing strict builds to fail.",
    once=True,
  ) }}
{% endblock logs %}

@@ -0,0 +1,154 @@
{#- Template for Python functions.

This template renders a Python function or method.

Context:
  function (griffe.Function): The function to render.
  root (bool): Whether this is the root object, injected with `:::` in a Markdown page.
  heading_level (int): The HTML heading level to use.
  config (dict): The configuration options.
-#}

{% block logs scoped %}
  {#- Logging block.

  This block can be used to log debug messages, deprecation messages, warnings, etc.
  -#}
  {{ log.debug("Rendering " + function.path) }}
{% endblock logs %}

{% import "language"|get_template as lang with context %}
{#- Language module providing the `t` translation method. -#}

<div class="doc doc-object doc-function">
  {% with obj = function, html_id = function.path %}

    {% if root %}
      {% set show_full_path = config.show_root_full_path %}
      {% set root_members = True %}
    {% elif root_members %}
      {% set show_full_path = config.show_root_members_full_path or config.show_object_full_path %}
      {% set root_members = False %}
    {% else %}
      {% set show_full_path = config.show_object_full_path %}
    {% endif %}

    {% set function_name = function.path if show_full_path else function.name %}
    {#- Brief or full function name depending on configuration. -#}
    {% set symbol_type = "method" if function.parent.is_class else "function" %}
    {#- Symbol type: method when parent is a class, function otherwise. -#}

    {% if not root or config.show_root_heading %}
      {% filter heading(
          heading_level,
          role="function",
          id=html_id,
          class="doc doc-heading",
          toc_label=(('<code class="doc-symbol doc-symbol-toc doc-symbol-' + symbol_type + '"></code> ')|safe if config.show_symbol_type_toc else '') + function.docstring.value.split("\n", 1)[0] if function.docstring.value.split("\n", 1)[0] else function.name,
        ) %}

        {% block heading scoped %}
          {#- Heading block.

          This block renders the heading for the function.
          -#}
          {% if config.show_symbol_type_heading %}<code class="doc-symbol doc-symbol-heading doc-symbol-{{ symbol_type }}"></code>{% endif %}
          {% if config.separate_signature %}
            <span class="doc doc-object-name doc-function-name">{{ config.heading if config.heading and root else function_name }}</span>
          {% else %}
            {%+ filter highlight(language="python", inline=True) %}
              {{ function_name }}{% include "signature"|get_template with context %}
            {% endfilter %}
          {% endif %}
        {% endblock heading %}

        {% block labels scoped %}
          {#- Labels block.

          This block renders the labels for the function.
          -#}
          {% with labels = function.labels %}
            {% include "labels"|get_template with context %}
          {% endwith %}
        {% endblock labels %}

      {% endfilter %}

      {% block signature scoped %}
        {#- Signature block.

        This block renders the signature for the function,
        as well as its overloaded signatures if any.
        -#}
        {% if function.overloads %}
          <div class="doc-overloads">
            {% for overload in function.overloads %}
              {% filter format_signature(overload, config.line_length, annotations=True, crossrefs=config.signature_crossrefs) %}
                {{ overload.name }}
              {% endfilter %}
            {% endfor %}
          </div>
        {% endif %}
        {% if config.separate_signature %}
          {% filter format_signature(function, config.line_length, crossrefs=config.signature_crossrefs) %}
            {{ function.name }}
          {% endfilter %}
        {% endif %}
      {% endblock signature %}

    {% else %}

      {% if config.show_root_toc_entry %}
        {% filter heading(
            heading_level,
            role="function",
            id=html_id,
            toc_label=(('<code class="doc-symbol doc-symbol-toc doc-symbol-' + symbol_type + '"></code> ')|safe if config.show_symbol_type_toc else '') + (config.toc_label if config.toc_label and root else function.name),
            hidden=True,
          ) %}
        {% endfilter %}
      {% endif %}
      {% set heading_level = heading_level - 1 %}
    {% endif %}

    <div class="doc doc-contents {% if root %}first{% endif %}">
      {% block contents scoped %}
        {#- Contents block.

        This block renders the contents of the function.
        It contains other blocks that users can override.
        Overriding the contents block allows to rearrange the order of the blocks.
        -#}
        {% block docstring scoped %}
          {#- Docstring block.

          This block renders the docstring for the function.
          -#}
          {% with docstring_sections = function.docstring.parsed %}
            {% include "docstring"|get_template with context %}
          {% endwith %}
        {% endblock docstring %}

        {% block source scoped %}
          {#- Source block.

          This block renders the source code for the function.
          -#}
          {% if config.show_source and function.source %}
            <details class="quote">
              <summary>{{ lang.t("Source code in") }} <code>
                {%- if function.relative_filepath.is_absolute() -%}
                  {{ function.relative_package_filepath }}
                {%- else -%}
                  {{ function.relative_filepath }}
                {%- endif -%}
              </code></summary>
              {{ function.source|highlight(language="python", linestart=function.lineno or 0, linenums=True) }}
            </details>
          {% endif %}
        {% endblock source %}
      {% endblock contents %}
    </div>

  {% endwith %}
</div>

@@ -0,0 +1,704 @@
#!/usr/bin/python
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# install pip
# pip install src/connector/python/

# -*- coding: utf-8 -*-
import os
import sys
import getopt
import subprocess
import time
import base64
import json
import platform
import socket
import threading
import importlib

print(f"Python version: {sys.version}")
print(f"Version info: {sys.version_info}")

import toml
sys.path.append("../pytest")
from util.log import *
from util.dnodes import *
from util.cases import *
from util.cluster import *
from util.taosadapter import *

import taos
import taosrest
import taosws
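# Note (added commentary): Windows-only watchdog. It polls for the
# "Microsoft Visual C++ Runtime Library" crash dialog via win32gui and
# force-kills the taosd/taos/tmq_sim/python processes when a case has been
# running for more than 1200 seconds, so a hung run cannot block CI.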
def checkRunTimeError():
    import win32gui
    timeCount = 0
    while 1:
        time.sleep(1)
        timeCount = timeCount + 1
        print("checkRunTimeError", timeCount)
        if (timeCount > 1200):
            print("stop the test.")
            os.system("TASKKILL /F /IM taosd.exe")
            os.system("TASKKILL /F /IM taos.exe")
            os.system("TASKKILL /F /IM tmq_sim.exe")
            os.system("TASKKILL /F /IM mintty.exe")
            os.system("TASKKILL /F /IM python.exe")
            quit(0)
        hwnd = win32gui.FindWindow(None, "Microsoft Visual C++ Runtime Library")
        if hwnd:
            os.system("TASKKILL /F /IM taosd.exe")
#
# run case on previous cluster
#
def runOnPreviousCluster(host, config, fileName):
    print("enter run on previous cluster")

    # load case module
    sep = "/"
    if platform.system().lower() == 'windows':
        sep = os.sep
    moduleName = fileName.replace(".py", "").replace(sep, ".")
    uModule = importlib.import_module(moduleName)
    case = uModule.TDTestCase()

    # create conn
    conn = taos.connect(host, config)

    # run case
    case.init(conn, False)
    try:
        case.run()
    except Exception as e:
        tdLog.notice(repr(e))
        tdLog.exit("%s failed" % (fileName))
    # stop
    case.stop()
if __name__ == "__main__":

    #
    # parse parameters
    #
    fileName = "all"
    deployPath = ""
    masterIp = ""
    testCluster = False
    valgrind = 0
    killValgrind = 1
    logSql = True
    stop = 0
    restart = False
    dnodeNums = 1
    mnodeNums = 0
    updateCfgDict = {}
    adapter_cfg_dict = {}
    execCmd = ""
    queryPolicy = 1
    createDnodeNums = 1
    restful = False
    websocket = False
    replicaVar = 1
    asan = False
    independentMnode = False
    previousCluster = False
    crashGen = False
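    # Note (added commentary): -d (updateCfgDict), -e (execCmd) and -D
    # (adaptercfgupdate) carry base64-encoded payloads that are decoded (and,
    # for -d/-D, eval'ed) in the option loop below; the remaining flags map
    # one-to-one onto the long options passed to getopt.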
    opts, args = getopt.gnu_getopt(sys.argv[1:], 'f:p:m:l:scghrd:k:e:N:M:Q:C:RWD:n:i:aP:G', [
        'file=', 'path=', 'master', 'logSql', 'stop', 'cluster', 'valgrind', 'help', 'restart', 'updateCfgDict', 'killv', 'execCmd', 'dnodeNums', 'mnodeNums', 'queryPolicy', 'createDnodeNums', 'restful', 'websocket', 'adaptercfgupdate', 'replicaVar', 'independentMnode', 'previous', 'crashGen'])
    for key, value in opts:
        if key in ['-h', '--help']:
            tdLog.printNoPrefix(
                'A collection of test cases written using Python')
            tdLog.printNoPrefix('-f Name of test case file written by Python')
            tdLog.printNoPrefix('-p Deploy Path for Simulator')
            tdLog.printNoPrefix('-m Master Ip for Simulator')
            tdLog.printNoPrefix('-l <True:False> logSql Flag')
            tdLog.printNoPrefix('-s stop All dnodes')
            tdLog.printNoPrefix('-c Test Cluster Flag')
            tdLog.printNoPrefix('-g valgrind Test Flag')
            tdLog.printNoPrefix('-r taosd restart test')
            tdLog.printNoPrefix('-d update cfg dict, base64 json str')
            tdLog.printNoPrefix('-k not kill valgrind process')
            tdLog.printNoPrefix('-e eval str to run')
            tdLog.printNoPrefix('-N start dnodes numbers in clusters')
            tdLog.printNoPrefix('-M create mnode numbers in clusters')
            tdLog.printNoPrefix('-Q set queryPolicy in one dnode')
            tdLog.printNoPrefix('-C create Dnode Numbers in one cluster')
            tdLog.printNoPrefix('-R restful realization form')
            tdLog.printNoPrefix('-W websocket connection')
            tdLog.printNoPrefix('-D taosadapter update cfg dict')
            tdLog.printNoPrefix('-n the number of replicas')
            tdLog.printNoPrefix('-i independent mnode mode')
            tdLog.printNoPrefix('-a address sanitizer mode')
            tdLog.printNoPrefix('-P run case with [P]revious cluster, do not create new cluster to run case.')
            tdLog.printNoPrefix('-G crashGen mode')

            sys.exit(0)

        if key in ['-r', '--restart']:
            restart = True

        if key in ['-f', '--file']:
            fileName = value

        if key in ['-p', '--path']:
            deployPath = value

        if key in ['-m', '--master']:
            masterIp = value

        if key in ['-l', '--logSql']:
            if (value.upper() == "TRUE"):
                logSql = True
            elif (value.upper() == "FALSE"):
                logSql = False
            else:
                tdLog.printNoPrefix("logSql value %s is invalid" % logSql)
                sys.exit(0)

        if key in ['-c', '--cluster']:
            testCluster = True

        if key in ['-g', '--valgrind']:
            valgrind = 1

        if key in ['-s', '--stop']:
            stop = 1

        if key in ['-d', '--updateCfgDict']:
            try:
                updateCfgDict = eval(base64.b64decode(value.encode()).decode())
            except:
                print('updateCfgDict convert fail.')
                sys.exit(0)

        if key in ['-k', '--killValgrind']:
            killValgrind = 0

        if key in ['-e', '--execCmd']:
            try:
                execCmd = base64.b64decode(value.encode()).decode()
            except:
                print('execCmd run fail.')
                sys.exit(0)

        if key in ['-N', '--dnodeNums']:
            dnodeNums = value

        if key in ['-M', '--mnodeNums']:
            mnodeNums = value

        if key in ['-Q', '--queryPolicy']:
            queryPolicy = value

        if key in ['-C', '--createDnodeNums']:
            createDnodeNums = value

        if key in ['-i', '--independentMnode']:
            independentMnode = value

        if key in ['-R', '--restful']:
            restful = True

        if key in ['-W', '--websocket']:
            websocket = True

        if key in ['-a', '--asan']:
            asan = True

        if key in ['-D', '--adaptercfgupdate']:
            try:
                adaptercfgupdate = eval(base64.b64decode(value.encode()).decode())
            except:
                print('adapter cfg update convert fail.')
                sys.exit(0)

        if key in ['-n', '--replicaVar']:
            replicaVar = value

        if key in ['-P', '--previous']:
            previousCluster = True

        if key in ['-G', '--crashGen']:
            crashGen = True
    #
    # run the execCmd command
    #
    if not execCmd == "":
        if restful or websocket:
            tAdapter.init(deployPath)
        else:
            tdDnodes.init(deployPath)
        print(execCmd)
        exec(execCmd)
        quit()
    #
    # do stop option
    #
    if (stop != 0):
        if (valgrind == 0):
            toBeKilled = "taosd"
        else:
            toBeKilled = "valgrind.bin"

        killCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}' | xargs kill -TERM > /dev/null 2>&1" % toBeKilled

        psCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}'" % toBeKilled
        processID = subprocess.check_output(psCmd, shell=True)

        while processID:
            os.system(killCmd)
            time.sleep(1)
            processID = subprocess.check_output(psCmd, shell=True)

        for port in range(6030, 6041):
            usePortPID = "lsof -i tcp:%d | grep LISTEN | awk '{print $2}'" % port
            processID = subprocess.check_output(usePortPID, shell=True)

            if processID:
                killCmd = "kill -TERM %s" % processID
                os.system(killCmd)
            fuserCmd = "fuser -k -n tcp %d" % port
            os.system(fuserCmd)
        if valgrind:
            time.sleep(2)

        if restful or websocket:
            toBeKilled = "taosadapter"

            # killCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}' | xargs kill -TERM > /dev/null 2>&1" % toBeKilled
            killCmd = f"pkill {toBeKilled}"

            psCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}'" % toBeKilled
            # psCmd = f"pgrep {toBeKilled}"
            processID = subprocess.check_output(psCmd, shell=True)

            while processID:
                os.system(killCmd)
                time.sleep(1)
                processID = subprocess.check_output(psCmd, shell=True)

            port = 6041
            usePortPID = f"lsof -i tcp:{port} | grep LISTEN | awk '{{print $2}}'"
            processID = subprocess.check_output(usePortPID, shell=True)

            if processID:
                killCmd = f"kill -TERM {processID}"
                os.system(killCmd)
            fuserCmd = f"fuser -k -n tcp {port}"
            os.system(fuserCmd)

            tdLog.info('stop taosadapter')

        tdLog.info('stop All dnodes')
    #
    # get hostname
    #
    if masterIp == "":
        host = socket.gethostname()
    else:
        try:
            config = eval(masterIp)
            host = config["host"]
        except Exception as r:
            host = masterIp
    tdLog.info("Procedures for tdengine deployed in %s" % (host))

    #
    # do previousCluster option
    #
    if previousCluster:
        tdDnodes.init(deployPath, masterIp)
        runOnPreviousCluster(host, tdDnodes.getSimCfgPath(), fileName)
        tdLog.info("run on previous cluster end.")
        quit()
    #
    # windows run
    #
    if platform.system().lower() == 'windows':
        fileName = fileName.replace("/", os.sep)
        if (masterIp == "" and not fileName == "0-others\\udf_create.py"):
            threading.Thread(target=checkRunTimeError, daemon=True).start()
        tdLog.info("Procedures for testing self-deployment")
        tdDnodes.init(deployPath, masterIp)
        tdDnodes.setTestCluster(testCluster)
        tdDnodes.setValgrind(valgrind)
        tdDnodes.stopAll()
        key_word = 'tdCases.addWindows'
        is_test_framework = 0
        try:
            if key_word in open(fileName, encoding='UTF-8').read():
                is_test_framework = 1
        except Exception as r:
            print(r)
        updateCfgDictStr = ''
        # adapter_cfg_dict_str = ''
        ucase = None
        if is_test_framework:
            moduleName = fileName.replace(".py", "").replace(os.sep, ".")
            uModule = importlib.import_module(moduleName)
            try:
                ucase = uModule.TDTestCase()
                if ((json.dumps(updateCfgDict) == '{}') and hasattr(ucase, 'updatecfgDict')):
                    updateCfgDict = ucase.updatecfgDict
                    updateCfgDictStr = "-d %s" % base64.b64encode(json.dumps(updateCfgDict).encode()).decode()
                if ((json.dumps(adapter_cfg_dict) == '{}') and hasattr(ucase, 'taosadapter_cfg_dict')):
                    adapter_cfg_dict = ucase.taosadapter_cfg_dict
                    # adapter_cfg_dict_str = f"-D {base64.b64encode(toml.dumps(adapter_cfg_dict).encode()).decode()}"
            except Exception as r:
                print(r)
        else:
            pass
        # if restful:
        tAdapter.init(deployPath, masterIp)
        tAdapter.stop(force_kill=True)

        if dnodeNums == 1:
            tdDnodes.deploy(1, updateCfgDict)
            tdDnodes.start(1)
            tdCases.logSql(logSql)
            if restful or websocket:
                tAdapter.deploy(adapter_cfg_dict)
                tAdapter.start()
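            # Note (added commentary): queryPolicy is a TDengine local variable
            # selecting where queries execute (1, the default, runs them on
            # vnodes; higher values shift work to qnodes -- the exact semantics
            # of each value are assumed here). The block below only verifies
            # that the `alter local` setting round-trips.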
if queryPolicy != 1:
|
||||
queryPolicy=int(queryPolicy)
|
||||
if restful:
|
||||
conn = taosrest.connect(url=f"http://{host}:6041",timezone="utc")
|
||||
elif websocket:
|
||||
conn = taosws.connect(f"taosws://root:taosdata@{host}:6041")
|
||||
else:
|
||||
conn = taos.connect(host,config=tdDnodes.getSimCfgPath())
|
||||
|
||||
cursor = conn.cursor()
|
||||
cursor.execute("create qnode on dnode 1")
|
||||
cursor.execute(f'alter local "queryPolicy" "{queryPolicy}"')
|
||||
cursor.execute("show local variables")
|
||||
res = cursor.fetchall()
|
||||
for i in range(cursor.rowcount):
|
||||
if res[i][0] == "queryPolicy" :
|
||||
if int(res[i][1]) == int(queryPolicy):
|
||||
tdLog.info(f'alter queryPolicy to {queryPolicy} successfully')
|
||||
cursor.close()
|
||||
else:
|
||||
tdLog.debug(res)
|
||||
tdLog.exit(f"alter queryPolicy to {queryPolicy} failed")
        else :
            tdLog.debug("create a cluster with %s nodes and make %s dnode as independent mnode"%(dnodeNums,mnodeNums))
            dnodeslist = cluster.configure_cluster(dnodeNums=dnodeNums, mnodeNums=mnodeNums, independentMnode=independentMnode)
            tdDnodes = ClusterDnodes(dnodeslist)
            tdDnodes.init(deployPath, masterIp)
            tdDnodes.setTestCluster(testCluster)
            tdDnodes.setValgrind(valgrind)
            tdDnodes.stopAll()
            for dnode in tdDnodes.dnodes:
                tdDnodes.deploy(dnode.index, updateCfgDict)
            for dnode in tdDnodes.dnodes:
                tdDnodes.starttaosd(dnode.index)
            tdCases.logSql(logSql)

            if restful or websocket:
                tAdapter.deploy(adapter_cfg_dict)
                tAdapter.start()

            if restful:
                conn = taosrest.connect(url=f"http://{host}:6041",timezone="utc")
            elif websocket:
                conn = taosws.connect(f"taosws://root:taosdata@{host}:6041")
            else:
                conn = taos.connect(host,config=tdDnodes.getSimCfgPath())
            # tdLog.info(tdDnodes.getSimCfgPath(),host)
            if createDnodeNums == 1:
                createDnodeNums=dnodeNums
            else:
                createDnodeNums=createDnodeNums
            cluster.create_dnode(conn,createDnodeNums)
            cluster.create_mnode(conn,mnodeNums)
            try:
                if cluster.check_dnode(conn) :
                    print("check dnode ready")
            except Exception as r:
                print(r)
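            # Verify the queryPolicy change took effect by reading it back from
            # "show local variables"; queryPolicy decides whether queries run on
            # vnodes only (1) or may be offloaded to qnodes (higher values).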
            if queryPolicy != 1:
                queryPolicy=int(queryPolicy)
                if restful:
                    conn = taosrest.connect(url=f"http://{host}:6041",timezone="utc")
                elif websocket:
                    conn = taosws.connect(f"taosws://root:taosdata@{host}:6041")
                else:
                    conn = taos.connect(host,config=tdDnodes.getSimCfgPath())

                cursor = conn.cursor()
                cursor.execute("create qnode on dnode 1")
                cursor.execute(f'alter local "queryPolicy" "{queryPolicy}"')
                cursor.execute("show local variables")
                res = cursor.fetchall()
                for i in range(cursor.rowcount):
                    if res[i][0] == "queryPolicy" :
                        if int(res[i][1]) == int(queryPolicy):
                            tdLog.info(f'alter queryPolicy to {queryPolicy} successfully')
                            cursor.close()
                        else:
                            tdLog.debug(res)
                            tdLog.exit(f"alter queryPolicy to {queryPolicy} failed")

        if ucase is not None and hasattr(ucase, 'noConn') and ucase.noConn == True:
            conn = None
        else:
            if restful:
                conn = taosrest.connect(url=f"http://{host}:6041",timezone="utc")
            elif websocket:
                conn = taosws.connect(f"taosws://root:taosdata@{host}:6041")
            else:
                conn = taos.connect(host=f"{host}", config=tdDnodes.getSimCfgPath())
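        # Dispatch the case: cluster runs go through tdCases.runAllCluster()/
        # runOneCluster(), self-deployment runs use the Windows case list.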

        if testCluster:
            tdLog.info("Procedures for testing cluster")
            if fileName == "all":
                tdCases.runAllCluster()
            else:
                tdCases.runOneCluster(fileName)
        else:
            tdLog.info("Procedures for testing self-deployment")
            if restful:
                conn = taosrest.connect(url=f"http://{host}:6041",timezone="utc")
            elif websocket:
                conn = taosws.connect(f"taosws://root:taosdata@{host}:6041")
            else:
                conn = taos.connect(host=f"{host}", config=tdDnodes.getSimCfgPath())

            if fileName == "all":
                tdCases.runAllWindows(conn)
            else:
                tdCases.runOneWindows(conn, fileName, replicaVar)

        if restart:
            if fileName == "all":
                tdLog.info("no need to query")
            else:
                sp = fileName.rsplit(".", 1)
                if len(sp) == 2 and sp[1] == "py":
                    tdDnodes.stopAll()
                    tdDnodes.start(1)
                    time.sleep(1)
                    if restful:
                        conn = taosrest.connect(url=f"http://{host}:6041",timezone="utc")
                    elif websocket:
                        conn = taosws.connect(f"taosws://root:taosdata@{host}:6041")
                    else:
                        conn = taos.connect(host=f"{host}", config=tdDnodes.getSimCfgPath())
                    tdLog.info("Procedures for tdengine deployed in %s" % (host))
                    tdLog.info("query test after taosd restart")
                    tdCases.runOneWindows(conn, sp[0] + "_" + "restart.py", replicaVar)
                else:
                    tdLog.info("no need to query")
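    #
    # linux run
    #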
    else:
        tdDnodes.setKillValgrind(killValgrind)
        tdDnodes.init(deployPath, masterIp)
        tdDnodes.setTestCluster(testCluster)
        tdDnodes.setValgrind(valgrind)
        tdDnodes.setAsan(asan)
        tdDnodes.stopAll()
        is_test_framework = 0
        key_word = 'tdCases.addLinux'
        try:
            if key_word in open(fileName).read():
                is_test_framework = 1
        except:
            pass
        if is_test_framework:
            moduleName = fileName.replace(".py", "").replace("/", ".")
            uModule = importlib.import_module(moduleName)
            try:
                ucase = uModule.TDTestCase()
                if (json.dumps(updateCfgDict) == '{}'):
                    updateCfgDict = ucase.updatecfgDict
                if (json.dumps(adapter_cfg_dict) == '{}'):
                    adapter_cfg_dict = ucase.taosadapter_cfg_dict
            except:
                pass

        if restful or websocket:
            tAdapter.init(deployPath, masterIp)
            tAdapter.stop(force_kill=True)

        if dnodeNums == 1 :
            # dnode is one
            tdDnodes.deploy(1,updateCfgDict)
            tdDnodes.start(1)
            tdCases.logSql(logSql)

            if restful or websocket:
                tAdapter.deploy(adapter_cfg_dict)
                tAdapter.start()

            if queryPolicy != 1:
                queryPolicy=int(queryPolicy)
                if restful:
                    conn = taosrest.connect(url=f"http://{host}:6041",timezone="utc")
                elif websocket:
                    conn = taosws.connect(f"taosws://root:taosdata@{host}:6041")
                else:
                    conn = taos.connect(host=f"{host}", config=tdDnodes.getSimCfgPath())
                # tdSql.init(conn.cursor())
                # tdSql.execute("create qnode on dnode 1")
                # tdSql.execute('alter local "queryPolicy" "%d"'%queryPolicy)
                # tdSql.query("show local variables;")
                # for i in range(tdSql.queryRows):
                #     if tdSql.queryResult[i][0] == "queryPolicy" :
                #         if int(tdSql.queryResult[i][1]) == int(queryPolicy):
                #             tdLog.info('alter queryPolicy to %d successfully'%queryPolicy)
                #         else :
                #             tdLog.debug(tdSql.queryResult)
                #             tdLog.exit("alter queryPolicy to %d failed"%queryPolicy)

                cursor = conn.cursor()
                cursor.execute("create qnode on dnode 1")
                cursor.execute(f'alter local "queryPolicy" "{queryPolicy}"')
                cursor.execute("show local variables")
                res = cursor.fetchall()
                for i in range(cursor.rowcount):
                    if res[i][0] == "queryPolicy" :
                        if int(res[i][1]) == int(queryPolicy):
                            tdLog.info(f'alter queryPolicy to {queryPolicy} successfully')
                            cursor.close()
                        else:
                            tdLog.debug(res)
                            tdLog.exit(f"alter queryPolicy to {queryPolicy} failed")

        else :
            # dnode > 1 cluster
            tdLog.debug("create a cluster with %s nodes and make %s dnode as independent mnode"%(dnodeNums,mnodeNums))
            print(independentMnode,"independentMnode value")
            dnodeslist = cluster.configure_cluster(dnodeNums=dnodeNums, mnodeNums=mnodeNums, independentMnode=independentMnode)
            tdDnodes = ClusterDnodes(dnodeslist)
            tdDnodes.init(deployPath, masterIp)
            tdDnodes.setTestCluster(testCluster)
            tdDnodes.setValgrind(valgrind)
            tdDnodes.setAsan(asan)
            tdDnodes.stopAll()
            for dnode in tdDnodes.dnodes:
                tdDnodes.deploy(dnode.index,updateCfgDict)
            for dnode in tdDnodes.dnodes:
                tdDnodes.starttaosd(dnode.index)
            tdCases.logSql(logSql)

            if restful or websocket:
                tAdapter.deploy(adapter_cfg_dict)
                tAdapter.start()

            # create taos connect
            if restful:
                conn = taosrest.connect(url=f"http://{host}:6041",timezone="utc")
            elif websocket:
                conn = taosws.connect(f"taosws://root:taosdata@{host}:6041")
            else:
                conn = taos.connect(host=f"{host}", config=tdDnodes.getSimCfgPath())
            print(tdDnodes.getSimCfgPath(),host)
            if createDnodeNums == 1:
                createDnodeNums=dnodeNums
            else:
                createDnodeNums=createDnodeNums
            cluster.create_dnode(conn,createDnodeNums)
            cluster.create_mnode(conn,mnodeNums)

            try:
                if cluster.check_dnode(conn) :
                    print("check dnode ready")
            except Exception as r:
                print(r)

            # do queryPolicy option
            if queryPolicy != 1:
                queryPolicy=int(queryPolicy)
                if restful:
                    conn = taosrest.connect(url=f"http://{host}:6041",timezone="utc")
                elif websocket:
                    conn = taosws.connect(f"taosws://root:taosdata@{host}:6041")
                else:
                    conn = taos.connect(host=f"{host}", config=tdDnodes.getSimCfgPath())

                cursor = conn.cursor()
                cursor.execute("create qnode on dnode 1")
                cursor.execute(f'alter local "queryPolicy" "{queryPolicy}"')
                cursor.execute("show local variables")
                res = cursor.fetchall()
                for i in range(cursor.rowcount):
                    if res[i][0] == "queryPolicy" :
                        if int(res[i][1]) == int(queryPolicy):
                            tdLog.info(f'alter queryPolicy to {queryPolicy} successfully')
                            cursor.close()
                        else:
                            tdLog.debug(res)
                            tdLog.exit(f"alter queryPolicy to {queryPolicy} failed")

        # run case
        if testCluster:
            tdLog.info("Procedures for testing cluster")
            if fileName == "all":
                tdCases.runAllCluster()
            else:
                tdCases.runOneCluster(fileName)
        else:
            tdLog.info("Procedures for testing self-deployment")
            if restful:
                conn = taosrest.connect(url=f"http://{host}:6041",timezone="utc")
            elif websocket:
                conn = taosws.connect(f"taosws://root:taosdata@{host}:6041")
            else:
                conn = taos.connect(host=f"{host}", config=tdDnodes.getSimCfgPath())

            if fileName == "all":
                tdCases.runAllLinux(conn)
            else:
                tdCases.runOneLinux(conn, fileName, replicaVar)

        # do restart option
        if restart:
            if fileName == "all":
                tdLog.info("no need to query")
            else:
                sp = fileName.rsplit(".", 1)
                if len(sp) == 2 and sp[1] == "py":
                    tdDnodes.stopAll()
                    tdDnodes.start(1)
                    time.sleep(1)
                    if restful:
                        conn = taosrest.connect(url=f"http://{host}:6041",timezone="utc")
                    elif websocket:
                        conn = taosws.connect(f"taosws://root:taosdata@{host}:6041")
                    else:
                        conn = taos.connect(host=f"{host}", config=tdDnodes.getSimCfgPath())
                    tdLog.info("Procedures for tdengine deployed in %s" % (host))
                    tdLog.info("query test after taosd restart")
                    tdCases.runOneLinux(conn, sp[0] + "_" + "restart.py", replicaVar)
                else:
                    tdLog.info("no need to query")

    # close for end
    if conn is not None:
        conn.close()
    if asan:
        # tdDnodes.StopAllSigint()
        tdLog.info("Address sanitizer mode finished")
    else:
        if not crashGen:
            tdDnodes.stopAll()
        tdLog.info("stop all td process finished")
    sys.exit(0)

@ -0,0 +1,505 @@
import taos
import sys
import time
import socket
import os
import platform  # used by startTmqSimProcess for OS detection; imported explicitly for clarity
import threading

from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import *

class TDTestCase:
    hostname = socket.gethostname()
    # rpcDebugFlagVal = '143'
    # clientCfgDict = {'serverPort': '', 'firstEp': '', 'secondEp':'', 'rpcDebugFlag':'135', 'fqdn':''}
    # clientCfgDict["rpcDebugFlag"] = rpcDebugFlagVal
    # updatecfgDict = {'clientCfg': {}, 'serverPort': '', 'firstEp': '', 'secondEp':'', 'rpcDebugFlag':'135', 'fqdn':''}
    # updatecfgDict["rpcDebugFlag"] = rpcDebugFlagVal
    # print("===================: ", updatecfgDict)

    def init(self, conn, logSql, replicaVar=1):
        self.replicaVar = int(replicaVar)
        tdLog.debug(f"start to execute {__file__}")
        tdSql.init(conn.cursor())
        # tdSql.init(conn.cursor(), logSql)  # output sql.txt file

    def getBuildPath(self):
        selfPath = os.path.dirname(os.path.realpath(__file__))

        if ("community" in selfPath):
            projPath = selfPath[:selfPath.find("community")]
        else:
            projPath = selfPath[:selfPath.find("tests")]

        for root, dirs, files in os.walk(projPath):
            if ("taosd" in files or "taosd.exe" in files):
                rootRealPath = os.path.dirname(os.path.realpath(root))
                if ("packaging" not in rootRealPath):
                    buildPath = root[:len(root) - len("/build/bin")]
                    break
        return buildPath

    def newcur(self,cfg,host,port):
        user = "root"
        password = "taosdata"
        con=taos.connect(host=host, user=user, password=password, config=cfg ,port=port)
        cur=con.cursor()
        print(cur)
        return cur
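    # The consumer protocol: parameters for each consumer are written into
    # <cdb>.consumeinfo, and <cdb>.consumeresult is polled until every expected
    # consumer has reported its message and row counts.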

    def initConsumerTable(self,cdbName='cdb'):
        tdLog.info("create consume database, and consume info table, and consume result table")
        tdSql.query("create database if not exists %s vgroups 1 wal_retention_period 3600"%(cdbName))
        tdSql.query("drop table if exists %s.consumeinfo "%(cdbName))
        tdSql.query("drop table if exists %s.consumeresult "%(cdbName))

        tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName)
        tdSql.query("create table %s.consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)"%cdbName)

    def insertConsumerInfo(self,consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifmanualcommit,cdbName='cdb'):
        sql = "insert into %s.consumeinfo values "%cdbName
        sql += "(now + %ds, %d, '%s', '%s', %d, %d, %d)"%(consumerId, consumerId, topicList, keyList, expectrowcnt, ifcheckdata, ifmanualcommit)
        tdLog.info("consume info sql: %s"%sql)
        tdSql.query(sql)

    def selectConsumeResult(self,expectRows,cdbName='cdb'):
        resultList=[]
        while 1:
            tdSql.query("select * from %s.consumeresult"%cdbName)
            #tdLog.info("row: %d, %l64d, %l64d"%(tdSql.getData(0, 1),tdSql.getData(0, 2),tdSql.getData(0, 3)))
            if tdSql.getRows() == expectRows:
                break
            else:
                time.sleep(5)

        for i in range(expectRows):
            tdLog.info("consume id: %d, consume msgs: %d, consume rows: %d"%(tdSql.getData(i, 1), tdSql.getData(i, 2), tdSql.getData(i, 3)))
            resultList.append(tdSql.getData(i, 3))

        return resultList
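    # Launch the tmq_sim helper binary in the background; it subscribes using
    # the parameters stored in consumeinfo and writes results to consumeresult.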

    def startTmqSimProcess(self,buildPath,cfgPath,pollDelay,dbName,showMsg=1,showRow=1,cdbName='cdb',valgrind=0):
        if valgrind == 1:
            logFile = cfgPath + '/../log/valgrind-tmq.log'
            shellCmd = 'nohup valgrind --log-file=' + logFile
            shellCmd += ' --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all --num-callers=20 -v --workaround-gcc296-bugs=yes '

        if (platform.system().lower() == 'windows'):
            shellCmd = 'mintty -h never -w hide ' + buildPath + '\\build\\bin\\tmq_sim.exe -c ' + cfgPath
            shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName)
            shellCmd += "> nul 2>&1 &"
        else:
            shellCmd = 'nohup ' + buildPath + '/build/bin/tmq_sim -c ' + cfgPath
            shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName)
            shellCmd += "> /dev/null 2>&1 &"
        tdLog.info(shellCmd)
        os.system(shellCmd)

    def create_tables(self,tsql, dbName,vgroups,stbName,ctbNum):
        tsql.execute("create database if not exists %s vgroups %d wal_retention_period 3600"%(dbName, vgroups))
        tsql.execute("use %s" %dbName)
        tsql.execute("create table if not exists %s (ts timestamp, c1 bigint, c2 binary(16)) tags(t1 int)"%stbName)
        pre_create = "create table"
        sql = pre_create
        #tdLog.debug("doing create one stable %s and %d child tables in %s ..." %(stbName, ctbNum, dbName))
        for i in range(ctbNum):
            sql += " %s_%d using %s tags(%d)"%(stbName,i,stbName,i+1)
            if (i > 0) and (i%100 == 0):
                tsql.execute(sql)
                sql = pre_create
        if sql != pre_create:
            tsql.execute(sql)

        event.set()
        tdLog.debug("complete to create database[%s], stable[%s] and %d child tables" %(dbName, stbName, ctbNum))
        return

    def insert_data(self,tsql,dbName,stbName,ctbNum,rowsPerTbl,batchNum,startTs):
        tdLog.debug("start to insert data ............")
        tsql.execute("use %s" %dbName)
        pre_insert = "insert into "
        sql = pre_insert

        t = time.time()
        startTs = int(round(t * 1000))
        #tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, ctbNum * rowsPerTbl))
        for i in range(ctbNum):
            sql += " %s_%d values "%(stbName,i)
            for j in range(rowsPerTbl):
                sql += "(%d, %d, 'tmqrow_%d') "%(startTs + j, j, j)
                if (j > 0) and ((j%batchNum == 0) or (j == rowsPerTbl - 1)):
                    tsql.execute(sql)
                    if j < rowsPerTbl - 1:
                        sql = "insert into %s_%d values " %(stbName,i)
                    else:
                        sql = "insert into "
        # end sql
        if sql != pre_insert:
            #print("insert sql:%s"%sql)
            tsql.execute(sql)
        tdLog.debug("insert data ............ [OK]")
        return
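    # prepareEnv runs in a worker thread: it opens its own connection, creates
    # the tables (signalling readiness through the module-level event), then
    # inserts the test data.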

    def prepareEnv(self, **parameterDict):
        print("input parameters:")
        print(parameterDict)
        # create new connector for my thread
        tsql = self.newcur(parameterDict['cfg'], 'localhost', 6030)
        self.create_tables(tsql,
                           parameterDict["dbName"],
                           parameterDict["vgroups"],
                           parameterDict["stbName"],
                           parameterDict["ctbNum"])

        self.insert_data(tsql,
                         parameterDict["dbName"],
                         parameterDict["stbName"],
                         parameterDict["ctbNum"],
                         parameterDict["rowsPerTbl"],
                         parameterDict["batchNum"],
                         parameterDict["startTs"])
        return
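    # Test cases: case 1 re-creates a dropped topic under the same name, case 2
    # runs two consumers in one group, case 2a uses two separate groups, and
    # case 3 subscribes a database holding two stables.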

    def tmqCase1(self, cfgPath, buildPath):
        tdLog.printNoPrefix("======== test case 1: Produce while one consume to subscribe one db, include 1 stb")
        tdLog.info("step 1: create database, stb, ctb and insert data")
        # create and start thread
        parameterDict = {'cfg':        '',
                         'dbName':     'db1',
                         'vgroups':    4,
                         'stbName':    'stb',
                         'ctbNum':     10,
                         'rowsPerTbl': 5000,
                         'batchNum':   100,
                         'replica':    self.replicaVar,
                         'startTs':    1640966400000}  # 2022-01-01 00:00:00.000
        parameterDict['cfg'] = cfgPath

        self.initConsumerTable()

        tdSql.execute("create database if not exists %s vgroups %d replica %d wal_retention_period 3600" %(parameterDict['dbName'], parameterDict['vgroups'], parameterDict['replica']))

        prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict)
        prepareEnvThread.start()

        tdLog.info("create topics from db")
        topicName1 = 'topic_db1'

        tdSql.execute("create topic %s as database %s" %(topicName1, parameterDict['dbName']))
        consumerId = 0
        expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"]
        topicList = topicName1
        ifcheckdata = 0
        ifManualCommit = 0
        keyList = 'group.id:cgrp1,\
                   enable.auto.commit:false,\
                   auto.commit.interval.ms:6000,\
                   auto.offset.reset:earliest'
        self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)

        event.wait()

        tdLog.info("start consume processor")
        pollDelay = 100
        showMsg = 1
        showRow = 1
        self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow)

        # wait for data ready
        prepareEnvThread.join()

        tdLog.info("1-insert process end, and start to check consume result")
        expectRows = 1
        resultList = self.selectConsumeResult(expectRows)
        totalConsumeRows = 0
        for i in range(expectRows):
            totalConsumeRows += resultList[i]

        if totalConsumeRows != expectrowcnt:
            tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt))
            tdLog.exit("tmq consume rows error!")

        tdSql.query("drop topic %s"%topicName1)

        tdLog.info("create the same topic name, and start to consume")
        self.initConsumerTable()
        tdLog.info("create topics from db")
        topicName1 = 'topic_db1'

        tdSql.execute("create topic %s as database %s" %(topicName1, parameterDict['dbName']))
        consumerId = 0
        expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"]
        topicList = topicName1
        ifcheckdata = 0
        ifManualCommit = 0
        keyList = 'group.id:cgrp1,\
                   enable.auto.commit:false,\
                   auto.commit.interval.ms:6000,\
                   auto.offset.reset:earliest'
        self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)

        tdLog.info("start consume processor")
        pollDelay = 20
        showMsg = 1
        showRow = 1
        self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow)

        expectRows = 1
        resultList = self.selectConsumeResult(expectRows)
        totalConsumeRows = 0
        for i in range(expectRows):
            totalConsumeRows += resultList[i]

        if totalConsumeRows != expectrowcnt:
            tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt))
            tdLog.exit("tmq consume rows error!")

        tdSql.query("drop topic %s"%topicName1)

        tdLog.printNoPrefix("======== test case 1 end ...... ")

    def tmqCase2(self, cfgPath, buildPath):
        tdLog.printNoPrefix("======== test case 2: Produce while two consumers to subscribe one db, include 1 stb")
        tdLog.info("step 1: create database, stb, ctb and insert data")
        # create and start thread
        parameterDict = {'cfg':        '',
                         'dbName':     'db2',
                         'vgroups':    4,
                         'stbName':    'stb',
                         'ctbNum':     10,
                         'rowsPerTbl': 5000,
                         'batchNum':   100,
                         'replica':    self.replicaVar,
                         'startTs':    1640966400000}  # 2022-01-01 00:00:00.000
        parameterDict['cfg'] = cfgPath

        self.initConsumerTable()

        tdSql.execute("create database if not exists %s vgroups %d replica %d wal_retention_period 3600" %(parameterDict['dbName'], parameterDict['vgroups'], parameterDict['replica']))

        prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict)
        prepareEnvThread.start()

        tdLog.info("create topics from db")
        topicName1 = 'topic_db1'

        tdSql.execute("create topic %s as database %s" %(topicName1, parameterDict['dbName']))

        consumerId = 0
        expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"]
        topicList = topicName1
        ifcheckdata = 0
        ifManualCommit = 1
        keyList = 'group.id:cgrp1,\
                   enable.auto.commit:false,\
                   auto.commit.interval.ms:6000,\
                   auto.offset.reset:earliest'
        self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)

        consumerId = 1
        self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)

        event.wait()

        tdLog.info("start consume processor")
        pollDelay = 20
        showMsg = 1
        showRow = 1
        self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow)

        # wait for data ready
        prepareEnvThread.join()

        tdLog.info("2-insert process end, and start to check consume result")
        expectRows = 2
        resultList = self.selectConsumeResult(expectRows)
        totalConsumeRows = 0
        for i in range(expectRows):
            totalConsumeRows += resultList[i]

        tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt))
        if not (totalConsumeRows >= expectrowcnt):
            tdLog.exit("tmq consume rows error!")

        tdSql.query("drop topic %s"%topicName1)

        tdLog.printNoPrefix("======== test case 2 end ...... ")

    def tmqCase2a(self, cfgPath, buildPath):
        tdLog.printNoPrefix("======== test case 2a: Produce while two consumers to subscribe one db, include 1 stb")
        tdLog.info("step 1: create database, stb, ctb and insert data")
        # create and start thread
        parameterDict = {'cfg':        '',
                         'dbName':     'db2a',
                         'vgroups':    4,
                         'stbName':    'stb1',
                         'ctbNum':     10,
                         'rowsPerTbl': 5000,
                         'batchNum':   100,
                         'replica':    self.replicaVar,
                         'startTs':    1640966400000}  # 2022-01-01 00:00:00.000
        parameterDict['cfg'] = cfgPath

        self.initConsumerTable()

        tdSql.execute("create database if not exists %s vgroups %d wal_retention_period 3600" %(parameterDict['dbName'], parameterDict['vgroups']))
        tdSql.execute("create table if not exists %s.%s (ts timestamp, c1 bigint, c2 binary(16)) tags(t1 int)"%(parameterDict['dbName'], parameterDict['stbName']))

        tdLog.info("create topics from db")
        topicName1 = 'topic_db1'

        tdSql.execute("create topic %s as database %s" %(topicName1, parameterDict['dbName']))

        consumerId = 0
        expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"]
        topicList = topicName1
        ifcheckdata = 0
        ifManualCommit = 1
        keyList = 'group.id:cgrp1,\
                   enable.auto.commit:false,\
                   auto.commit.interval.ms:6000,\
                   auto.offset.reset:earliest'
        self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)

        consumerId = 1
        keyList = 'group.id:cgrp2,\
                   enable.auto.commit:false,\
                   auto.commit.interval.ms:6000,\
                   auto.offset.reset:earliest'
        self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)

        tdLog.info("start consume processor")
        pollDelay = 100
        showMsg = 1
        showRow = 1
        self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow)

        prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict)
        prepareEnvThread.start()

        # wait for data ready
        prepareEnvThread.join()

        tdLog.info("3-insert process end, and start to check consume result")
        expectRows = 2
        resultList = self.selectConsumeResult(expectRows)
        totalConsumeRows = 0
        for i in range(expectRows):
            totalConsumeRows += resultList[i]

        if totalConsumeRows != expectrowcnt * 2:
            tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt*2))
            tdLog.exit("tmq consume rows error!")

        tdSql.query("drop topic %s"%topicName1)

        tdLog.printNoPrefix("======== test case 2a end ...... ")

    def tmqCase3(self, cfgPath, buildPath):
        tdLog.printNoPrefix("======== test case 3: Produce while one consumer to subscribe one db, include 2 stb")
        tdLog.info("step 1: create database, stb, ctb and insert data")
        # create and start thread
        parameterDict = {'cfg':        '',
                         'dbName':     'db3',
                         'vgroups':    4,
                         'stbName':    'stb',
                         'ctbNum':     10,
                         'rowsPerTbl': 5000,
                         'batchNum':   100,
                         'replica':    self.replicaVar,
                         'startTs':    1640966400000}  # 2022-01-01 00:00:00.000
        parameterDict['cfg'] = cfgPath

        self.initConsumerTable()

        tdSql.execute("create database if not exists %s vgroups %d replica %d wal_retention_period 3600" %(parameterDict['dbName'], parameterDict['vgroups'], parameterDict['replica']))

        prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict)
        prepareEnvThread.start()

        parameterDict2 = {'cfg':        '',
                          'dbName':     'db3',
                          'vgroups':    4,
                          'stbName':    'stb2',
                          'ctbNum':     10,
                          'rowsPerTbl': 5000,
                          'batchNum':   100,
                          'startTs':    1640966400000}  # 2022-01-01 00:00:00.000
        parameterDict2['cfg'] = cfgPath  # was parameterDict['cfg'], which left parameterDict2 without a cfg path

        prepareEnvThread2 = threading.Thread(target=self.prepareEnv, kwargs=parameterDict2)
        prepareEnvThread2.start()

        tdLog.info("create topics from db")
        topicName1 = 'topic_db1'

        tdSql.execute("create topic %s as database %s" %(topicName1, parameterDict['dbName']))

        consumerId = 0
        expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + parameterDict2["rowsPerTbl"] * parameterDict2["ctbNum"]
        topicList = topicName1
        ifcheckdata = 0
        ifManualCommit = 0
        keyList = 'group.id:cgrp1,\
                   enable.auto.commit:false,\
                   auto.commit.interval.ms:6000,\
                   auto.offset.reset:earliest'
        self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)

        # consumerId = 1
        # self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)

        event.wait()

        tdLog.info("start consume processor")
        pollDelay = 100
        showMsg = 1
        showRow = 1
        self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow)

        # wait for data ready
        prepareEnvThread.join()
        prepareEnvThread2.join()

        tdLog.info("4-insert process end, and start to check consume result")
        expectRows = 1
        resultList = self.selectConsumeResult(expectRows)
        totalConsumeRows = 0
        for i in range(expectRows):
            totalConsumeRows += resultList[i]

        if totalConsumeRows != expectrowcnt:
            tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt))
            tdLog.exit("tmq consume rows error!")

        tdSql.query("drop topic %s"%topicName1)

        tdLog.printNoPrefix("======== test case 3 end ...... ")

    def run(self):
        tdSql.prepare()

        buildPath = self.getBuildPath()
        if (buildPath == ""):
            tdLog.exit("taosd not found!")
        else:
            tdLog.info("taosd found in %s" % buildPath)
        cfgPath = buildPath + "/../sim/psim/cfg"
        tdLog.info("cfgPath: %s" % cfgPath)

        self.tmqCase1(cfgPath, buildPath)
        self.tmqCase2(cfgPath, buildPath)
        self.tmqCase2a(cfgPath, buildPath)
        self.tmqCase3(cfgPath, buildPath)

    def stop(self):
        tdSql.close()
        tdLog.success(f"{__file__} successfully executed")

event = threading.Event()

tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())

@ -0,0 +1,703 @@
from distutils.log import error
import taos
import sys
import time
import os
import platform  # used below for OS detection; imported explicitly for clarity

from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import *
import subprocess
if (platform.system().lower() == 'windows'):
    import win32gui
import threading

class TDTestCase:

    def init(self, conn, logSql, replicaVar=1):
        self.replicaVar = int(replicaVar)
        tdLog.debug(f"start to execute {__file__}")
        tdSql.init(conn.cursor(), logSql)

    def getBuildPath(self):
        selfPath = os.path.dirname(os.path.realpath(__file__))

        if ("community" in selfPath):
            projPath = selfPath[:selfPath.find("community")]
        else:
            projPath = selfPath[:selfPath.find("tests")]

        for root, dirs, files in os.walk(projPath):
            if ("taosd" in files or "taosd.exe" in files):
                rootRealPath = os.path.dirname(os.path.realpath(root))
                if ("packaging" not in rootRealPath):
                    buildPath = root[:len(root) - len("/build/bin")]
                    break
        return buildPath
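    # Locate the compiled UDF shared objects under the build tree; on Windows
    # the locally found .dll paths are mapped to the remote .so paths.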

    def prepare_udf_so(self):
        selfPath = os.path.dirname(os.path.realpath(__file__))

        if ("community" in selfPath):
            projPath = selfPath[:selfPath.find("community")]
        else:
            projPath = selfPath[:selfPath.find("tests")]
        print(projPath)

        if platform.system().lower() == 'windows':
            self.libudf1 = subprocess.Popen('(for /r %s %%i in ("udf1.d*") do @echo %%i)|grep lib|head -n1'%projPath , shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8")
            self.libudf1_dup = subprocess.Popen('(for /r %s %%i in ("udf1_dup.d*") do @echo %%i)|grep lib|head -n1'%projPath , shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8")
            self.libudf2 = subprocess.Popen('(for /r %s %%i in ("udf2.d*") do @echo %%i)|grep lib|head -n1'%projPath , shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8")
            self.libudf2_dup = subprocess.Popen('(for /r %s %%i in ("udf2_dup.d*") do @echo %%i)|grep lib|head -n1'%projPath , shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8")
            if (not tdDnodes.dnodes[0].remoteIP == ""):
                tdDnodes.dnodes[0].remote_conn.get(tdDnodes.dnodes[0].config["path"]+'/debug/build/lib/libudf1.so',projPath+"\\debug\\build\\lib\\")
                tdDnodes.dnodes[0].remote_conn.get(tdDnodes.dnodes[0].config["path"]+'/debug/build/lib/libudf1_dup.so',projPath+"\\debug\\build\\lib\\")
                tdDnodes.dnodes[0].remote_conn.get(tdDnodes.dnodes[0].config["path"]+'/debug/build/lib/libudf2.so',projPath+"\\debug\\build\\lib\\")
                tdDnodes.dnodes[0].remote_conn.get(tdDnodes.dnodes[0].config["path"]+'/debug/build/lib/libudf2_dup.so',projPath+"\\debug\\build\\lib\\")
            self.libudf1 = self.libudf1.replace('udf1.dll','libudf1.so')
            self.libudf1_dup = self.libudf1_dup.replace('udf1_dup.dll','libudf1_dup.so')
            self.libudf2 = self.libudf2.replace('udf2.dll','libudf2.so')
            self.libudf2_dup = self.libudf2_dup.replace('udf2_dup.dll','libudf2_dup.so')
        else:
            self.libudf1 = subprocess.Popen('find %s -name "libudf1.so"|grep lib|head -n1'%projPath , shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8")
            self.libudf1_dup = subprocess.Popen('find %s -name "libudf1_dup.so"|grep lib|head -n1'%projPath , shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8")
            self.libudf2 = subprocess.Popen('find %s -name "libudf2.so"|grep lib|head -n1'%projPath , shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8")
            self.libudf2_dup = subprocess.Popen('find %s -name "libudf2_dup.so"|grep lib|head -n1'%projPath , shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8")
            self.libudf1 = self.libudf1.replace('\r','').replace('\n','')
            self.libudf1_dup = self.libudf1_dup.replace('\r','').replace('\n','')
            self.libudf2 = self.libudf2.replace('\r','').replace('\n','')
            self.libudf2_dup = self.libudf2_dup.replace('\r','').replace('\n','')

    def prepare_data(self):

        tdSql.execute("drop database if exists db ")
        tdSql.execute("create database if not exists db duration 100")
        tdSql.execute("use db")
        tdSql.execute(
            '''create table stb1
            (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
            tags (t1 int)
            '''
        )

        tdSql.execute(
            '''
            create table t1
            (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
            '''
        )
        for i in range(4):
            tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')

        for i in range(9):
            tdSql.execute(
                f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
            )
            tdSql.execute(
                f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
            )
        tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
        tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
        tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
        tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")

        tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
        tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
        tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")

        tdSql.execute(
            f'''insert into t1 values
            ( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            ( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a )
            ( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a )
            ( '2021-01-01 01:01:06.000', 3, 33333, 333, 33, 3.33, 33.33, 0, "binary3", "nchar3", now()+3a )
            ( '2021-05-07 01:01:10.000', 4, 44444, 444, 44, 4.44, 44.44, 1, "binary4", "nchar4", now()+4a )
            ( '2021-07-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            ( '2021-09-30 01:01:16.000', 5, 55555, 555, 55, 5.55, 55.55, 0, "binary5", "nchar5", now()+5a )
            ( '2022-02-01 01:01:20.000', 6, 66666, 666, 66, 6.66, 66.66, 1, "binary6", "nchar6", now()+6a )
            ( '2022-10-28 01:01:26.000', 7, 00000, 000, 00, 0.00, 00.00, 1, "binary7", "nchar7", "1970-01-01 08:00:00.000" )
            ( '2022-12-01 01:01:30.000', 8, -88888, -888, -88, -8.88, -88.88, 0, "binary8", "nchar8", "1969-01-01 01:00:00.000" )
            ( '2022-12-31 01:01:36.000', 9, -99999999999999999, -999, -99, -9.99, -999999999999999999999.99, 1, "binary9", "nchar9", "1900-01-01 00:00:00.000" )
            ( '2023-02-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            '''
        )

        tdSql.execute("create table tb (ts timestamp , num1 int , num2 int, num3 double , num4 binary(30))")
        tdSql.execute(
            f'''insert into tb values
            ( '2020-04-21 01:01:01.000', NULL, 1, 1, "binary1" )
            ( '2020-10-21 01:01:01.000', 1, 1, 1.11, "binary1" )
            ( '2020-12-31 01:01:01.000', 2, 22222, 22, "binary1" )
            ( '2021-01-01 01:01:06.000', 3, 33333, 33, "binary1" )
            ( '2021-05-07 01:01:10.000', 4, 44444, 44, "binary1" )
            ( '2021-07-21 01:01:01.000', NULL, NULL, NULL, "binary1" )
            ( '2021-09-30 01:01:16.000', 5, 55555, 55, "binary1" )
            ( '2022-02-01 01:01:20.000', 6, 66666, 66, "binary1" )
            ( '2022-10-28 01:01:26.000', 0, 00000, 00, "binary1" )
            ( '2022-12-01 01:01:30.000', 8, -88888, -88, "binary1" )
            ( '2022-12-31 01:01:36.000', 9, -9999999, -99, "binary1" )
            ( '2023-02-21 01:01:01.000', NULL, NULL, NULL, "binary1" )
            '''
        )

        # udf functions with join
        ts_start = 1652517451000
        tdSql.execute("create stable st (ts timestamp , c1 int , c2 int ,c3 double ,c4 double ) tags(ind int)")
        tdSql.execute("create table sub1 using st tags(1)")
        tdSql.execute("create table sub2 using st tags(2)")

        for i in range(10):
            ts = ts_start + i *1000
            tdSql.execute(" insert into sub1 values({} , {},{},{},{})".format(ts,i ,i*10,i*100.0,i*1000.0))
            tdSql.execute(" insert into sub2 values({} , {},{},{},{})".format(ts,i ,i*10,i*100.0,i*1000.0))

    def create_udf_function(self):

        for i in range(5):
            # create scalar functions
            tdSql.execute("create function udf1 as '%s' outputtype int;"%self.libudf1)

            # create aggregate functions
            tdSql.execute("create aggregate function udf2 as '%s' outputtype double bufSize 8;"%self.libudf2)

            functions = tdSql.getResult("show functions")
            function_nums = len(functions)
            if function_nums == 2:
                tdLog.info("create two udf functions success ")

            # drop functions
            tdSql.execute("drop function udf1")
            tdSql.execute("drop function udf2")

            functions = tdSql.getResult("show functions")
            for function in functions:
                if "udf1" in function[0] or "udf2" in function[0]:
                    tdLog.info("drop udf functions failed ")
                    tdLog.exit("drop udf functions failed")

            tdLog.info("drop two udf functions success ")

        # create scalar functions
        tdSql.execute("create function udf1 as '%s' outputtype int;"%self.libudf1)
        tdSql.execute("create function udf1_dup as '%s' outputtype int;"%self.libudf1_dup)

        # create aggregate functions
        tdSql.execute("create aggregate function udf2 as '%s' outputtype double bufSize 8;"%self.libudf2)
        tdSql.execute("create aggregate function udf2_dup as '%s' outputtype double bufSize 8;"%self.libudf2_dup)

        functions = tdSql.getResult("show functions")
        function_nums = len(functions)
        if function_nums == 2:
            tdLog.info("create two udf functions success ")
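    # As the checks below assume, udf1 returns 1 for every non-NULL input row
    # (the _dup build returns 2), while udf2 is an aggregate producing a double.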

    def basic_udf_query(self):
        # create tsma of udf
        tdSql.error("create tsma tsma_udf on db.tb function(udf1(num1)) interval(10m);")    # DB error: Not buildin function (0.001656s)
        tdSql.error("create tsma tsma_udf on db.stb1 function(udf1(c1)) interval(10m);")    # DB error: Not buildin function (0.001656s)

        # scalar functions

        # udf1_dup
        tdSql.query("select udf1(num1) ,udf1_dup(num1) from tb")
        tdSql.checkData(1,0,1)
        tdSql.checkData(1,1,2)
        tdSql.checkData(2,0,1)
        tdSql.checkData(2,1,2)

        tdSql.execute("use db ")
        tdSql.query("select num1 , udf1(num1) ,num2 ,udf1(num2),num3 ,udf1(num3),num4 ,udf1(num4) from tb")
        tdSql.checkData(0,0,None)
        tdSql.checkData(0,1,None)
        tdSql.checkData(0,2,1)
        tdSql.checkData(0,3,1)
        tdSql.checkData(0,4,1.000000000)
        tdSql.checkData(0,5,1)
        tdSql.checkData(0,6,"binary1")
        tdSql.checkData(0,7,1)

        tdSql.checkData(3,0,3)
        tdSql.checkData(3,1,1)
        tdSql.checkData(3,2,33333)
        tdSql.checkData(3,3,1)
        tdSql.checkData(3,4,33.000000000)
        tdSql.checkData(3,5,1)
        tdSql.checkData(3,6,"binary1")
        tdSql.checkData(3,7,1)

        tdSql.checkData(11,0,None)
        tdSql.checkData(11,1,None)
        tdSql.checkData(11,2,None)
        tdSql.checkData(11,3,None)
        tdSql.checkData(11,4,None)
        tdSql.checkData(11,5,None)
        tdSql.checkData(11,6,"binary1")
        tdSql.checkData(11,7,1)

        tdSql.query("select c1 , udf1(c1) ,c2 ,udf1(c2), c3 ,udf1(c3), c4 ,udf1(c4) from stb1 order by c1")
        tdSql.checkData(0,0,None)
        tdSql.checkData(0,1,None)
        tdSql.checkData(0,2,None)
        tdSql.checkData(0,3,None)
        tdSql.checkData(0,4,None)
        tdSql.checkData(0,5,None)
        tdSql.checkData(0,6,None)
        tdSql.checkData(0,7,None)

        tdSql.checkData(20,0,8)
        tdSql.checkData(20,1,1)
        tdSql.checkData(20,2,88888)
        tdSql.checkData(20,3,1)
        tdSql.checkData(20,4,888)
        tdSql.checkData(20,5,1)
        tdSql.checkData(20,6,88)
        tdSql.checkData(20,7,1)

        # aggregate functions
        tdSql.query("select udf2(num1) ,udf2_dup(num1) from tb")
        val = tdSql.queryResult[0][0] + 100
        tdSql.checkData(0,1,val)

        tdSql.query("select udf2(num1) ,udf2(num2), udf2(num3) from tb")
        tdSql.checkData(0,0,15.362291496)
        tdSql.checkData(0,1,10000949.553189287)
        tdSql.checkData(0,2,168.633425216)

        # arithmetic compute
        tdSql.query("select udf2(num1)+100 ,udf2(num2)-100, udf2(num3)*100 ,udf2(num3)/100 from tb")
        tdSql.checkData(0,0,115.362291496)
        tdSql.checkData(0,1,10000849.553189287)
        tdSql.checkData(0,2,16863.342521576)
        tdSql.checkData(0,3,1.686334252)

        tdSql.query("select udf2(c1) ,udf2(c6) from stb1 ")
        tdSql.checkData(0,0,25.514701644)
        tdSql.checkData(0,1,265.247614504)

        tdSql.query("select udf2(c1)+100 ,udf2(c6)-100 ,udf2(c1)*100 ,udf2(c6)/100 from stb1 ")
        tdSql.checkData(0,0,125.514701644)
        tdSql.checkData(0,1,165.247614504)
        tdSql.checkData(0,2,2551.470164435)
        tdSql.checkData(0,3,2.652476145)

        # bug fix for crash when querying sub table
        tdSql.query("select udf2(c1+100) ,udf2(c6-100) ,udf2(c1*100) ,udf2(c6/100) from ct1")
        tdSql.checkData(0,0,378.215547010)
        tdSql.checkData(0,1,353.808067460)
        tdSql.checkData(0,2,2114.237451187)
        tdSql.checkData(0,3,2.125468151)

        tdSql.query("select udf2(c1+100) ,udf2(c6-100) ,udf2(c1*100) ,udf2(c6/100) from stb1 ")
        tdSql.checkData(0,0,490.358032462)
        tdSql.checkData(0,1,400.460106627)
        tdSql.checkData(0,2,2551.470164435)
        tdSql.checkData(0,3,2.652476145)

        # regular table with aggregate functions
        tdSql.error("select udf1(num1) , count(num1) from tb;")
        tdSql.error("select udf1(num1) , avg(num1) from tb;")
        tdSql.error("select udf1(num1) , twa(num1) from tb;")
        tdSql.error("select udf1(num1) , irate(num1) from tb;")
        tdSql.error("select udf1(num1) , sum(num1) from tb;")
        tdSql.error("select udf1(num1) , stddev(num1) from tb;")
        tdSql.error("select udf1(num1) , HYPERLOGLOG(num1) from tb;")
        # stable
        tdSql.error("select udf1(c1) , count(c1) from stb1;")
        tdSql.error("select udf1(c1) , avg(c1) from stb1;")
        tdSql.error("select udf1(c1) , twa(c1) from stb1;")
        tdSql.error("select udf1(c1) , irate(c1) from stb1;")
        tdSql.error("select udf1(c1) , sum(c1) from stb1;")
        tdSql.error("select udf1(c1) , stddev(c1) from stb1;")
        tdSql.error("select udf1(c1) , HYPERLOGLOG(c1) from stb1;")

        # regular table with select functions
        tdSql.query("select udf1(num1) , max(num1) from tb;")
        tdSql.checkRows(1)
        tdSql.query("select floor(num1) , max(num1) from tb;")
        tdSql.checkRows(1)
        tdSql.query("select udf1(num1) , min(num1) from tb;")
        tdSql.checkRows(1)
        tdSql.query("select ceil(num1) , min(num1) from tb;")
        tdSql.checkRows(1)
        tdSql.query("select udf1(num1) , first(num1) from tb;")

        tdSql.query("select abs(num1) , first(num1) from tb;")

        tdSql.query("select udf1(num1) , last(num1) from tb;")

        tdSql.query("select round(num1) , last(num1) from tb;")

        tdSql.query("select udf1(num1) , top(num1,1) from tb;")
        tdSql.checkRows(1)
        tdSql.query("select udf1(num1) , bottom(num1,1) from tb;")
        tdSql.checkRows(1)
        # tdSql.query("select udf1(num1) , last_row(num1) from tb;")
        # tdSql.checkRows(1)

        # tdSql.query("select round(num1) , last_row(num1) from tb;")
        # tdSql.checkRows(1)

        # stable
        tdSql.query("select udf1(c1) , max(c1) from stb1;")
        tdSql.checkRows(1)
        tdSql.query("select abs(c1) , max(c1) from stb1;")
        tdSql.checkRows(1)
        tdSql.query("select udf1(c1) , min(c1) from stb1;")
        tdSql.checkRows(1)
        tdSql.query("select floor(c1) , min(c1) from stb1;")
        tdSql.checkRows(1)
        tdSql.query("select udf1(c1) , first(c1) from stb1;")

        tdSql.query("select udf1(c1) , last(c1) from stb1;")

        tdSql.query("select udf1(c1) , top(c1 ,1) from stb1;")
        tdSql.checkRows(1)
        tdSql.query("select abs(c1) , top(c1 ,1) from stb1;")
        tdSql.checkRows(1)
        tdSql.query("select udf1(c1) , bottom(c1,1) from stb1;")
        tdSql.checkRows(1)
        tdSql.query("select ceil(c1) , bottom(c1,1) from stb1;")
        tdSql.checkRows(1)

        # tdSql.query("select udf1(c1) , last_row(c1) from stb1;")
        # tdSql.checkRows(1)
        # tdSql.query("select ceil(c1) , last_row(c1) from stb1;")
        # tdSql.checkRows(1)

        # regular table with compute functions
        tdSql.query("select udf1(num1) , abs(num1) from tb;")
        tdSql.checkRows(12)
        tdSql.query("select floor(num1) , abs(num1) from tb;")
        tdSql.checkRows(12)

        # bug: needs fix before the csum queries below can be enabled
        #tdSql.query("select udf1(num1) , csum(num1) from tb;")
        #tdSql.checkRows(9)
        #tdSql.query("select ceil(num1) , csum(num1) from tb;")
        #tdSql.checkRows(9)
        #tdSql.query("select udf1(c1) , csum(c1) from stb1;")
        #tdSql.checkRows(22)
        #tdSql.query("select floor(c1) , csum(c1) from stb1;")
        #tdSql.checkRows(22)

        # stable with compute functions
        tdSql.query("select udf1(c1) , abs(c1) from stb1;")
        tdSql.checkRows(25)
        tdSql.query("select abs(c1) , ceil(c1) from stb1;")
        tdSql.checkRows(25)

        # nest query
        tdSql.query("select abs(udf1(c1)) , abs(ceil(c1)) from stb1 order by ts;")
        tdSql.checkRows(25)
        tdSql.checkData(0,0,None)
        tdSql.checkData(0,1,None)
        tdSql.checkData(1,0,1)
        tdSql.checkData(1,1,8)

        tdSql.query("select abs(udf1(c1)) , abs(ceil(c1)) from ct1 order by ts;")
        tdSql.checkRows(13)
        tdSql.checkData(0,0,1)
        tdSql.checkData(0,1,8)
        tdSql.checkData(1,0,1)
        tdSql.checkData(1,1,7)

        # bug fix for crash
        # order by udf function result
        for _ in range(50):
            tdSql.query("select udf2(c1) from stb1 group by 1-udf1(c1)")
            print(tdSql.queryResult)

        # udf functions with filter
        tdSql.query("select abs(udf1(c1)) , abs(ceil(c1)) from stb1 where c1 is null order by ts;")
        tdSql.checkRows(3)
        tdSql.checkData(0,0,None)
        tdSql.checkData(0,1,None)

        tdSql.query("select c1 ,udf1(c1) , c6 ,udf1(c6) from stb1 where c1 > 8 order by ts")
        tdSql.checkRows(3)
        tdSql.checkData(0,0,9)
        tdSql.checkData(0,1,1)
        tdSql.checkData(0,2,-99.990000000)
        tdSql.checkData(0,3,1)

        tdSql.query("select sub1.c1, sub2.c2 from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null")
        tdSql.checkData(0,0,0)
        tdSql.checkData(0,1,0)
        tdSql.checkData(1,0,1)
        tdSql.checkData(1,1,10)

        tdSql.query("select udf1(sub1.c1), udf1(sub2.c2) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null")
        tdSql.checkData(0,0,1)
        tdSql.checkData(0,1,1)
        tdSql.checkData(1,0,1)
        tdSql.checkData(1,1,1)

        tdSql.query("select sub1.c1 , udf1(sub1.c1), sub2.c2 ,udf1(sub2.c2) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null")
        tdSql.checkData(0,0,0)
        tdSql.checkData(0,1,1)
        tdSql.checkData(0,2,0)
        tdSql.checkData(0,3,1)
        tdSql.checkData(1,0,1)
        tdSql.checkData(1,1,1)
        tdSql.checkData(1,2,10)
        tdSql.checkData(1,3,1)

        tdSql.query("select udf2(sub1.c1), udf2(sub2.c2) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null")
        tdSql.checkData(0,0,16.881943016)
        tdSql.checkData(0,1,168.819430161)
        tdSql.error("select sub1.c1 , udf2(sub1.c1), sub2.c2 ,udf2(sub2.c2) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null")

        # udf functions with group by
        tdSql.query("select udf1(c1) from ct1 group by c1")
        tdSql.checkRows(10)
        tdSql.query("select udf1(c1) from stb1 group by c1")
        tdSql.checkRows(11)
        tdSql.query("select c1,c2, udf1(c1,c2) from ct1 group by c1,c2")
        tdSql.checkRows(10)
        tdSql.query("select c1,c2, udf1(c1,c2) from stb1 group by c1,c2")
        tdSql.checkRows(11)

        tdSql.query("select udf2(c1) from ct1 group by c1")
        tdSql.checkRows(10)
        tdSql.query("select udf2(c1) from stb1 group by c1")
        tdSql.checkRows(11)
        tdSql.query("select c1,c2, udf2(c1,c6) from ct1 group by c1,c2")
        tdSql.checkRows(10)
        tdSql.query("select c1,c2, udf2(c1,c6) from stb1 group by c1,c2")
        tdSql.checkRows(11)
        tdSql.query("select udf2(c1) from stb1 group by udf1(c1)")
        tdSql.checkRows(2)
        tdSql.query("select udf2(c1) from stb1 group by floor(c1)")
        tdSql.checkRows(11)

        # udf mix with order by
        tdSql.query("select udf2(c1) from stb1 group by floor(c1) order by udf2(c1)")
        tdSql.checkRows(11)
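    # multi_cols_udf feeds several columns into a single UDF call, including
    # across a two-table join.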

    def multi_cols_udf(self):
        tdSql.query("select num1,num2,num3,udf1(num1,num2,num3) from tb")
        tdSql.checkData(0,0,None)
        tdSql.checkData(0,1,1)
        tdSql.checkData(0,2,1.000000000)
        tdSql.checkData(0,3,None)
        tdSql.checkData(1,0,1)
        tdSql.checkData(1,1,1)
        tdSql.checkData(1,2,1.110000000)
        tdSql.checkData(1,3,88)

        tdSql.query("select c1,c6,udf1(c1,c6) from stb1 order by ts")
        tdSql.checkData(1,0,8)
        tdSql.checkData(1,1,88.880000000)
        tdSql.checkData(1,2,88)

        tdSql.query("select abs(udf1(c1,c6,c1,c6)) , abs(ceil(c1)) from stb1 where c1 is not null order by ts;")
        tdSql.checkRows(22)

        tdSql.query("select udf2(sub1.c1 ,sub1.c2), udf2(sub2.c2 ,sub2.c1) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null")
        tdSql.checkData(0,0,169.661427555)
        tdSql.checkData(0,1,169.661427555)

    def try_query_sql(self):
        udf1_sqls = [
            "select num1 , udf1(num1) ,num2 ,udf1(num2),num3 ,udf1(num3),num4 ,udf1(num4) from tb" ,
            "select c1 , udf1(c1) ,c2 ,udf1(c2), c3 ,udf1(c3), c4 ,udf1(c4) from stb1 order by c1" ,
            "select udf1(num1) , max(num1) from tb;" ,
            "select udf1(num1) , min(num1) from tb;" ,
            #"select udf1(num1) , top(num1,1) from tb;" ,
            #"select udf1(num1) , bottom(num1,1) from tb;" ,
            "select udf1(c1) , max(c1) from stb1;" ,
            "select udf1(c1) , min(c1) from stb1;" ,
            #"select udf1(c1) , top(c1 ,1) from stb1;" ,
            #"select udf1(c1) , bottom(c1,1) from stb1;" ,
            "select udf1(num1) , abs(num1) from tb;" ,
            #"select udf1(num1) , csum(num1) from tb;" ,
            #"select udf1(c1) , csum(c1) from stb1;" ,
            "select udf1(c1) , abs(c1) from stb1;" ,
            "select abs(udf1(c1)) , abs(ceil(c1)) from stb1 order by ts;" ,
            "select abs(udf1(c1)) , abs(ceil(c1)) from ct1 order by ts;" ,
            "select abs(udf1(c1)) , abs(ceil(c1)) from stb1 where c1 is null order by ts;" ,
            "select c1 ,udf1(c1) , c6 ,udf1(c6) from stb1 where c1 > 8 order by ts" ,
            "select udf1(sub1.c1), udf1(sub2.c2) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null" ,
            "select sub1.c1 , udf1(sub1.c1), sub2.c2 ,udf1(sub2.c2) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null" ,
            "select udf1(c1) from ct1 group by c1" ,
            "select udf1(c1) from stb1 group by c1" ,
            "select c1,c2, udf1(c1,c2) from ct1 group by c1,c2" ,
            "select c1,c2, udf1(c1,c2) from stb1 group by c1,c2" ,
            "select num1,num2,num3,udf1(num1,num2,num3) from tb" ,
            "select c1,c6,udf1(c1,c6) from stb1 order by ts" ,
            "select abs(udf1(c1,c6,c1,c6)) , abs(ceil(c1)) from stb1 where c1 is not null order by ts;"
        ]
        udf2_sqls = [
            "select udf2(sub1.c1), udf2(sub2.c2) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null" ,
            "select udf2(c1) from stb1 group by 1-udf1(c1)" ,
            "select udf2(num1) ,udf2(num2), udf2(num3) from tb" ,
            "select udf2(num1)+100 ,udf2(num2)-100, udf2(num3)*100 ,udf2(num3)/100 from tb" ,
            "select udf2(c1) ,udf2(c6) from stb1 " ,
            "select udf2(c1)+100 ,udf2(c6)-100 ,udf2(c1)*100 ,udf2(c6)/100 from stb1 " ,
            "select udf2(c1+100) ,udf2(c6-100) ,udf2(c1*100) ,udf2(c6/100) from ct1" ,
            "select udf2(c1+100) ,udf2(c6-100) ,udf2(c1*100) ,udf2(c6/100) from stb1 " ,
            "select udf2(c1) from ct1 group by c1" ,
            "select udf2(c1) from stb1 group by c1" ,
            "select c1,c2, udf2(c1,c6) from ct1 group by c1,c2" ,
            "select c1,c2, udf2(c1,c6) from stb1 group by c1,c2" ,
            "select udf2(c1) from stb1 group by udf1(c1)" ,
            "select udf2(c1) from stb1 group by floor(c1)" ,
            "select udf2(c1) from stb1 group by floor(c1) order by udf2(c1)" ,

            "select udf2(sub1.c1 ,sub1.c2), udf2(sub2.c2 ,sub2.c1) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null" ,
            "select udf2(sub1.c1 ,sub1.c2), udf2(sub2.c2 ,sub2.c1) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null" ,
            "select udf2(sub1.c1 ,sub1.c2), udf2(sub2.c2 ,sub2.c1) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null" ,
            "select udf2(sub1.c1 ,sub1.c2), udf2(sub2.c2 ,sub2.c1) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null"]

        return udf1_sqls ,udf2_sqls
|
||||
|
||||
|
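    # try_query_sql() returns two suites: udf1_sqls exercises the scalar
    # UDF and udf2_sqls the aggregate UDF. unexpected_create() below reuses
    # both suites to probe deliberately misregistered functions.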

    def checkRunTimeError(self):
        # only meaningful on a local Windows dnode
        if platform.system().lower() == 'windows' and tdDnodes.dnodes[0].remoteIP == "":
            while True:
                time.sleep(1)
                hwnd = win32gui.FindWindow(None, "Microsoft Visual C++ Runtime Library")
                if hwnd:
                    os.system("TASKKILL /F /IM udfd.exe")
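    # Background: on Windows a crashing udfd pops up a blocking
    # "Microsoft Visual C++ Runtime Library" error dialog. The watcher
    # above polls for that window and force-kills udfd.exe so a test run
    # cannot hang on the dialog.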

    def unexpected_create(self):
        if platform.system().lower() == 'windows' and tdDnodes.dnodes[0].remoteIP == "":
            checkErrorThread = threading.Thread(target=self.checkRunTimeError, daemon=True)
            checkErrorThread.start()

        tdLog.info(" create function without bufsize ")
        tdSql.execute("drop function udf1 ")
        tdSql.execute("drop function udf2 ")

        # create the aggregate function without a bufSize
        tdSql.execute("create function udf1 as '%s' outputtype int" % self.libudf1)
        tdSql.execute("create aggregate function udf2 as '%s' outputtype double" % self.libudf2)
        udf1_sqls, udf2_sqls = self.try_query_sql()

        # the scalar UDF still works; the aggregate UDF created without
        # bufSize must fail at query time
        for scalar_sql in udf1_sqls:
            tdSql.query(scalar_sql)
        for aggregate_sql in udf2_sqls:
            tdSql.error(aggregate_sql)

        # recreate with the scalar/aggregate roles swapped
        tdLog.info(" create function without aggregate ")
        tdSql.execute("drop function udf1 ")
        tdSql.execute("drop function udf2 ")

        tdSql.execute("create aggregate function udf1 as '%s' outputtype int bufSize 8 " % self.libudf1)
        tdSql.execute("create function udf2 as '%s' outputtype double " % self.libudf2)
        udf1_sqls, udf2_sqls = self.try_query_sql()

        # both suites must now fail: each body is registered as the wrong kind
        for scalar_sql in udf1_sqls:
            tdSql.error(scalar_sql)
        for aggregate_sql in udf2_sqls:
            tdSql.error(aggregate_sql)

        # names colliding with other objects are accepted at create time
        # but the resulting functions fail when queried
        tdSql.execute(" create function db as '%s' outputtype int " % self.libudf1)
        tdSql.execute(" create aggregate function test as '%s' outputtype int bufSize 8 " % self.libudf1)
        tdSql.error(" select db(c1) from stb1 ")
        tdSql.error(" select db(c1,c6), db(c6) from stb1 ")
        tdSql.error(" select db(num1,num2), db(num1) from tb ")
        tdSql.error(" select test(c1) from stb1 ")
        tdSql.error(" select test(c1,c6), test(c6) from stb1 ")
        tdSql.error(" select test(num1,num2), test(num1) from tb ")
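    # loop_kill_udfd below relies on taosd respawning udfd: after the udfd
    # process is killed, the next UDF query is expected to succeed again
    # once the daemon has come back.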

    def loop_kill_udfd(self):
        buildPath = self.getBuildPath()
        if buildPath == "":
            tdLog.exit("taosd not found!")
        else:
            tdLog.info("taosd found in %s" % buildPath)

        cfgPath = buildPath + "/../sim/dnode1/cfg"
        udfdPath = buildPath + '/build/bin/udfd'

        for i in range(3):
            tdLog.info(" loop restart udfd %d-th" % i)

            tdSql.query("select udf2(sub1.c1 ,sub1.c2), udf2(sub2.c2 ,sub2.c1) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null")
            tdSql.checkData(0, 0, 169.661427555)
            tdSql.checkData(0, 1, 169.661427555)

            # stop udfd
            get_processID = "ps -ef | grep -w udfd | grep -v grep | grep -v defunct | awk '{print $2}'"
            processID = subprocess.check_output(get_processID, shell=True).decode("utf-8")
            # join the pids onto one line; check_output may return several
            os.system("kill -9 %s" % " ".join(processID.split()))

            # give taosd time to respawn udfd
            time.sleep(2)

            tdSql.query("select udf2(sub1.c1 ,sub1.c2), udf2(sub2.c2 ,sub2.c1) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null")
            tdSql.checkData(0, 0, 169.661427555)
            tdSql.checkData(0, 1, 169.661427555)

            # # start udfd manually (kept disabled; taosd respawns udfd itself)
            # start_udfd = "nohup " + udfdPath + ' -c ' + cfgPath + " > /dev/null 2>&1 &"
            # tdLog.info("start udfd : %s " % start_udfd)
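    # A minimal sketch of polling for the respawned udfd instead of the
    # fixed time.sleep(2) above. The helper name is hypothetical and it is
    # not called anywhere in this file; it reuses the same `ps` pipeline
    # and the subprocess/time modules already imported for this test.
    def wait_for_udfd(self, timeout=10):
        cmd = "ps -ef | grep -w udfd | grep -v grep | grep -v defunct | awk '{print $2}'"
        for _ in range(timeout):
            if subprocess.check_output(cmd, shell=True).decode("utf-8").strip():
                return True  # a udfd process is back
            time.sleep(1)
        return False  # udfd did not reappear within `timeout` seconds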

    def test_function_name(self):
        tdLog.info(" create function name must not be a built-in function ")
        tdSql.execute(" drop function udf1 ")
        tdSql.execute(" drop function udf2 ")
        tdSql.error("create function max as '%s' outputtype int" % self.libudf1)
        tdSql.error("create aggregate function sum as '%s' outputtype double bufSize 8" % self.libudf2)
        tdSql.error("create function max as '%s' outputtype int" % self.libudf1)
        tdSql.error("create aggregate function sum as '%s' outputtype double bufSize 8" % self.libudf2)
        tdSql.error("create aggregate function tbname as '%s' outputtype double bufSize 8" % self.libudf2)
        tdSql.error("create aggregate function function as '%s' outputtype double bufSize 8" % self.libudf2)
        tdSql.error("create aggregate function stable as '%s' outputtype double bufSize 8" % self.libudf2)
        tdSql.error("create aggregate function union as '%s' outputtype double bufSize 8" % self.libudf2)
        tdSql.error("create aggregate function 123 as '%s' outputtype double bufSize 8" % self.libudf2)
        tdSql.error("create aggregate function 123db as '%s' outputtype double bufSize 8" % self.libudf2)
        tdSql.error("create aggregate function mnode as '%s' outputtype double bufSize 8" % self.libudf2)
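    # The rejected names above appear to cover three classes: built-in
    # function names (max, sum, tbname), reserved keywords (function,
    # stable, union, mnode), and identifiers starting with a digit
    # (123, 123db).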

    def restart_taosd_query_udf(self):
        self.create_udf_function()

        for i in range(5):
            tdLog.info(" this is the %d-th restart of taosd " % i)
            tdSql.execute("use db ")
            tdSql.query("select count(*) from stb1")
            tdSql.checkRows(1)
            tdSql.query("select udf2(sub1.c1 ,sub1.c2), udf2(sub2.c2 ,sub2.c1) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null")
            tdSql.checkData(0, 0, 169.661427555)
            tdSql.checkData(0, 1, 169.661427555)
            tdDnodes.stop(1)
            tdDnodes.start(1)
            time.sleep(2)
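    # Restarting the dnode between iterations checks that registered UDFs
    # survive a taosd restart and keep returning the same aggregate values.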

    def run(self):  # sourcery skip: extract-duplicate-method, remove-redundant-fstring
        print(" env is ok for all ")
        self.prepare_udf_so()
        self.prepare_data()
        self.create_udf_function()
        self.basic_udf_query()
        self.unexpected_create()
        # loop_kill_udfd, test_function_name and restart_taosd_query_udf
        # are defined above but not part of this default run sequence.

    def stop(self):
        tdSql.close()
        tdLog.success(f"{__file__} successfully executed")


tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())