From a5d6851cbeeab12bda42348b58cd6370ecfc88f1 Mon Sep 17 00:00:00 2001 From: Hongze Cheng Date: Wed, 24 Feb 2021 13:15:23 +0800 Subject: [PATCH 01/82] implement compact interfaces --- src/inc/tsdb.h | 3 +++ src/tsdb/inc/tsdbCommitQueue.h | 4 +++- src/tsdb/inc/tsdbCompact.h | 28 ++++++++++++++++++++++++++++ src/tsdb/inc/tsdbint.h | 2 ++ src/tsdb/src/tsdbCommitQueue.c | 23 +++++++++++++++++------ src/tsdb/src/tsdbCompact.c | 6 +++++- src/tsdb/src/tsdbMemTable.c | 2 +- 7 files changed, 59 insertions(+), 9 deletions(-) create mode 100644 src/tsdb/inc/tsdbCompact.h diff --git a/src/inc/tsdb.h b/src/inc/tsdb.h index 78cd2927c7..d6c9fed971 100644 --- a/src/inc/tsdb.h +++ b/src/inc/tsdb.h @@ -355,6 +355,9 @@ void tsdbDecCommitRef(int vgId); int tsdbSyncSend(void *pRepo, SOCKET socketFd); int tsdbSyncRecv(void *pRepo, SOCKET socketFd); +// For TSDB Compact +int tsdbCompact(STsdbRepo *pRepo); + #ifdef __cplusplus } #endif diff --git a/src/tsdb/inc/tsdbCommitQueue.h b/src/tsdb/inc/tsdbCommitQueue.h index c2353391f9..6342c036b7 100644 --- a/src/tsdb/inc/tsdbCommitQueue.h +++ b/src/tsdb/inc/tsdbCommitQueue.h @@ -16,6 +16,8 @@ #ifndef _TD_TSDB_COMMIT_QUEUE_H_ #define _TD_TSDB_COMMIT_QUEUE_H_ -int tsdbScheduleCommit(STsdbRepo *pRepo); +typedef enum { COMMIT_REQ, COMPACT_REQ } TSDB_REQ_T; + +int tsdbScheduleCommit(STsdbRepo *pRepo, TSDB_REQ_T req); #endif /* _TD_TSDB_COMMIT_QUEUE_H_ */ \ No newline at end of file diff --git a/src/tsdb/inc/tsdbCompact.h b/src/tsdb/inc/tsdbCompact.h new file mode 100644 index 0000000000..5a382de5e0 --- /dev/null +++ b/src/tsdb/inc/tsdbCompact.h @@ -0,0 +1,28 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ +#ifndef _TD_TSDB_COMPACT_H_ +#define _TD_TSDB_COMPACT_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +void *tsdbCompactImpl(STsdbRepo *pRepo); + +#ifdef __cplusplus +} +#endif + +#endif /* _TD_TSDB_COMPACT_H_ */ \ No newline at end of file diff --git a/src/tsdb/inc/tsdbint.h b/src/tsdb/inc/tsdbint.h index 074ff20f22..0a448d8194 100644 --- a/src/tsdb/inc/tsdbint.h +++ b/src/tsdb/inc/tsdbint.h @@ -64,6 +64,8 @@ extern "C" { #include "tsdbReadImpl.h" // Commit #include "tsdbCommit.h" +// Compact +#include "tsdbCompact.h" // Commit Queue #include "tsdbCommitQueue.h" // Main definitions diff --git a/src/tsdb/src/tsdbCommitQueue.c b/src/tsdb/src/tsdbCommitQueue.c index 9e8e4acd7e..d515faf861 100644 --- a/src/tsdb/src/tsdbCommitQueue.c +++ b/src/tsdb/src/tsdbCommitQueue.c @@ -26,8 +26,9 @@ typedef struct { } SCommitQueue; typedef struct { + TSDB_REQ_T req; STsdbRepo *pRepo; -} SCommitReq; +} SReq; static void *tsdbLoopCommit(void *arg); @@ -90,16 +91,17 @@ void tsdbDestroyCommitQueue() { pthread_mutex_destroy(&(pQueue->lock)); } -int tsdbScheduleCommit(STsdbRepo *pRepo) { +int tsdbScheduleCommit(STsdbRepo *pRepo, TSDB_REQ_T req) { SCommitQueue *pQueue = &tsCommitQueue; - SListNode *pNode = (SListNode *)calloc(1, sizeof(SListNode) + sizeof(SCommitReq)); + SListNode *pNode = (SListNode *)calloc(1, sizeof(SListNode) + sizeof(SReq)); if (pNode == NULL) { terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; return -1; } - ((SCommitReq *)pNode->data)->pRepo = pRepo; + ((SReq *)pNode->data)->req = req; + ((SReq *)pNode->data)->pRepo = pRepo; pthread_mutex_lock(&(pQueue->lock)); @@ -116,6 +118,7 @@ static void *tsdbLoopCommit(void *arg) { SCommitQueue *pQueue = &tsCommitQueue; SListNode * pNode = NULL; STsdbRepo * pRepo = NULL; + TSDB_REQ_T req; while (true) { pthread_mutex_lock(&(pQueue->lock)); @@ -136,9 +139,17 @@ static void *tsdbLoopCommit(void *arg) { pthread_mutex_unlock(&(pQueue->lock)); - pRepo = ((SCommitReq *)pNode->data)->pRepo; + req = ((SReq *)pNode->data)->req; + pRepo = ((SReq *)pNode->data)->pRepo; + + if (req == COMMIT_REQ) { + tsdbCommitData(pRepo); + } else if (req == COMPACT_REQ) { + tsdbCompactImpl(pRepo); + } else { + ASSERT(0); + } - tsdbCommitData(pRepo); listNodeFree(pNode); } diff --git a/src/tsdb/src/tsdbCompact.c b/src/tsdb/src/tsdbCompact.c index 6dea4a4e57..d8cd558424 100644 --- a/src/tsdb/src/tsdbCompact.c +++ b/src/tsdb/src/tsdbCompact.c @@ -11,4 +11,8 @@ * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . 
- */ \ No newline at end of file + */ +#include "tsdb.h" + +int tsdbCompact(STsdbRepo *pRepo) { return 0; } +void *tsdbCompactImpl(STsdbRepo *pRepo) { return NULL; } \ No newline at end of file diff --git a/src/tsdb/src/tsdbMemTable.c b/src/tsdb/src/tsdbMemTable.c index 73a1270799..2e4a73e56b 100644 --- a/src/tsdb/src/tsdbMemTable.c +++ b/src/tsdb/src/tsdbMemTable.c @@ -232,7 +232,7 @@ int tsdbAsyncCommit(STsdbRepo *pRepo) { if (tsdbLockRepo(pRepo) < 0) return -1; pRepo->imem = pRepo->mem; pRepo->mem = NULL; - tsdbScheduleCommit(pRepo); + tsdbScheduleCommit(pRepo, COMMIT_REQ); if (tsdbUnlockRepo(pRepo) < 0) return -1; return 0; From a46e84dce78a0f22b2dfed290b0e52a5a39f4f32 Mon Sep 17 00:00:00 2001 From: Hongze Cheng Date: Thu, 25 Feb 2021 15:26:57 +0800 Subject: [PATCH 02/82] partial work --- src/inc/tfs.h | 2 + src/tsdb/inc/tsdbCommit.h | 7 + src/tsdb/src/tsdbCommit.c | 330 ++++++++++++++++++++------------------ 3 files changed, 182 insertions(+), 157 deletions(-) diff --git a/src/inc/tfs.h b/src/inc/tfs.h index 76e9b17a62..26ae033ac6 100644 --- a/src/inc/tfs.h +++ b/src/inc/tfs.h @@ -31,6 +31,8 @@ typedef struct { #define TFS_UNDECIDED_ID -1 #define TFS_PRIMARY_LEVEL 0 #define TFS_PRIMARY_ID 0 +#define TFS_MIN_LEVEL 0 +#define TFS_MAX_LEVEL (TSDB_MAX_TIERS - 1) // FS APIs ==================================== typedef struct { diff --git a/src/tsdb/inc/tsdbCommit.h b/src/tsdb/inc/tsdbCommit.h index 5e740081d1..9612d15018 100644 --- a/src/tsdb/inc/tsdbCommit.h +++ b/src/tsdb/inc/tsdbCommit.h @@ -29,10 +29,17 @@ typedef struct { int64_t size; } SKVRecord; +#define TSDB_DEFAULT_BLOCK_ROWS(maxRows) ((maxRows)*4 / 5) + void tsdbGetRtnSnap(STsdbRepo *pRepo, SRtn *pRtn); int tsdbEncodeKVRecord(void **buf, SKVRecord *pRecord); void *tsdbDecodeKVRecord(void *buf, SKVRecord *pRecord); void *tsdbCommitData(STsdbRepo *pRepo); +int tsdbApplyRtnOnFSet(STsdbRepo *pRepo, SDFileSet *pSet, SRtn *pRtn); +int tsdbWriteBlockInfoImpl(SDFile *pHeadf, STable *pTable, SArray *pSupA, SArray *pSubA, void **ppBuf, SBlockIdx *pIdx); +int tsdbWriteBlockIdx(SDFile *pHeadf, SArray *pIdxA, void **ppBuf); +int tsdbWriteBlockImpl(STsdbRepo *pRepo, STable *pTable, SDFile *pDFile, SDataCols *pDataCols, SBlock *pBlock, + bool isLast, bool isSuper, void **ppBuf, void **ppCBuf); static FORCE_INLINE int tsdbGetFidLevel(int fid, SRtn *pRtn) { if (fid >= pRtn->maxFid) { diff --git a/src/tsdb/src/tsdbCommit.c b/src/tsdb/src/tsdbCommit.c index a4cc316725..5656db69a4 100644 --- a/src/tsdb/src/tsdbCommit.c +++ b/src/tsdb/src/tsdbCommit.c @@ -45,7 +45,7 @@ typedef struct { #define TSDB_COMMIT_LAST_FILE(ch) TSDB_DFILE_IN_SET(TSDB_COMMIT_WRITE_FSET(ch), TSDB_FILE_LAST) #define TSDB_COMMIT_BUF(ch) TSDB_READ_BUF(&((ch)->readh)) #define TSDB_COMMIT_COMP_BUF(ch) TSDB_READ_COMP_BUF(&((ch)->readh)) -#define TSDB_COMMIT_DEFAULT_ROWS(ch) (TSDB_COMMIT_REPO(ch)->config.maxRowsPerFileBlock * 4 / 5) +#define TSDB_COMMIT_DEFAULT_ROWS(ch) TSDB_DEFAULT_BLOCK_ROWS(TSDB_COMMIT_REPO(ch)->config.maxRowsPerFileBlock) #define TSDB_COMMIT_TXN_VERSION(ch) FS_TXN_VERSION(REPO_FS(TSDB_COMMIT_REPO(ch))) static int tsdbCommitMeta(STsdbRepo *pRepo); @@ -66,7 +66,6 @@ static int tsdbCommitToTable(SCommitH *pCommith, int tid); static int tsdbSetCommitTable(SCommitH *pCommith, STable *pTable); static int tsdbComparKeyBlock(const void *arg1, const void *arg2); static int tsdbWriteBlockInfo(SCommitH *pCommih); -static int tsdbWriteBlockIdx(SCommitH *pCommih); static int tsdbCommitMemData(SCommitH *pCommith, SCommitIter *pIter, TSKEY keyLimit, bool toData); static int 
tsdbMergeMemData(SCommitH *pCommith, SCommitIter *pIter, int bidx); static int tsdbMoveBlock(SCommitH *pCommith, int bidx); @@ -81,7 +80,6 @@ static bool tsdbCanAddSubBlock(SCommitH *pCommith, SBlock *pBlock, SMergeInfo *p static void tsdbLoadAndMergeFromCache(SDataCols *pDataCols, int *iter, SCommitIter *pCommitIter, SDataCols *pTarget, TSKEY maxKey, int maxRows, int8_t update); static int tsdbApplyRtn(STsdbRepo *pRepo); -static int tsdbApplyRtnOnFSet(STsdbRepo *pRepo, SDFileSet *pSet, SRtn *pRtn); void *tsdbCommitData(STsdbRepo *pRepo) { tsdbStartCommit(pRepo); @@ -109,6 +107,151 @@ _err: return NULL; } +int tsdbApplyRtnOnFSet(STsdbRepo *pRepo, SDFileSet *pSet, SRtn *pRtn) { + SDiskID did; + SDFileSet nSet; + STsdbFS * pfs = REPO_FS(pRepo); + int level; + + ASSERT(pSet->fid >= pRtn->minFid); + + level = tsdbGetFidLevel(pSet->fid, pRtn); + + tfsAllocDisk(level, &(did.level), &(did.id)); + if (did.level == TFS_UNDECIDED_LEVEL) { + terrno = TSDB_CODE_TDB_NO_AVAIL_DISK; + return -1; + } + + if (did.level > TSDB_FSET_LEVEL(pSet)) { + // Need to move the FSET to higher level + tsdbInitDFileSet(&nSet, did, REPO_ID(pRepo), pSet->fid, FS_TXN_VERSION(pfs)); + + if (tsdbCopyDFileSet(pSet, &nSet) < 0) { + tsdbError("vgId:%d failed to copy FSET %d from level %d to level %d since %s", REPO_ID(pRepo), pSet->fid, + TSDB_FSET_LEVEL(pSet), did.level, tstrerror(terrno)); + return -1; + } + + if (tsdbUpdateDFileSet(pfs, &nSet) < 0) { + return -1; + } + + tsdbInfo("vgId:%d FSET %d is copied from level %d disk id %d to level %d disk id %d", REPO_ID(pRepo), pSet->fid, + TSDB_FSET_LEVEL(pSet), TSDB_FSET_ID(pSet), did.level, did.id); + } else { + // On a correct level + if (tsdbUpdateDFileSet(pfs, pSet) < 0) { + return -1; + } + } + + return 0; +} + +int tsdbWriteBlockInfoImpl(SDFile *pHeadf, STable *pTable, SArray *pSupA, SArray *pSubA, void **ppBuf, + SBlockIdx *pIdx) { + size_t nSupBlocks; + size_t nSubBlocks; + uint32_t tlen; + SBlockInfo *pBlkInfo; + int64_t offset; + SBlock * pBlock; + + memset(pIdx, 0, sizeof(*pIdx)); + + nSupBlocks = taosArrayGetSize(pSupA); + nSubBlocks = (pSubA == NULL) ? 0 : taosArrayGetSize(pSubA); + + if (nSupBlocks <= 0) { + // No data (data all deleted) + return 0; + } + + tlen = (uint32_t)(sizeof(SBlockInfo) + sizeof(SBlock) * (nSupBlocks + nSubBlocks) + sizeof(TSCKSUM)); + if (tsdbMakeRoom(ppBuf, tlen) < 0) return -1; + pBlkInfo = *ppBuf; + + pBlkInfo->delimiter = TSDB_FILE_DELIMITER; + pBlkInfo->tid = TABLE_TID(pTable); + pBlkInfo->uid = TABLE_UID(pTable); + + memcpy((void *)(pBlkInfo->blocks), taosArrayGet(pSupA, 0), nSupBlocks * sizeof(SBlock)); + if (nSubBlocks > 0) { + memcpy((void *)(pBlkInfo->blocks + nSupBlocks), taosArrayGet(pSubA, 0), nSubBlocks * sizeof(SBlock)); + + for (int i = 0; i < nSupBlocks; i++) { + pBlock = pBlkInfo->blocks + i; + + if (pBlock->numOfSubBlocks > 1) { + pBlock->offset += (sizeof(SBlockInfo) + sizeof(SBlock) * nSupBlocks); + } + } + } + + taosCalcChecksumAppend(0, (uint8_t *)pBlkInfo, tlen); + + if (tsdbAppendDFile(pHeadf, (void *)pBlkInfo, tlen, &offset) < 0) { + return -1; + } + + tsdbUpdateDFileMagic(pHeadf, POINTER_SHIFT(pBlkInfo, tlen - sizeof(TSCKSUM))); + + // Set pIdx + pBlock = taosArrayGetLast(pSupA); + + pIdx->tid = TABLE_TID(pTable); + pIdx->uid = TABLE_UID(pTable); + pIdx->hasLast = pBlock->last ? 
1 : 0; + pIdx->maxKey = pBlock->keyLast; + pIdx->numOfBlocks = (uint32_t)nSupBlocks; + pIdx->len = tlen; + pIdx->offset = (uint32_t)offset; + + return 0; +} + +int tsdbWriteBlockIdx(SDFile *pHeadf, SArray *pIdxA, void **ppBuf) { + SBlockIdx *pBlkIdx; + size_t nidx = taosArrayGetSize(pIdxA); + int tlen = 0, size; + int64_t offset; + + if (nidx <= 0) { + // All data are deleted + pHeadf->info.offset = 0; + pHeadf->info.len = 0; + return 0; + } + + for (size_t i = 0; i < nidx; i++) { + pBlkIdx = (SBlockIdx *)taosArrayGet(pIdxA, i); + + size = tsdbEncodeSBlockIdx(NULL, pBlkIdx); + if (tsdbMakeRoom(ppBuf, tlen + size) < 0) return -1; + + void *ptr = POINTER_SHIFT(*ppBuf, tlen); + tsdbEncodeSBlockIdx(&ptr, pBlkIdx); + + tlen += size; + } + + tlen += sizeof(TSCKSUM); + if (tsdbMakeRoom(ppBuf, tlen) < 0) return -1; + taosCalcChecksumAppend(0, (uint8_t *)(*ppBuf), tlen); + + if (tsdbAppendDFile(pHeadf, *ppBuf, tlen, &offset) < tlen) { + return -1; + } + + tsdbUpdateDFileMagic(pHeadf, POINTER_SHIFT(*ppBuf, tlen - sizeof(TSCKSUM))); + pHeadf->info.offset = (uint32_t)offset; + pHeadf->info.len = tlen; + + return 0; +} + + // =================== Commit Meta Data static int tsdbCommitMeta(STsdbRepo *pRepo) { STsdbFS * pfs = REPO_FS(pRepo); @@ -438,7 +581,8 @@ static int tsdbCommitToFile(SCommitH *pCommith, SDFileSet *pSet, int fid) { } } - if (tsdbWriteBlockIdx(pCommith) < 0) { + if (tsdbWriteBlockIdx(TSDB_COMMIT_HEAD_FILE(pCommith), pCommith->aBlkIdx, (void **)(&(TSDB_COMMIT_BUF(pCommith)))) < + 0) { tsdbError("vgId:%d failed to write SBlockIdx part to FSET %d since %s", REPO_ID(pRepo), fid, tstrerror(terrno)); tsdbCloseCommitFile(pCommith, true); // revert the file change @@ -738,23 +882,21 @@ static int tsdbComparKeyBlock(const void *arg1, const void *arg2) { } } -static int tsdbWriteBlock(SCommitH *pCommith, SDFile *pDFile, SDataCols *pDataCols, SBlock *pBlock, bool isLast, - bool isSuper) { - STsdbRepo * pRepo = TSDB_COMMIT_REPO(pCommith); +int tsdbWriteBlockImpl(STsdbRepo *pRepo, STable *pTable, SDFile *pDFile, SDataCols *pDataCols, SBlock *pBlock, + bool isLast, bool isSuper, void **ppBuf, void **ppCBuf) { STsdbCfg * pCfg = REPO_CFG(pRepo); SBlockData *pBlockData; int64_t offset = 0; - STable * pTable = TSDB_COMMIT_TABLE(pCommith); int rowsToWrite = pDataCols->numOfRows; ASSERT(rowsToWrite > 0 && rowsToWrite <= pCfg->maxRowsPerFileBlock); ASSERT((!isLast) || rowsToWrite < pCfg->minRowsPerFileBlock); // Make buffer space - if (tsdbMakeRoom((void **)(&TSDB_COMMIT_BUF(pCommith)), TSDB_BLOCK_STATIS_SIZE(pDataCols->numOfCols)) < 0) { + if (tsdbMakeRoom(ppBuf, TSDB_BLOCK_STATIS_SIZE(pDataCols->numOfCols)) < 0) { return -1; } - pBlockData = (SBlockData *)TSDB_COMMIT_BUF(pCommith); + pBlockData = (SBlockData *)(*ppBuf); // Get # of cols not all NULL(not including key column) int nColsNotAllNull = 0; @@ -800,23 +942,23 @@ static int tsdbWriteBlock(SCommitH *pCommith, SDFile *pDFile, SDataCols *pDataCo void * tptr; // Make room - if (tsdbMakeRoom((void **)(&TSDB_COMMIT_BUF(pCommith)), lsize + tlen + COMP_OVERFLOW_BYTES + sizeof(TSCKSUM)) < 0) { + if (tsdbMakeRoom(ppBuf, lsize + tlen + COMP_OVERFLOW_BYTES + sizeof(TSCKSUM)) < 0) { return -1; } - pBlockData = (SBlockData *)TSDB_COMMIT_BUF(pCommith); + pBlockData = (SBlockData *)(*ppBuf); pBlockCol = pBlockData->cols + tcol; tptr = POINTER_SHIFT(pBlockData, lsize); if (pCfg->compression == TWO_STAGE_COMP && - tsdbMakeRoom((void **)(&TSDB_COMMIT_COMP_BUF(pCommith)), tlen + COMP_OVERFLOW_BYTES) < 0) { + tsdbMakeRoom(ppCBuf, tlen + COMP_OVERFLOW_BYTES) < 0) { 
return -1; } // Compress or just copy if (pCfg->compression) { flen = (*(tDataTypes[pDataCol->type].compFunc))((char *)pDataCol->pData, tlen, rowsToWrite, tptr, - tlen + COMP_OVERFLOW_BYTES, pCfg->compression, - TSDB_COMMIT_COMP_BUF(pCommith), tlen + COMP_OVERFLOW_BYTES); + tlen + COMP_OVERFLOW_BYTES, pCfg->compression, *ppCBuf, + tlen + COMP_OVERFLOW_BYTES); } else { flen = tlen; memcpy(tptr, pDataCol->pData, flen); @@ -872,68 +1014,27 @@ static int tsdbWriteBlock(SCommitH *pCommith, SDFile *pDFile, SDataCols *pDataCo return 0; } +static int tsdbWriteBlock(SCommitH *pCommith, SDFile *pDFile, SDataCols *pDataCols, SBlock *pBlock, bool isLast, + bool isSuper) { + return tsdbWriteBlockImpl(TSDB_COMMIT_REPO(pCommith), TSDB_COMMIT_TABLE(pCommith), pDFile, pDataCols, pBlock, isLast, + isSuper, (void **)(&(TSDB_COMMIT_BUF(pCommith))), + (void **)(&(TSDB_COMMIT_COMP_BUF(pCommith)))); +} + + static int tsdbWriteBlockInfo(SCommitH *pCommih) { - SDFile * pHeadf = TSDB_COMMIT_HEAD_FILE(pCommih); - SBlockIdx blkIdx; - STable * pTable = TSDB_COMMIT_TABLE(pCommih); - SBlock * pBlock; - size_t nSupBlocks; - size_t nSubBlocks; - uint32_t tlen; - SBlockInfo *pBlkInfo; - int64_t offset; + SDFile * pHeadf = TSDB_COMMIT_HEAD_FILE(pCommih); + SBlockIdx blkIdx; + STable * pTable = TSDB_COMMIT_TABLE(pCommih); - nSupBlocks = taosArrayGetSize(pCommih->aSupBlk); - nSubBlocks = taosArrayGetSize(pCommih->aSubBlk); - - if (nSupBlocks <= 0) { - // No data (data all deleted) - return 0; - } - - tlen = (uint32_t)(sizeof(SBlockInfo) + sizeof(SBlock) * (nSupBlocks + nSubBlocks) + sizeof(TSCKSUM)); - - // Write SBlockInfo part - if (tsdbMakeRoom((void **)(&(TSDB_COMMIT_BUF(pCommih))), tlen) < 0) return -1; - pBlkInfo = TSDB_COMMIT_BUF(pCommih); - - pBlkInfo->delimiter = TSDB_FILE_DELIMITER; - pBlkInfo->tid = TABLE_TID(pTable); - pBlkInfo->uid = TABLE_UID(pTable); - - memcpy((void *)(pBlkInfo->blocks), taosArrayGet(pCommih->aSupBlk, 0), nSupBlocks * sizeof(SBlock)); - if (nSubBlocks > 0) { - memcpy((void *)(pBlkInfo->blocks + nSupBlocks), taosArrayGet(pCommih->aSubBlk, 0), nSubBlocks * sizeof(SBlock)); - - for (int i = 0; i < nSupBlocks; i++) { - pBlock = pBlkInfo->blocks + i; - - if (pBlock->numOfSubBlocks > 1) { - pBlock->offset += (sizeof(SBlockInfo) + sizeof(SBlock) * nSupBlocks); - } - } - } - - taosCalcChecksumAppend(0, (uint8_t *)pBlkInfo, tlen); - - if (tsdbAppendDFile(pHeadf, TSDB_COMMIT_BUF(pCommih), tlen, &offset) < 0) { + if (tsdbWriteBlockInfoImpl(pHeadf, pTable, pCommih->aSupBlk, pCommih->aSubBlk, (void **)(&(TSDB_COMMIT_BUF(pCommih))), + &blkIdx) < 0) { return -1; } - tsdbUpdateDFileMagic(pHeadf, POINTER_SHIFT(pBlkInfo, tlen - sizeof(TSCKSUM))); - - // Set blkIdx - pBlock = taosArrayGet(pCommih->aSupBlk, nSupBlocks - 1); - - blkIdx.tid = TABLE_TID(pTable); - blkIdx.uid = TABLE_UID(pTable); - blkIdx.hasLast = pBlock->last ? 
1 : 0; - blkIdx.maxKey = pBlock->keyLast; - blkIdx.numOfBlocks = (uint32_t)nSupBlocks; - blkIdx.len = tlen; - blkIdx.offset = (uint32_t)offset; - - ASSERT(blkIdx.numOfBlocks > 0); + if (blkIdx.numOfBlocks == 0) { + return 0; + } if (taosArrayPush(pCommih->aBlkIdx, (void *)(&blkIdx)) == NULL) { terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; @@ -943,49 +1044,6 @@ static int tsdbWriteBlockInfo(SCommitH *pCommih) { return 0; } -static int tsdbWriteBlockIdx(SCommitH *pCommih) { - SBlockIdx *pBlkIdx; - SDFile * pHeadf = TSDB_COMMIT_HEAD_FILE(pCommih); - size_t nidx = taosArrayGetSize(pCommih->aBlkIdx); - int tlen = 0, size; - int64_t offset; - - if (nidx <= 0) { - // All data are deleted - pHeadf->info.offset = 0; - pHeadf->info.len = 0; - return 0; - } - - for (size_t i = 0; i < nidx; i++) { - pBlkIdx = (SBlockIdx *)taosArrayGet(pCommih->aBlkIdx, i); - - size = tsdbEncodeSBlockIdx(NULL, pBlkIdx); - if (tsdbMakeRoom((void **)(&TSDB_COMMIT_BUF(pCommih)), tlen + size) < 0) return -1; - - void *ptr = POINTER_SHIFT(TSDB_COMMIT_BUF(pCommih), tlen); - tsdbEncodeSBlockIdx(&ptr, pBlkIdx); - - tlen += size; - } - - tlen += sizeof(TSCKSUM); - if (tsdbMakeRoom((void **)(&TSDB_COMMIT_BUF(pCommih)), tlen) < 0) return -1; - taosCalcChecksumAppend(0, (uint8_t *)TSDB_COMMIT_BUF(pCommih), tlen); - - if (tsdbAppendDFile(pHeadf, TSDB_COMMIT_BUF(pCommih), tlen, &offset) < tlen) { - tsdbError("vgId:%d failed to write block index part to file %s since %s", TSDB_COMMIT_REPO_ID(pCommih), - TSDB_FILE_FULL_NAME(pHeadf), tstrerror(terrno)); - return -1; - } - - tsdbUpdateDFileMagic(pHeadf, POINTER_SHIFT(TSDB_COMMIT_BUF(pCommih), tlen - sizeof(TSCKSUM))); - pHeadf->info.offset = (uint32_t)offset; - pHeadf->info.len = tlen; - - return 0; -} - static int tsdbCommitMemData(SCommitH *pCommith, SCommitIter *pIter, TSKEY keyLimit, bool toData) { STsdbRepo *pRepo = TSDB_COMMIT_REPO(pCommith); STsdbCfg * pCfg = REPO_CFG(pRepo); @@ -1438,45 +1496,3 @@ static int tsdbApplyRtn(STsdbRepo *pRepo) { return 0; } - -static int tsdbApplyRtnOnFSet(STsdbRepo *pRepo, SDFileSet *pSet, SRtn *pRtn) { - SDiskID did; - SDFileSet nSet; - STsdbFS * pfs = REPO_FS(pRepo); - int level; - - ASSERT(pSet->fid >= pRtn->minFid); - - level = tsdbGetFidLevel(pSet->fid, pRtn); - - tfsAllocDisk(level, &(did.level), &(did.id)); - if (did.level == TFS_UNDECIDED_LEVEL) { - terrno = TSDB_CODE_TDB_NO_AVAIL_DISK; - return -1; - } - - if (did.level > TSDB_FSET_LEVEL(pSet)) { - // Need to move the FSET to higher level - tsdbInitDFileSet(&nSet, did, REPO_ID(pRepo), pSet->fid, FS_TXN_VERSION(pfs)); - - if (tsdbCopyDFileSet(pSet, &nSet) < 0) { - tsdbError("vgId:%d failed to copy FSET %d from level %d to level %d since %s", REPO_ID(pRepo), pSet->fid, - TSDB_FSET_LEVEL(pSet), did.level, tstrerror(terrno)); - return -1; - } - - if (tsdbUpdateDFileSet(pfs, &nSet) < 0) { - return -1; - } - - tsdbInfo("vgId:%d FSET %d is copied from level %d disk id %d to level %d disk id %d", REPO_ID(pRepo), pSet->fid, - TSDB_FSET_LEVEL(pSet), TSDB_FSET_ID(pSet), did.level, did.id); - } else { - // On a correct level - if (tsdbUpdateDFileSet(pfs, pSet) < 0) { - return -1; - } - } - - return 0; -} \ No newline at end of file From afc352547714d73172b6936f4a3a9fea77828744 Mon Sep 17 00:00:00 2001 From: Hongze Cheng Date: Tue, 2 Mar 2021 09:21:27 +0800 Subject: [PATCH 03/82] fix return value check problem --- src/tsdb/src/tsdbCommit.c | 4 ++-- src/tsdb/src/tsdbFS.c | 2 +- src/tsdb/src/tsdbReadImpl.c | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/tsdb/src/tsdbCommit.c 
b/src/tsdb/src/tsdbCommit.c index 5656db69a4..e1f3cbb5f5 100644 --- a/src/tsdb/src/tsdbCommit.c +++ b/src/tsdb/src/tsdbCommit.c @@ -1188,12 +1188,12 @@ static int tsdbMoveBlock(SCommitH *pCommith, int bidx) { } static int tsdbCommitAddBlock(SCommitH *pCommith, const SBlock *pSupBlock, const SBlock *pSubBlocks, int nSubBlocks) { - if (taosArrayPush(pCommith->aSupBlk, pSupBlock) < 0) { + if (taosArrayPush(pCommith->aSupBlk, pSupBlock) == NULL) { terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; return -1; } - if (pSubBlocks && taosArrayPushBatch(pCommith->aSubBlk, pSubBlocks, nSubBlocks) < 0) { + if (pSubBlocks && taosArrayPushBatch(pCommith->aSubBlk, pSubBlocks, nSubBlocks) == NULL) { terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; return -1; } diff --git a/src/tsdb/src/tsdbFS.c b/src/tsdb/src/tsdbFS.c index cbff4fbeaa..17d5f8f66f 100644 --- a/src/tsdb/src/tsdbFS.c +++ b/src/tsdb/src/tsdbFS.c @@ -1045,7 +1045,7 @@ static int tsdbRestoreDFileSet(STsdbRepo *pRepo) { int code = regexec(®ex, bname, 0, NULL, 0); if (code == 0) { - if (taosArrayPush(fArray, (void *)pf) < 0) { + if (taosArrayPush(fArray, (void *)pf) == NULL) { terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; tfsClosedir(tdir); taosArrayDestroy(fArray); diff --git a/src/tsdb/src/tsdbReadImpl.c b/src/tsdb/src/tsdbReadImpl.c index 572706d45e..7212ae1636 100644 --- a/src/tsdb/src/tsdbReadImpl.c +++ b/src/tsdb/src/tsdbReadImpl.c @@ -139,7 +139,7 @@ int tsdbLoadBlockIdx(SReadH *pReadh) { ptr = tsdbDecodeSBlockIdx(ptr, &blkIdx); ASSERT(ptr != NULL); - if (taosArrayPush(pReadh->aBlkIdx, (void *)(&blkIdx)) < 0) { + if (taosArrayPush(pReadh->aBlkIdx, (void *)(&blkIdx)) == NULL) { terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; return -1; } From 3449f023ac6ec84e4d65979734027636649484f0 Mon Sep 17 00:00:00 2001 From: Hongze Cheng Date: Tue, 2 Mar 2021 13:30:11 +0800 Subject: [PATCH 04/82] change merge datacols interface --- src/common/inc/tdataformat.h | 2 +- src/common/src/tdataformat.c | 15 ++++++++++----- src/tsdb/src/tsdbReadImpl.c | 4 ++-- 3 files changed, 13 insertions(+), 8 deletions(-) diff --git a/src/common/inc/tdataformat.h b/src/common/inc/tdataformat.h index 959654d158..308f5a1e58 100644 --- a/src/common/inc/tdataformat.h +++ b/src/common/inc/tdataformat.h @@ -296,7 +296,7 @@ int tdInitDataCols(SDataCols *pCols, STSchema *pSchema); SDataCols *tdDupDataCols(SDataCols *pCols, bool keepData); SDataCols *tdFreeDataCols(SDataCols *pCols); void tdAppendDataRowToDataCol(SDataRow row, STSchema *pSchema, SDataCols *pCols); -int tdMergeDataCols(SDataCols *target, SDataCols *src, int rowsToMerge); +int tdMergeDataCols(SDataCols *target, SDataCols *source, int rowsToMerge, int *pOffset); // ----------------- K-V data row structure /* diff --git a/src/common/src/tdataformat.c b/src/common/src/tdataformat.c index f5b84e4c9a..db73905119 100644 --- a/src/common/src/tdataformat.c +++ b/src/common/src/tdataformat.c @@ -441,30 +441,35 @@ void tdAppendDataRowToDataCol(SDataRow row, STSchema *pSchema, SDataCols *pCols) pCols->numOfRows++; } -int tdMergeDataCols(SDataCols *target, SDataCols *source, int rowsToMerge) { +int tdMergeDataCols(SDataCols *target, SDataCols *source, int rowsToMerge, int *pOffset) { ASSERT(rowsToMerge > 0 && rowsToMerge <= source->numOfRows); ASSERT(target->numOfCols == source->numOfCols); + int offset = 0; + + if (pOffset == NULL) { + pOffset = &offset; + } SDataCols *pTarget = NULL; - if (dataColsKeyLast(target) < dataColsKeyFirst(source)) { // No overlap + if ((target->numOfRows == 0) || (dataColsKeyLast(target) < dataColsKeyFirst(source))) { // No overlap 
ASSERT(target->numOfRows + rowsToMerge <= target->maxPoints); for (int i = 0; i < rowsToMerge; i++) { for (int j = 0; j < source->numOfCols; j++) { if (source->cols[j].len > 0) { - dataColAppendVal(target->cols + j, tdGetColDataOfRow(source->cols + j, i), target->numOfRows, + dataColAppendVal(target->cols + j, tdGetColDataOfRow(source->cols + j, i + (*pOffset)), target->numOfRows, target->maxPoints); } } target->numOfRows++; + (*pOffset)++; } } else { pTarget = tdDupDataCols(target, true); if (pTarget == NULL) goto _err; int iter1 = 0; - int iter2 = 0; - tdMergeTwoDataCols(target, pTarget, &iter1, pTarget->numOfRows, source, &iter2, source->numOfRows, + tdMergeTwoDataCols(target, pTarget, &iter1, pTarget->numOfRows, source, pOffset, source->numOfRows, pTarget->numOfRows + rowsToMerge); } diff --git a/src/tsdb/src/tsdbReadImpl.c b/src/tsdb/src/tsdbReadImpl.c index 7212ae1636..dd14dc700f 100644 --- a/src/tsdb/src/tsdbReadImpl.c +++ b/src/tsdb/src/tsdbReadImpl.c @@ -258,7 +258,7 @@ int tsdbLoadBlockData(SReadH *pReadh, SBlock *pBlock, SBlockInfo *pBlkInfo) { for (int i = 1; i < pBlock->numOfSubBlocks; i++) { iBlock++; if (tsdbLoadBlockDataImpl(pReadh, iBlock, pReadh->pDCols[1]) < 0) return -1; - if (tdMergeDataCols(pReadh->pDCols[0], pReadh->pDCols[1], pReadh->pDCols[1]->numOfRows) < 0) return -1; + if (tdMergeDataCols(pReadh->pDCols[0], pReadh->pDCols[1], pReadh->pDCols[1]->numOfRows, NULL) < 0) return -1; } ASSERT(pReadh->pDCols[0]->numOfRows == pBlock->numOfRows); @@ -284,7 +284,7 @@ int tsdbLoadBlockDataCols(SReadH *pReadh, SBlock *pBlock, SBlockInfo *pBlkInfo, for (int i = 1; i < pBlock->numOfSubBlocks; i++) { iBlock++; if (tsdbLoadBlockDataColsImpl(pReadh, iBlock, pReadh->pDCols[1], colIds, numOfColsIds) < 0) return -1; - if (tdMergeDataCols(pReadh->pDCols[0], pReadh->pDCols[1], pReadh->pDCols[1]->numOfRows) < 0) return -1; + if (tdMergeDataCols(pReadh->pDCols[0], pReadh->pDCols[1], pReadh->pDCols[1]->numOfRows, NULL) < 0) return -1; } ASSERT(pReadh->pDCols[0]->numOfRows == pBlock->numOfRows); From bd8cb525559b28582b7a890636562bc0afdbbd39 Mon Sep 17 00:00:00 2001 From: Hongze Cheng Date: Fri, 5 Mar 2021 13:25:56 +0800 Subject: [PATCH 05/82] make compact as a plugin interface --- src/tsdb/CMakeLists.txt | 4 ++++ src/tsdb/src/tsdbCompact.c | 6 +++++- 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/src/tsdb/CMakeLists.txt b/src/tsdb/CMakeLists.txt index 31d52aae7d..8080a61a6c 100644 --- a/src/tsdb/CMakeLists.txt +++ b/src/tsdb/CMakeLists.txt @@ -6,6 +6,10 @@ AUX_SOURCE_DIRECTORY(src SRC) ADD_LIBRARY(tsdb ${SRC}) TARGET_LINK_LIBRARIES(tsdb tfs common tutil) +IF (TD_TSDB_PLUGINS) + TARGET_LINK_LIBRARIES(tsdb tsdbPlugins) +ENDIF () + IF (TD_LINUX) # Someone has no gtest directory, so comment it # ADD_SUBDIRECTORY(tests) diff --git a/src/tsdb/src/tsdbCompact.c b/src/tsdb/src/tsdbCompact.c index d8cd558424..635bba388a 100644 --- a/src/tsdb/src/tsdbCompact.c +++ b/src/tsdb/src/tsdbCompact.c @@ -14,5 +14,9 @@ */ #include "tsdb.h" +#ifndef _TSDB_PLUGINS + int tsdbCompact(STsdbRepo *pRepo) { return 0; } -void *tsdbCompactImpl(STsdbRepo *pRepo) { return NULL; } \ No newline at end of file +void *tsdbCompactImpl(STsdbRepo *pRepo) { return NULL; } + +#endif \ No newline at end of file From 261e50e23e5bc34723032c4f96e8908d24bb69ce Mon Sep 17 00:00:00 2001 From: tickduan <417921451@qq.com> Date: Sat, 22 May 2021 15:11:45 +0800 Subject: [PATCH 06/82] cq can continue with output table last row time --- src/client/src/tscStream.c | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 
insertions(+) diff --git a/src/client/src/tscStream.c b/src/client/src/tscStream.c index f0f87f26db..267fa0c0fe 100644 --- a/src/client/src/tscStream.c +++ b/src/client/src/tscStream.c @@ -539,6 +539,31 @@ static int64_t tscGetLaunchTimestamp(const SSqlStream *pStream) { return (pStream->precision == TSDB_TIME_PRECISION_MICRO) ? timer / 1000L : timer; } +// +// get tableName last row time, if have error return zero. +// +static int64_t tscGetStreamTableLastTime(SSqlObj* pSql, SSqlStream* pStream, const char* tableName) { + + int64_t last_time = 0; + char sql[128] = ""; + sprintf(sql, "select last_row(*) from %s;", tableName); + + // query sql + TAOS_RES* res = taos_query(pSql->pTscObj, sql); + if(res == NULL) + return 0; + + // only fetch one row + TAOS_ROW row = taos_fetch_row(res); + if( row[0] ) { + last_time = *((int64_t*)row[0]); + } + + // free and return + taos_free_result(res); + return last_time; +} + static void tscCreateStream(void *param, TAOS_RES *res, int code) { SSqlStream* pStream = (SSqlStream*)param; SSqlObj* pSql = pStream->pSql; @@ -572,6 +597,13 @@ static void tscCreateStream(void *param, TAOS_RES *res, int code) { pStream->stime = tscGetStreamStartTimestamp(pSql, pStream, pStream->stime); + // set output table last record time to stime if have, why do this, because continue with last break + int64_t last_time = tscGetStreamTableLastTime(pSql, pStream, pStream->dstTable); + if(last_time > 0 && last_time > pStream->stime) { + // can replace stime with last row time + pStream->stime = last_time; + } + int64_t starttime = tscGetLaunchTimestamp(pStream); pCmd->command = TSDB_SQL_SELECT; From 1ee65f6c86bdbfc5402fd7ee0048249e07ac9a2a Mon Sep 17 00:00:00 2001 From: tickduan <417921451@qq.com> Date: Sat, 22 May 2021 15:39:46 +0800 Subject: [PATCH 07/82] modify stream retry defalut delay from 10ms to 10*1000ms --- src/client/src/tscStream.c | 4 ++-- src/common/inc/tglobal.h | 2 +- src/common/src/tglobal.c | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/src/client/src/tscStream.c b/src/client/src/tscStream.c index 267fa0c0fe..0f6a403582 100644 --- a/src/client/src/tscStream.c +++ b/src/client/src/tscStream.c @@ -48,8 +48,8 @@ static bool isProjectStream(SQueryInfo* pQueryInfo) { static int64_t tscGetRetryDelayTime(SSqlStream* pStream, int64_t slidingTime, int16_t prec) { float retryRangeFactor = 0.3f; - int64_t retryDelta = (int64_t)(tsStreamCompRetryDelay * retryRangeFactor); - retryDelta = ((rand() % retryDelta) + tsStreamCompRetryDelay) * 1000L; + int64_t retryDelta = (int64_t)(tsRetryStreamCompDelay * retryRangeFactor); + retryDelta = ((rand() % retryDelta) + tsRetryStreamCompDelay) * 1000L; if (pStream->interval.intervalUnit != 'n' && pStream->interval.intervalUnit != 'y') { // change to ms diff --git a/src/common/inc/tglobal.h b/src/common/inc/tglobal.h index 26475834d5..1e66ce3f0c 100644 --- a/src/common/inc/tglobal.h +++ b/src/common/inc/tglobal.h @@ -74,7 +74,7 @@ extern int32_t tsMinSlidingTime; extern int32_t tsMinIntervalTime; extern int32_t tsMaxStreamComputDelay; extern int32_t tsStreamCompStartDelay; -extern int32_t tsStreamCompRetryDelay; +extern int32_t tsRetryStreamCompDelay; extern float tsStreamComputDelayRatio; // the delayed computing ration of the whole time window extern int32_t tsProjectExecInterval; extern int64_t tsMaxRetentWindow; diff --git a/src/common/src/tglobal.c b/src/common/src/tglobal.c index c3c159ee45..2f18c8f73a 100644 --- a/src/common/src/tglobal.c +++ b/src/common/src/tglobal.c @@ -92,7 +92,7 @@ int32_t 
tsMaxStreamComputDelay = 20000; int32_t tsStreamCompStartDelay = 10000; // the stream computing delay time after executing failed, change accordingly -int32_t tsStreamCompRetryDelay = 10; +int32_t tsRetryStreamCompDelay = 10*1000; // The delayed computing ration. 10% of the whole computing time window by default. float tsStreamComputDelayRatio = 0.1f; @@ -696,7 +696,7 @@ static void doInitGlobalConfig(void) { taosInitConfigOption(cfg); cfg.option = "retryStreamCompDelay"; - cfg.ptr = &tsStreamCompRetryDelay; + cfg.ptr = &tsRetryStreamCompDelay; cfg.valType = TAOS_CFG_VTYPE_INT32; cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW; cfg.minValue = 10; From 75ca521a8618defa6e68260d4de896bc3132fb62 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Wed, 26 May 2021 11:00:29 +0800 Subject: [PATCH 08/82] [td-225] add log for debug purpose --- src/client/src/tscParseInsert.c | 11 ++++------- src/client/src/tscServer.c | 2 +- 2 files changed, 5 insertions(+), 8 deletions(-) diff --git a/src/client/src/tscParseInsert.c b/src/client/src/tscParseInsert.c index f09ca351ff..24031c8ce2 100644 --- a/src/client/src/tscParseInsert.c +++ b/src/client/src/tscParseInsert.c @@ -1348,15 +1348,10 @@ int tsParseSql(SSqlObj *pSql, bool initial) { } // make a backup as tsParseInsertSql may modify the string - char* sqlstr = strdup(pSql->sqlstr); ret = tsParseInsertSql(pSql); - if ((sqlstr == NULL) || (pSql->parseRetry >= 1) || - (ret != TSDB_CODE_TSC_SQL_SYNTAX_ERROR && ret != TSDB_CODE_TSC_INVALID_SQL)) { - free(sqlstr); + if ((pSql->parseRetry >= 1) || (ret != TSDB_CODE_TSC_SQL_SYNTAX_ERROR && ret != TSDB_CODE_TSC_INVALID_SQL)) { } else { tscResetSqlCmd(pCmd, true); - free(pSql->sqlstr); - pSql->sqlstr = sqlstr; pSql->parseRetry++; if ((ret = tsInsertInitialCheck(pSql)) == TSDB_CODE_SUCCESS) { ret = tsParseInsertSql(pSql); @@ -1365,9 +1360,11 @@ int tsParseSql(SSqlObj *pSql, bool initial) { } else { SSqlInfo SQLInfo = qSqlParse(pSql->sqlstr); ret = tscToSQLCmd(pSql, &SQLInfo); - if (ret == TSDB_CODE_TSC_INVALID_SQL && pSql->parseRetry == 0 && SQLInfo.type == TSDB_SQL_NULL) { + if (ret == TSDB_CODE_TSC_INVALID_SQL && pSql->parseRetry == 0/* && SQLInfo.type == TSDB_SQL_NULL*/) { + tscDebug("0x%"PRIx64 " parse sql failed, retry again after clear local meta cache", pSql->self); tscResetSqlCmd(pCmd, true); pSql->parseRetry++; + ret = tscToSQLCmd(pSql, &SQLInfo); } diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c index 648b95180e..3f47cdba1f 100644 --- a/src/client/src/tscServer.c +++ b/src/client/src/tscServer.c @@ -2517,7 +2517,7 @@ static int32_t getTableMetaFromMnode(SSqlObj *pSql, STableMetaInfo *pTableMetaIn pNew->fp = tscTableMetaCallBack; pNew->param = (void *)pSql->self; - tscDebug("0x%"PRIx64" metaRid from %" PRId64 " to %" PRId64 , pSql->self, pSql->metaRid, pNew->self); + tscDebug("0x%"PRIx64" metaRid from %" PRId64 " to 0x%" PRIx64 , pSql->self, pSql->metaRid, pNew->self); pSql->metaRid = pNew->self; From 70720a2a18de2b2be13ad296fade2a4918da251c Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Wed, 26 May 2021 17:20:58 +0800 Subject: [PATCH 09/82] [td-225] fix bug found by regression test. 
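The database name is now validated on a stack copy of the token, so any normalization done during validation happens on the copy rather than on the token still held by the parsed statement. A minimal sketch of the call pattern this patch introduces (names taken from tscSQLParser.c in this patch, shown only as an illustration):

    char buf[TSDB_DB_NAME_LEN] = {0};
    SStrToken token = taosTokenDup(&pCreateDB->dbname, buf, tListLen(buf));

    if (tscValidateName(&token) != TSDB_CODE_SUCCESS) {
      return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
    }

    int32_t ret = tNameSetDbName(&pTableMetaInfo->name, getAccountId(pSql), &token);

taosTokenDup() asserts the destination buffer is larger than the token, copies the bytes, and NUL-terminates the copy, so the returned token points at the caller-supplied buffer.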
--- src/client/src/tscParseInsert.c | 10 +++++----- src/client/src/tscSQLParser.c | 8 ++++++-- src/util/inc/ttoken.h | 2 ++ src/util/src/ttokenizer.c | 12 ++++++++++++ 4 files changed, 25 insertions(+), 7 deletions(-) diff --git a/src/client/src/tscParseInsert.c b/src/client/src/tscParseInsert.c index 24031c8ce2..f54237306c 100644 --- a/src/client/src/tscParseInsert.c +++ b/src/client/src/tscParseInsert.c @@ -1358,17 +1358,17 @@ int tsParseSql(SSqlObj *pSql, bool initial) { } } } else { - SSqlInfo SQLInfo = qSqlParse(pSql->sqlstr); - ret = tscToSQLCmd(pSql, &SQLInfo); - if (ret == TSDB_CODE_TSC_INVALID_SQL && pSql->parseRetry == 0/* && SQLInfo.type == TSDB_SQL_NULL*/) { + SSqlInfo sqlInfo = qSqlParse(pSql->sqlstr); + ret = tscToSQLCmd(pSql, &sqlInfo); + if (ret == TSDB_CODE_TSC_INVALID_SQL && pSql->parseRetry == 0/* && sqlInfo.type == TSDB_SQL_NULL*/) { tscDebug("0x%"PRIx64 " parse sql failed, retry again after clear local meta cache", pSql->self); tscResetSqlCmd(pCmd, true); pSql->parseRetry++; - ret = tscToSQLCmd(pSql, &SQLInfo); + ret = tscToSQLCmd(pSql, &sqlInfo); } - SqlInfoDestroy(&SQLInfo); + SqlInfoDestroy(&sqlInfo); } /* diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index c472b08dc0..7efa593808 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -361,11 +361,15 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) { const char* msg2 = "name too long"; SCreateDbInfo* pCreateDB = &(pInfo->pMiscInfo->dbOpt); - if (tscValidateName(&pCreateDB->dbname) != TSDB_CODE_SUCCESS) { + + char buf[TSDB_DB_NAME_LEN] = {0}; + SStrToken token = taosTokenDup(&pCreateDB->dbname, buf, tListLen(buf)); + + if (tscValidateName(&token) != TSDB_CODE_SUCCESS) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); } - int32_t ret = tNameSetDbName(&pTableMetaInfo->name, getAccountId(pSql), &(pCreateDB->dbname)); + int32_t ret = tNameSetDbName(&pTableMetaInfo->name, getAccountId(pSql), &token); if (ret != TSDB_CODE_SUCCESS) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2); } diff --git a/src/util/inc/ttoken.h b/src/util/inc/ttoken.h index c1e2170ac3..3e7c5f3129 100644 --- a/src/util/inc/ttoken.h +++ b/src/util/inc/ttoken.h @@ -185,6 +185,8 @@ static FORCE_INLINE int32_t tGetNumericStringType(const SStrToken* pToken) { void taosCleanupKeywordsTable(); +SStrToken taosTokenDup(SStrToken* pToken, char* buf, int32_t len); + #ifdef __cplusplus } #endif diff --git a/src/util/src/ttokenizer.c b/src/util/src/ttokenizer.c index 54da75cae0..3448e8e2ba 100644 --- a/src/util/src/ttokenizer.c +++ b/src/util/src/ttokenizer.c @@ -664,3 +664,15 @@ void taosCleanupKeywordsTable() { taosHashCleanup(m); } } + +SStrToken taosTokenDup(SStrToken* pToken, char* buf, int32_t len) { + assert(pToken != NULL && buf != NULL); + SStrToken token = *pToken; + token.z = buf; + + assert(len > token.n); + strncpy(token.z, pToken->z, pToken->n); + token.z[token.n] = 0; + + return token; +} From f0a7a53375c9757dff06ec702d4b1e798d16125f Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Wed, 26 May 2021 19:49:54 +0800 Subject: [PATCH 10/82] [td-225] --- src/client/src/tscSQLParser.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index 7efa593808..f8aa444d9e 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -361,6 +361,9 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) { const char* msg2 = "name too long"; SCreateDbInfo* pCreateDB = 
&(pInfo->pMiscInfo->dbOpt); + if (pCreateDB->dbname.n >= TSDB_DB_NAME_LEN) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2); + } char buf[TSDB_DB_NAME_LEN] = {0}; SStrToken token = taosTokenDup(&pCreateDB->dbname, buf, tListLen(buf)); From 4b4199ce415076bdb4f0a7244e9aef21fb876fe5 Mon Sep 17 00:00:00 2001 From: tickduan <417921451@qq.com> Date: Wed, 26 May 2021 20:50:00 +0800 Subject: [PATCH 11/82] cq support continue query from last stop time --- src/client/inc/tsclient.h | 1 + src/client/src/tscStream.c | 41 +++++++++++++++++++++++++++++--------- 2 files changed, 33 insertions(+), 9 deletions(-) diff --git a/src/client/inc/tsclient.h b/src/client/inc/tsclient.h index 4bfd3bc88f..0e63fa3551 100644 --- a/src/client/inc/tsclient.h +++ b/src/client/inc/tsclient.h @@ -422,6 +422,7 @@ typedef struct SSqlStream { int64_t ctime; // stream created time int64_t stime; // stream next executed time int64_t etime; // stream end query time, when time is larger then etime, the stream will be closed + int64_t ltime; // stream last row time in stream table SInterval interval; void * pTimer; diff --git a/src/client/src/tscStream.c b/src/client/src/tscStream.c index 0f6a403582..9094f95dfc 100644 --- a/src/client/src/tscStream.c +++ b/src/client/src/tscStream.c @@ -538,12 +538,11 @@ static int64_t tscGetLaunchTimestamp(const SSqlStream *pStream) { return (pStream->precision == TSDB_TIME_PRECISION_MICRO) ? timer / 1000L : timer; } - +///* // // get tableName last row time, if have error return zero. // static int64_t tscGetStreamTableLastTime(SSqlObj* pSql, SSqlStream* pStream, const char* tableName) { - int64_t last_time = 0; char sql[128] = ""; sprintf(sql, "select last_row(*) from %s;", tableName); @@ -555,7 +554,7 @@ static int64_t tscGetStreamTableLastTime(SSqlObj* pSql, SSqlStream* pStream, con // only fetch one row TAOS_ROW row = taos_fetch_row(res); - if( row[0] ) { + if( row && row[0] ) { last_time = *((int64_t*)row[0]); } @@ -563,7 +562,7 @@ static int64_t tscGetStreamTableLastTime(SSqlObj* pSql, SSqlStream* pStream, con taos_free_result(res); return last_time; } - +//*/ static void tscCreateStream(void *param, TAOS_RES *res, int code) { SSqlStream* pStream = (SSqlStream*)param; SSqlObj* pSql = pStream->pSql; @@ -597,10 +596,14 @@ static void tscCreateStream(void *param, TAOS_RES *res, int code) { pStream->stime = tscGetStreamStartTimestamp(pSql, pStream, pStream->stime); - // set output table last record time to stime if have, why do this, because continue with last break - int64_t last_time = tscGetStreamTableLastTime(pSql, pStream, pStream->dstTable); + // set output table last record time to stime if have, why do this, because continue with last brea + const char* dstTable = pStream->dstTable? 
pStream->dstTable: ""; + int64_t last_time = tscGetStreamTableLastTime(pSql, pStream, dstTable); + pStream->ltime = last_time; + tscDebug(" CQ get table=%s lasttime=%"PRId64" end.", dstTable, last_time); if(last_time > 0 && last_time > pStream->stime) { // can replace stime with last row time + tscDebug(" CQ set table %s stime=%"PRId64" with lasttime=%"PRId64" ", dstTable, pStream->stime, last_time); pStream->stime = last_time; } @@ -619,6 +622,24 @@ void tscSetStreamDestTable(SSqlStream* pStream, const char* dstTable) { pStream->dstTable = dstTable; } +// already run on another thread +void tscCreateStreamThread(SSchedMsg* pMsg) { + tscDebug(" new thread Sched call tscCreateStream begin..."); + tscCreateStream(pMsg->ahandle, NULL, 0); + tscDebug(" new thread Sched call tscCreateStream end."); + return ; +} + +// parsesql async response return and change run thread +void tsParseSqlRet(void* param, TAOS_RES* res, int code) { + SSchedMsg schedMsg = { 0 }; + schedMsg.fp = tscCreateStreamThread; + schedMsg.ahandle = param; + schedMsg.thandle = res; + schedMsg.msg = NULL; + taosScheduleTask(tscQhandle, &schedMsg); +} + TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sqlstr, void (*fp)(void *param, TAOS_RES *, TAOS_ROW row), int64_t stime, void *param, void (*callback)(void *)) { STscObj *pObj = (STscObj *)taos; @@ -664,15 +685,17 @@ TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sqlstr, void (*fp)(void *p tscDebugL("%p SQL: %s", pSql, pSql->sqlstr); tsem_init(&pSql->rspSem, 0, 0); - pSql->fp = tscCreateStream; - pSql->fetchFp = tscCreateStream; + pSql->fp = tsParseSqlRet; + pSql->fetchFp = tsParseSqlRet; registerSqlObj(pSql); int32_t code = tsParseSql(pSql, true); if (code == TSDB_CODE_SUCCESS) { tscCreateStream(pStream, pSql, code); - } else if (code != TSDB_CODE_TSC_ACTION_IN_PROGRESS) { + } else if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) { + tscDebug(" cq parseSql IN Process pass. 
"); + } else { tscError("0x%"PRIx64" open stream failed, sql:%s, code:%s", pSql->self, sqlstr, tstrerror(code)); taosReleaseRef(tscObjRef, pSql->self); free(pStream); From 0c4075e09fa0c2e40efae77b9ded0a314ad657e2 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Wed, 26 May 2021 21:02:14 +0800 Subject: [PATCH 12/82] [TD-4533]: taosdemo resub if resubAfterConsume != -1 (#6243) Co-authored-by: Shuduo Sang --- src/kit/taosdemo/taosdemo.c | 72 +++++++++++++++++++++++++------------ 1 file changed, 50 insertions(+), 22 deletions(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 54dd68de44..29a1f7f1f9 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -364,7 +364,7 @@ typedef struct SDbs_S { typedef struct SpecifiedQueryInfo_S { uint64_t queryInterval; // 0: unlimit > 0 loop/s uint32_t concurrent; - uint64_t sqlCount; + int sqlCount; uint32_t asyncMode; // 0: sync, 1: async uint64_t subscribeInterval; // ms uint64_t queryTimes; @@ -373,6 +373,7 @@ typedef struct SpecifiedQueryInfo_S { char sql[MAX_QUERY_SQL_COUNT][MAX_QUERY_SQL_LENGTH+1]; char result[MAX_QUERY_SQL_COUNT][MAX_FILE_NAME_LEN+1]; int resubAfterConsume[MAX_QUERY_SQL_COUNT]; + int endAfterConsume[MAX_QUERY_SQL_COUNT]; TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT]; char topic[MAX_QUERY_SQL_COUNT][32]; int consumed[MAX_QUERY_SQL_COUNT]; @@ -391,10 +392,11 @@ typedef struct SuperQueryInfo_S { uint64_t queryTimes; int64_t childTblCount; char childTblPrefix[MAX_TB_NAME_SIZE]; - uint64_t sqlCount; + int sqlCount; char sql[MAX_QUERY_SQL_COUNT][MAX_QUERY_SQL_LENGTH+1]; char result[MAX_QUERY_SQL_COUNT][MAX_FILE_NAME_LEN+1]; int resubAfterConsume; + int endAfterConsume; TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT]; char* childTblName; @@ -1717,7 +1719,7 @@ static void printfQueryMeta() { if ((SUBSCRIBE_TEST == g_args.test_mode) || (QUERY_TEST == g_args.test_mode)) { printf("specified table query info: \n"); - printf("sqlCount: \033[33m%"PRIu64"\033[0m\n", + printf("sqlCount: \033[33m%d\033[0m\n", g_queryInfo.specifiedQueryInfo.sqlCount); if (g_queryInfo.specifiedQueryInfo.sqlCount > 0) { printf("specified tbl query times:\n"); @@ -1737,15 +1739,15 @@ static void printfQueryMeta() { printf("keepProgress: \033[33m%d\033[0m\n", g_queryInfo.specifiedQueryInfo.subscribeKeepProgress); - for (uint64_t i = 0; i < g_queryInfo.specifiedQueryInfo.sqlCount; i++) { - printf(" sql[%"PRIu64"]: \033[33m%s\033[0m\n", + for (int i = 0; i < g_queryInfo.specifiedQueryInfo.sqlCount; i++) { + printf(" sql[%d]: \033[33m%s\033[0m\n", i, g_queryInfo.specifiedQueryInfo.sql[i]); } printf("\n"); } printf("super table query info:\n"); - printf("sqlCount: \033[33m%"PRIu64"\033[0m\n", + printf("sqlCount: \033[33m%d\033[0m\n", g_queryInfo.superQueryInfo.sqlCount); if (g_queryInfo.superQueryInfo.sqlCount > 0) { @@ -4197,7 +4199,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { if (concurrent && concurrent->type == cJSON_Number) { if (concurrent->valueint <= 0) { errorPrint( - "%s() LN%d, query sqlCount %"PRIu64" or concurrent %d is not correct.\n", + "%s() LN%d, query sqlCount %d or concurrent %d is not correct.\n", __func__, __LINE__, g_queryInfo.specifiedQueryInfo.sqlCount, g_queryInfo.specifiedQueryInfo.concurrent); @@ -4296,6 +4298,17 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { tstrncpy(g_queryInfo.specifiedQueryInfo.sql[j], sqlStr->valuestring, MAX_QUERY_SQL_LENGTH); + cJSON* endAfterConsume = + cJSON_GetObjectItem(specifiedQuery, "endAfterConsume"); + if (endAfterConsume + && endAfterConsume->type == cJSON_Number) { + 
g_queryInfo.specifiedQueryInfo.endAfterConsume[j] + = endAfterConsume->valueint; + } else if (!endAfterConsume) { + // default value is -1, which mean infinite loop + g_queryInfo.specifiedQueryInfo.endAfterConsume[j] = -1; + } + cJSON* resubAfterConsume = cJSON_GetObjectItem(specifiedQuery, "resubAfterConsume"); if (resubAfterConsume @@ -4303,9 +4316,8 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { g_queryInfo.specifiedQueryInfo.resubAfterConsume[j] = resubAfterConsume->valueint; } else if (!resubAfterConsume) { - //printf("failed to read json, subscribe interval no found\n"); - //goto PARSE_OVER; - g_queryInfo.specifiedQueryInfo.resubAfterConsume[j] = 1; + // default value is -1, which mean do not resub + g_queryInfo.specifiedQueryInfo.resubAfterConsume[j] = -1; } cJSON *result = cJSON_GetObjectItem(sql, "result"); @@ -4449,16 +4461,26 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { g_queryInfo.superQueryInfo.subscribeKeepProgress = 0; } + cJSON* superEndAfterConsume = + cJSON_GetObjectItem(superQuery, "endAfterConsume"); + if (superEndAfterConsume + && superEndAfterConsume->type == cJSON_Number) { + g_queryInfo.superQueryInfo.endAfterConsume = + superEndAfterConsume->valueint; + } else if (!superEndAfterConsume) { + // default value is -1, which mean do not resub + g_queryInfo.superQueryInfo.endAfterConsume = -1; + } + cJSON* superResubAfterConsume = - cJSON_GetObjectItem(superQuery, "resubAfterConsume"); + cJSON_GetObjectItem(superQuery, "endAfterConsume"); if (superResubAfterConsume && superResubAfterConsume->type == cJSON_Number) { - g_queryInfo.superQueryInfo.resubAfterConsume = + g_queryInfo.superQueryInfo.endAfterConsume = superResubAfterConsume->valueint; } else if (!superResubAfterConsume) { - //printf("failed to read json, subscribe interval no found\n"); - ////goto PARSE_OVER; - g_queryInfo.superQueryInfo.resubAfterConsume = 1; + // default value is -1, which mean do not resub + g_queryInfo.superQueryInfo.endAfterConsume = -1; } // supert table sqls @@ -6679,7 +6701,10 @@ static void *superSubscribe(void *sarg) { uint64_t st = 0, et = 0; - while(1) { + while ((g_queryInfo.superQueryInfo.endAfterConsume == -1) + || (g_queryInfo.superQueryInfo.endAfterConsume < + consumed[pThreadInfo->end_table_to - pThreadInfo->start_table_from])) { + for (uint64_t i = pThreadInfo->start_table_from; i <= pThreadInfo->end_table_to; i++) { tsubSeq = i - pThreadInfo->start_table_from; @@ -6708,7 +6733,7 @@ static void *superSubscribe(void *sarg) { } consumed[tsubSeq] ++; - if ((g_queryInfo.superQueryInfo.subscribeKeepProgress) + if ((g_queryInfo.superQueryInfo.resubAfterConsume != -1) && (consumed[tsubSeq] >= g_queryInfo.superQueryInfo.resubAfterConsume)) { printf("keepProgress:%d, resub super table query: %"PRIu64"\n", @@ -6790,7 +6815,10 @@ static void *specifiedSubscribe(void *sarg) { // start loop to consume result g_queryInfo.specifiedQueryInfo.consumed[pThreadInfo->threadID] = 0; - while(1) { + while((g_queryInfo.specifiedQueryInfo.endAfterConsume[pThreadInfo->querySeq] == -1) + || (g_queryInfo.specifiedQueryInfo.consumed[pThreadInfo->threadID] < + g_queryInfo.specifiedQueryInfo.endAfterConsume[pThreadInfo->querySeq])) { + if (ASYNC_MODE == g_queryInfo.specifiedQueryInfo.asyncMode) { continue; } @@ -6806,7 +6834,7 @@ static void *specifiedSubscribe(void *sarg) { } g_queryInfo.specifiedQueryInfo.consumed[pThreadInfo->threadID] ++; - if ((g_queryInfo.specifiedQueryInfo.subscribeKeepProgress) + if ((g_queryInfo.specifiedQueryInfo.resubAfterConsume[pThreadInfo->querySeq] != -1) && 
(g_queryInfo.specifiedQueryInfo.consumed[pThreadInfo->threadID] >= g_queryInfo.specifiedQueryInfo.resubAfterConsume[pThreadInfo->querySeq])) { printf("keepProgress:%d, resub specified query: %"PRIu64"\n", @@ -6873,12 +6901,12 @@ static int subscribeTestProcess() { //==== create threads for query for specified table if (g_queryInfo.specifiedQueryInfo.sqlCount <= 0) { - debugPrint("%s() LN%d, sepcified query sqlCount %"PRIu64".\n", + debugPrint("%s() LN%d, sepcified query sqlCount %d.\n", __func__, __LINE__, g_queryInfo.specifiedQueryInfo.sqlCount); } else { if (g_queryInfo.specifiedQueryInfo.concurrent <= 0) { - errorPrint("%s() LN%d, sepcified query sqlCount %"PRIu64".\n", + errorPrint("%s() LN%d, sepcified query sqlCount %d.\n", __func__, __LINE__, g_queryInfo.specifiedQueryInfo.sqlCount); exit(-1); @@ -6911,7 +6939,7 @@ static int subscribeTestProcess() { //==== create threads for super table query if (g_queryInfo.superQueryInfo.sqlCount <= 0) { - debugPrint("%s() LN%d, super table query sqlCount %"PRIu64".\n", + debugPrint("%s() LN%d, super table query sqlCount %d.\n", __func__, __LINE__, g_queryInfo.superQueryInfo.sqlCount); } else { From fba02adc526241ccad861186160acc80f47ab44a Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Thu, 27 May 2021 01:29:01 +0800 Subject: [PATCH 13/82] [TD-4355] --- src/common/src/tglobal.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/common/src/tglobal.c b/src/common/src/tglobal.c index c3c159ee45..b0f83dc707 100644 --- a/src/common/src/tglobal.c +++ b/src/common/src/tglobal.c @@ -46,7 +46,7 @@ char tsEmail[TSDB_FQDN_LEN] = {0}; int32_t tsDnodeId = 0; // common -int32_t tsRpcTimer = 1000; +int32_t tsRpcTimer = 300; int32_t tsRpcMaxTime = 600; // seconds; int32_t tsMaxShellConns = 50000; int32_t tsMaxConnections = 5000; From 44b5fc4732f97867c910fae80bcc2616c49a6e73 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 27 May 2021 11:18:04 +0800 Subject: [PATCH 14/82] [td-4372]: fix the taosd server crash caused by dropping a child table, of which the indexed tag value is null. --- src/tsdb/src/tsdbMeta.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/tsdb/src/tsdbMeta.c b/src/tsdb/src/tsdbMeta.c index 3e6263b9d3..b074b04522 100644 --- a/src/tsdb/src/tsdbMeta.c +++ b/src/tsdb/src/tsdbMeta.c @@ -928,6 +928,11 @@ static int tsdbRemoveTableFromIndex(STsdbMeta *pMeta, STable *pTable) { STColumn *pCol = schemaColAt(pSchema, DEFAULT_TAG_INDEX_COLUMN); char * key = tdGetKVRowValOfCol(pTable->tagVal, pCol->colId); + if (key == NULL) { + // treat the column as NULL if we cannot find it + key = getNullValue(pCol->type); + } + SArray *res = tSkipListGet(pSTable->pIndex, key); size_t size = taosArrayGetSize(res); From ab6e36e950c00c18158d38404c8f3a75ce9114e4 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 27 May 2021 11:22:48 +0800 Subject: [PATCH 15/82] [td-4372] refactor. 
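The removal path now obtains the tag index key through the shared getTagIndexKey() helper instead of re-deriving it from the tag schema inline; the NULL-tag fallback added by the previous commit is assumed to be handled inside that helper. Sketch of the resulting lookup (names as in tsdbMeta.c):

    char   *key = getTagIndexKey(pTable);
    SArray *res = tSkipListGet(pSTable->pIndex, key);

This keeps the index insert and remove paths keyed the same way for a child table whose indexed tag value is NULL.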
--- src/tsdb/src/tsdbMeta.c | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/src/tsdb/src/tsdbMeta.c b/src/tsdb/src/tsdbMeta.c index b074b04522..0a4ea5e153 100644 --- a/src/tsdb/src/tsdbMeta.c +++ b/src/tsdb/src/tsdbMeta.c @@ -924,15 +924,7 @@ static int tsdbRemoveTableFromIndex(STsdbMeta *pMeta, STable *pTable) { STable *pSTable = pTable->pSuper; ASSERT(pSTable != NULL); - STSchema *pSchema = tsdbGetTableTagSchema(pTable); - STColumn *pCol = schemaColAt(pSchema, DEFAULT_TAG_INDEX_COLUMN); - - char * key = tdGetKVRowValOfCol(pTable->tagVal, pCol->colId); - if (key == NULL) { - // treat the column as NULL if we cannot find it - key = getNullValue(pCol->type); - } - + char* key = getTagIndexKey(pTable); SArray *res = tSkipListGet(pSTable->pIndex, key); size_t size = taosArrayGetSize(res); From c40fc27343c5461b2c8de44d1bcf15b422341bba Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Thu, 27 May 2021 14:46:15 +0800 Subject: [PATCH 16/82] [TD-3279]: memory link while perform delete action --- src/connector/go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/connector/go b/src/connector/go index 8ce6d86558..7a26c432f8 160000 --- a/src/connector/go +++ b/src/connector/go @@ -1 +1 @@ -Subproject commit 8ce6d86558afc8c0b50c10f990fd2b4270cf06fc +Subproject commit 7a26c432f8b4203e42344ff3290b9b9b01b983d5 From 9b385052e4455329343d5358ed231e98e4b0c111 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Thu, 27 May 2021 14:46:24 +0800 Subject: [PATCH 17/82] [TD-3279]: memory link while perform delete action --- src/inc/taoserror.h | 2 +- src/mnode/src/mnodeSdb.c | 19 +++++++++---------- src/util/src/terror.c | 1 + 3 files changed, 11 insertions(+), 11 deletions(-) diff --git a/src/inc/taoserror.h b/src/inc/taoserror.h index b3e5b59627..bfeb53513b 100644 --- a/src/inc/taoserror.h +++ b/src/inc/taoserror.h @@ -215,11 +215,11 @@ int32_t* taosGetErrno(); #define TSDB_CODE_VND_IS_FLOWCTRL TAOS_DEF_ERROR_CODE(0, 0x050C) //"Database memory is full for waiting commit") #define TSDB_CODE_VND_IS_DROPPING TAOS_DEF_ERROR_CODE(0, 0x050D) //"Database is dropping") #define TSDB_CODE_VND_IS_BALANCING TAOS_DEF_ERROR_CODE(0, 0x050E) //"Database is balancing") +#define TSDB_CODE_VND_IS_CLOSING TAOS_DEF_ERROR_CODE(0, 0x0510) //"Database is closing") #define TSDB_CODE_VND_NOT_SYNCED TAOS_DEF_ERROR_CODE(0, 0x0511) //"Database suspended") #define TSDB_CODE_VND_NO_WRITE_AUTH TAOS_DEF_ERROR_CODE(0, 0x0512) //"Database write operation denied") #define TSDB_CODE_VND_IS_SYNCING TAOS_DEF_ERROR_CODE(0, 0x0513) //"Database is syncing") #define TSDB_CODE_VND_INVALID_TSDB_STATE TAOS_DEF_ERROR_CODE(0, 0x0514) //"Invalid tsdb state") -#define TSDB_CODE_VND_IS_CLOSING TAOS_DEF_ERROR_CODE(0, 0x0515) //"Database is closing") // tsdb #define TSDB_CODE_TDB_INVALID_TABLE_ID TAOS_DEF_ERROR_CODE(0, 0x0600) //"Invalid table ID") diff --git a/src/mnode/src/mnodeSdb.c b/src/mnode/src/mnodeSdb.c index cc088e3409..a65e29f1ee 100644 --- a/src/mnode/src/mnodeSdb.c +++ b/src/mnode/src/mnodeSdb.c @@ -649,8 +649,6 @@ static int32_t sdbProcessWrite(void *wparam, void *hparam, int32_t qtype, void * dnodeReportStep("mnode-sdb", stepDesc, 0); } - if (qtype == TAOS_QTYPE_QUERY) return sdbPerformDeleteAction(pHead, pTable); - pthread_mutex_lock(&tsSdbMgmt.mutex); if (pHead->version == 0) { @@ -712,13 +710,11 @@ static int32_t sdbProcessWrite(void *wparam, void *hparam, int32_t qtype, void * if (action == SDB_ACTION_INSERT) { return sdbPerformInsertAction(pHead, pTable); } else if (action == SDB_ACTION_DELETE) 
{ - //if (qtype == TAOS_QTYPE_FWD) { - // Drop database/stable may take a long time and cause a timeout, so we confirm first then reput it into queue - // sdbWriteFwdToQueue(1, hparam, TAOS_QTYPE_QUERY, unused); - // return TSDB_CODE_SUCCESS; - //} else { - return sdbPerformDeleteAction(pHead, pTable); - //} + if (qtype == TAOS_QTYPE_FWD) { + // Drop database/stable may take a long time and cause a timeout, so we confirm first + syncConfirmForward(tsSdbMgmt.sync, pHead->version, TSDB_CODE_SUCCESS, false); + } + return sdbPerformDeleteAction(pHead, pTable); } else if (action == SDB_ACTION_UPDATE) { return sdbPerformUpdateAction(pHead, pTable); } else { @@ -1125,7 +1121,10 @@ static void *sdbWorkerFp(void *pWorker) { sdbConfirmForward(1, pRow, pRow->code); } else { if (qtype == TAOS_QTYPE_FWD) { - syncConfirmForward(tsSdbMgmt.sync, pRow->pHead.version, pRow->code, false); + int32_t action = pRow->pHead.msgType % 10; + if (action != SDB_ACTION_DELETE) { + syncConfirmForward(tsSdbMgmt.sync, pRow->pHead.version, pRow->code, false); + } } sdbFreeFromQueue(pRow); } diff --git a/src/util/src/terror.c b/src/util/src/terror.c index fc24b28dc3..9594022d3a 100644 --- a/src/util/src/terror.c +++ b/src/util/src/terror.c @@ -227,6 +227,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_VND_IS_FULL, "Database memory is fu TAOS_DEFINE_ERROR(TSDB_CODE_VND_IS_FLOWCTRL, "Database memory is full for waiting commit") TAOS_DEFINE_ERROR(TSDB_CODE_VND_IS_DROPPING, "Database is dropping") TAOS_DEFINE_ERROR(TSDB_CODE_VND_IS_BALANCING, "Database is balancing") +TAOS_DEFINE_ERROR(TSDB_CODE_VND_IS_CLOSING, "Database is closing") TAOS_DEFINE_ERROR(TSDB_CODE_VND_NOT_SYNCED, "Database suspended") TAOS_DEFINE_ERROR(TSDB_CODE_VND_NO_WRITE_AUTH, "Database write operation denied") TAOS_DEFINE_ERROR(TSDB_CODE_VND_IS_SYNCING, "Database is syncing") From 34d6383a3c2f51102bed107cfd07cfcfa6f43e3c Mon Sep 17 00:00:00 2001 From: Ping Xiao Date: Thu, 27 May 2021 15:58:59 +0800 Subject: [PATCH 18/82] [TD-4122]: add test case --- tests/pytest/fulltest.sh | 2 +- tests/pytest/table/tablename-boundary.py | 62 ++++++++++++++++++++++-- 2 files changed, 58 insertions(+), 6 deletions(-) diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh index d8e2a31e70..f120bba536 100755 --- a/tests/pytest/fulltest.sh +++ b/tests/pytest/fulltest.sh @@ -31,7 +31,7 @@ python3 ./test.py -f table/column_name.py python3 ./test.py -f table/column_num.py python3 ./test.py -f table/db_table.py python3 ./test.py -f table/create_sensitive.py -#python3 ./test.py -f table/tablename-boundary.py +python3 ./test.py -f table/tablename-boundary.py python3 ./test.py -f table/max_table_length.py python3 ./test.py -f table/alter_column.py python3 ./test.py -f table/boundary.py diff --git a/tests/pytest/table/tablename-boundary.py b/tests/pytest/table/tablename-boundary.py index 0755e75355..8766a9d4a9 100644 --- a/tests/pytest/table/tablename-boundary.py +++ b/tests/pytest/table/tablename-boundary.py @@ -14,6 +14,13 @@ class TDTestCase: tdLog.debug("start to execute %s" % __file__) tdSql.init(conn.cursor(), logSql) + self.ts = 1622100000000 + + def get_random_string(self, length): + letters = string.ascii_lowercase + result_str = ''.join(random.choice(letters) for i in range(length)) + return result_str + def run(self): tdSql.prepare() @@ -24,19 +31,64 @@ class TDTestCase: shell=True)) - 1 tdLog.info("table name max length is %d" % tableNameMaxLen) chars = string.ascii_uppercase + string.ascii_lowercase - tb_name = ''.join(random.choices(chars, k=tableNameMaxLen)) + tb_name = 
''.join(random.choices(chars, k=tableNameMaxLen + 1)) tdLog.info('tb_name length %d' % len(tb_name)) tdLog.info('create table %s (ts timestamp, value int)' % tb_name) - tdSql.error( - 'create table %s (ts timestamp, speed binary(4089))' % - tb_name) + tdSql.error('create table %s (ts timestamp, speed binary(4089))' % tb_name) - tb_name = ''.join(random.choices(chars, k=191)) + tb_name = ''.join(random.choices(chars, k=tableNameMaxLen)) tdLog.info('tb_name length %d' % len(tb_name)) tdLog.info('create table %s (ts timestamp, value int)' % tb_name) tdSql.execute( 'create table %s (ts timestamp, speed binary(4089))' % tb_name) + + db_name = self.get_random_string(33) + tdSql.error("create database %s" % db_name) + + db_name = self.get_random_string(32) + tdSql.execute("create database %s" % db_name) + tdSql.execute("use %s" % db_name) + + tb_name = self.get_random_string(193) + tdSql.error("create table %s(ts timestamp, val int)" % tb_name) + + tb_name = self.get_random_string(192) + tdSql.execute("create table %s.%s(ts timestamp, val int)" % (db_name, tb_name)) + tdSql.query("show %s.tables" % db_name) + tdSql.checkRows(1) + tdSql.checkData(0, 0, tb_name) + + tdSql.execute("insert into %s.%s values(now, 1)" % (db_name, tb_name)) + tdSql.query("select * from %s.%s" %(db_name, tb_name)) + tdSql.checkRows(1) + + db_name = self.get_random_string(32) + tdSql.execute("create database %s update 1" % db_name) + + stb_name = self.get_random_string(192) + tdSql.execute("create table %s.%s(ts timestamp, val int) tags(id int)" % (db_name, stb_name)) + tb_name1 = self.get_random_string(192) + tdSql.execute("insert into %s.%s using %s.%s tags(1) values(%d, 1)(%d, 2)(%d, 3)" % (db_name, tb_name1, db_name, stb_name, self.ts, self.ts + 1, self.ts + 2)) + tb_name2 = self.get_random_string(192) + tdSql.execute("insert into %s.%s using %s.%s tags(2) values(%d, 1)(%d, 2)(%d, 3)" % (db_name, tb_name2, db_name, stb_name, self.ts, self.ts + 1, self.ts + 2)) + + tdSql.query("show %s.tables" % db_name) + tdSql.checkRows(2) + tdSql.checkData(0, 0, tb_name1) + tdSql.checkData(1, 0, tb_name2) + + tdSql.query("select * from %s.%s" % (db_name, stb_name)) + tdSql.checkRows(6) + + tdSql.execute("insert into %s.%s using %s.%s tags(1) values(%d, null)" % (db_name, tb_name1, db_name, stb_name, self.ts)) + + tdSql.query("select * from %s.%s" % (db_name, stb_name)) + tdSql.checkRows(6) + + + + def stop(self): tdSql.close() From 998680015d74f41377bc767c4343bc9565a357e3 Mon Sep 17 00:00:00 2001 From: Huo Linhe Date: Thu, 27 May 2021 17:44:57 +0800 Subject: [PATCH 19/82] [TD-4370]: squashed commit of python connector changes in develop (#6246) commit 0c81e3c0ba89c86f6d17d51d5056146e177cace5 Author: Huo Linhe Date: Fri May 14 10:27:13 2021 +0800 [TD-4160]: remove python connector soft links, fix tests and documents commit f439fce413423e7d7c97eef180c93d63dedab32e Author: Huo Linhe Date: Wed May 12 18:50:53 2021 +0800 [TD-182]: bump python connector version to v2.0.10 (#6091) Also fix url error in setup.py commit 4ae55d494a262e482f6b34e8bf193cc6af62a886 Author: Huo Linhe Date: Wed May 12 09:49:26 2021 +0800 [TD-4370]: merge python connector changes to master Inludes: [TD-182]: use single repo for python connector (#6036) * [TD-182]: use single repo for python connector Remove code for each platform and build up one single python code base for windows/osx/linux platforms and python2/python3 runtime. 
* [TD-182]: remove redundant code in python connector * [TD-4149] : fix python connection config error --- Jenkinsfile | 2 +- documentation20/cn/08.connector/docs.md | 17 +- packaging/tools/makeclient_power.sh | 15 +- packaging/tools/makepkg_power.sh | 15 +- src/connector/python/.gitignore | 154 +++++ .../python/{linux/python2 => }/LICENSE | 0 src/connector/python/README.md | 17 + src/connector/python/examples/demo.py | 12 + src/connector/python/linux/python2/README.md | 1 - src/connector/python/linux/python2/setup.py | 20 - .../python/linux/python2/taos/cinterface.py | 648 ------------------ .../python/linux/python2/taos/cursor.py | 278 -------- src/connector/python/linux/python3/LICENSE | 12 - src/connector/python/linux/python3/README.md | 1 - src/connector/python/linux/python3/setup.py | 20 - .../python/linux/python3/taos/__init__.py | 25 - .../python/linux/python3/taos/connection.py | 95 --- .../python/linux/python3/taos/constants.py | 42 -- .../python/linux/python3/taos/dbapi.py | 44 -- .../python/linux/python3/taos/error.py | 66 -- .../python/linux/python3/taos/subscription.py | 57 -- src/connector/python/osx/python3/LICENSE | 12 - src/connector/python/osx/python3/README.md | 1 - src/connector/python/osx/python3/setup.py | 20 - .../python/osx/python3/taos/__init__.py | 24 - .../python/osx/python3/taos/cinterface.py | 648 ------------------ .../python/osx/python3/taos/connection.py | 95 --- .../python/osx/python3/taos/constants.py | 42 -- .../python/osx/python3/taos/cursor.py | 280 -------- .../python/osx/python3/taos/dbapi.py | 44 -- .../python/osx/python3/taos/error.py | 66 -- .../python/osx/python3/taos/subscription.py | 57 -- src/connector/python/setup.py | 34 + .../{linux/python2 => }/taos/__init__.py | 0 .../{linux/python3 => }/taos/cinterface.py | 253 +++---- .../{linux/python2 => }/taos/connection.py | 0 .../{linux/python2 => }/taos/constants.py | 0 .../python/{linux/python3 => }/taos/cursor.py | 6 + .../python/{linux/python2 => }/taos/dbapi.py | 0 .../python/{linux/python2 => }/taos/error.py | 0 .../{linux/python2 => }/taos/subscription.py | 0 src/connector/python/windows/python2/LICENSE | 12 - .../python/windows/python2/README.md | 1 - src/connector/python/windows/python2/setup.py | 20 - .../python/windows/python2/taos/__init__.py | 24 - .../python/windows/python2/taos/cinterface.py | 648 ------------------ .../python/windows/python2/taos/connection.py | 96 --- .../python/windows/python2/taos/constants.py | 42 -- .../python/windows/python2/taos/cursor.py | 220 ------ .../python/windows/python2/taos/dbapi.py | 44 -- .../python/windows/python2/taos/error.py | 66 -- .../windows/python2/taos/subscription.py | 57 -- src/connector/python/windows/python3/LICENSE | 12 - .../python/windows/python3/README.md | 1 - src/connector/python/windows/python3/setup.py | 20 - .../python/windows/python3/taos/__init__.py | 24 - .../python/windows/python3/taos/cinterface.py | 648 ------------------ .../python/windows/python3/taos/connection.py | 96 --- .../python/windows/python3/taos/constants.py | 42 -- .../python/windows/python3/taos/cursor.py | 220 ------ .../python/windows/python3/taos/dbapi.py | 44 -- .../python/windows/python3/taos/error.py | 66 -- .../windows/python3/taos/subscription.py | 57 -- ...o-Run-Test-And-How-To-Add-New-Test-Case.md | 4 +- tests/Jenkinsfile | 2 +- tests/pytest/concurrent_inquiry.sh | 2 +- tests/pytest/crash_gen.sh | 2 +- tests/pytest/hivemq-extension-test.py | 2 +- tests/pytest/perf_gen.sh | 2 +- tests/pytest/simpletest_no_sudo.sh | 2 +- tests/pytest/test.py | 2 +- 
tests/pytest/test.sh | 2 +- tests/pytest/testCompress.py | 2 +- tests/pytest/testMinTablesPerVnode.py | 2 +- tests/pytest/testNoCompress.py | 2 +- 75 files changed, 341 insertions(+), 5268 deletions(-) create mode 100644 src/connector/python/.gitignore rename src/connector/python/{linux/python2 => }/LICENSE (100%) create mode 100644 src/connector/python/README.md create mode 100644 src/connector/python/examples/demo.py delete mode 100644 src/connector/python/linux/python2/README.md delete mode 100644 src/connector/python/linux/python2/setup.py delete mode 100644 src/connector/python/linux/python2/taos/cinterface.py delete mode 100644 src/connector/python/linux/python2/taos/cursor.py delete mode 100644 src/connector/python/linux/python3/LICENSE delete mode 100644 src/connector/python/linux/python3/README.md delete mode 100644 src/connector/python/linux/python3/setup.py delete mode 100644 src/connector/python/linux/python3/taos/__init__.py delete mode 100644 src/connector/python/linux/python3/taos/connection.py delete mode 100644 src/connector/python/linux/python3/taos/constants.py delete mode 100644 src/connector/python/linux/python3/taos/dbapi.py delete mode 100644 src/connector/python/linux/python3/taos/error.py delete mode 100644 src/connector/python/linux/python3/taos/subscription.py delete mode 100644 src/connector/python/osx/python3/LICENSE delete mode 100644 src/connector/python/osx/python3/README.md delete mode 100644 src/connector/python/osx/python3/setup.py delete mode 100644 src/connector/python/osx/python3/taos/__init__.py delete mode 100644 src/connector/python/osx/python3/taos/cinterface.py delete mode 100644 src/connector/python/osx/python3/taos/connection.py delete mode 100644 src/connector/python/osx/python3/taos/constants.py delete mode 100644 src/connector/python/osx/python3/taos/cursor.py delete mode 100644 src/connector/python/osx/python3/taos/dbapi.py delete mode 100644 src/connector/python/osx/python3/taos/error.py delete mode 100644 src/connector/python/osx/python3/taos/subscription.py create mode 100644 src/connector/python/setup.py rename src/connector/python/{linux/python2 => }/taos/__init__.py (100%) rename src/connector/python/{linux/python3 => }/taos/cinterface.py (70%) rename src/connector/python/{linux/python2 => }/taos/connection.py (100%) rename src/connector/python/{linux/python2 => }/taos/constants.py (100%) rename src/connector/python/{linux/python3 => }/taos/cursor.py (98%) rename src/connector/python/{linux/python2 => }/taos/dbapi.py (100%) rename src/connector/python/{linux/python2 => }/taos/error.py (100%) rename src/connector/python/{linux/python2 => }/taos/subscription.py (100%) delete mode 100644 src/connector/python/windows/python2/LICENSE delete mode 100644 src/connector/python/windows/python2/README.md delete mode 100644 src/connector/python/windows/python2/setup.py delete mode 100644 src/connector/python/windows/python2/taos/__init__.py delete mode 100644 src/connector/python/windows/python2/taos/cinterface.py delete mode 100644 src/connector/python/windows/python2/taos/connection.py delete mode 100644 src/connector/python/windows/python2/taos/constants.py delete mode 100644 src/connector/python/windows/python2/taos/cursor.py delete mode 100644 src/connector/python/windows/python2/taos/dbapi.py delete mode 100644 src/connector/python/windows/python2/taos/error.py delete mode 100644 src/connector/python/windows/python2/taos/subscription.py delete mode 100644 src/connector/python/windows/python3/LICENSE delete mode 100644 
src/connector/python/windows/python3/README.md delete mode 100644 src/connector/python/windows/python3/setup.py delete mode 100644 src/connector/python/windows/python3/taos/__init__.py delete mode 100644 src/connector/python/windows/python3/taos/cinterface.py delete mode 100644 src/connector/python/windows/python3/taos/connection.py delete mode 100644 src/connector/python/windows/python3/taos/constants.py delete mode 100644 src/connector/python/windows/python3/taos/cursor.py delete mode 100644 src/connector/python/windows/python3/taos/dbapi.py delete mode 100644 src/connector/python/windows/python3/taos/error.py delete mode 100644 src/connector/python/windows/python3/taos/subscription.py diff --git a/Jenkinsfile b/Jenkinsfile index 33ce784bce..b48dca0241 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -94,7 +94,7 @@ def pre_test(){ make > /dev/null make install > /dev/null cd ${WKC}/tests - pip3 install ${WKC}/src/connector/python/linux/python3/ + pip3 install ${WKC}/src/connector/python ''' return 1 } diff --git a/documentation20/cn/08.connector/docs.md b/documentation20/cn/08.connector/docs.md index 6811315e7d..5a6c26587a 100644 --- a/documentation20/cn/08.connector/docs.md +++ b/documentation20/cn/08.connector/docs.md @@ -399,27 +399,22 @@ Python连接器的使用参见[视频教程](https://www.taosdata.com/blog/2020/ #### Linux -用户可以在源代码的src/connector/python(或者tar.gz的/connector/python)文件夹下找到python2和python3的connector安装包。用户可以通过pip命令安装: +用户可以在源代码的src/connector/python(或者tar.gz的/connector/python)文件夹下找到connector安装包。用户可以通过pip命令安装: -​ `pip install src/connector/python/linux/python2/` +​ `pip install src/connector/python/` 或 -​ `pip3 install src/connector/python/linux/python3/` +​ `pip3 install src/connector/python/` #### Windows 在已安装Windows TDengine 客户端的情况下, 将文件"C:\TDengine\driver\taos.dll" 拷贝到 "C:\windows\system32" 目录下, 然后进入Windwos cmd 命令行界面 ```cmd -cd C:\TDengine\connector\python\windows -python -m pip install python2\ -``` -或 -```cmd -cd C:\TDengine\connector\python\windows -python -m pip install python3\ +cd C:\TDengine\connector\python +python -m pip install . 
``` -* 如果机器上没有pip命令,用户可将src/connector/python/python3或src/connector/python/python2下的taos文件夹拷贝到应用程序的目录使用。 +* 如果机器上没有pip命令,用户可将src/connector/python下的taos文件夹拷贝到应用程序的目录使用。 对于windows 客户端,安装TDengine windows 客户端后,将C:\TDengine\driver\taos.dll拷贝到C:\windows\system32目录下即可。 ### 使用 diff --git a/packaging/tools/makeclient_power.sh b/packaging/tools/makeclient_power.sh index b5649e019e..8241319e4f 100755 --- a/packaging/tools/makeclient_power.sh +++ b/packaging/tools/makeclient_power.sh @@ -156,20 +156,11 @@ if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then fi cp -r ${connector_dir}/python ${install_dir}/connector - sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/linux/python2/taos/cinterface.py - sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/linux/python3/taos/cinterface.py - sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/windows/python2/taos/cinterface.py - sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/windows/python3/taos/cinterface.py + sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/taos/cinterface.py - sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/linux/python2/taos/subscription.py - sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/linux/python3/taos/subscription.py - sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/windows/python2/taos/subscription.py - sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/windows/python3/taos/subscription.py + sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/taos/subscription.py - sed -i '/self._password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/linux/python2/taos/connection.py - sed -i '/self._password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/linux/python3/taos/connection.py - sed -i '/self._password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/windows/python2/taos/connection.py - sed -i '/self._password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/windows/python3/taos/connection.py + sed -i '/self._password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/taos/connection.py fi # Copy release note # cp ${script_dir}/release_note ${install_dir} diff --git a/packaging/tools/makepkg_power.sh b/packaging/tools/makepkg_power.sh index 1e43f775e2..633a135c14 100755 --- a/packaging/tools/makepkg_power.sh +++ b/packaging/tools/makepkg_power.sh @@ -179,20 +179,11 @@ if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then fi cp -r ${connector_dir}/python ${install_dir}/connector/ - sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/linux/python2/taos/cinterface.py - sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/linux/python3/taos/cinterface.py - sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/windows/python2/taos/cinterface.py - sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/windows/python3/taos/cinterface.py + sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/taos/cinterface.py - sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/linux/python2/taos/subscription.py - sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/linux/python3/taos/subscription.py - sed -i '/password/ {s/taosdata/powerdb/g}' 
${install_dir}/connector/python/windows/python2/taos/subscription.py - sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/windows/python3/taos/subscription.py + sed -i '/password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/taos/subscription.py - sed -i '/self._password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/linux/python2/taos/connection.py - sed -i '/self._password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/linux/python3/taos/connection.py - sed -i '/self._password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/windows/python2/taos/connection.py - sed -i '/self._password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/windows/python3/taos/connection.py + sed -i '/self._password/ {s/taosdata/powerdb/g}' ${install_dir}/connector/python/taos/connection.py fi # Copy release note # cp ${script_dir}/release_note ${install_dir} diff --git a/src/connector/python/.gitignore b/src/connector/python/.gitignore new file mode 100644 index 0000000000..228a0b4530 --- /dev/null +++ b/src/connector/python/.gitignore @@ -0,0 +1,154 @@ + +# Created by https://www.toptal.com/developers/gitignore/api/python +# Edit at https://www.toptal.com/developers/gitignore?templates=python + +### Python ### +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +parts/ +sdist/ +var/ +wheels/ +pip-wheel-metadata/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ +pytestdebug.log + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ +doc/_build/ + +# PyBuilder +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +.python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# poetry +#poetry.lock + +# PEP 582; used by e.g. 
github.com/David-OConnor/pyflow +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +# .env +.env/ +.venv/ +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ +pythonenv* + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# pytype static type analyzer +.pytype/ + +# operating system-related files +# file properties cache/storage on macOS +*.DS_Store +# thumbnail cache on Windows +Thumbs.db + +# profiling data +.prof + + +# End of https://www.toptal.com/developers/gitignore/api/python diff --git a/src/connector/python/linux/python2/LICENSE b/src/connector/python/LICENSE similarity index 100% rename from src/connector/python/linux/python2/LICENSE rename to src/connector/python/LICENSE diff --git a/src/connector/python/README.md b/src/connector/python/README.md new file mode 100644 index 0000000000..9151e9b8f0 --- /dev/null +++ b/src/connector/python/README.md @@ -0,0 +1,17 @@ +# TDengine Connector for Python + +[TDengine] connector for Python enables python programs to access TDengine, using an API which is compliant with the Python DB API 2.0 (PEP-249). It uses TDengine C client library for client server communications. + +## Install + +```sh +pip install git+https://github.com/taosdata/TDengine-connector-python +``` + +## Source Code + +[TDengine] connector for Python source code is hosted on [GitHub](https://github.com/taosdata/TDengine-connector-python). + +## License - AGPL + +Keep same with [TDengine](https://github.com/taosdata/TDengine). diff --git a/src/connector/python/examples/demo.py b/src/connector/python/examples/demo.py new file mode 100644 index 0000000000..6c7c03f3e2 --- /dev/null +++ b/src/connector/python/examples/demo.py @@ -0,0 +1,12 @@ +import taos + +conn = taos.connect(host='127.0.0.1', + user='root', + passworkd='taodata', + database='log') +cursor = conn.cursor() + +sql = "select * from log.log limit 10" +cursor.execute(sql) +for row in cursor: + print(row) diff --git a/src/connector/python/linux/python2/README.md b/src/connector/python/linux/python2/README.md deleted file mode 100644 index 70db6bba13..0000000000 --- a/src/connector/python/linux/python2/README.md +++ /dev/null @@ -1 +0,0 @@ -# TDengine python client interface \ No newline at end of file diff --git a/src/connector/python/linux/python2/setup.py b/src/connector/python/linux/python2/setup.py deleted file mode 100644 index 3f065e0348..0000000000 --- a/src/connector/python/linux/python2/setup.py +++ /dev/null @@ -1,20 +0,0 @@ -import setuptools - -with open("README.md", "r") as fh: - long_description = fh.read() - -setuptools.setup( - name="taos", - version="2.0.9", - author="Taosdata Inc.", - author_email="support@taosdata.com", - description="TDengine python client package", - long_description=long_description, - long_description_content_type="text/markdown", - url="https://github.com/pypa/sampleproject", - packages=setuptools.find_packages(), - classifiers=[ - "Programming Language :: Python :: 2", - "Operating System :: Linux", - ], -) diff --git a/src/connector/python/linux/python2/taos/cinterface.py b/src/connector/python/linux/python2/taos/cinterface.py deleted file mode 100644 index 3d0ecd2901..0000000000 --- a/src/connector/python/linux/python2/taos/cinterface.py +++ /dev/null @@ -1,648 +0,0 @@ -import ctypes -from .constants import FieldType -from .error import * -import 
math -import datetime - - -def _convert_millisecond_to_datetime(milli): - return datetime.datetime.fromtimestamp(milli / 1000.0) - - -def _convert_microsecond_to_datetime(micro): - return datetime.datetime.fromtimestamp(micro / 1000000.0) - - -def _crow_timestamp_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C bool row to python row - """ - _timestamp_converter = _convert_millisecond_to_datetime - if micro: - _timestamp_converter = _convert_microsecond_to_datetime - - if num_of_rows > 0: - return [ - None if ele == FieldType.C_BIGINT_NULL else _timestamp_converter(ele) for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_int64))[ - :abs(num_of_rows)]] - else: - return [ - None if ele == FieldType.C_BIGINT_NULL else _timestamp_converter(ele) for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_int64))[ - :abs(num_of_rows)]] - - -def _crow_bool_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C bool row to python row - """ - if num_of_rows > 0: - return [ - None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_byte))[ - :abs(num_of_rows)]] - else: - return [ - None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_bool))[ - :abs(num_of_rows)]] - - -def _crow_tinyint_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C tinyint row to python row - """ - if num_of_rows > 0: - return [None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)]] - else: - return [None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)]] - - -def _crow_tinyint_unsigned_to_python( - data, - num_of_rows, - nbytes=None, - micro=False): - """Function to convert C tinyint row to python row - """ - if num_of_rows > 0: - return [ - None if ele == FieldType.C_TINYINT_UNSIGNED_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_ubyte))[ - :abs(num_of_rows)]] - else: - return [ - None if ele == FieldType.C_TINYINT_UNSIGNED_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_ubyte))[ - :abs(num_of_rows)]] - - -def _crow_smallint_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C smallint row to python row - """ - if num_of_rows > 0: - return [ - None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_short))[ - :abs(num_of_rows)]] - else: - return [ - None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_short))[ - :abs(num_of_rows)]] - - -def _crow_smallint_unsigned_to_python( - data, num_of_rows, nbytes=None, micro=False): - """Function to convert C smallint row to python row - """ - if num_of_rows > 0: - return [ - None if ele == FieldType.C_SMALLINT_UNSIGNED_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_ushort))[ - :abs(num_of_rows)]] - else: - return [ - None if ele == FieldType.C_SMALLINT_UNSIGNED_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_ushort))[ - :abs(num_of_rows)]] - - -def _crow_int_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C int row to python row - """ - if num_of_rows > 0: - return [None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast( - data, 
ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)]] - else: - return [None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)]] - - -def _crow_int_unsigned_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C int row to python row - """ - if num_of_rows > 0: - return [ - None if ele == FieldType.C_INT_UNSIGNED_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_uint))[ - :abs(num_of_rows)]] - else: - return [ - None if ele == FieldType.C_INT_UNSIGNED_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_uint))[ - :abs(num_of_rows)]] - - -def _crow_bigint_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C bigint row to python row - """ - if num_of_rows > 0: - return [None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]] - else: - return [None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]] - - -def _crow_bigint_unsigned_to_python( - data, - num_of_rows, - nbytes=None, - micro=False): - """Function to convert C bigint row to python row - """ - if num_of_rows > 0: - return [ - None if ele == FieldType.C_BIGINT_UNSIGNED_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_uint64))[ - :abs(num_of_rows)]] - else: - return [ - None if ele == FieldType.C_BIGINT_UNSIGNED_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_uint64))[ - :abs(num_of_rows)]] - - -def _crow_float_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C float row to python row - """ - if num_of_rows > 0: - return [None if math.isnan(ele) else ele for ele in ctypes.cast( - data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)]] - else: - return [None if math.isnan(ele) else ele for ele in ctypes.cast( - data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)]] - - -def _crow_double_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C double row to python row - """ - if num_of_rows > 0: - return [None if math.isnan(ele) else ele for ele in ctypes.cast( - data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)]] - else: - return [None if math.isnan(ele) else ele for ele in ctypes.cast( - data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)]] - - -def _crow_binary_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C binary row to python row - """ - assert(nbytes is not None) - if num_of_rows > 0: - return [None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode( - 'utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)]] - else: - return [None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode( - 'utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)]] - - -def _crow_nchar_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C nchar row to python row - """ - assert(nbytes is not None) - res = [] - for i in range(abs(num_of_rows)): - try: - if num_of_rows >= 0: - tmpstr = ctypes.c_char_p(data) - res.append(tmpstr.value.decode()) - else: - res.append((ctypes.cast(data + nbytes * i, - ctypes.POINTER(ctypes.c_wchar * (nbytes // 4))))[0].value) - except ValueError: - res.append(None) - - return res - - -def 
_crow_binary_to_python_block(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C binary row to python row - """ - assert(nbytes is not None) - res = [] - if num_of_rows > 0: - for i in range(abs(num_of_rows)): - try: - rbyte = ctypes.cast( - data + nbytes * i, - ctypes.POINTER( - ctypes.c_short))[ - :1].pop() - tmpstr = ctypes.c_char_p(data + nbytes * i + 2) - res.append(tmpstr.value.decode()[0:rbyte]) - except ValueError: - res.append(None) - else: - for i in range(abs(num_of_rows)): - try: - rbyte = ctypes.cast( - data + nbytes * i, - ctypes.POINTER( - ctypes.c_short))[ - :1].pop() - tmpstr = ctypes.c_char_p(data + nbytes * i + 2) - res.append(tmpstr.value.decode()[0:rbyte]) - except ValueError: - res.append(None) - return res - - -def _crow_nchar_to_python_block(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C nchar row to python row - """ - assert(nbytes is not None) - res = [] - if num_of_rows >= 0: - for i in range(abs(num_of_rows)): - try: - tmpstr = ctypes.c_char_p(data + nbytes * i + 2) - res.append(tmpstr.value.decode()) - except ValueError: - res.append(None) - else: - for i in range(abs(num_of_rows)): - try: - res.append((ctypes.cast(data + nbytes * i + 2, - ctypes.POINTER(ctypes.c_wchar * (nbytes // 4))))[0].value) - except ValueError: - res.append(None) - return res - - -_CONVERT_FUNC = { - FieldType.C_BOOL: _crow_bool_to_python, - FieldType.C_TINYINT: _crow_tinyint_to_python, - FieldType.C_SMALLINT: _crow_smallint_to_python, - FieldType.C_INT: _crow_int_to_python, - FieldType.C_BIGINT: _crow_bigint_to_python, - FieldType.C_FLOAT: _crow_float_to_python, - FieldType.C_DOUBLE: _crow_double_to_python, - FieldType.C_BINARY: _crow_binary_to_python, - FieldType.C_TIMESTAMP: _crow_timestamp_to_python, - FieldType.C_NCHAR: _crow_nchar_to_python, - FieldType.C_TINYINT_UNSIGNED: _crow_tinyint_unsigned_to_python, - FieldType.C_SMALLINT_UNSIGNED: _crow_smallint_unsigned_to_python, - FieldType.C_INT_UNSIGNED: _crow_int_unsigned_to_python, - FieldType.C_BIGINT_UNSIGNED: _crow_bigint_unsigned_to_python -} - -_CONVERT_FUNC_BLOCK = { - FieldType.C_BOOL: _crow_bool_to_python, - FieldType.C_TINYINT: _crow_tinyint_to_python, - FieldType.C_SMALLINT: _crow_smallint_to_python, - FieldType.C_INT: _crow_int_to_python, - FieldType.C_BIGINT: _crow_bigint_to_python, - FieldType.C_FLOAT: _crow_float_to_python, - FieldType.C_DOUBLE: _crow_double_to_python, - FieldType.C_BINARY: _crow_binary_to_python_block, - FieldType.C_TIMESTAMP: _crow_timestamp_to_python, - FieldType.C_NCHAR: _crow_nchar_to_python_block, - FieldType.C_TINYINT_UNSIGNED: _crow_tinyint_unsigned_to_python, - FieldType.C_SMALLINT_UNSIGNED: _crow_smallint_unsigned_to_python, - FieldType.C_INT_UNSIGNED: _crow_int_unsigned_to_python, - FieldType.C_BIGINT_UNSIGNED: _crow_bigint_unsigned_to_python -} - -# Corresponding TAOS_FIELD structure in C - - -class TaosField(ctypes.Structure): - _fields_ = [('name', ctypes.c_char * 65), - ('type', ctypes.c_char), - ('bytes', ctypes.c_short)] - -# C interface class - - -class CTaosInterface(object): - - libtaos = ctypes.CDLL('libtaos.so') - - libtaos.taos_fetch_fields.restype = ctypes.POINTER(TaosField) - libtaos.taos_init.restype = None - libtaos.taos_connect.restype = ctypes.c_void_p - #libtaos.taos_use_result.restype = ctypes.c_void_p - libtaos.taos_fetch_row.restype = ctypes.POINTER(ctypes.c_void_p) - libtaos.taos_errstr.restype = ctypes.c_char_p - libtaos.taos_subscribe.restype = ctypes.c_void_p - libtaos.taos_consume.restype = ctypes.c_void_p - 
libtaos.taos_fetch_lengths.restype = ctypes.c_void_p - libtaos.taos_free_result.restype = None - libtaos.taos_errno.restype = ctypes.c_int - libtaos.taos_query.restype = ctypes.POINTER(ctypes.c_void_p) - - def __init__(self, config=None): - ''' - Function to initialize the class - @host : str, hostname to connect - @user : str, username to connect to server - @password : str, password to connect to server - @db : str, default db to use when log in - @config : str, config directory - - @rtype : None - ''' - if config is None: - self._config = ctypes.c_char_p(None) - else: - try: - self._config = ctypes.c_char_p(config.encode('utf-8')) - except AttributeError: - raise AttributeError("config is expected as a str") - - if config is not None: - CTaosInterface.libtaos.taos_options(3, self._config) - - CTaosInterface.libtaos.taos_init() - - @property - def config(self): - """ Get current config - """ - return self._config - - def connect( - self, - host=None, - user="root", - password="taosdata", - db=None, - port=0): - ''' - Function to connect to server - - @rtype: c_void_p, TDengine handle - ''' - # host - try: - _host = ctypes.c_char_p(host.encode( - "utf-8")) if host is not None else ctypes.c_char_p(None) - except AttributeError: - raise AttributeError("host is expected as a str") - - # user - try: - _user = ctypes.c_char_p(user.encode("utf-8")) - except AttributeError: - raise AttributeError("user is expected as a str") - - # password - try: - _password = ctypes.c_char_p(password.encode("utf-8")) - except AttributeError: - raise AttributeError("password is expected as a str") - - # db - try: - _db = ctypes.c_char_p( - db.encode("utf-8")) if db is not None else ctypes.c_char_p(None) - except AttributeError: - raise AttributeError("db is expected as a str") - - # port - try: - _port = ctypes.c_int(port) - except TypeError: - raise TypeError("port is expected as an int") - - connection = ctypes.c_void_p(CTaosInterface.libtaos.taos_connect( - _host, _user, _password, _db, _port)) - - if connection.value is None: - print('connect to TDengine failed') - raise ConnectionError("connect to TDengine failed") - # sys.exit(1) - # else: - # print('connect to TDengine success') - - return connection - - @staticmethod - def close(connection): - '''Close the TDengine handle - ''' - CTaosInterface.libtaos.taos_close(connection) - #print('connection is closed') - - @staticmethod - def query(connection, sql): - '''Run SQL - - @sql: str, sql string to run - - @rtype: 0 on success and -1 on failure - ''' - try: - return CTaosInterface.libtaos.taos_query( - connection, ctypes.c_char_p(sql.encode('utf-8'))) - except AttributeError: - raise AttributeError("sql is expected as a string") - # finally: - # CTaosInterface.libtaos.close(connection) - - @staticmethod - def affectedRows(result): - """The affected rows after runing query - """ - return CTaosInterface.libtaos.taos_affected_rows(result) - - @staticmethod - def subscribe(connection, restart, topic, sql, interval): - """Create a subscription - @restart boolean, - @sql string, sql statement for data query, must be a 'select' statement. 
- @topic string, name of this subscription - """ - return ctypes.c_void_p(CTaosInterface.libtaos.taos_subscribe( - connection, - 1 if restart else 0, - ctypes.c_char_p(topic.encode('utf-8')), - ctypes.c_char_p(sql.encode('utf-8')), - None, - None, - interval)) - - @staticmethod - def consume(sub): - """Consume data of a subscription - """ - result = ctypes.c_void_p(CTaosInterface.libtaos.taos_consume(sub)) - fields = [] - pfields = CTaosInterface.fetchFields(result) - for i in range(CTaosInterface.libtaos.taos_num_fields(result)): - fields.append({'name': pfields[i].name.decode('utf-8'), - 'bytes': pfields[i].bytes, - 'type': ord(pfields[i].type)}) - return result, fields - - @staticmethod - def unsubscribe(sub, keepProgress): - """Cancel a subscription - """ - CTaosInterface.libtaos.taos_unsubscribe(sub, 1 if keepProgress else 0) - - @staticmethod - def useResult(result): - '''Use result after calling self.query - ''' - fields = [] - pfields = CTaosInterface.fetchFields(result) - for i in range(CTaosInterface.fieldsCount(result)): - fields.append({'name': pfields[i].name.decode('utf-8'), - 'bytes': pfields[i].bytes, - 'type': ord(pfields[i].type)}) - - return fields - - @staticmethod - def fetchBlock(result, fields): - pblock = ctypes.c_void_p(0) - num_of_rows = CTaosInterface.libtaos.taos_fetch_block( - result, ctypes.byref(pblock)) - if num_of_rows == 0: - return None, 0 - isMicro = (CTaosInterface.libtaos.taos_result_precision( - result) == FieldType.C_TIMESTAMP_MICRO) - blocks = [None] * len(fields) - fieldL = CTaosInterface.libtaos.taos_fetch_lengths(result) - fieldLen = [ - ele for ele in ctypes.cast( - fieldL, ctypes.POINTER( - ctypes.c_int))[ - :len(fields)]] - for i in range(len(fields)): - data = ctypes.cast(pblock, ctypes.POINTER(ctypes.c_void_p))[i] - if fields[i]['type'] not in _CONVERT_FUNC_BLOCK: - raise DatabaseError("Invalid data type returned from database") - blocks[i] = _CONVERT_FUNC_BLOCK[fields[i]['type']]( - data, num_of_rows, fieldLen[i], isMicro) - - return blocks, abs(num_of_rows) - - @staticmethod - def fetchRow(result, fields): - pblock = ctypes.c_void_p(0) - pblock = CTaosInterface.libtaos.taos_fetch_row(result) - if pblock: - num_of_rows = 1 - isMicro = (CTaosInterface.libtaos.taos_result_precision( - result) == FieldType.C_TIMESTAMP_MICRO) - blocks = [None] * len(fields) - fieldL = CTaosInterface.libtaos.taos_fetch_lengths(result) - fieldLen = [ - ele for ele in ctypes.cast( - fieldL, ctypes.POINTER( - ctypes.c_int))[ - :len(fields)]] - for i in range(len(fields)): - data = ctypes.cast(pblock, ctypes.POINTER(ctypes.c_void_p))[i] - if fields[i]['type'] not in _CONVERT_FUNC: - raise DatabaseError( - "Invalid data type returned from database") - if data is None: - blocks[i] = [None] - else: - blocks[i] = _CONVERT_FUNC[fields[i]['type']]( - data, num_of_rows, fieldLen[i], isMicro) - else: - return None, 0 - return blocks, abs(num_of_rows) - - @staticmethod - def freeResult(result): - CTaosInterface.libtaos.taos_free_result(result) - result.value = None - - @staticmethod - def fieldsCount(result): - return CTaosInterface.libtaos.taos_field_count(result) - - @staticmethod - def fetchFields(result): - return CTaosInterface.libtaos.taos_fetch_fields(result) - - # @staticmethod - # def fetchRow(result, fields): - # l = [] - # row = CTaosInterface.libtaos.taos_fetch_row(result) - # if not row: - # return None - - # for i in range(len(fields)): - # l.append(CTaosInterface.getDataValue( - # row[i], fields[i]['type'], fields[i]['bytes'])) - - # return tuple(l) - - # 
@staticmethod - # def getDataValue(data, dtype, byte): - # ''' - # ''' - # if not data: - # return None - - # if (dtype == CTaosInterface.TSDB_DATA_TYPE_BOOL): - # return ctypes.cast(data, ctypes.POINTER(ctypes.c_bool))[0] - # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_TINYINT): - # return ctypes.cast(data, ctypes.POINTER(ctypes.c_byte))[0] - # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_SMALLINT): - # return ctypes.cast(data, ctypes.POINTER(ctypes.c_short))[0] - # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_INT): - # return ctypes.cast(data, ctypes.POINTER(ctypes.c_int))[0] - # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_BIGINT): - # return ctypes.cast(data, ctypes.POINTER(ctypes.c_int64))[0] - # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_FLOAT): - # return ctypes.cast(data, ctypes.POINTER(ctypes.c_float))[0] - # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_DOUBLE): - # return ctypes.cast(data, ctypes.POINTER(ctypes.c_double))[0] - # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_BINARY): - # return (ctypes.cast(data, ctypes.POINTER(ctypes.c_char))[0:byte]).rstrip('\x00') - # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_TIMESTAMP): - # return ctypes.cast(data, ctypes.POINTER(ctypes.c_int64))[0] - # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_NCHAR): - # return (ctypes.cast(data, ctypes.c_char_p).value).rstrip('\x00') - - @staticmethod - def errno(result): - """Return the error number. - """ - return CTaosInterface.libtaos.taos_errno(result) - - @staticmethod - def errStr(result): - """Return the error styring - """ - return CTaosInterface.libtaos.taos_errstr(result).decode('utf-8') - - -if __name__ == '__main__': - cinter = CTaosInterface() - conn = cinter.connect() - result = cinter.query(conn, 'show databases') - - print('Query Affected rows: {}'.format(cinter.affectedRows(result))) - - fields = CTaosInterface.useResult(result) - - data, num_of_rows = CTaosInterface.fetchBlock(result, fields) - - print(data) - - cinter.freeResult(result) - cinter.close(conn) diff --git a/src/connector/python/linux/python2/taos/cursor.py b/src/connector/python/linux/python2/taos/cursor.py deleted file mode 100644 index 4c0456b503..0000000000 --- a/src/connector/python/linux/python2/taos/cursor.py +++ /dev/null @@ -1,278 +0,0 @@ -from .cinterface import CTaosInterface -from .error import * -from .constants import FieldType - - -class TDengineCursor(object): - """Database cursor which is used to manage the context of a fetch operation. - - Attributes: - .description: Read-only attribute consists of 7-item sequences: - - > name (mondatory) - > type_code (mondatory) - > display_size - > internal_size - > precision - > scale - > null_ok - - This attribute will be None for operations that do not return rows or - if the cursor has not had an operation invoked via the .execute*() method yet. 
- - .rowcount:This read-only attribute specifies the number of rows that the last - .execute*() produced (for DQL statements like SELECT) or affected - """ - - def __init__(self, connection=None): - self._description = [] - self._rowcount = -1 - self._connection = None - self._result = None - self._fields = None - self._block = None - self._block_rows = -1 - self._block_iter = 0 - self._affected_rows = 0 - self._logfile = "" - - if connection is not None: - self._connection = connection - - def __iter__(self): - return self - - def next(self): - if self._result is None or self._fields is None: - raise OperationalError("Invalid use of fetch iterator") - - if self._block_rows <= self._block_iter: - block, self._block_rows = CTaosInterface.fetchRow( - self._result, self._fields) - if self._block_rows == 0: - raise StopIteration - self._block = list(map(tuple, zip(*block))) - self._block_iter = 0 - - data = self._block[self._block_iter] - self._block_iter += 1 - - return data - - @property - def description(self): - """Return the description of the object. - """ - return self._description - - @property - def rowcount(self): - """Return the rowcount of the object - """ - return self._rowcount - - @property - def affected_rows(self): - """Return the affected_rows of the object - """ - return self._affected_rows - - def callproc(self, procname, *args): - """Call a stored database procedure with the given name. - - Void functionality since no stored procedures. - """ - pass - - def log(self, logfile): - self._logfile = logfile - - def close(self): - """Close the cursor. - """ - if self._connection is None: - return False - - self._reset_result() - self._connection = None - - return True - - def execute(self, operation, params=None): - """Prepare and execute a database operation (query or command). - """ - if not operation: - return None - - if not self._connection: - # TODO : change the exception raised here - raise ProgrammingError("Cursor is not connected") - - self._reset_result() - - stmt = operation - if params is not None: - pass - - # global querySeqNum - # querySeqNum += 1 - # localSeqNum = querySeqNum # avoid raice condition - # print(" >> Exec Query ({}): {}".format(localSeqNum, str(stmt))) - self._result = CTaosInterface.query(self._connection._conn, stmt) - # print(" << Query ({}) Exec Done".format(localSeqNum)) - if (self._logfile): - with open(self._logfile, "a") as logfile: - logfile.write("%s;\n" % operation) - - errno = CTaosInterface.libtaos.taos_errno(self._result) - if errno == 0: - if CTaosInterface.fieldsCount(self._result) == 0: - self._affected_rows += CTaosInterface.affectedRows( - self._result) - return CTaosInterface.affectedRows(self._result) - else: - self._fields = CTaosInterface.useResult( - self._result) - return self._handle_result() - else: - raise ProgrammingError( - CTaosInterface.errStr( - self._result), errno) - - def executemany(self, operation, seq_of_parameters): - """Prepare a database operation (query or command) and then execute it against all parameter sequences or mappings found in the sequence seq_of_parameters. - """ - pass - - def fetchone(self): - """Fetch the next row of a query result set, returning a single sequence, or None when no more data is available. 
- """ - pass - - def fetchmany(self): - pass - - def istype(self, col, dataType): - if (dataType.upper() == "BOOL"): - if (self._description[col][1] == FieldType.C_BOOL): - return True - if (dataType.upper() == "TINYINT"): - if (self._description[col][1] == FieldType.C_TINYINT): - return True - if (dataType.upper() == "TINYINT UNSIGNED"): - if (self._description[col][1] == FieldType.C_TINYINT_UNSIGNED): - return True - if (dataType.upper() == "SMALLINT"): - if (self._description[col][1] == FieldType.C_SMALLINT): - return True - if (dataType.upper() == "SMALLINT UNSIGNED"): - if (self._description[col][1] == FieldType.C_SMALLINT_UNSIGNED): - return True - if (dataType.upper() == "INT"): - if (self._description[col][1] == FieldType.C_INT): - return True - if (dataType.upper() == "INT UNSIGNED"): - if (self._description[col][1] == FieldType.C_INT_UNSIGNED): - return True - if (dataType.upper() == "BIGINT"): - if (self._description[col][1] == FieldType.C_BIGINT): - return True - if (dataType.upper() == "BIGINT UNSIGNED"): - if (self._description[col][1] == FieldType.C_BIGINT_UNSIGNED): - return True - if (dataType.upper() == "FLOAT"): - if (self._description[col][1] == FieldType.C_FLOAT): - return True - if (dataType.upper() == "DOUBLE"): - if (self._description[col][1] == FieldType.C_DOUBLE): - return True - if (dataType.upper() == "BINARY"): - if (self._description[col][1] == FieldType.C_BINARY): - return True - if (dataType.upper() == "TIMESTAMP"): - if (self._description[col][1] == FieldType.C_TIMESTAMP): - return True - if (dataType.upper() == "NCHAR"): - if (self._description[col][1] == FieldType.C_NCHAR): - return True - - return False - - def fetchall_row(self): - """Fetch all (remaining) rows of a query result, returning them as a sequence of sequences (e.g. a list of tuples). Note that the cursor's arraysize attribute can affect the performance of this operation. - """ - if self._result is None or self._fields is None: - raise OperationalError("Invalid use of fetchall") - - buffer = [[] for i in range(len(self._fields))] - self._rowcount = 0 - while True: - block, num_of_fields = CTaosInterface.fetchRow( - self._result, self._fields) - errno = CTaosInterface.libtaos.taos_errno(self._result) - if errno != 0: - raise ProgrammingError( - CTaosInterface.errStr( - self._result), errno) - if num_of_fields == 0: - break - self._rowcount += num_of_fields - for i in range(len(self._fields)): - buffer[i].extend(block[i]) - return list(map(tuple, zip(*buffer))) - - def fetchall(self): - if self._result is None or self._fields is None: - raise OperationalError("Invalid use of fetchall") - - buffer = [[] for i in range(len(self._fields))] - self._rowcount = 0 - while True: - block, num_of_fields = CTaosInterface.fetchBlock( - self._result, self._fields) - errno = CTaosInterface.libtaos.taos_errno(self._result) - if errno != 0: - raise ProgrammingError( - CTaosInterface.errStr( - self._result), errno) - if num_of_fields == 0: - break - self._rowcount += num_of_fields - for i in range(len(self._fields)): - buffer[i].extend(block[i]) - return list(map(tuple, zip(*buffer))) - - def nextset(self): - """ - """ - pass - - def setinputsize(self, sizes): - pass - - def setutputsize(self, size, column=None): - pass - - def _reset_result(self): - """Reset the result to unused version. 
- """ - self._description = [] - self._rowcount = -1 - if self._result is not None: - CTaosInterface.freeResult(self._result) - self._result = None - self._fields = None - self._block = None - self._block_rows = -1 - self._block_iter = 0 - self._affected_rows = 0 - - def _handle_result(self): - """Handle the return result from query. - """ - self._description = [] - for ele in self._fields: - self._description.append( - (ele['name'], ele['type'], None, None, None, None, False)) - - return self._result diff --git a/src/connector/python/linux/python3/LICENSE b/src/connector/python/linux/python3/LICENSE deleted file mode 100644 index 79a9d73086..0000000000 --- a/src/connector/python/linux/python3/LICENSE +++ /dev/null @@ -1,12 +0,0 @@ - Copyright (c) 2019 TAOS Data, Inc. - -This program is free software: you can use, redistribute, and/or modify -it under the terms of the GNU Affero General Public License, version 3 -or later ("AGPL"), as published by the Free Software Foundation. - -This program is distributed in the hope that it will be useful, but WITHOUT -ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -FITNESS FOR A PARTICULAR PURPOSE. - -You should have received a copy of the GNU Affero General Public License -along with this program. If not, see . diff --git a/src/connector/python/linux/python3/README.md b/src/connector/python/linux/python3/README.md deleted file mode 100644 index 70db6bba13..0000000000 --- a/src/connector/python/linux/python3/README.md +++ /dev/null @@ -1 +0,0 @@ -# TDengine python client interface \ No newline at end of file diff --git a/src/connector/python/linux/python3/setup.py b/src/connector/python/linux/python3/setup.py deleted file mode 100644 index 0bd7d51b6a..0000000000 --- a/src/connector/python/linux/python3/setup.py +++ /dev/null @@ -1,20 +0,0 @@ -import setuptools - -with open("README.md", "r") as fh: - long_description = fh.read() - -setuptools.setup( - name="taos", - version="2.0.9", - author="Taosdata Inc.", - author_email="support@taosdata.com", - description="TDengine python client package", - long_description=long_description, - long_description_content_type="text/markdown", - url="https://github.com/pypa/sampleproject", - packages=setuptools.find_packages(), - classifiers=[ - "Programming Language :: Python :: 3", - "Operating System :: Linux", - ], -) diff --git a/src/connector/python/linux/python3/taos/__init__.py b/src/connector/python/linux/python3/taos/__init__.py deleted file mode 100644 index 1b086f36ec..0000000000 --- a/src/connector/python/linux/python3/taos/__init__.py +++ /dev/null @@ -1,25 +0,0 @@ - -from .connection import TDengineConnection -from .cursor import TDengineCursor -from .error import Error - -# Globals -threadsafety = 0 -paramstyle = 'pyformat' - -__all__ = ['connection', 'cursor'] - - -def connect(*args, **kwargs): - """ Function to return a TDengine connector object - - Current supporting keyword parameters: - @dsn: Data source name as string - @user: Username as string(optional) - @password: Password as string(optional) - @host: Hostname(optional) - @database: Database name(optional) - - @rtype: TDengineConnector - """ - return TDengineConnection(*args, **kwargs) diff --git a/src/connector/python/linux/python3/taos/connection.py b/src/connector/python/linux/python3/taos/connection.py deleted file mode 100644 index f6c395342c..0000000000 --- a/src/connector/python/linux/python3/taos/connection.py +++ /dev/null @@ -1,95 +0,0 @@ -from .cursor import TDengineCursor -from .subscription import 
TDengineSubscription -from .cinterface import CTaosInterface - - -class TDengineConnection(object): - """ TDengine connection object - """ - - def __init__(self, *args, **kwargs): - self._conn = None - self._host = None - self._user = "root" - self._password = "taosdata" - self._database = None - self._port = 0 - self._config = None - self._chandle = None - - self.config(**kwargs) - - def config(self, **kwargs): - # host - if 'host' in kwargs: - self._host = kwargs['host'] - - # user - if 'user' in kwargs: - self._user = kwargs['user'] - - # password - if 'password' in kwargs: - self._password = kwargs['password'] - - # database - if 'database' in kwargs: - self._database = kwargs['database'] - - # port - if 'port' in kwargs: - self._port = kwargs['port'] - - # config - if 'config' in kwargs: - self._config = kwargs['config'] - - self._chandle = CTaosInterface(self._config) - self._conn = self._chandle.connect( - self._host, - self._user, - self._password, - self._database, - self._port) - - def close(self): - """Close current connection. - """ - return CTaosInterface.close(self._conn) - - def subscribe(self, restart, topic, sql, interval): - """Create a subscription. - """ - if self._conn is None: - return None - sub = CTaosInterface.subscribe( - self._conn, restart, topic, sql, interval) - return TDengineSubscription(sub) - - def cursor(self): - """Return a new Cursor object using the connection. - """ - return TDengineCursor(self) - - def commit(self): - """Commit any pending transaction to the database. - - Since TDengine do not support transactions, the implement is void functionality. - """ - pass - - def rollback(self): - """Void functionality - """ - pass - - def clear_result_set(self): - """Clear unused result set on this connection. - """ - pass - - -if __name__ == "__main__": - conn = TDengineConnection(host='192.168.1.107') - conn.close() - print("Hello world") diff --git a/src/connector/python/linux/python3/taos/constants.py b/src/connector/python/linux/python3/taos/constants.py deleted file mode 100644 index 93466f5184..0000000000 --- a/src/connector/python/linux/python3/taos/constants.py +++ /dev/null @@ -1,42 +0,0 @@ -"""Constants in TDengine python -""" - -from .dbapi import * - - -class FieldType(object): - """TDengine Field Types - """ - # type_code - C_NULL = 0 - C_BOOL = 1 - C_TINYINT = 2 - C_SMALLINT = 3 - C_INT = 4 - C_BIGINT = 5 - C_FLOAT = 6 - C_DOUBLE = 7 - C_BINARY = 8 - C_TIMESTAMP = 9 - C_NCHAR = 10 - C_TINYINT_UNSIGNED = 11 - C_SMALLINT_UNSIGNED = 12 - C_INT_UNSIGNED = 13 - C_BIGINT_UNSIGNED = 14 - # NULL value definition - # NOTE: These values should change according to C definition in tsdb.h - C_BOOL_NULL = 0x02 - C_TINYINT_NULL = -128 - C_TINYINT_UNSIGNED_NULL = 255 - C_SMALLINT_NULL = -32768 - C_SMALLINT_UNSIGNED_NULL = 65535 - C_INT_NULL = -2147483648 - C_INT_UNSIGNED_NULL = 4294967295 - C_BIGINT_NULL = -9223372036854775808 - C_BIGINT_UNSIGNED_NULL = 18446744073709551615 - C_FLOAT_NULL = float('nan') - C_DOUBLE_NULL = float('nan') - C_BINARY_NULL = bytearray([int('0xff', 16)]) - # Timestamp precision definition - C_TIMESTAMP_MILLI = 0 - C_TIMESTAMP_MICRO = 1 diff --git a/src/connector/python/linux/python3/taos/dbapi.py b/src/connector/python/linux/python3/taos/dbapi.py deleted file mode 100644 index 594681ada9..0000000000 --- a/src/connector/python/linux/python3/taos/dbapi.py +++ /dev/null @@ -1,44 +0,0 @@ -"""Type Objects and Constructors. 
-""" - -import time -import datetime - - -class DBAPITypeObject(object): - def __init__(self, *values): - self.values = values - - def __com__(self, other): - if other in self.values: - return 0 - if other < self.values: - return 1 - else: - return -1 - - -Date = datetime.date -Time = datetime.time -Timestamp = datetime.datetime - - -def DataFromTicks(ticks): - return Date(*time.localtime(ticks)[:3]) - - -def TimeFromTicks(ticks): - return Time(*time.localtime(ticks)[3:6]) - - -def TimestampFromTicks(ticks): - return Timestamp(*time.localtime(ticks)[:6]) - - -Binary = bytes - -# STRING = DBAPITypeObject(*constants.FieldType.get_string_types()) -# BINARY = DBAPITypeObject(*constants.FieldType.get_binary_types()) -# NUMBER = BAPITypeObject(*constants.FieldType.get_number_types()) -# DATETIME = DBAPITypeObject(*constants.FieldType.get_timestamp_types()) -# ROWID = DBAPITypeObject() diff --git a/src/connector/python/linux/python3/taos/error.py b/src/connector/python/linux/python3/taos/error.py deleted file mode 100644 index c584badce8..0000000000 --- a/src/connector/python/linux/python3/taos/error.py +++ /dev/null @@ -1,66 +0,0 @@ -"""Python exceptions -""" - - -class Error(Exception): - def __init__(self, msg=None, errno=None): - self.msg = msg - self._full_msg = self.msg - self.errno = errno - - def __str__(self): - return self._full_msg - - -class Warning(Exception): - """Exception raised for important warnings like data truncations while inserting. - """ - pass - - -class InterfaceError(Error): - """Exception raised for errors that are related to the database interface rather than the database itself. - """ - pass - - -class DatabaseError(Error): - """Exception raised for errors that are related to the database. - """ - pass - - -class DataError(DatabaseError): - """Exception raised for errors that are due to problems with the processed data like division by zero, numeric value out of range. - """ - pass - - -class OperationalError(DatabaseError): - """Exception raised for errors that are related to the database's operation and not necessarily under the control of the programmer - """ - pass - - -class IntegrityError(DatabaseError): - """Exception raised when the relational integrity of the database is affected. - """ - pass - - -class InternalError(DatabaseError): - """Exception raised when the database encounters an internal error. - """ - pass - - -class ProgrammingError(DatabaseError): - """Exception raised for programming errors. - """ - pass - - -class NotSupportedError(DatabaseError): - """Exception raised in case a method or database API was used which is not supported by the database,. 
- """ - pass diff --git a/src/connector/python/linux/python3/taos/subscription.py b/src/connector/python/linux/python3/taos/subscription.py deleted file mode 100644 index 270d9de092..0000000000 --- a/src/connector/python/linux/python3/taos/subscription.py +++ /dev/null @@ -1,57 +0,0 @@ -from .cinterface import CTaosInterface -from .error import * - - -class TDengineSubscription(object): - """TDengine subscription object - """ - - def __init__(self, sub): - self._sub = sub - - def consume(self): - """Consume rows of a subscription - """ - if self._sub is None: - raise OperationalError("Invalid use of consume") - - result, fields = CTaosInterface.consume(self._sub) - buffer = [[] for i in range(len(fields))] - while True: - block, num_of_fields = CTaosInterface.fetchBlock(result, fields) - if num_of_fields == 0: - break - for i in range(len(fields)): - buffer[i].extend(block[i]) - - self.fields = fields - return list(map(tuple, zip(*buffer))) - - def close(self, keepProgress=True): - """Close the Subscription. - """ - if self._sub is None: - return False - - CTaosInterface.unsubscribe(self._sub, keepProgress) - return True - - -if __name__ == '__main__': - from .connection import TDengineConnection - conn = TDengineConnection( - host="127.0.0.1", - user="root", - password="taosdata", - database="test") - - # Generate a cursor object to run SQL commands - sub = conn.subscribe(True, "test", "select * from meters;", 1000) - - for i in range(0, 10): - data = sub.consume() - for d in data: - print(d) - - sub.close() - conn.close() diff --git a/src/connector/python/osx/python3/LICENSE b/src/connector/python/osx/python3/LICENSE deleted file mode 100644 index 79a9d73086..0000000000 --- a/src/connector/python/osx/python3/LICENSE +++ /dev/null @@ -1,12 +0,0 @@ - Copyright (c) 2019 TAOS Data, Inc. - -This program is free software: you can use, redistribute, and/or modify -it under the terms of the GNU Affero General Public License, version 3 -or later ("AGPL"), as published by the Free Software Foundation. - -This program is distributed in the hope that it will be useful, but WITHOUT -ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -FITNESS FOR A PARTICULAR PURPOSE. - -You should have received a copy of the GNU Affero General Public License -along with this program. If not, see . 
diff --git a/src/connector/python/osx/python3/README.md b/src/connector/python/osx/python3/README.md deleted file mode 100644 index 70db6bba13..0000000000 --- a/src/connector/python/osx/python3/README.md +++ /dev/null @@ -1 +0,0 @@ -# TDengine python client interface \ No newline at end of file diff --git a/src/connector/python/osx/python3/setup.py b/src/connector/python/osx/python3/setup.py deleted file mode 100644 index 4c865676c9..0000000000 --- a/src/connector/python/osx/python3/setup.py +++ /dev/null @@ -1,20 +0,0 @@ -import setuptools - -with open("README.md", "r") as fh: - long_description = fh.read() - -setuptools.setup( - name="taos", - version="2.0.9", - author="Taosdata Inc.", - author_email="support@taosdata.com", - description="TDengine python client package", - long_description=long_description, - long_description_content_type="text/markdown", - url="https://github.com/pypa/sampleproject", - packages=setuptools.find_packages(), - classifiers=[ - "Programming Language :: Python :: 3", - "Operating System :: MacOS X", - ], -) diff --git a/src/connector/python/osx/python3/taos/__init__.py b/src/connector/python/osx/python3/taos/__init__.py deleted file mode 100644 index 9732635738..0000000000 --- a/src/connector/python/osx/python3/taos/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ - -from .connection import TDengineConnection -from .cursor import TDengineCursor - -# Globals -threadsafety = 0 -paramstyle = 'pyformat' - -__all__ = ['connection', 'cursor'] - - -def connect(*args, **kwargs): - """ Function to return a TDengine connector object - - Current supporting keyword parameters: - @dsn: Data source name as string - @user: Username as string(optional) - @password: Password as string(optional) - @host: Hostname(optional) - @database: Database name(optional) - - @rtype: TDengineConnector - """ - return TDengineConnection(*args, **kwargs) diff --git a/src/connector/python/osx/python3/taos/cinterface.py b/src/connector/python/osx/python3/taos/cinterface.py deleted file mode 100644 index 720fbef6f5..0000000000 --- a/src/connector/python/osx/python3/taos/cinterface.py +++ /dev/null @@ -1,648 +0,0 @@ -import ctypes -from .constants import FieldType -from .error import * -import math -import datetime - - -def _convert_millisecond_to_datetime(milli): - return datetime.datetime.fromtimestamp(milli / 1000.0) - - -def _convert_microsecond_to_datetime(micro): - return datetime.datetime.fromtimestamp(micro / 1000000.0) - - -def _crow_timestamp_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C bool row to python row - """ - _timestamp_converter = _convert_millisecond_to_datetime - if micro: - _timestamp_converter = _convert_microsecond_to_datetime - - if num_of_rows > 0: - return [ - None if ele == FieldType.C_BIGINT_NULL else _timestamp_converter(ele) for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_int64))[ - :abs(num_of_rows)]] - else: - return [ - None if ele == FieldType.C_BIGINT_NULL else _timestamp_converter(ele) for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_int64))[ - :abs(num_of_rows)]] - - -def _crow_bool_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C bool row to python row - """ - if num_of_rows > 0: - return [ - None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_byte))[ - :abs(num_of_rows)]] - else: - return [ - None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_bool))[ - 
:abs(num_of_rows)]] - - -def _crow_tinyint_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C tinyint row to python row - """ - if num_of_rows > 0: - return [None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)]] - else: - return [None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)]] - - -def _crow_tinyint_unsigned_to_python( - data, - num_of_rows, - nbytes=None, - micro=False): - """Function to convert C tinyint row to python row - """ - if num_of_rows > 0: - return [ - None if ele == FieldType.C_TINYINT_UNSIGNED_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_ubyte))[ - :abs(num_of_rows)]] - else: - return [ - None if ele == FieldType.C_TINYINT_UNSIGNED_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_ubyte))[ - :abs(num_of_rows)]] - - -def _crow_smallint_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C smallint row to python row - """ - if num_of_rows > 0: - return [ - None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_short))[ - :abs(num_of_rows)]] - else: - return [ - None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_short))[ - :abs(num_of_rows)]] - - -def _crow_smallint_unsigned_to_python( - data, num_of_rows, nbytes=None, micro=False): - """Function to convert C smallint row to python row - """ - if num_of_rows > 0: - return [ - None if ele == FieldType.C_SMALLINT_UNSIGNED_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_ushort))[ - :abs(num_of_rows)]] - else: - return [ - None if ele == FieldType.C_SMALLINT_UNSIGNED_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_ushort))[ - :abs(num_of_rows)]] - - -def _crow_int_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C int row to python row - """ - if num_of_rows > 0: - return [None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)]] - else: - return [None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)]] - - -def _crow_int_unsigned_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C int row to python row - """ - if num_of_rows > 0: - return [ - None if ele == FieldType.C_INT_UNSIGNED_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_uint))[ - :abs(num_of_rows)]] - else: - return [ - None if ele == FieldType.C_INT_UNSIGNED_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_uint))[ - :abs(num_of_rows)]] - - -def _crow_bigint_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C bigint row to python row - """ - if num_of_rows > 0: - return [None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]] - else: - return [None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]] - - -def _crow_bigint_unsigned_to_python( - data, - num_of_rows, - nbytes=None, - micro=False): - """Function to convert C bigint row to python row - """ - if num_of_rows > 0: - return [ - None if ele == 
FieldType.C_BIGINT_UNSIGNED_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_uint64))[ - :abs(num_of_rows)]] - else: - return [ - None if ele == FieldType.C_BIGINT_UNSIGNED_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_uint64))[ - :abs(num_of_rows)]] - - -def _crow_float_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C float row to python row - """ - if num_of_rows > 0: - return [None if math.isnan(ele) else ele for ele in ctypes.cast( - data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)]] - else: - return [None if math.isnan(ele) else ele for ele in ctypes.cast( - data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)]] - - -def _crow_double_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C double row to python row - """ - if num_of_rows > 0: - return [None if math.isnan(ele) else ele for ele in ctypes.cast( - data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)]] - else: - return [None if math.isnan(ele) else ele for ele in ctypes.cast( - data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)]] - - -def _crow_binary_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C binary row to python row - """ - assert(nbytes is not None) - if num_of_rows > 0: - return [None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode( - 'utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)]] - else: - return [None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode( - 'utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)]] - - -def _crow_nchar_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C nchar row to python row - """ - assert(nbytes is not None) - res = [] - for i in range(abs(num_of_rows)): - try: - if num_of_rows >= 0: - tmpstr = ctypes.c_char_p(data) - res.append(tmpstr.value.decode()) - else: - res.append((ctypes.cast(data + nbytes * i, - ctypes.POINTER(ctypes.c_wchar * (nbytes // 4))))[0].value) - except ValueError: - res.append(None) - - return res - - -def _crow_binary_to_python_block(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C binary row to python row - """ - assert(nbytes is not None) - res = [] - if num_of_rows > 0: - for i in range(abs(num_of_rows)): - try: - rbyte = ctypes.cast( - data + nbytes * i, - ctypes.POINTER( - ctypes.c_short))[ - :1].pop() - tmpstr = ctypes.c_char_p(data + nbytes * i + 2) - res.append(tmpstr.value.decode()[0:rbyte]) - except ValueError: - res.append(None) - else: - for i in range(abs(num_of_rows)): - try: - rbyte = ctypes.cast( - data + nbytes * i, - ctypes.POINTER( - ctypes.c_short))[ - :1].pop() - tmpstr = ctypes.c_char_p(data + nbytes * i + 2) - res.append(tmpstr.value.decode()[0:rbyte]) - except ValueError: - res.append(None) - return res - - -def _crow_nchar_to_python_block(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C nchar row to python row - """ - assert(nbytes is not None) - res = [] - if num_of_rows >= 0: - for i in range(abs(num_of_rows)): - try: - tmpstr = ctypes.c_char_p(data + nbytes * i + 2) - res.append(tmpstr.value.decode()) - except ValueError: - res.append(None) - else: - for i in range(abs(num_of_rows)): - try: - res.append((ctypes.cast(data + nbytes * i + 2, - ctypes.POINTER(ctypes.c_wchar * (nbytes // 4))))[0].value) - except ValueError: - res.append(None) - return res - - 
-_CONVERT_FUNC = { - FieldType.C_BOOL: _crow_bool_to_python, - FieldType.C_TINYINT: _crow_tinyint_to_python, - FieldType.C_SMALLINT: _crow_smallint_to_python, - FieldType.C_INT: _crow_int_to_python, - FieldType.C_BIGINT: _crow_bigint_to_python, - FieldType.C_FLOAT: _crow_float_to_python, - FieldType.C_DOUBLE: _crow_double_to_python, - FieldType.C_BINARY: _crow_binary_to_python, - FieldType.C_TIMESTAMP: _crow_timestamp_to_python, - FieldType.C_NCHAR: _crow_nchar_to_python, - FieldType.C_TINYINT_UNSIGNED: _crow_tinyint_unsigned_to_python, - FieldType.C_SMALLINT_UNSIGNED: _crow_smallint_unsigned_to_python, - FieldType.C_INT_UNSIGNED: _crow_int_unsigned_to_python, - FieldType.C_BIGINT_UNSIGNED: _crow_bigint_unsigned_to_python -} - -_CONVERT_FUNC_BLOCK = { - FieldType.C_BOOL: _crow_bool_to_python, - FieldType.C_TINYINT: _crow_tinyint_to_python, - FieldType.C_SMALLINT: _crow_smallint_to_python, - FieldType.C_INT: _crow_int_to_python, - FieldType.C_BIGINT: _crow_bigint_to_python, - FieldType.C_FLOAT: _crow_float_to_python, - FieldType.C_DOUBLE: _crow_double_to_python, - FieldType.C_BINARY: _crow_binary_to_python_block, - FieldType.C_TIMESTAMP: _crow_timestamp_to_python, - FieldType.C_NCHAR: _crow_nchar_to_python_block, - FieldType.C_TINYINT_UNSIGNED: _crow_tinyint_unsigned_to_python, - FieldType.C_SMALLINT_UNSIGNED: _crow_smallint_unsigned_to_python, - FieldType.C_INT_UNSIGNED: _crow_int_unsigned_to_python, - FieldType.C_BIGINT_UNSIGNED: _crow_bigint_unsigned_to_python -} - -# Corresponding TAOS_FIELD structure in C - - -class TaosField(ctypes.Structure): - _fields_ = [('name', ctypes.c_char * 65), - ('type', ctypes.c_char), - ('bytes', ctypes.c_short)] - -# C interface class - - -class CTaosInterface(object): - - libtaos = ctypes.CDLL('libtaos.dylib') - - libtaos.taos_fetch_fields.restype = ctypes.POINTER(TaosField) - libtaos.taos_init.restype = None - libtaos.taos_connect.restype = ctypes.c_void_p - #libtaos.taos_use_result.restype = ctypes.c_void_p - libtaos.taos_fetch_row.restype = ctypes.POINTER(ctypes.c_void_p) - libtaos.taos_errstr.restype = ctypes.c_char_p - libtaos.taos_subscribe.restype = ctypes.c_void_p - libtaos.taos_consume.restype = ctypes.c_void_p - libtaos.taos_fetch_lengths.restype = ctypes.c_void_p - libtaos.taos_free_result.restype = None - libtaos.taos_errno.restype = ctypes.c_int - libtaos.taos_query.restype = ctypes.POINTER(ctypes.c_void_p) - - def __init__(self, config=None): - ''' - Function to initialize the class - @host : str, hostname to connect - @user : str, username to connect to server - @password : str, password to connect to server - @db : str, default db to use when log in - @config : str, config directory - - @rtype : None - ''' - if config is None: - self._config = ctypes.c_char_p(None) - else: - try: - self._config = ctypes.c_char_p(config.encode('utf-8')) - except AttributeError: - raise AttributeError("config is expected as a str") - - if config is not None: - CTaosInterface.libtaos.taos_options(3, self._config) - - CTaosInterface.libtaos.taos_init() - - @property - def config(self): - """ Get current config - """ - return self._config - - def connect( - self, - host=None, - user="root", - password="taosdata", - db=None, - port=0): - ''' - Function to connect to server - - @rtype: c_void_p, TDengine handle - ''' - # host - try: - _host = ctypes.c_char_p(host.encode( - "utf-8")) if host is not None else ctypes.c_char_p(None) - except AttributeError: - raise AttributeError("host is expected as a str") - - # user - try: - _user = 
ctypes.c_char_p(user.encode("utf-8")) - except AttributeError: - raise AttributeError("user is expected as a str") - - # password - try: - _password = ctypes.c_char_p(password.encode("utf-8")) - except AttributeError: - raise AttributeError("password is expected as a str") - - # db - try: - _db = ctypes.c_char_p( - db.encode("utf-8")) if db is not None else ctypes.c_char_p(None) - except AttributeError: - raise AttributeError("db is expected as a str") - - # port - try: - _port = ctypes.c_int(port) - except TypeError: - raise TypeError("port is expected as an int") - - connection = ctypes.c_void_p(CTaosInterface.libtaos.taos_connect( - _host, _user, _password, _db, _port)) - - if connection.value is None: - print('connect to TDengine failed') - raise ConnectionError("connect to TDengine failed") - # sys.exit(1) - # else: - # print('connect to TDengine success') - - return connection - - @staticmethod - def close(connection): - '''Close the TDengine handle - ''' - CTaosInterface.libtaos.taos_close(connection) - #print('connection is closed') - - @staticmethod - def query(connection, sql): - '''Run SQL - - @sql: str, sql string to run - - @rtype: 0 on success and -1 on failure - ''' - try: - return CTaosInterface.libtaos.taos_query( - connection, ctypes.c_char_p(sql.encode('utf-8'))) - except AttributeError: - raise AttributeError("sql is expected as a string") - # finally: - # CTaosInterface.libtaos.close(connection) - - @staticmethod - def affectedRows(result): - """The affected rows after runing query - """ - return CTaosInterface.libtaos.taos_affected_rows(result) - - @staticmethod - def subscribe(connection, restart, topic, sql, interval): - """Create a subscription - @restart boolean, - @sql string, sql statement for data query, must be a 'select' statement. 
- @topic string, name of this subscription - """ - return ctypes.c_void_p(CTaosInterface.libtaos.taos_subscribe( - connection, - 1 if restart else 0, - ctypes.c_char_p(topic.encode('utf-8')), - ctypes.c_char_p(sql.encode('utf-8')), - None, - None, - interval)) - - @staticmethod - def consume(sub): - """Consume data of a subscription - """ - result = ctypes.c_void_p(CTaosInterface.libtaos.taos_consume(sub)) - fields = [] - pfields = CTaosInterface.fetchFields(result) - for i in range(CTaosInterface.libtaos.taos_num_fields(result)): - fields.append({'name': pfields[i].name.decode('utf-8'), - 'bytes': pfields[i].bytes, - 'type': ord(pfields[i].type)}) - return result, fields - - @staticmethod - def unsubscribe(sub, keepProgress): - """Cancel a subscription - """ - CTaosInterface.libtaos.taos_unsubscribe(sub, 1 if keepProgress else 0) - - @staticmethod - def useResult(result): - '''Use result after calling self.query - ''' - fields = [] - pfields = CTaosInterface.fetchFields(result) - for i in range(CTaosInterface.fieldsCount(result)): - fields.append({'name': pfields[i].name.decode('utf-8'), - 'bytes': pfields[i].bytes, - 'type': ord(pfields[i].type)}) - - return fields - - @staticmethod - def fetchBlock(result, fields): - pblock = ctypes.c_void_p(0) - num_of_rows = CTaosInterface.libtaos.taos_fetch_block( - result, ctypes.byref(pblock)) - if num_of_rows == 0: - return None, 0 - isMicro = (CTaosInterface.libtaos.taos_result_precision( - result) == FieldType.C_TIMESTAMP_MICRO) - blocks = [None] * len(fields) - fieldL = CTaosInterface.libtaos.taos_fetch_lengths(result) - fieldLen = [ - ele for ele in ctypes.cast( - fieldL, ctypes.POINTER( - ctypes.c_int))[ - :len(fields)]] - for i in range(len(fields)): - data = ctypes.cast(pblock, ctypes.POINTER(ctypes.c_void_p))[i] - if fields[i]['type'] not in _CONVERT_FUNC_BLOCK: - raise DatabaseError("Invalid data type returned from database") - blocks[i] = _CONVERT_FUNC_BLOCK[fields[i]['type']]( - data, num_of_rows, fieldLen[i], isMicro) - - return blocks, abs(num_of_rows) - - @staticmethod - def fetchRow(result, fields): - pblock = ctypes.c_void_p(0) - pblock = CTaosInterface.libtaos.taos_fetch_row(result) - if pblock: - num_of_rows = 1 - isMicro = (CTaosInterface.libtaos.taos_result_precision( - result) == FieldType.C_TIMESTAMP_MICRO) - blocks = [None] * len(fields) - fieldL = CTaosInterface.libtaos.taos_fetch_lengths(result) - fieldLen = [ - ele for ele in ctypes.cast( - fieldL, ctypes.POINTER( - ctypes.c_int))[ - :len(fields)]] - for i in range(len(fields)): - data = ctypes.cast(pblock, ctypes.POINTER(ctypes.c_void_p))[i] - if fields[i]['type'] not in _CONVERT_FUNC: - raise DatabaseError( - "Invalid data type returned from database") - if data is None: - blocks[i] = [None] - else: - blocks[i] = _CONVERT_FUNC[fields[i]['type']]( - data, num_of_rows, fieldLen[i], isMicro) - else: - return None, 0 - return blocks, abs(num_of_rows) - - @staticmethod - def freeResult(result): - CTaosInterface.libtaos.taos_free_result(result) - result.value = None - - @staticmethod - def fieldsCount(result): - return CTaosInterface.libtaos.taos_field_count(result) - - @staticmethod - def fetchFields(result): - return CTaosInterface.libtaos.taos_fetch_fields(result) - - # @staticmethod - # def fetchRow(result, fields): - # l = [] - # row = CTaosInterface.libtaos.taos_fetch_row(result) - # if not row: - # return None - - # for i in range(len(fields)): - # l.append(CTaosInterface.getDataValue( - # row[i], fields[i]['type'], fields[i]['bytes'])) - - # return tuple(l) - - # 
@staticmethod - # def getDataValue(data, dtype, byte): - # ''' - # ''' - # if not data: - # return None - - # if (dtype == CTaosInterface.TSDB_DATA_TYPE_BOOL): - # return ctypes.cast(data, ctypes.POINTER(ctypes.c_bool))[0] - # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_TINYINT): - # return ctypes.cast(data, ctypes.POINTER(ctypes.c_byte))[0] - # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_SMALLINT): - # return ctypes.cast(data, ctypes.POINTER(ctypes.c_short))[0] - # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_INT): - # return ctypes.cast(data, ctypes.POINTER(ctypes.c_int))[0] - # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_BIGINT): - # return ctypes.cast(data, ctypes.POINTER(ctypes.c_int64))[0] - # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_FLOAT): - # return ctypes.cast(data, ctypes.POINTER(ctypes.c_float))[0] - # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_DOUBLE): - # return ctypes.cast(data, ctypes.POINTER(ctypes.c_double))[0] - # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_BINARY): - # return (ctypes.cast(data, ctypes.POINTER(ctypes.c_char))[0:byte]).rstrip('\x00') - # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_TIMESTAMP): - # return ctypes.cast(data, ctypes.POINTER(ctypes.c_int64))[0] - # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_NCHAR): - # return (ctypes.cast(data, ctypes.c_char_p).value).rstrip('\x00') - - @staticmethod - def errno(result): - """Return the error number. - """ - return CTaosInterface.libtaos.taos_errno(result) - - @staticmethod - def errStr(result): - """Return the error styring - """ - return CTaosInterface.libtaos.taos_errstr(result).decode('utf-8') - - -if __name__ == '__main__': - cinter = CTaosInterface() - conn = cinter.connect() - result = cinter.query(conn, 'show databases') - - print('Query Affected rows: {}'.format(cinter.affectedRows(result))) - - fields = CTaosInterface.useResult(result) - - data, num_of_rows = CTaosInterface.fetchBlock(result, fields) - - print(data) - - cinter.freeResult(result) - cinter.close(conn) diff --git a/src/connector/python/osx/python3/taos/connection.py b/src/connector/python/osx/python3/taos/connection.py deleted file mode 100644 index f6c395342c..0000000000 --- a/src/connector/python/osx/python3/taos/connection.py +++ /dev/null @@ -1,95 +0,0 @@ -from .cursor import TDengineCursor -from .subscription import TDengineSubscription -from .cinterface import CTaosInterface - - -class TDengineConnection(object): - """ TDengine connection object - """ - - def __init__(self, *args, **kwargs): - self._conn = None - self._host = None - self._user = "root" - self._password = "taosdata" - self._database = None - self._port = 0 - self._config = None - self._chandle = None - - self.config(**kwargs) - - def config(self, **kwargs): - # host - if 'host' in kwargs: - self._host = kwargs['host'] - - # user - if 'user' in kwargs: - self._user = kwargs['user'] - - # password - if 'password' in kwargs: - self._password = kwargs['password'] - - # database - if 'database' in kwargs: - self._database = kwargs['database'] - - # port - if 'port' in kwargs: - self._port = kwargs['port'] - - # config - if 'config' in kwargs: - self._config = kwargs['config'] - - self._chandle = CTaosInterface(self._config) - self._conn = self._chandle.connect( - self._host, - self._user, - self._password, - self._database, - self._port) - - def close(self): - """Close current connection. - """ - return CTaosInterface.close(self._conn) - - def subscribe(self, restart, topic, sql, interval): - """Create a subscription. 
- """ - if self._conn is None: - return None - sub = CTaosInterface.subscribe( - self._conn, restart, topic, sql, interval) - return TDengineSubscription(sub) - - def cursor(self): - """Return a new Cursor object using the connection. - """ - return TDengineCursor(self) - - def commit(self): - """Commit any pending transaction to the database. - - Since TDengine do not support transactions, the implement is void functionality. - """ - pass - - def rollback(self): - """Void functionality - """ - pass - - def clear_result_set(self): - """Clear unused result set on this connection. - """ - pass - - -if __name__ == "__main__": - conn = TDengineConnection(host='192.168.1.107') - conn.close() - print("Hello world") diff --git a/src/connector/python/osx/python3/taos/constants.py b/src/connector/python/osx/python3/taos/constants.py deleted file mode 100644 index 93466f5184..0000000000 --- a/src/connector/python/osx/python3/taos/constants.py +++ /dev/null @@ -1,42 +0,0 @@ -"""Constants in TDengine python -""" - -from .dbapi import * - - -class FieldType(object): - """TDengine Field Types - """ - # type_code - C_NULL = 0 - C_BOOL = 1 - C_TINYINT = 2 - C_SMALLINT = 3 - C_INT = 4 - C_BIGINT = 5 - C_FLOAT = 6 - C_DOUBLE = 7 - C_BINARY = 8 - C_TIMESTAMP = 9 - C_NCHAR = 10 - C_TINYINT_UNSIGNED = 11 - C_SMALLINT_UNSIGNED = 12 - C_INT_UNSIGNED = 13 - C_BIGINT_UNSIGNED = 14 - # NULL value definition - # NOTE: These values should change according to C definition in tsdb.h - C_BOOL_NULL = 0x02 - C_TINYINT_NULL = -128 - C_TINYINT_UNSIGNED_NULL = 255 - C_SMALLINT_NULL = -32768 - C_SMALLINT_UNSIGNED_NULL = 65535 - C_INT_NULL = -2147483648 - C_INT_UNSIGNED_NULL = 4294967295 - C_BIGINT_NULL = -9223372036854775808 - C_BIGINT_UNSIGNED_NULL = 18446744073709551615 - C_FLOAT_NULL = float('nan') - C_DOUBLE_NULL = float('nan') - C_BINARY_NULL = bytearray([int('0xff', 16)]) - # Timestamp precision definition - C_TIMESTAMP_MILLI = 0 - C_TIMESTAMP_MICRO = 1 diff --git a/src/connector/python/osx/python3/taos/cursor.py b/src/connector/python/osx/python3/taos/cursor.py deleted file mode 100644 index 32dc0ea3c3..0000000000 --- a/src/connector/python/osx/python3/taos/cursor.py +++ /dev/null @@ -1,280 +0,0 @@ -from .cinterface import CTaosInterface -from .error import * -from .constants import FieldType - -# querySeqNum = 0 - - -class TDengineCursor(object): - """Database cursor which is used to manage the context of a fetch operation. - - Attributes: - .description: Read-only attribute consists of 7-item sequences: - - > name (mondatory) - > type_code (mondatory) - > display_size - > internal_size - > precision - > scale - > null_ok - - This attribute will be None for operations that do not return rows or - if the cursor has not had an operation invoked via the .execute*() method yet. 
- - .rowcount:This read-only attribute specifies the number of rows that the last - .execute*() produced (for DQL statements like SELECT) or affected - """ - - def __init__(self, connection=None): - self._description = [] - self._rowcount = -1 - self._connection = None - self._result = None - self._fields = None - self._block = None - self._block_rows = -1 - self._block_iter = 0 - self._affected_rows = 0 - self._logfile = "" - - if connection is not None: - self._connection = connection - - def __iter__(self): - return self - - def __next__(self): - if self._result is None or self._fields is None: - raise OperationalError("Invalid use of fetch iterator") - - if self._block_rows <= self._block_iter: - block, self._block_rows = CTaosInterface.fetchRow( - self._result, self._fields) - if self._block_rows == 0: - raise StopIteration - self._block = list(map(tuple, zip(*block))) - self._block_iter = 0 - - data = self._block[self._block_iter] - self._block_iter += 1 - - return data - - @property - def description(self): - """Return the description of the object. - """ - return self._description - - @property - def rowcount(self): - """Return the rowcount of the object - """ - return self._rowcount - - @property - def affected_rows(self): - """Return the rowcount of insertion - """ - return self._affected_rows - - def callproc(self, procname, *args): - """Call a stored database procedure with the given name. - - Void functionality since no stored procedures. - """ - pass - - def log(self, logfile): - self._logfile = logfile - - def close(self): - """Close the cursor. - """ - if self._connection is None: - return False - - self._reset_result() - self._connection = None - - return True - - def execute(self, operation, params=None): - """Prepare and execute a database operation (query or command). - """ - if not operation: - return None - - if not self._connection: - # TODO : change the exception raised here - raise ProgrammingError("Cursor is not connected") - - self._reset_result() - - stmt = operation - if params is not None: - pass - - # global querySeqNum - # querySeqNum += 1 - # localSeqNum = querySeqNum # avoid raice condition - # print(" >> Exec Query ({}): {}".format(localSeqNum, str(stmt))) - self._result = CTaosInterface.query(self._connection._conn, stmt) - # print(" << Query ({}) Exec Done".format(localSeqNum)) - if (self._logfile): - with open(self._logfile, "a") as logfile: - logfile.write("%s;\n" % operation) - - errno = CTaosInterface.libtaos.taos_errno(self._result) - if errno == 0: - if CTaosInterface.fieldsCount(self._result) == 0: - self._affected_rows += CTaosInterface.affectedRows( - self._result) - return CTaosInterface.affectedRows(self._result) - else: - self._fields = CTaosInterface.useResult( - self._result) - return self._handle_result() - else: - raise ProgrammingError( - CTaosInterface.errStr( - self._result), errno) - - def executemany(self, operation, seq_of_parameters): - """Prepare a database operation (query or command) and then execute it against all parameter sequences or mappings found in the sequence seq_of_parameters. - """ - pass - - def fetchone(self): - """Fetch the next row of a query result set, returning a single sequence, or None when no more data is available. 
- """ - pass - - def fetchmany(self): - pass - - def istype(self, col, dataType): - if (dataType.upper() == "BOOL"): - if (self._description[col][1] == FieldType.C_BOOL): - return True - if (dataType.upper() == "TINYINT"): - if (self._description[col][1] == FieldType.C_TINYINT): - return True - if (dataType.upper() == "TINYINT UNSIGNED"): - if (self._description[col][1] == FieldType.C_TINYINT_UNSIGNED): - return True - if (dataType.upper() == "SMALLINT"): - if (self._description[col][1] == FieldType.C_SMALLINT): - return True - if (dataType.upper() == "SMALLINT UNSIGNED"): - if (self._description[col][1] == FieldType.C_SMALLINT_UNSIGNED): - return True - if (dataType.upper() == "INT"): - if (self._description[col][1] == FieldType.C_INT): - return True - if (dataType.upper() == "INT UNSIGNED"): - if (self._description[col][1] == FieldType.C_INT_UNSIGNED): - return True - if (dataType.upper() == "BIGINT"): - if (self._description[col][1] == FieldType.C_BIGINT): - return True - if (dataType.upper() == "BIGINT UNSIGNED"): - if (self._description[col][1] == FieldType.C_BIGINT_UNSIGNED): - return True - if (dataType.upper() == "FLOAT"): - if (self._description[col][1] == FieldType.C_FLOAT): - return True - if (dataType.upper() == "DOUBLE"): - if (self._description[col][1] == FieldType.C_DOUBLE): - return True - if (dataType.upper() == "BINARY"): - if (self._description[col][1] == FieldType.C_BINARY): - return True - if (dataType.upper() == "TIMESTAMP"): - if (self._description[col][1] == FieldType.C_TIMESTAMP): - return True - if (dataType.upper() == "NCHAR"): - if (self._description[col][1] == FieldType.C_NCHAR): - return True - - return False - - def fetchall_row(self): - """Fetch all (remaining) rows of a query result, returning them as a sequence of sequences (e.g. a list of tuples). Note that the cursor's arraysize attribute can affect the performance of this operation. - """ - if self._result is None or self._fields is None: - raise OperationalError("Invalid use of fetchall") - - buffer = [[] for i in range(len(self._fields))] - self._rowcount = 0 - while True: - block, num_of_fields = CTaosInterface.fetchRow( - self._result, self._fields) - errno = CTaosInterface.libtaos.taos_errno(self._result) - if errno != 0: - raise ProgrammingError( - CTaosInterface.errStr( - self._result), errno) - if num_of_fields == 0: - break - self._rowcount += num_of_fields - for i in range(len(self._fields)): - buffer[i].extend(block[i]) - return list(map(tuple, zip(*buffer))) - - def fetchall(self): - if self._result is None or self._fields is None: - raise OperationalError("Invalid use of fetchall") - - buffer = [[] for i in range(len(self._fields))] - self._rowcount = 0 - while True: - block, num_of_fields = CTaosInterface.fetchBlock( - self._result, self._fields) - errno = CTaosInterface.libtaos.taos_errno(self._result) - if errno != 0: - raise ProgrammingError( - CTaosInterface.errStr( - self._result), errno) - if num_of_fields == 0: - break - self._rowcount += num_of_fields - for i in range(len(self._fields)): - buffer[i].extend(block[i]) - return list(map(tuple, zip(*buffer))) - - def nextset(self): - """ - """ - pass - - def setinputsize(self, sizes): - pass - - def setutputsize(self, size, column=None): - pass - - def _reset_result(self): - """Reset the result to unused version. 
- """ - self._description = [] - self._rowcount = -1 - if self._result is not None: - CTaosInterface.freeResult(self._result) - self._result = None - self._fields = None - self._block = None - self._block_rows = -1 - self._block_iter = 0 - self._affected_rows = 0 - - def _handle_result(self): - """Handle the return result from query. - """ - self._description = [] - for ele in self._fields: - self._description.append( - (ele['name'], ele['type'], None, None, None, None, False)) - - return self._result diff --git a/src/connector/python/osx/python3/taos/dbapi.py b/src/connector/python/osx/python3/taos/dbapi.py deleted file mode 100644 index 594681ada9..0000000000 --- a/src/connector/python/osx/python3/taos/dbapi.py +++ /dev/null @@ -1,44 +0,0 @@ -"""Type Objects and Constructors. -""" - -import time -import datetime - - -class DBAPITypeObject(object): - def __init__(self, *values): - self.values = values - - def __com__(self, other): - if other in self.values: - return 0 - if other < self.values: - return 1 - else: - return -1 - - -Date = datetime.date -Time = datetime.time -Timestamp = datetime.datetime - - -def DataFromTicks(ticks): - return Date(*time.localtime(ticks)[:3]) - - -def TimeFromTicks(ticks): - return Time(*time.localtime(ticks)[3:6]) - - -def TimestampFromTicks(ticks): - return Timestamp(*time.localtime(ticks)[:6]) - - -Binary = bytes - -# STRING = DBAPITypeObject(*constants.FieldType.get_string_types()) -# BINARY = DBAPITypeObject(*constants.FieldType.get_binary_types()) -# NUMBER = BAPITypeObject(*constants.FieldType.get_number_types()) -# DATETIME = DBAPITypeObject(*constants.FieldType.get_timestamp_types()) -# ROWID = DBAPITypeObject() diff --git a/src/connector/python/osx/python3/taos/error.py b/src/connector/python/osx/python3/taos/error.py deleted file mode 100644 index c584badce8..0000000000 --- a/src/connector/python/osx/python3/taos/error.py +++ /dev/null @@ -1,66 +0,0 @@ -"""Python exceptions -""" - - -class Error(Exception): - def __init__(self, msg=None, errno=None): - self.msg = msg - self._full_msg = self.msg - self.errno = errno - - def __str__(self): - return self._full_msg - - -class Warning(Exception): - """Exception raised for important warnings like data truncations while inserting. - """ - pass - - -class InterfaceError(Error): - """Exception raised for errors that are related to the database interface rather than the database itself. - """ - pass - - -class DatabaseError(Error): - """Exception raised for errors that are related to the database. - """ - pass - - -class DataError(DatabaseError): - """Exception raised for errors that are due to problems with the processed data like division by zero, numeric value out of range. - """ - pass - - -class OperationalError(DatabaseError): - """Exception raised for errors that are related to the database's operation and not necessarily under the control of the programmer - """ - pass - - -class IntegrityError(DatabaseError): - """Exception raised when the relational integrity of the database is affected. - """ - pass - - -class InternalError(DatabaseError): - """Exception raised when the database encounters an internal error. - """ - pass - - -class ProgrammingError(DatabaseError): - """Exception raised for programming errors. - """ - pass - - -class NotSupportedError(DatabaseError): - """Exception raised in case a method or database API was used which is not supported by the database,. 
- """ - pass diff --git a/src/connector/python/osx/python3/taos/subscription.py b/src/connector/python/osx/python3/taos/subscription.py deleted file mode 100644 index 270d9de092..0000000000 --- a/src/connector/python/osx/python3/taos/subscription.py +++ /dev/null @@ -1,57 +0,0 @@ -from .cinterface import CTaosInterface -from .error import * - - -class TDengineSubscription(object): - """TDengine subscription object - """ - - def __init__(self, sub): - self._sub = sub - - def consume(self): - """Consume rows of a subscription - """ - if self._sub is None: - raise OperationalError("Invalid use of consume") - - result, fields = CTaosInterface.consume(self._sub) - buffer = [[] for i in range(len(fields))] - while True: - block, num_of_fields = CTaosInterface.fetchBlock(result, fields) - if num_of_fields == 0: - break - for i in range(len(fields)): - buffer[i].extend(block[i]) - - self.fields = fields - return list(map(tuple, zip(*buffer))) - - def close(self, keepProgress=True): - """Close the Subscription. - """ - if self._sub is None: - return False - - CTaosInterface.unsubscribe(self._sub, keepProgress) - return True - - -if __name__ == '__main__': - from .connection import TDengineConnection - conn = TDengineConnection( - host="127.0.0.1", - user="root", - password="taosdata", - database="test") - - # Generate a cursor object to run SQL commands - sub = conn.subscribe(True, "test", "select * from meters;", 1000) - - for i in range(0, 10): - data = sub.consume() - for d in data: - print(d) - - sub.close() - conn.close() diff --git a/src/connector/python/setup.py b/src/connector/python/setup.py new file mode 100644 index 0000000000..901e8396c0 --- /dev/null +++ b/src/connector/python/setup.py @@ -0,0 +1,34 @@ +import setuptools + +with open("README.md", "r") as fh: + long_description = fh.read() + +setuptools.setup( + name="taos", + version="2.0.10", + author="Taosdata Inc.", + author_email="support@taosdata.com", + description="TDengine python client package", + long_description=long_description, + long_description_content_type="text/markdown", + url="https://github.com/taosdata/TDengine/tree/develop/src/connector/python", + packages=setuptools.find_packages(), + classifiers=[ + "Environment :: Console", + "Environment :: MacOS X", + "Environment :: Win32 (MS Windows)", + "Programming Language :: Python", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.6", + "Programming Language :: Python :: 3.7", + "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3.9", + "License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)", + "Operating System :: MacOS", + "Programming Language :: Python :: 2.7", + "Operating System :: Linux", + "Operating System :: POSIX :: Linux", + "Operating System :: Microsoft :: Windows", + "Operating System :: Microsoft :: Windows :: Windows 10", + ], +) diff --git a/src/connector/python/linux/python2/taos/__init__.py b/src/connector/python/taos/__init__.py similarity index 100% rename from src/connector/python/linux/python2/taos/__init__.py rename to src/connector/python/taos/__init__.py diff --git a/src/connector/python/linux/python3/taos/cinterface.py b/src/connector/python/taos/cinterface.py similarity index 70% rename from src/connector/python/linux/python3/taos/cinterface.py rename to src/connector/python/taos/cinterface.py index 3d0ecd2901..b8824327b0 100644 --- a/src/connector/python/linux/python3/taos/cinterface.py +++ b/src/connector/python/taos/cinterface.py @@ -3,6 +3,7 @@ from 
.constants import FieldType from .error import * import math import datetime +import platform def _convert_millisecond_to_datetime(milli): @@ -20,46 +21,28 @@ def _crow_timestamp_to_python(data, num_of_rows, nbytes=None, micro=False): if micro: _timestamp_converter = _convert_microsecond_to_datetime - if num_of_rows > 0: - return [ - None if ele == FieldType.C_BIGINT_NULL else _timestamp_converter(ele) for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_int64))[ - :abs(num_of_rows)]] - else: - return [ - None if ele == FieldType.C_BIGINT_NULL else _timestamp_converter(ele) for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_int64))[ - :abs(num_of_rows)]] + return [ + None if ele == FieldType.C_BIGINT_NULL else _timestamp_converter(ele) for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_int64))[ + :abs(num_of_rows)]] def _crow_bool_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C bool row to python row """ - if num_of_rows > 0: - return [ - None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_byte))[ - :abs(num_of_rows)]] - else: - return [ - None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_bool))[ - :abs(num_of_rows)]] + return [ + None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_byte))[ + :abs(num_of_rows)]] def _crow_tinyint_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C tinyint row to python row """ - if num_of_rows > 0: - return [None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)]] - else: - return [None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)]] + return [None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)]] def _crow_tinyint_unsigned_to_python( @@ -69,92 +52,56 @@ def _crow_tinyint_unsigned_to_python( micro=False): """Function to convert C tinyint row to python row """ - if num_of_rows > 0: - return [ - None if ele == FieldType.C_TINYINT_UNSIGNED_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_ubyte))[ - :abs(num_of_rows)]] - else: - return [ - None if ele == FieldType.C_TINYINT_UNSIGNED_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_ubyte))[ - :abs(num_of_rows)]] + return [ + None if ele == FieldType.C_TINYINT_UNSIGNED_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_ubyte))[ + :abs(num_of_rows)]] def _crow_smallint_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C smallint row to python row """ - if num_of_rows > 0: - return [ - None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_short))[ - :abs(num_of_rows)]] - else: - return [ - None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_short))[ - :abs(num_of_rows)]] + return [ + None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_short))[ + :abs(num_of_rows)]] def _crow_smallint_unsigned_to_python( data, num_of_rows, nbytes=None, micro=False): """Function to convert C smallint row to python row """ - if num_of_rows > 0: - return [ - None if ele == 
FieldType.C_SMALLINT_UNSIGNED_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_ushort))[ - :abs(num_of_rows)]] - else: - return [ - None if ele == FieldType.C_SMALLINT_UNSIGNED_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_ushort))[ - :abs(num_of_rows)]] + return [ + None if ele == FieldType.C_SMALLINT_UNSIGNED_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_ushort))[ + :abs(num_of_rows)]] def _crow_int_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C int row to python row """ - if num_of_rows > 0: - return [None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)]] - else: - return [None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)]] + return [None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)]] def _crow_int_unsigned_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C int row to python row """ - if num_of_rows > 0: - return [ - None if ele == FieldType.C_INT_UNSIGNED_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_uint))[ - :abs(num_of_rows)]] - else: - return [ - None if ele == FieldType.C_INT_UNSIGNED_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_uint))[ - :abs(num_of_rows)]] + return [ + None if ele == FieldType.C_INT_UNSIGNED_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_uint))[ + :abs(num_of_rows)]] def _crow_bigint_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C bigint row to python row """ - if num_of_rows > 0: - return [None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]] - else: - return [None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]] + return [None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]] def _crow_bigint_unsigned_to_python( @@ -164,52 +111,33 @@ def _crow_bigint_unsigned_to_python( micro=False): """Function to convert C bigint row to python row """ - if num_of_rows > 0: - return [ - None if ele == FieldType.C_BIGINT_UNSIGNED_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_uint64))[ - :abs(num_of_rows)]] - else: - return [ - None if ele == FieldType.C_BIGINT_UNSIGNED_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_uint64))[ - :abs(num_of_rows)]] + return [ + None if ele == FieldType.C_BIGINT_UNSIGNED_NULL else ele for ele in ctypes.cast( + data, ctypes.POINTER( + ctypes.c_uint64))[ + :abs(num_of_rows)]] def _crow_float_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C float row to python row """ - if num_of_rows > 0: - return [None if math.isnan(ele) else ele for ele in ctypes.cast( - data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)]] - else: - return [None if math.isnan(ele) else ele for ele in ctypes.cast( - data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)]] + return [None if math.isnan(ele) else ele for ele in ctypes.cast( + data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)]] def _crow_double_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C 
double row to python row """ - if num_of_rows > 0: - return [None if math.isnan(ele) else ele for ele in ctypes.cast( - data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)]] - else: - return [None if math.isnan(ele) else ele for ele in ctypes.cast( - data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)]] + return [None if math.isnan(ele) else ele for ele in ctypes.cast( + data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)]] def _crow_binary_to_python(data, num_of_rows, nbytes=None, micro=False): """Function to convert C binary row to python row """ assert(nbytes is not None) - if num_of_rows > 0: - return [None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode( - 'utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)]] - else: - return [None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode( - 'utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)]] + return [None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode( + 'utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)]] def _crow_nchar_to_python(data, num_of_rows, nbytes=None, micro=False): @@ -236,30 +164,17 @@ def _crow_binary_to_python_block(data, num_of_rows, nbytes=None, micro=False): """ assert(nbytes is not None) res = [] - if num_of_rows > 0: - for i in range(abs(num_of_rows)): - try: - rbyte = ctypes.cast( - data + nbytes * i, - ctypes.POINTER( - ctypes.c_short))[ - :1].pop() - tmpstr = ctypes.c_char_p(data + nbytes * i + 2) - res.append(tmpstr.value.decode()[0:rbyte]) - except ValueError: - res.append(None) - else: - for i in range(abs(num_of_rows)): - try: - rbyte = ctypes.cast( - data + nbytes * i, - ctypes.POINTER( - ctypes.c_short))[ - :1].pop() - tmpstr = ctypes.c_char_p(data + nbytes * i + 2) - res.append(tmpstr.value.decode()[0:rbyte]) - except ValueError: - res.append(None) + for i in range(abs(num_of_rows)): + try: + rbyte = ctypes.cast( + data + nbytes * i, + ctypes.POINTER( + ctypes.c_short))[ + :1].pop() + tmpstr = ctypes.c_char_p(data + nbytes * i + 2) + res.append(tmpstr.value.decode()[0:rbyte]) + except ValueError: + res.append(None) return res @@ -268,20 +183,12 @@ def _crow_nchar_to_python_block(data, num_of_rows, nbytes=None, micro=False): """ assert(nbytes is not None) res = [] - if num_of_rows >= 0: - for i in range(abs(num_of_rows)): - try: - tmpstr = ctypes.c_char_p(data + nbytes * i + 2) - res.append(tmpstr.value.decode()) - except ValueError: - res.append(None) - else: - for i in range(abs(num_of_rows)): - try: - res.append((ctypes.cast(data + nbytes * i + 2, - ctypes.POINTER(ctypes.c_wchar * (nbytes // 4))))[0].value) - except ValueError: - res.append(None) + for i in range(abs(num_of_rows)): + try: + tmpstr = ctypes.c_char_p(data + nbytes * i + 2) + res.append(tmpstr.value.decode()) + except ValueError: + res.append(None) return res @@ -330,14 +237,38 @@ class TaosField(ctypes.Structure): # C interface class +def _load_taos_linux(): + return ctypes.CDLL('libtaos.so') + + +def _load_taos_darwin(): + return ctypes.cDLL('libtaos.dylib') + + +def _load_taos_windows(): + return ctypes.windll.LoadLibrary('taos') + + +def _load_taos(): + load_func = { + 'Linux': _load_taos_linux, + 'Darwin': _load_taos_darwin, + 'Windows': _load_taos_windows, + } + try: + return load_func[platform.system()]() + except: + sys.exit('unsupported platform to TDengine connector') + + class CTaosInterface(object): - libtaos = 
ctypes.CDLL('libtaos.so') + libtaos = _load_taos() libtaos.taos_fetch_fields.restype = ctypes.POINTER(TaosField) libtaos.taos_init.restype = None libtaos.taos_connect.restype = ctypes.c_void_p - #libtaos.taos_use_result.restype = ctypes.c_void_p + # libtaos.taos_use_result.restype = ctypes.c_void_p libtaos.taos_fetch_row.restype = ctypes.POINTER(ctypes.c_void_p) libtaos.taos_errstr.restype = ctypes.c_char_p libtaos.taos_subscribe.restype = ctypes.c_void_p @@ -438,7 +369,7 @@ class CTaosInterface(object): '''Close the TDengine handle ''' CTaosInterface.libtaos.taos_close(connection) - #print('connection is closed') + # print('connection is closed') @staticmethod def query(connection, sql): diff --git a/src/connector/python/linux/python2/taos/connection.py b/src/connector/python/taos/connection.py similarity index 100% rename from src/connector/python/linux/python2/taos/connection.py rename to src/connector/python/taos/connection.py diff --git a/src/connector/python/linux/python2/taos/constants.py b/src/connector/python/taos/constants.py similarity index 100% rename from src/connector/python/linux/python2/taos/constants.py rename to src/connector/python/taos/constants.py diff --git a/src/connector/python/linux/python3/taos/cursor.py b/src/connector/python/taos/cursor.py similarity index 98% rename from src/connector/python/linux/python3/taos/cursor.py rename to src/connector/python/taos/cursor.py index 32dc0ea3c3..d443ec95d0 100644 --- a/src/connector/python/linux/python3/taos/cursor.py +++ b/src/connector/python/taos/cursor.py @@ -45,6 +45,12 @@ class TDengineCursor(object): return self def __next__(self): + return self._taos_next() + + def next(self): + return self._taos_next() + + def _taos_next(self): if self._result is None or self._fields is None: raise OperationalError("Invalid use of fetch iterator") diff --git a/src/connector/python/linux/python2/taos/dbapi.py b/src/connector/python/taos/dbapi.py similarity index 100% rename from src/connector/python/linux/python2/taos/dbapi.py rename to src/connector/python/taos/dbapi.py diff --git a/src/connector/python/linux/python2/taos/error.py b/src/connector/python/taos/error.py similarity index 100% rename from src/connector/python/linux/python2/taos/error.py rename to src/connector/python/taos/error.py diff --git a/src/connector/python/linux/python2/taos/subscription.py b/src/connector/python/taos/subscription.py similarity index 100% rename from src/connector/python/linux/python2/taos/subscription.py rename to src/connector/python/taos/subscription.py diff --git a/src/connector/python/windows/python2/LICENSE b/src/connector/python/windows/python2/LICENSE deleted file mode 100644 index 79a9d73086..0000000000 --- a/src/connector/python/windows/python2/LICENSE +++ /dev/null @@ -1,12 +0,0 @@ - Copyright (c) 2019 TAOS Data, Inc. - -This program is free software: you can use, redistribute, and/or modify -it under the terms of the GNU Affero General Public License, version 3 -or later ("AGPL"), as published by the Free Software Foundation. - -This program is distributed in the hope that it will be useful, but WITHOUT -ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -FITNESS FOR A PARTICULAR PURPOSE. - -You should have received a copy of the GNU Affero General Public License -along with this program. If not, see . 
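For reference, the consolidated cinterface.py above replaces the hard-coded ctypes.CDLL('libtaos.so') with a _load_taos() helper that dispatches on platform.system(). A standalone sketch of that dispatch follows; note that ctypes.cDLL in the hunk above is a misspelling of ctypes.CDLL, and that sys must be imported for the sys.exit() fallback to work. The library names are the ones referenced in this patch.

    import ctypes
    import platform
    import sys

    def load_taos_client():
        # Return a ctypes handle to the TDengine client library for this platform.
        system = platform.system()
        if system == 'Linux':
            return ctypes.CDLL('libtaos.so')
        if system == 'Darwin':
            # capital C: ctypes.CDLL; ctypes.cDLL does not exist
            return ctypes.CDLL('libtaos.dylib')
        if system == 'Windows':
            return ctypes.windll.LoadLibrary('taos')
        sys.exit('unsupported platform for the TDengine connector')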
diff --git a/src/connector/python/windows/python2/README.md b/src/connector/python/windows/python2/README.md deleted file mode 100644 index 70db6bba13..0000000000 --- a/src/connector/python/windows/python2/README.md +++ /dev/null @@ -1 +0,0 @@ -# TDengine python client interface \ No newline at end of file diff --git a/src/connector/python/windows/python2/setup.py b/src/connector/python/windows/python2/setup.py deleted file mode 100644 index 24d75f937c..0000000000 --- a/src/connector/python/windows/python2/setup.py +++ /dev/null @@ -1,20 +0,0 @@ -import setuptools - -with open("README.md", "r") as fh: - long_description = fh.read() - -setuptools.setup( - name="taos", - version="2.0.9", - author="Taosdata Inc.", - author_email="support@taosdata.com", - description="TDengine python client package", - long_description=long_description, - long_description_content_type="text/markdown", - url="https://github.com/pypa/sampleproject", - packages=setuptools.find_packages(), - classifiers=[ - "Programming Language :: Python :: 2", - "Operating System :: Windows", - ], -) diff --git a/src/connector/python/windows/python2/taos/__init__.py b/src/connector/python/windows/python2/taos/__init__.py deleted file mode 100644 index 9732635738..0000000000 --- a/src/connector/python/windows/python2/taos/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ - -from .connection import TDengineConnection -from .cursor import TDengineCursor - -# Globals -threadsafety = 0 -paramstyle = 'pyformat' - -__all__ = ['connection', 'cursor'] - - -def connect(*args, **kwargs): - """ Function to return a TDengine connector object - - Current supporting keyword parameters: - @dsn: Data source name as string - @user: Username as string(optional) - @password: Password as string(optional) - @host: Hostname(optional) - @database: Database name(optional) - - @rtype: TDengineConnector - """ - return TDengineConnection(*args, **kwargs) diff --git a/src/connector/python/windows/python2/taos/cinterface.py b/src/connector/python/windows/python2/taos/cinterface.py deleted file mode 100644 index 65cb183f26..0000000000 --- a/src/connector/python/windows/python2/taos/cinterface.py +++ /dev/null @@ -1,648 +0,0 @@ -import ctypes -from .constants import FieldType -from .error import * -import math -import datetime - - -def _convert_millisecond_to_datetime(milli): - return datetime.datetime.fromtimestamp(milli / 1000.0) - - -def _convert_microsecond_to_datetime(micro): - return datetime.datetime.fromtimestamp(micro / 1000000.0) - - -def _crow_timestamp_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C bool row to python row - """ - _timestamp_converter = _convert_millisecond_to_datetime - if micro: - _timestamp_converter = _convert_microsecond_to_datetime - - if num_of_rows > 0: - return [ - None if ele == FieldType.C_BIGINT_NULL else _timestamp_converter(ele) for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_int64))[ - :abs(num_of_rows)]] - else: - return [ - None if ele == FieldType.C_BIGINT_NULL else _timestamp_converter(ele) for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_int64))[ - :abs(num_of_rows)]] - - -def _crow_bool_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C bool row to python row - """ - if num_of_rows > 0: - return [ - None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_byte))[ - :abs(num_of_rows)]] - else: - return [ - None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast( 
- data, ctypes.POINTER( - ctypes.c_bool))[ - :abs(num_of_rows)]] - - -def _crow_tinyint_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C tinyint row to python row - """ - if num_of_rows > 0: - return [None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)]] - else: - return [None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)]] - - -def _crow_tinyint_unsigned_to_python( - data, - num_of_rows, - nbytes=None, - micro=False): - """Function to convert C tinyint row to python row - """ - if num_of_rows > 0: - return [ - None if ele == FieldType.C_TINYINT_UNSIGNED_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_ubyte))[ - :abs(num_of_rows)]] - else: - return [ - None if ele == FieldType.C_TINYINT_UNSIGNED_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_ubyte))[ - :abs(num_of_rows)]] - - -def _crow_smallint_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C smallint row to python row - """ - if num_of_rows > 0: - return [ - None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_short))[ - :abs(num_of_rows)]] - else: - return [ - None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_short))[ - :abs(num_of_rows)]] - - -def _crow_smallint_unsigned_to_python( - data, num_of_rows, nbytes=None, micro=False): - """Function to convert C smallint row to python row - """ - if num_of_rows > 0: - return [ - None if ele == FieldType.C_SMALLINT_UNSIGNED_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_ushort))[ - :abs(num_of_rows)]] - else: - return [ - None if ele == FieldType.C_SMALLINT_UNSIGNED_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_ushort))[ - :abs(num_of_rows)]] - - -def _crow_int_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C int row to python row - """ - if num_of_rows > 0: - return [None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)]] - else: - return [None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)]] - - -def _crow_int_unsigned_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C int row to python row - """ - if num_of_rows > 0: - return [ - None if ele == FieldType.C_INT_UNSIGNED_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_uint))[ - :abs(num_of_rows)]] - else: - return [ - None if ele == FieldType.C_INT_UNSIGNED_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_uint))[ - :abs(num_of_rows)]] - - -def _crow_bigint_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C bigint row to python row - """ - if num_of_rows > 0: - return [None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]] - else: - return [None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]] - - -def _crow_bigint_unsigned_to_python( - data, - num_of_rows, - nbytes=None, - micro=False): - """Function to convert C bigint row to python row - """ - if num_of_rows > 0: - 
return [ - None if ele == FieldType.C_BIGINT_UNSIGNED_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_uint64))[ - :abs(num_of_rows)]] - else: - return [ - None if ele == FieldType.C_BIGINT_UNSIGNED_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_uint64))[ - :abs(num_of_rows)]] - - -def _crow_float_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C float row to python row - """ - if num_of_rows > 0: - return [None if math.isnan(ele) else ele for ele in ctypes.cast( - data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)]] - else: - return [None if math.isnan(ele) else ele for ele in ctypes.cast( - data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)]] - - -def _crow_double_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C double row to python row - """ - if num_of_rows > 0: - return [None if math.isnan(ele) else ele for ele in ctypes.cast( - data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)]] - else: - return [None if math.isnan(ele) else ele for ele in ctypes.cast( - data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)]] - - -def _crow_binary_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C binary row to python row - """ - assert(nbytes is not None) - if num_of_rows > 0: - return [None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode( - 'utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)]] - else: - return [None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode( - 'utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)]] - - -def _crow_nchar_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C nchar row to python row - """ - assert(nbytes is not None) - res = [] - for i in range(abs(num_of_rows)): - try: - if num_of_rows >= 0: - tmpstr = ctypes.c_char_p(data) - res.append(tmpstr.value.decode()) - else: - res.append((ctypes.cast(data + nbytes * i, - ctypes.POINTER(ctypes.c_wchar * (nbytes // 4))))[0].value) - except ValueError: - res.append(None) - - return res - - -def _crow_binary_to_python_block(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C binary row to python row - """ - assert(nbytes is not None) - res = [] - if num_of_rows > 0: - for i in range(abs(num_of_rows)): - try: - rbyte = ctypes.cast( - data + nbytes * i, - ctypes.POINTER( - ctypes.c_short))[ - :1].pop() - tmpstr = ctypes.c_char_p(data + nbytes * i + 2) - res.append(tmpstr.value.decode()[0:rbyte]) - except ValueError: - res.append(None) - else: - for i in range(abs(num_of_rows)): - try: - rbyte = ctypes.cast( - data + nbytes * i, - ctypes.POINTER( - ctypes.c_short))[ - :1].pop() - tmpstr = ctypes.c_char_p(data + nbytes * i + 2) - res.append(tmpstr.value.decode()[0:rbyte]) - except ValueError: - res.append(None) - return res - - -def _crow_nchar_to_python_block(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C nchar row to python row - """ - assert(nbytes is not None) - res = [] - if num_of_rows >= 0: - for i in range(abs(num_of_rows)): - try: - tmpstr = ctypes.c_char_p(data + nbytes * i + 2) - res.append(tmpstr.value.decode()) - except ValueError: - res.append(None) - else: - for i in range(abs(num_of_rows)): - try: - res.append((ctypes.cast(data + nbytes * i + 2, - ctypes.POINTER(ctypes.c_wchar * (nbytes // 4))))[0].value) - except ValueError: - res.append(None) - 
return res - - -_CONVERT_FUNC = { - FieldType.C_BOOL: _crow_bool_to_python, - FieldType.C_TINYINT: _crow_tinyint_to_python, - FieldType.C_SMALLINT: _crow_smallint_to_python, - FieldType.C_INT: _crow_int_to_python, - FieldType.C_BIGINT: _crow_bigint_to_python, - FieldType.C_FLOAT: _crow_float_to_python, - FieldType.C_DOUBLE: _crow_double_to_python, - FieldType.C_BINARY: _crow_binary_to_python, - FieldType.C_TIMESTAMP: _crow_timestamp_to_python, - FieldType.C_NCHAR: _crow_nchar_to_python, - FieldType.C_TINYINT_UNSIGNED: _crow_tinyint_unsigned_to_python, - FieldType.C_SMALLINT_UNSIGNED: _crow_smallint_unsigned_to_python, - FieldType.C_INT_UNSIGNED: _crow_int_unsigned_to_python, - FieldType.C_BIGINT_UNSIGNED: _crow_bigint_unsigned_to_python -} - -_CONVERT_FUNC_BLOCK = { - FieldType.C_BOOL: _crow_bool_to_python, - FieldType.C_TINYINT: _crow_tinyint_to_python, - FieldType.C_SMALLINT: _crow_smallint_to_python, - FieldType.C_INT: _crow_int_to_python, - FieldType.C_BIGINT: _crow_bigint_to_python, - FieldType.C_FLOAT: _crow_float_to_python, - FieldType.C_DOUBLE: _crow_double_to_python, - FieldType.C_BINARY: _crow_binary_to_python_block, - FieldType.C_TIMESTAMP: _crow_timestamp_to_python, - FieldType.C_NCHAR: _crow_nchar_to_python_block, - FieldType.C_TINYINT_UNSIGNED: _crow_tinyint_unsigned_to_python, - FieldType.C_SMALLINT_UNSIGNED: _crow_smallint_unsigned_to_python, - FieldType.C_INT_UNSIGNED: _crow_int_unsigned_to_python, - FieldType.C_BIGINT_UNSIGNED: _crow_bigint_unsigned_to_python -} - -# Corresponding TAOS_FIELD structure in C - - -class TaosField(ctypes.Structure): - _fields_ = [('name', ctypes.c_char * 65), - ('type', ctypes.c_char), - ('bytes', ctypes.c_short)] - -# C interface class - - -class CTaosInterface(object): - - libtaos = ctypes.windll.LoadLibrary('taos') - - libtaos.taos_fetch_fields.restype = ctypes.POINTER(TaosField) - libtaos.taos_init.restype = None - libtaos.taos_connect.restype = ctypes.c_void_p - #libtaos.taos_use_result.restype = ctypes.c_void_p - libtaos.taos_fetch_row.restype = ctypes.POINTER(ctypes.c_void_p) - libtaos.taos_errstr.restype = ctypes.c_char_p - libtaos.taos_subscribe.restype = ctypes.c_void_p - libtaos.taos_consume.restype = ctypes.c_void_p - libtaos.taos_fetch_lengths.restype = ctypes.c_void_p - libtaos.taos_free_result.restype = None - libtaos.taos_errno.restype = ctypes.c_int - libtaos.taos_query.restype = ctypes.POINTER(ctypes.c_void_p) - - def __init__(self, config=None): - ''' - Function to initialize the class - @host : str, hostname to connect - @user : str, username to connect to server - @password : str, password to connect to server - @db : str, default db to use when log in - @config : str, config directory - - @rtype : None - ''' - if config is None: - self._config = ctypes.c_char_p(None) - else: - try: - self._config = ctypes.c_char_p(config.encode('utf-8')) - except AttributeError: - raise AttributeError("config is expected as a str") - - if config is not None: - CTaosInterface.libtaos.taos_options(3, self._config) - - CTaosInterface.libtaos.taos_init() - - @property - def config(self): - """ Get current config - """ - return self._config - - def connect( - self, - host=None, - user="root", - password="taosdata", - db=None, - port=0): - ''' - Function to connect to server - - @rtype: c_void_p, TDengine handle - ''' - # host - try: - _host = ctypes.c_char_p(host.encode( - "utf-8")) if host is not None else ctypes.c_char_p(None) - except AttributeError: - raise AttributeError("host is expected as a str") - - # user - try: - _user = 
ctypes.c_char_p(user.encode("utf-8")) - except AttributeError: - raise AttributeError("user is expected as a str") - - # password - try: - _password = ctypes.c_char_p(password.encode("utf-8")) - except AttributeError: - raise AttributeError("password is expected as a str") - - # db - try: - _db = ctypes.c_char_p( - db.encode("utf-8")) if db is not None else ctypes.c_char_p(None) - except AttributeError: - raise AttributeError("db is expected as a str") - - # port - try: - _port = ctypes.c_int(port) - except TypeError: - raise TypeError("port is expected as an int") - - connection = ctypes.c_void_p(CTaosInterface.libtaos.taos_connect( - _host, _user, _password, _db, _port)) - - if connection.value is None: - print('connect to TDengine failed') - raise ConnectionError("connect to TDengine failed") - # sys.exit(1) - # else: - # print('connect to TDengine success') - - return connection - - @staticmethod - def close(connection): - '''Close the TDengine handle - ''' - CTaosInterface.libtaos.taos_close(connection) - #print('connection is closed') - - @staticmethod - def query(connection, sql): - '''Run SQL - - @sql: str, sql string to run - - @rtype: 0 on success and -1 on failure - ''' - try: - return CTaosInterface.libtaos.taos_query( - connection, ctypes.c_char_p(sql.encode('utf-8'))) - except AttributeError: - raise AttributeError("sql is expected as a string") - # finally: - # CTaosInterface.libtaos.close(connection) - - @staticmethod - def affectedRows(result): - """The affected rows after runing query - """ - return CTaosInterface.libtaos.taos_affected_rows(result) - - @staticmethod - def subscribe(connection, restart, topic, sql, interval): - """Create a subscription - @restart boolean, - @sql string, sql statement for data query, must be a 'select' statement. 
- @topic string, name of this subscription - """ - return ctypes.c_void_p(CTaosInterface.libtaos.taos_subscribe( - connection, - 1 if restart else 0, - ctypes.c_char_p(topic.encode('utf-8')), - ctypes.c_char_p(sql.encode('utf-8')), - None, - None, - interval)) - - @staticmethod - def consume(sub): - """Consume data of a subscription - """ - result = ctypes.c_void_p(CTaosInterface.libtaos.taos_consume(sub)) - fields = [] - pfields = CTaosInterface.fetchFields(result) - for i in range(CTaosInterface.libtaos.taos_num_fields(result)): - fields.append({'name': pfields[i].name.decode('utf-8'), - 'bytes': pfields[i].bytes, - 'type': ord(pfields[i].type)}) - return result, fields - - @staticmethod - def unsubscribe(sub, keepProgress): - """Cancel a subscription - """ - CTaosInterface.libtaos.taos_unsubscribe(sub, 1 if keepProgress else 0) - - @staticmethod - def useResult(result): - '''Use result after calling self.query - ''' - fields = [] - pfields = CTaosInterface.fetchFields(result) - for i in range(CTaosInterface.fieldsCount(result)): - fields.append({'name': pfields[i].name.decode('utf-8'), - 'bytes': pfields[i].bytes, - 'type': ord(pfields[i].type)}) - - return fields - - @staticmethod - def fetchBlock(result, fields): - pblock = ctypes.c_void_p(0) - num_of_rows = CTaosInterface.libtaos.taos_fetch_block( - result, ctypes.byref(pblock)) - if num_of_rows == 0: - return None, 0 - isMicro = (CTaosInterface.libtaos.taos_result_precision( - result) == FieldType.C_TIMESTAMP_MICRO) - blocks = [None] * len(fields) - fieldL = CTaosInterface.libtaos.taos_fetch_lengths(result) - fieldLen = [ - ele for ele in ctypes.cast( - fieldL, ctypes.POINTER( - ctypes.c_int))[ - :len(fields)]] - for i in range(len(fields)): - data = ctypes.cast(pblock, ctypes.POINTER(ctypes.c_void_p))[i] - if fields[i]['type'] not in _CONVERT_FUNC_BLOCK: - raise DatabaseError("Invalid data type returned from database") - blocks[i] = _CONVERT_FUNC_BLOCK[fields[i]['type']]( - data, num_of_rows, fieldLen[i], isMicro) - - return blocks, abs(num_of_rows) - - @staticmethod - def fetchRow(result, fields): - pblock = ctypes.c_void_p(0) - pblock = CTaosInterface.libtaos.taos_fetch_row(result) - if pblock: - num_of_rows = 1 - isMicro = (CTaosInterface.libtaos.taos_result_precision( - result) == FieldType.C_TIMESTAMP_MICRO) - blocks = [None] * len(fields) - fieldL = CTaosInterface.libtaos.taos_fetch_lengths(result) - fieldLen = [ - ele for ele in ctypes.cast( - fieldL, ctypes.POINTER( - ctypes.c_int))[ - :len(fields)]] - for i in range(len(fields)): - data = ctypes.cast(pblock, ctypes.POINTER(ctypes.c_void_p))[i] - if fields[i]['type'] not in _CONVERT_FUNC: - raise DatabaseError( - "Invalid data type returned from database") - if data is None: - blocks[i] = [None] - else: - blocks[i] = _CONVERT_FUNC[fields[i]['type']]( - data, num_of_rows, fieldLen[i], isMicro) - else: - return None, 0 - return blocks, abs(num_of_rows) - - @staticmethod - def freeResult(result): - CTaosInterface.libtaos.taos_free_result(result) - result.value = None - - @staticmethod - def fieldsCount(result): - return CTaosInterface.libtaos.taos_field_count(result) - - @staticmethod - def fetchFields(result): - return CTaosInterface.libtaos.taos_fetch_fields(result) - - # @staticmethod - # def fetchRow(result, fields): - # l = [] - # row = CTaosInterface.libtaos.taos_fetch_row(result) - # if not row: - # return None - - # for i in range(len(fields)): - # l.append(CTaosInterface.getDataValue( - # row[i], fields[i]['type'], fields[i]['bytes'])) - - # return tuple(l) - - # 
@staticmethod - # def getDataValue(data, dtype, byte): - # ''' - # ''' - # if not data: - # return None - - # if (dtype == CTaosInterface.TSDB_DATA_TYPE_BOOL): - # return ctypes.cast(data, ctypes.POINTER(ctypes.c_bool))[0] - # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_TINYINT): - # return ctypes.cast(data, ctypes.POINTER(ctypes.c_byte))[0] - # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_SMALLINT): - # return ctypes.cast(data, ctypes.POINTER(ctypes.c_short))[0] - # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_INT): - # return ctypes.cast(data, ctypes.POINTER(ctypes.c_int))[0] - # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_BIGINT): - # return ctypes.cast(data, ctypes.POINTER(ctypes.c_int64))[0] - # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_FLOAT): - # return ctypes.cast(data, ctypes.POINTER(ctypes.c_float))[0] - # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_DOUBLE): - # return ctypes.cast(data, ctypes.POINTER(ctypes.c_double))[0] - # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_BINARY): - # return (ctypes.cast(data, ctypes.POINTER(ctypes.c_char))[0:byte]).rstrip('\x00') - # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_TIMESTAMP): - # return ctypes.cast(data, ctypes.POINTER(ctypes.c_int64))[0] - # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_NCHAR): - # return (ctypes.cast(data, ctypes.c_char_p).value).rstrip('\x00') - - @staticmethod - def errno(result): - """Return the error number. - """ - return CTaosInterface.libtaos.taos_errno(result) - - @staticmethod - def errStr(result): - """Return the error styring - """ - return CTaosInterface.libtaos.taos_errstr(result).decode('utf-8') - - -if __name__ == '__main__': - cinter = CTaosInterface() - conn = cinter.connect() - result = cinter.query(conn, 'show databases') - - print('Query Affected rows: {}'.format(cinter.affectedRows(result))) - - fields = CTaosInterface.useResult(result) - - data, num_of_rows = CTaosInterface.fetchBlock(result, fields) - - print(data) - - cinter.freeResult(result) - cinter.close(conn) diff --git a/src/connector/python/windows/python2/taos/connection.py b/src/connector/python/windows/python2/taos/connection.py deleted file mode 100644 index 5729d01c6d..0000000000 --- a/src/connector/python/windows/python2/taos/connection.py +++ /dev/null @@ -1,96 +0,0 @@ -from .cursor import TDengineCursor -from .subscription import TDengineSubscription -from .cinterface import CTaosInterface - - -class TDengineConnection(object): - """ TDengine connection object - """ - - def __init__(self, *args, **kwargs): - self._conn = None - self._host = None - self._user = "root" - self._password = "taosdata" - self._database = None - self._port = 0 - self._config = None - self._chandle = None - - if len(kwargs) > 0: - self.config(**kwargs) - - def config(self, **kwargs): - # host - if 'host' in kwargs: - self._host = kwargs['host'] - - # user - if 'user' in kwargs: - self._user = kwargs['user'] - - # password - if 'password' in kwargs: - self._password = kwargs['password'] - - # database - if 'database' in kwargs: - self._database = kwargs['database'] - - # port - if 'port' in kwargs: - self._port = kwargs['port'] - - # config - if 'config' in kwargs: - self._config = kwargs['config'] - - self._chandle = CTaosInterface(self._config) - self._conn = self._chandle.connect( - self._host, - self._user, - self._password, - self._database, - self._port) - - def close(self): - """Close current connection. - """ - return CTaosInterface.close(self._conn) - - def subscribe(self, restart, topic, sql, interval): - """Create a subscription. 
- """ - if self._conn is None: - return None - sub = CTaosInterface.subscribe( - self._conn, restart, topic, sql, interval) - return TDengineSubscription(sub) - - def cursor(self): - """Return a new Cursor object using the connection. - """ - return TDengineCursor(self) - - def commit(self): - """Commit any pending transaction to the database. - - Since TDengine do not support transactions, the implement is void functionality. - """ - pass - - def rollback(self): - """Void functionality - """ - pass - - def clear_result_set(self): - """Clear unused result set on this connection. - """ - pass - - -if __name__ == "__main__": - conn = TDengineConnection(host='192.168.1.107') - conn.close() - print("Hello world") diff --git a/src/connector/python/windows/python2/taos/constants.py b/src/connector/python/windows/python2/taos/constants.py deleted file mode 100644 index 8a8011c3e3..0000000000 --- a/src/connector/python/windows/python2/taos/constants.py +++ /dev/null @@ -1,42 +0,0 @@ -"""Constants in TDengine python -""" - -from .dbapi import * - - -class FieldType(object): - """TDengine Field Types - """ - # type_code - C_NULL = 0 - C_BOOL = 1 - C_TINYINT = 2 - C_SMALLINT = 3 - C_INT = 4 - C_BIGINT = 5 - C_FLOAT = 6 - C_DOUBLE = 7 - C_BINARY = 8 - C_TIMESTAMP = 9 - C_NCHAR = 10 - C_TINYINT_UNSIGNED = 11 - C_SMALLINT_UNSIGNED = 12 - C_INT_UNSIGNED = 13 - C_BIGINT_UNSIGNED = 14 - # NULL value definition - # NOTE: These values should change according to C definition in tsdb.h - C_BOOL_NULL = 0x02 - C_TINYINT_NULL = -128 - C_TINYINT_UNSIGNED_NULL = 255 - C_SMALLINT_NULL = -32768 - C_SMALLINT_UNSIGNED_NULL = 65535 - C_INT_NULL = -2147483648 - C_INT_UNSIGNED_NULL = 4294967295 - C_BIGINT_NULL = -9223372036854775808 - C_BIGINT_UNSIGNED_NULL = 18446744073709551615 - C_FLOAT_NULL = float('nan') - C_DOUBLE_NULL = float('nan') - C_BINARY_NULL = bytearray([int('0xff', 16)]) - # Time precision definition - C_TIMESTAMP_MILLI = 0 - C_TIMESTAMP_MICRO = 1 diff --git a/src/connector/python/windows/python2/taos/cursor.py b/src/connector/python/windows/python2/taos/cursor.py deleted file mode 100644 index 5f4666b593..0000000000 --- a/src/connector/python/windows/python2/taos/cursor.py +++ /dev/null @@ -1,220 +0,0 @@ -from .cinterface import CTaosInterface -from .error import * -from .constants import FieldType - -# querySeqNum = 0 - - -class TDengineCursor(object): - """Database cursor which is used to manage the context of a fetch operation. - - Attributes: - .description: Read-only attribute consists of 7-item sequences: - - > name (mondatory) - > type_code (mondatory) - > display_size - > internal_size - > precision - > scale - > null_ok - - This attribute will be None for operations that do not return rows or - if the cursor has not had an operation invoked via the .execute*() method yet. 
- - .rowcount:This read-only attribute specifies the number of rows that the last - .execute*() produced (for DQL statements like SELECT) or affected - """ - - def __init__(self, connection=None): - self._description = [] - self._rowcount = -1 - self._connection = None - self._result = None - self._fields = None - self._block = None - self._block_rows = -1 - self._block_iter = 0 - self._affected_rows = 0 - self._logfile = "" - - if connection is not None: - self._connection = connection - - def __iter__(self): - return self - - def __next__(self): - if self._result is None or self._fields is None: - raise OperationalError("Invalid use of fetch iterator") - - if self._block_rows <= self._block_iter: - block, self._block_rows = CTaosInterface.fetchRow( - self._result, self._fields) - if self._block_rows == 0: - raise StopIteration - self._block = list(map(tuple, zip(*block))) - self._block_iter = 0 - - data = self._block[self._block_iter] - self._block_iter += 1 - - return data - - @property - def description(self): - """Return the description of the object. - """ - return self._description - - @property - def rowcount(self): - """Return the rowcount of the object - """ - return self._rowcount - - @property - def affected_rows(self): - """Return the affected_rows of the object - """ - return self._affected_rows - - def callproc(self, procname, *args): - """Call a stored database procedure with the given name. - - Void functionality since no stored procedures. - """ - pass - - def close(self): - """Close the cursor. - """ - if self._connection is None: - return False - - self._reset_result() - self._connection = None - - return True - - def execute(self, operation, params=None): - """Prepare and execute a database operation (query or command). - """ - if not operation: - return None - - if not self._connection: - # TODO : change the exception raised here - raise ProgrammingError("Cursor is not connected") - - self._reset_result() - - stmt = operation - if params is not None: - pass - - self._result = CTaosInterface.query(self._connection._conn, stmt) - errno = CTaosInterface.libtaos.taos_errno(self._result) - if errno == 0: - if CTaosInterface.fieldsCount(self._result) == 0: - self._affected_rows += CTaosInterface.affectedRows( - self._result) - return CTaosInterface.affectedRows(self._result) - else: - self._fields = CTaosInterface.useResult(self._result) - return self._handle_result() - else: - raise ProgrammingError(CTaosInterface.errStr(self._result), errno) - - def executemany(self, operation, seq_of_parameters): - """Prepare a database operation (query or command) and then execute it against all parameter sequences or mappings found in the sequence seq_of_parameters. - """ - pass - - def fetchone(self): - """Fetch the next row of a query result set, returning a single sequence, or None when no more data is available. - """ - pass - - def fetchmany(self): - pass - - def fetchall_row(self): - """Fetch all (remaining) rows of a query result, returning them as a sequence of sequences (e.g. a list of tuples). Note that the cursor's arraysize attribute can affect the performance of this operation. 
- """ - if self._result is None or self._fields is None: - raise OperationalError("Invalid use of fetchall") - - buffer = [[] for i in range(len(self._fields))] - self._rowcount = 0 - while True: - block, num_of_fields = CTaosInterface.fetchRow( - self._result, self._fields) - errno = CTaosInterface.libtaos.taos_errno(self._result) - if errno != 0: - raise ProgrammingError( - CTaosInterface.errStr( - self._result), errno) - if num_of_fields == 0: - break - self._rowcount += num_of_fields - for i in range(len(self._fields)): - buffer[i].extend(block[i]) - return list(map(tuple, zip(*buffer))) - - def fetchall(self): - if self._result is None or self._fields is None: - raise OperationalError("Invalid use of fetchall") - - buffer = [[] for i in range(len(self._fields))] - self._rowcount = 0 - while True: - block, num_of_fields = CTaosInterface.fetchBlock( - self._result, self._fields) - errno = CTaosInterface.libtaos.taos_errno(self._result) - if errno != 0: - raise ProgrammingError( - CTaosInterface.errStr( - self._result), errno) - if num_of_fields == 0: - break - self._rowcount += num_of_fields - for i in range(len(self._fields)): - buffer[i].extend(block[i]) - - return list(map(tuple, zip(*buffer))) - - def nextset(self): - """ - """ - pass - - def setinputsize(self, sizes): - pass - - def setutputsize(self, size, column=None): - pass - - def _reset_result(self): - """Reset the result to unused version. - """ - self._description = [] - self._rowcount = -1 - if self._result is not None: - CTaosInterface.freeResult(self._result) - self._result = None - self._fields = None - self._block = None - self._block_rows = -1 - self._block_iter = 0 - self._affected_rows = 0 - - def _handle_result(self): - """Handle the return result from query. - """ - self._description = [] - for ele in self._fields: - self._description.append( - (ele['name'], ele['type'], None, None, None, None, False)) - - return self._result diff --git a/src/connector/python/windows/python2/taos/dbapi.py b/src/connector/python/windows/python2/taos/dbapi.py deleted file mode 100644 index 594681ada9..0000000000 --- a/src/connector/python/windows/python2/taos/dbapi.py +++ /dev/null @@ -1,44 +0,0 @@ -"""Type Objects and Constructors. 
-""" - -import time -import datetime - - -class DBAPITypeObject(object): - def __init__(self, *values): - self.values = values - - def __com__(self, other): - if other in self.values: - return 0 - if other < self.values: - return 1 - else: - return -1 - - -Date = datetime.date -Time = datetime.time -Timestamp = datetime.datetime - - -def DataFromTicks(ticks): - return Date(*time.localtime(ticks)[:3]) - - -def TimeFromTicks(ticks): - return Time(*time.localtime(ticks)[3:6]) - - -def TimestampFromTicks(ticks): - return Timestamp(*time.localtime(ticks)[:6]) - - -Binary = bytes - -# STRING = DBAPITypeObject(*constants.FieldType.get_string_types()) -# BINARY = DBAPITypeObject(*constants.FieldType.get_binary_types()) -# NUMBER = BAPITypeObject(*constants.FieldType.get_number_types()) -# DATETIME = DBAPITypeObject(*constants.FieldType.get_timestamp_types()) -# ROWID = DBAPITypeObject() diff --git a/src/connector/python/windows/python2/taos/error.py b/src/connector/python/windows/python2/taos/error.py deleted file mode 100644 index c584badce8..0000000000 --- a/src/connector/python/windows/python2/taos/error.py +++ /dev/null @@ -1,66 +0,0 @@ -"""Python exceptions -""" - - -class Error(Exception): - def __init__(self, msg=None, errno=None): - self.msg = msg - self._full_msg = self.msg - self.errno = errno - - def __str__(self): - return self._full_msg - - -class Warning(Exception): - """Exception raised for important warnings like data truncations while inserting. - """ - pass - - -class InterfaceError(Error): - """Exception raised for errors that are related to the database interface rather than the database itself. - """ - pass - - -class DatabaseError(Error): - """Exception raised for errors that are related to the database. - """ - pass - - -class DataError(DatabaseError): - """Exception raised for errors that are due to problems with the processed data like division by zero, numeric value out of range. - """ - pass - - -class OperationalError(DatabaseError): - """Exception raised for errors that are related to the database's operation and not necessarily under the control of the programmer - """ - pass - - -class IntegrityError(DatabaseError): - """Exception raised when the relational integrity of the database is affected. - """ - pass - - -class InternalError(DatabaseError): - """Exception raised when the database encounters an internal error. - """ - pass - - -class ProgrammingError(DatabaseError): - """Exception raised for programming errors. - """ - pass - - -class NotSupportedError(DatabaseError): - """Exception raised in case a method or database API was used which is not supported by the database,. 
- """ - pass diff --git a/src/connector/python/windows/python2/taos/subscription.py b/src/connector/python/windows/python2/taos/subscription.py deleted file mode 100644 index 270d9de092..0000000000 --- a/src/connector/python/windows/python2/taos/subscription.py +++ /dev/null @@ -1,57 +0,0 @@ -from .cinterface import CTaosInterface -from .error import * - - -class TDengineSubscription(object): - """TDengine subscription object - """ - - def __init__(self, sub): - self._sub = sub - - def consume(self): - """Consume rows of a subscription - """ - if self._sub is None: - raise OperationalError("Invalid use of consume") - - result, fields = CTaosInterface.consume(self._sub) - buffer = [[] for i in range(len(fields))] - while True: - block, num_of_fields = CTaosInterface.fetchBlock(result, fields) - if num_of_fields == 0: - break - for i in range(len(fields)): - buffer[i].extend(block[i]) - - self.fields = fields - return list(map(tuple, zip(*buffer))) - - def close(self, keepProgress=True): - """Close the Subscription. - """ - if self._sub is None: - return False - - CTaosInterface.unsubscribe(self._sub, keepProgress) - return True - - -if __name__ == '__main__': - from .connection import TDengineConnection - conn = TDengineConnection( - host="127.0.0.1", - user="root", - password="taosdata", - database="test") - - # Generate a cursor object to run SQL commands - sub = conn.subscribe(True, "test", "select * from meters;", 1000) - - for i in range(0, 10): - data = sub.consume() - for d in data: - print(d) - - sub.close() - conn.close() diff --git a/src/connector/python/windows/python3/LICENSE b/src/connector/python/windows/python3/LICENSE deleted file mode 100644 index 2d032e65d8..0000000000 --- a/src/connector/python/windows/python3/LICENSE +++ /dev/null @@ -1,12 +0,0 @@ - Copyright (c) 2019 TAOS Data, Inc. - -This program is free software: you can use, redistribute, and/or modify -it under the terms of the GNU Affero General Public License, version 3 -or later ("AGPL"), as published by the Free Software Foundation. - -This program is distributed in the hope that it will be useful, but WITHOUT -ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -FITNESS FOR A PARTICULAR PURPOSE. - -You should have received a copy of the GNU Affero General Public License -along with this program. If not, see . 
diff --git a/src/connector/python/windows/python3/README.md b/src/connector/python/windows/python3/README.md deleted file mode 100644 index 70db6bba13..0000000000 --- a/src/connector/python/windows/python3/README.md +++ /dev/null @@ -1 +0,0 @@ -# TDengine python client interface \ No newline at end of file diff --git a/src/connector/python/windows/python3/setup.py b/src/connector/python/windows/python3/setup.py deleted file mode 100644 index 2659c493aa..0000000000 --- a/src/connector/python/windows/python3/setup.py +++ /dev/null @@ -1,20 +0,0 @@ -import setuptools - -with open("README.md", "r") as fh: - long_description = fh.read() - -setuptools.setup( - name="taos", - version="2.0.9", - author="Taosdata Inc.", - author_email="support@taosdata.com", - description="TDengine python client package", - long_description=long_description, - long_description_content_type="text/markdown", - url="https://github.com/pypa/sampleproject", - packages=setuptools.find_packages(), - classifiers=[ - "Programming Language :: Python :: 3", - "Operating System :: Windows", - ], -) diff --git a/src/connector/python/windows/python3/taos/__init__.py b/src/connector/python/windows/python3/taos/__init__.py deleted file mode 100644 index b57e25fd2c..0000000000 --- a/src/connector/python/windows/python3/taos/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ - -from .connection import TDengineConnection -from .cursor import TDengineCursor - -# Globals -threadsafety = 0 -paramstyle = 'pyformat' - -__all__ = ['connection', 'cursor'] - - -def connect(*args, **kwargs): - """ Function to return a TDengine connector object - - Current supporting keyword parameters: - @dsn: Data source name as string - @user: Username as string(optional) - @password: Password as string(optional) - @host: Hostname(optional) - @database: Database name(optional) - - @rtype: TDengineConnector - """ - return TDengineConnection(*args, **kwargs) diff --git a/src/connector/python/windows/python3/taos/cinterface.py b/src/connector/python/windows/python3/taos/cinterface.py deleted file mode 100644 index 65cb183f26..0000000000 --- a/src/connector/python/windows/python3/taos/cinterface.py +++ /dev/null @@ -1,648 +0,0 @@ -import ctypes -from .constants import FieldType -from .error import * -import math -import datetime - - -def _convert_millisecond_to_datetime(milli): - return datetime.datetime.fromtimestamp(milli / 1000.0) - - -def _convert_microsecond_to_datetime(micro): - return datetime.datetime.fromtimestamp(micro / 1000000.0) - - -def _crow_timestamp_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C bool row to python row - """ - _timestamp_converter = _convert_millisecond_to_datetime - if micro: - _timestamp_converter = _convert_microsecond_to_datetime - - if num_of_rows > 0: - return [ - None if ele == FieldType.C_BIGINT_NULL else _timestamp_converter(ele) for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_int64))[ - :abs(num_of_rows)]] - else: - return [ - None if ele == FieldType.C_BIGINT_NULL else _timestamp_converter(ele) for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_int64))[ - :abs(num_of_rows)]] - - -def _crow_bool_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C bool row to python row - """ - if num_of_rows > 0: - return [ - None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_byte))[ - :abs(num_of_rows)]] - else: - return [ - None if ele == FieldType.C_BOOL_NULL else bool(ele) for ele in ctypes.cast( 
- data, ctypes.POINTER( - ctypes.c_bool))[ - :abs(num_of_rows)]] - - -def _crow_tinyint_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C tinyint row to python row - """ - if num_of_rows > 0: - return [None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)]] - else: - return [None if ele == FieldType.C_TINYINT_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER(ctypes.c_byte))[:abs(num_of_rows)]] - - -def _crow_tinyint_unsigned_to_python( - data, - num_of_rows, - nbytes=None, - micro=False): - """Function to convert C tinyint row to python row - """ - if num_of_rows > 0: - return [ - None if ele == FieldType.C_TINYINT_UNSIGNED_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_ubyte))[ - :abs(num_of_rows)]] - else: - return [ - None if ele == FieldType.C_TINYINT_UNSIGNED_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_ubyte))[ - :abs(num_of_rows)]] - - -def _crow_smallint_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C smallint row to python row - """ - if num_of_rows > 0: - return [ - None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_short))[ - :abs(num_of_rows)]] - else: - return [ - None if ele == FieldType.C_SMALLINT_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_short))[ - :abs(num_of_rows)]] - - -def _crow_smallint_unsigned_to_python( - data, num_of_rows, nbytes=None, micro=False): - """Function to convert C smallint row to python row - """ - if num_of_rows > 0: - return [ - None if ele == FieldType.C_SMALLINT_UNSIGNED_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_ushort))[ - :abs(num_of_rows)]] - else: - return [ - None if ele == FieldType.C_SMALLINT_UNSIGNED_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_ushort))[ - :abs(num_of_rows)]] - - -def _crow_int_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C int row to python row - """ - if num_of_rows > 0: - return [None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)]] - else: - return [None if ele == FieldType.C_INT_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER(ctypes.c_int))[:abs(num_of_rows)]] - - -def _crow_int_unsigned_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C int row to python row - """ - if num_of_rows > 0: - return [ - None if ele == FieldType.C_INT_UNSIGNED_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_uint))[ - :abs(num_of_rows)]] - else: - return [ - None if ele == FieldType.C_INT_UNSIGNED_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_uint))[ - :abs(num_of_rows)]] - - -def _crow_bigint_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C bigint row to python row - """ - if num_of_rows > 0: - return [None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]] - else: - return [None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]] - - -def _crow_bigint_unsigned_to_python( - data, - num_of_rows, - nbytes=None, - micro=False): - """Function to convert C bigint row to python row - """ - if num_of_rows > 0: - 
return [ - None if ele == FieldType.C_BIGINT_UNSIGNED_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_uint64))[ - :abs(num_of_rows)]] - else: - return [ - None if ele == FieldType.C_BIGINT_UNSIGNED_NULL else ele for ele in ctypes.cast( - data, ctypes.POINTER( - ctypes.c_uint64))[ - :abs(num_of_rows)]] - - -def _crow_float_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C float row to python row - """ - if num_of_rows > 0: - return [None if math.isnan(ele) else ele for ele in ctypes.cast( - data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)]] - else: - return [None if math.isnan(ele) else ele for ele in ctypes.cast( - data, ctypes.POINTER(ctypes.c_float))[:abs(num_of_rows)]] - - -def _crow_double_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C double row to python row - """ - if num_of_rows > 0: - return [None if math.isnan(ele) else ele for ele in ctypes.cast( - data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)]] - else: - return [None if math.isnan(ele) else ele for ele in ctypes.cast( - data, ctypes.POINTER(ctypes.c_double))[:abs(num_of_rows)]] - - -def _crow_binary_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C binary row to python row - """ - assert(nbytes is not None) - if num_of_rows > 0: - return [None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode( - 'utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)]] - else: - return [None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode( - 'utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)]] - - -def _crow_nchar_to_python(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C nchar row to python row - """ - assert(nbytes is not None) - res = [] - for i in range(abs(num_of_rows)): - try: - if num_of_rows >= 0: - tmpstr = ctypes.c_char_p(data) - res.append(tmpstr.value.decode()) - else: - res.append((ctypes.cast(data + nbytes * i, - ctypes.POINTER(ctypes.c_wchar * (nbytes // 4))))[0].value) - except ValueError: - res.append(None) - - return res - - -def _crow_binary_to_python_block(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C binary row to python row - """ - assert(nbytes is not None) - res = [] - if num_of_rows > 0: - for i in range(abs(num_of_rows)): - try: - rbyte = ctypes.cast( - data + nbytes * i, - ctypes.POINTER( - ctypes.c_short))[ - :1].pop() - tmpstr = ctypes.c_char_p(data + nbytes * i + 2) - res.append(tmpstr.value.decode()[0:rbyte]) - except ValueError: - res.append(None) - else: - for i in range(abs(num_of_rows)): - try: - rbyte = ctypes.cast( - data + nbytes * i, - ctypes.POINTER( - ctypes.c_short))[ - :1].pop() - tmpstr = ctypes.c_char_p(data + nbytes * i + 2) - res.append(tmpstr.value.decode()[0:rbyte]) - except ValueError: - res.append(None) - return res - - -def _crow_nchar_to_python_block(data, num_of_rows, nbytes=None, micro=False): - """Function to convert C nchar row to python row - """ - assert(nbytes is not None) - res = [] - if num_of_rows >= 0: - for i in range(abs(num_of_rows)): - try: - tmpstr = ctypes.c_char_p(data + nbytes * i + 2) - res.append(tmpstr.value.decode()) - except ValueError: - res.append(None) - else: - for i in range(abs(num_of_rows)): - try: - res.append((ctypes.cast(data + nbytes * i + 2, - ctypes.POINTER(ctypes.c_wchar * (nbytes // 4))))[0].value) - except ValueError: - res.append(None) - 
return res - - -_CONVERT_FUNC = { - FieldType.C_BOOL: _crow_bool_to_python, - FieldType.C_TINYINT: _crow_tinyint_to_python, - FieldType.C_SMALLINT: _crow_smallint_to_python, - FieldType.C_INT: _crow_int_to_python, - FieldType.C_BIGINT: _crow_bigint_to_python, - FieldType.C_FLOAT: _crow_float_to_python, - FieldType.C_DOUBLE: _crow_double_to_python, - FieldType.C_BINARY: _crow_binary_to_python, - FieldType.C_TIMESTAMP: _crow_timestamp_to_python, - FieldType.C_NCHAR: _crow_nchar_to_python, - FieldType.C_TINYINT_UNSIGNED: _crow_tinyint_unsigned_to_python, - FieldType.C_SMALLINT_UNSIGNED: _crow_smallint_unsigned_to_python, - FieldType.C_INT_UNSIGNED: _crow_int_unsigned_to_python, - FieldType.C_BIGINT_UNSIGNED: _crow_bigint_unsigned_to_python -} - -_CONVERT_FUNC_BLOCK = { - FieldType.C_BOOL: _crow_bool_to_python, - FieldType.C_TINYINT: _crow_tinyint_to_python, - FieldType.C_SMALLINT: _crow_smallint_to_python, - FieldType.C_INT: _crow_int_to_python, - FieldType.C_BIGINT: _crow_bigint_to_python, - FieldType.C_FLOAT: _crow_float_to_python, - FieldType.C_DOUBLE: _crow_double_to_python, - FieldType.C_BINARY: _crow_binary_to_python_block, - FieldType.C_TIMESTAMP: _crow_timestamp_to_python, - FieldType.C_NCHAR: _crow_nchar_to_python_block, - FieldType.C_TINYINT_UNSIGNED: _crow_tinyint_unsigned_to_python, - FieldType.C_SMALLINT_UNSIGNED: _crow_smallint_unsigned_to_python, - FieldType.C_INT_UNSIGNED: _crow_int_unsigned_to_python, - FieldType.C_BIGINT_UNSIGNED: _crow_bigint_unsigned_to_python -} - -# Corresponding TAOS_FIELD structure in C - - -class TaosField(ctypes.Structure): - _fields_ = [('name', ctypes.c_char * 65), - ('type', ctypes.c_char), - ('bytes', ctypes.c_short)] - -# C interface class - - -class CTaosInterface(object): - - libtaos = ctypes.windll.LoadLibrary('taos') - - libtaos.taos_fetch_fields.restype = ctypes.POINTER(TaosField) - libtaos.taos_init.restype = None - libtaos.taos_connect.restype = ctypes.c_void_p - #libtaos.taos_use_result.restype = ctypes.c_void_p - libtaos.taos_fetch_row.restype = ctypes.POINTER(ctypes.c_void_p) - libtaos.taos_errstr.restype = ctypes.c_char_p - libtaos.taos_subscribe.restype = ctypes.c_void_p - libtaos.taos_consume.restype = ctypes.c_void_p - libtaos.taos_fetch_lengths.restype = ctypes.c_void_p - libtaos.taos_free_result.restype = None - libtaos.taos_errno.restype = ctypes.c_int - libtaos.taos_query.restype = ctypes.POINTER(ctypes.c_void_p) - - def __init__(self, config=None): - ''' - Function to initialize the class - @host : str, hostname to connect - @user : str, username to connect to server - @password : str, password to connect to server - @db : str, default db to use when log in - @config : str, config directory - - @rtype : None - ''' - if config is None: - self._config = ctypes.c_char_p(None) - else: - try: - self._config = ctypes.c_char_p(config.encode('utf-8')) - except AttributeError: - raise AttributeError("config is expected as a str") - - if config is not None: - CTaosInterface.libtaos.taos_options(3, self._config) - - CTaosInterface.libtaos.taos_init() - - @property - def config(self): - """ Get current config - """ - return self._config - - def connect( - self, - host=None, - user="root", - password="taosdata", - db=None, - port=0): - ''' - Function to connect to server - - @rtype: c_void_p, TDengine handle - ''' - # host - try: - _host = ctypes.c_char_p(host.encode( - "utf-8")) if host is not None else ctypes.c_char_p(None) - except AttributeError: - raise AttributeError("host is expected as a str") - - # user - try: - _user = 
ctypes.c_char_p(user.encode("utf-8")) - except AttributeError: - raise AttributeError("user is expected as a str") - - # password - try: - _password = ctypes.c_char_p(password.encode("utf-8")) - except AttributeError: - raise AttributeError("password is expected as a str") - - # db - try: - _db = ctypes.c_char_p( - db.encode("utf-8")) if db is not None else ctypes.c_char_p(None) - except AttributeError: - raise AttributeError("db is expected as a str") - - # port - try: - _port = ctypes.c_int(port) - except TypeError: - raise TypeError("port is expected as an int") - - connection = ctypes.c_void_p(CTaosInterface.libtaos.taos_connect( - _host, _user, _password, _db, _port)) - - if connection.value is None: - print('connect to TDengine failed') - raise ConnectionError("connect to TDengine failed") - # sys.exit(1) - # else: - # print('connect to TDengine success') - - return connection - - @staticmethod - def close(connection): - '''Close the TDengine handle - ''' - CTaosInterface.libtaos.taos_close(connection) - #print('connection is closed') - - @staticmethod - def query(connection, sql): - '''Run SQL - - @sql: str, sql string to run - - @rtype: 0 on success and -1 on failure - ''' - try: - return CTaosInterface.libtaos.taos_query( - connection, ctypes.c_char_p(sql.encode('utf-8'))) - except AttributeError: - raise AttributeError("sql is expected as a string") - # finally: - # CTaosInterface.libtaos.close(connection) - - @staticmethod - def affectedRows(result): - """The affected rows after runing query - """ - return CTaosInterface.libtaos.taos_affected_rows(result) - - @staticmethod - def subscribe(connection, restart, topic, sql, interval): - """Create a subscription - @restart boolean, - @sql string, sql statement for data query, must be a 'select' statement. 
- @topic string, name of this subscription - """ - return ctypes.c_void_p(CTaosInterface.libtaos.taos_subscribe( - connection, - 1 if restart else 0, - ctypes.c_char_p(topic.encode('utf-8')), - ctypes.c_char_p(sql.encode('utf-8')), - None, - None, - interval)) - - @staticmethod - def consume(sub): - """Consume data of a subscription - """ - result = ctypes.c_void_p(CTaosInterface.libtaos.taos_consume(sub)) - fields = [] - pfields = CTaosInterface.fetchFields(result) - for i in range(CTaosInterface.libtaos.taos_num_fields(result)): - fields.append({'name': pfields[i].name.decode('utf-8'), - 'bytes': pfields[i].bytes, - 'type': ord(pfields[i].type)}) - return result, fields - - @staticmethod - def unsubscribe(sub, keepProgress): - """Cancel a subscription - """ - CTaosInterface.libtaos.taos_unsubscribe(sub, 1 if keepProgress else 0) - - @staticmethod - def useResult(result): - '''Use result after calling self.query - ''' - fields = [] - pfields = CTaosInterface.fetchFields(result) - for i in range(CTaosInterface.fieldsCount(result)): - fields.append({'name': pfields[i].name.decode('utf-8'), - 'bytes': pfields[i].bytes, - 'type': ord(pfields[i].type)}) - - return fields - - @staticmethod - def fetchBlock(result, fields): - pblock = ctypes.c_void_p(0) - num_of_rows = CTaosInterface.libtaos.taos_fetch_block( - result, ctypes.byref(pblock)) - if num_of_rows == 0: - return None, 0 - isMicro = (CTaosInterface.libtaos.taos_result_precision( - result) == FieldType.C_TIMESTAMP_MICRO) - blocks = [None] * len(fields) - fieldL = CTaosInterface.libtaos.taos_fetch_lengths(result) - fieldLen = [ - ele for ele in ctypes.cast( - fieldL, ctypes.POINTER( - ctypes.c_int))[ - :len(fields)]] - for i in range(len(fields)): - data = ctypes.cast(pblock, ctypes.POINTER(ctypes.c_void_p))[i] - if fields[i]['type'] not in _CONVERT_FUNC_BLOCK: - raise DatabaseError("Invalid data type returned from database") - blocks[i] = _CONVERT_FUNC_BLOCK[fields[i]['type']]( - data, num_of_rows, fieldLen[i], isMicro) - - return blocks, abs(num_of_rows) - - @staticmethod - def fetchRow(result, fields): - pblock = ctypes.c_void_p(0) - pblock = CTaosInterface.libtaos.taos_fetch_row(result) - if pblock: - num_of_rows = 1 - isMicro = (CTaosInterface.libtaos.taos_result_precision( - result) == FieldType.C_TIMESTAMP_MICRO) - blocks = [None] * len(fields) - fieldL = CTaosInterface.libtaos.taos_fetch_lengths(result) - fieldLen = [ - ele for ele in ctypes.cast( - fieldL, ctypes.POINTER( - ctypes.c_int))[ - :len(fields)]] - for i in range(len(fields)): - data = ctypes.cast(pblock, ctypes.POINTER(ctypes.c_void_p))[i] - if fields[i]['type'] not in _CONVERT_FUNC: - raise DatabaseError( - "Invalid data type returned from database") - if data is None: - blocks[i] = [None] - else: - blocks[i] = _CONVERT_FUNC[fields[i]['type']]( - data, num_of_rows, fieldLen[i], isMicro) - else: - return None, 0 - return blocks, abs(num_of_rows) - - @staticmethod - def freeResult(result): - CTaosInterface.libtaos.taos_free_result(result) - result.value = None - - @staticmethod - def fieldsCount(result): - return CTaosInterface.libtaos.taos_field_count(result) - - @staticmethod - def fetchFields(result): - return CTaosInterface.libtaos.taos_fetch_fields(result) - - # @staticmethod - # def fetchRow(result, fields): - # l = [] - # row = CTaosInterface.libtaos.taos_fetch_row(result) - # if not row: - # return None - - # for i in range(len(fields)): - # l.append(CTaosInterface.getDataValue( - # row[i], fields[i]['type'], fields[i]['bytes'])) - - # return tuple(l) - - # 
@staticmethod - # def getDataValue(data, dtype, byte): - # ''' - # ''' - # if not data: - # return None - - # if (dtype == CTaosInterface.TSDB_DATA_TYPE_BOOL): - # return ctypes.cast(data, ctypes.POINTER(ctypes.c_bool))[0] - # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_TINYINT): - # return ctypes.cast(data, ctypes.POINTER(ctypes.c_byte))[0] - # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_SMALLINT): - # return ctypes.cast(data, ctypes.POINTER(ctypes.c_short))[0] - # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_INT): - # return ctypes.cast(data, ctypes.POINTER(ctypes.c_int))[0] - # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_BIGINT): - # return ctypes.cast(data, ctypes.POINTER(ctypes.c_int64))[0] - # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_FLOAT): - # return ctypes.cast(data, ctypes.POINTER(ctypes.c_float))[0] - # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_DOUBLE): - # return ctypes.cast(data, ctypes.POINTER(ctypes.c_double))[0] - # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_BINARY): - # return (ctypes.cast(data, ctypes.POINTER(ctypes.c_char))[0:byte]).rstrip('\x00') - # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_TIMESTAMP): - # return ctypes.cast(data, ctypes.POINTER(ctypes.c_int64))[0] - # elif (dtype == CTaosInterface.TSDB_DATA_TYPE_NCHAR): - # return (ctypes.cast(data, ctypes.c_char_p).value).rstrip('\x00') - - @staticmethod - def errno(result): - """Return the error number. - """ - return CTaosInterface.libtaos.taos_errno(result) - - @staticmethod - def errStr(result): - """Return the error styring - """ - return CTaosInterface.libtaos.taos_errstr(result).decode('utf-8') - - -if __name__ == '__main__': - cinter = CTaosInterface() - conn = cinter.connect() - result = cinter.query(conn, 'show databases') - - print('Query Affected rows: {}'.format(cinter.affectedRows(result))) - - fields = CTaosInterface.useResult(result) - - data, num_of_rows = CTaosInterface.fetchBlock(result, fields) - - print(data) - - cinter.freeResult(result) - cinter.close(conn) diff --git a/src/connector/python/windows/python3/taos/connection.py b/src/connector/python/windows/python3/taos/connection.py deleted file mode 100644 index 5729d01c6d..0000000000 --- a/src/connector/python/windows/python3/taos/connection.py +++ /dev/null @@ -1,96 +0,0 @@ -from .cursor import TDengineCursor -from .subscription import TDengineSubscription -from .cinterface import CTaosInterface - - -class TDengineConnection(object): - """ TDengine connection object - """ - - def __init__(self, *args, **kwargs): - self._conn = None - self._host = None - self._user = "root" - self._password = "taosdata" - self._database = None - self._port = 0 - self._config = None - self._chandle = None - - if len(kwargs) > 0: - self.config(**kwargs) - - def config(self, **kwargs): - # host - if 'host' in kwargs: - self._host = kwargs['host'] - - # user - if 'user' in kwargs: - self._user = kwargs['user'] - - # password - if 'password' in kwargs: - self._password = kwargs['password'] - - # database - if 'database' in kwargs: - self._database = kwargs['database'] - - # port - if 'port' in kwargs: - self._port = kwargs['port'] - - # config - if 'config' in kwargs: - self._config = kwargs['config'] - - self._chandle = CTaosInterface(self._config) - self._conn = self._chandle.connect( - self._host, - self._user, - self._password, - self._database, - self._port) - - def close(self): - """Close current connection. - """ - return CTaosInterface.close(self._conn) - - def subscribe(self, restart, topic, sql, interval): - """Create a subscription. 
- """ - if self._conn is None: - return None - sub = CTaosInterface.subscribe( - self._conn, restart, topic, sql, interval) - return TDengineSubscription(sub) - - def cursor(self): - """Return a new Cursor object using the connection. - """ - return TDengineCursor(self) - - def commit(self): - """Commit any pending transaction to the database. - - Since TDengine do not support transactions, the implement is void functionality. - """ - pass - - def rollback(self): - """Void functionality - """ - pass - - def clear_result_set(self): - """Clear unused result set on this connection. - """ - pass - - -if __name__ == "__main__": - conn = TDengineConnection(host='192.168.1.107') - conn.close() - print("Hello world") diff --git a/src/connector/python/windows/python3/taos/constants.py b/src/connector/python/windows/python3/taos/constants.py deleted file mode 100644 index 49fc17b2fb..0000000000 --- a/src/connector/python/windows/python3/taos/constants.py +++ /dev/null @@ -1,42 +0,0 @@ -"""Constants in TDengine python -""" - -from .dbapi import * - - -class FieldType(object): - """TDengine Field Types - """ - # type_code - C_NULL = 0 - C_BOOL = 1 - C_TINYINT = 2 - C_SMALLINT = 3 - C_INT = 4 - C_BIGINT = 5 - C_FLOAT = 6 - C_DOUBLE = 7 - C_BINARY = 8 - C_TIMESTAMP = 9 - C_NCHAR = 10 - C_TINYINT_UNSIGNED = 11 - C_SMALLINT_UNSIGNED = 12 - C_INT_UNSIGNED = 13 - C_BIGINT_UNSIGNED = 14 - # NULL value definition - # NOTE: These values should change according to C definition in tsdb.h - C_BOOL_NULL = 0x02 - C_TINYINT_NULL = -128 - C_TINYINT_UNSIGNED_NULL = 255 - C_SMALLINT_NULL = -32768 - C_SMALLINT_UNSIGNED_NULL = 65535 - C_INT_NULL = -2147483648 - C_INT_UNSIGNED_NULL = 4294967295 - C_BIGINT_NULL = -9223372036854775808 - C_BIGINT_UNSIGNED_NULL = 18446744073709551615 - C_FLOAT_NULL = float('nan') - C_DOUBLE_NULL = float('nan') - C_BINARY_NULL = bytearray([int('0xff', 16)]) - # Timestamp precision definition - C_TIMESTAMP_MILLI = 0 - C_TIMESTAMP_MICRO = 1 diff --git a/src/connector/python/windows/python3/taos/cursor.py b/src/connector/python/windows/python3/taos/cursor.py deleted file mode 100644 index 136cd42fe4..0000000000 --- a/src/connector/python/windows/python3/taos/cursor.py +++ /dev/null @@ -1,220 +0,0 @@ -from .cinterface import CTaosInterface -from .error import * -from .constants import FieldType - -# querySeqNum = 0 - - -class TDengineCursor(object): - """Database cursor which is used to manage the context of a fetch operation. - - Attributes: - .description: Read-only attribute consists of 7-item sequences: - - > name (mondatory) - > type_code (mondatory) - > display_size - > internal_size - > precision - > scale - > null_ok - - This attribute will be None for operations that do not return rows or - if the cursor has not had an operation invoked via the .execute*() method yet. 
- - .rowcount:This read-only attribute specifies the number of rows that the last - .execute*() produced (for DQL statements like SELECT) or affected - """ - - def __init__(self, connection=None): - self._description = [] - self._rowcount = -1 - self._connection = None - self._result = None - self._fields = None - self._block = None - self._block_rows = -1 - self._block_iter = 0 - self._affected_rows = 0 - self._logfile = "" - - if connection is not None: - self._connection = connection - - def __iter__(self): - return self - - def __next__(self): - if self._result is None or self._fields is None: - raise OperationalError("Invalid use of fetch iterator") - - if self._block_rows <= self._block_iter: - block, self._block_rows = CTaosInterface.fetchRow( - self._result, self._fields) - if self._block_rows == 0: - raise StopIteration - self._block = list(map(tuple, zip(*block))) - self._block_iter = 0 - - data = self._block[self._block_iter] - self._block_iter += 1 - - return data - - @property - def description(self): - """Return the description of the object. - """ - return self._description - - @property - def rowcount(self): - """Return the rowcount of the object - """ - return self._rowcount - - @property - def affected_rows(self): - """Return the affected_rows of the object - """ - return self._affected_rows - - def callproc(self, procname, *args): - """Call a stored database procedure with the given name. - - Void functionality since no stored procedures. - """ - pass - - def close(self): - """Close the cursor. - """ - if self._connection is None: - return False - - self._reset_result() - self._connection = None - - return True - - def execute(self, operation, params=None): - """Prepare and execute a database operation (query or command). - """ - if not operation: - return None - - if not self._connection: - # TODO : change the exception raised here - raise ProgrammingError("Cursor is not connected") - - self._reset_result() - - stmt = operation - if params is not None: - pass - - self._result = CTaosInterface.query(self._connection._conn, stmt) - errno = CTaosInterface.libtaos.taos_errno(self._result) - if errno == 0: - if CTaosInterface.fieldsCount(self._result) == 0: - self._affected_rows += CTaosInterface.affectedRows( - self._result) - return CTaosInterface.affectedRows(self._result) - else: - self._fields = CTaosInterface.useResult(self._result) - return self._handle_result() - else: - raise ProgrammingError(CTaosInterface.errStr(self._result), errno) - - def executemany(self, operation, seq_of_parameters): - """Prepare a database operation (query or command) and then execute it against all parameter sequences or mappings found in the sequence seq_of_parameters. - """ - pass - - def fetchone(self): - """Fetch the next row of a query result set, returning a single sequence, or None when no more data is available. - """ - pass - - def fetchmany(self): - pass - - def fetchall_row(self): - """Fetch all (remaining) rows of a query result, returning them as a sequence of sequences (e.g. a list of tuples). Note that the cursor's arraysize attribute can affect the performance of this operation. 
- """ - if self._result is None or self._fields is None: - raise OperationalError("Invalid use of fetchall") - - buffer = [[] for i in range(len(self._fields))] - self._rowcount = 0 - while True: - block, num_of_fields = CTaosInterface.fetchRow( - self._result, self._fields) - errno = CTaosInterface.libtaos.taos_errno(self._result) - if errno != 0: - raise ProgrammingError( - CTaosInterface.errStr( - self._result), errno) - if num_of_fields == 0: - break - self._rowcount += num_of_fields - for i in range(len(self._fields)): - buffer[i].extend(block[i]) - return list(map(tuple, zip(*buffer))) - - def fetchall(self): - if self._result is None or self._fields is None: - raise OperationalError("Invalid use of fetchall") - - buffer = [[] for i in range(len(self._fields))] - self._rowcount = 0 - while True: - block, num_of_fields = CTaosInterface.fetchBlock( - self._result, self._fields) - errno = CTaosInterface.libtaos.taos_errno(self._result) - if errno != 0: - raise ProgrammingError( - CTaosInterface.errStr( - self._result), errno) - if num_of_fields == 0: - break - self._rowcount += num_of_fields - for i in range(len(self._fields)): - buffer[i].extend(block[i]) - - return list(map(tuple, zip(*buffer))) - - def nextset(self): - """ - """ - pass - - def setinputsize(self, sizes): - pass - - def setutputsize(self, size, column=None): - pass - - def _reset_result(self): - """Reset the result to unused version. - """ - self._description = [] - self._rowcount = -1 - if self._result is not None: - CTaosInterface.freeResult(self._result) - self._result = None - self._fields = None - self._block = None - self._block_rows = -1 - self._block_iter = 0 - self._affected_rows = 0 - - def _handle_result(self): - """Handle the return result from query. - """ - self._description = [] - for ele in self._fields: - self._description.append( - (ele['name'], ele['type'], None, None, None, None, False)) - - return self._result diff --git a/src/connector/python/windows/python3/taos/dbapi.py b/src/connector/python/windows/python3/taos/dbapi.py deleted file mode 100644 index a29621f7a3..0000000000 --- a/src/connector/python/windows/python3/taos/dbapi.py +++ /dev/null @@ -1,44 +0,0 @@ -"""Type Objects and Constructors. 
-""" - -import time -import datetime - - -class DBAPITypeObject(object): - def __init__(self, *values): - self.values = values - - def __com__(self, other): - if other in self.values: - return 0 - if other < self.values: - return 1 - else: - return -1 - - -Date = datetime.date -Time = datetime.time -Timestamp = datetime.datetime - - -def DataFromTicks(ticks): - return Date(*time.localtime(ticks)[:3]) - - -def TimeFromTicks(ticks): - return Time(*time.localtime(ticks)[3:6]) - - -def TimestampFromTicks(ticks): - return Timestamp(*time.localtime(ticks)[:6]) - - -Binary = bytes - -# STRING = DBAPITypeObject(*constants.FieldType.get_string_types()) -# BINARY = DBAPITypeObject(*constants.FieldType.get_binary_types()) -# NUMBER = BAPITypeObject(*constants.FieldType.get_number_types()) -# DATETIME = DBAPITypeObject(*constants.FieldType.get_timestamp_types()) -# ROWID = DBAPITypeObject() diff --git a/src/connector/python/windows/python3/taos/error.py b/src/connector/python/windows/python3/taos/error.py deleted file mode 100644 index 238b293a0b..0000000000 --- a/src/connector/python/windows/python3/taos/error.py +++ /dev/null @@ -1,66 +0,0 @@ -"""Python exceptions -""" - - -class Error(Exception): - def __init__(self, msg=None, errno=None): - self.msg = msg - self._full_msg = self.msg - self.errno = errno - - def __str__(self): - return self._full_msg - - -class Warning(Exception): - """Exception raised for important warnings like data truncations while inserting. - """ - pass - - -class InterfaceError(Error): - """Exception raised for errors that are related to the database interface rather than the database itself. - """ - pass - - -class DatabaseError(Error): - """Exception raised for errors that are related to the database. - """ - pass - - -class DataError(DatabaseError): - """Exception raised for errors that are due to problems with the processed data like division by zero, numeric value out of range. - """ - pass - - -class OperationalError(DatabaseError): - """Exception raised for errors that are related to the database's operation and not necessarily under the control of the programmer - """ - pass - - -class IntegrityError(DatabaseError): - """Exception raised when the relational integrity of the database is affected. - """ - pass - - -class InternalError(DatabaseError): - """Exception raised when the database encounters an internal error. - """ - pass - - -class ProgrammingError(DatabaseError): - """Exception raised for programming errors. - """ - pass - - -class NotSupportedError(DatabaseError): - """Exception raised in case a method or database API was used which is not supported by the database,. 
- """ - pass diff --git a/src/connector/python/windows/python3/taos/subscription.py b/src/connector/python/windows/python3/taos/subscription.py deleted file mode 100644 index 270d9de092..0000000000 --- a/src/connector/python/windows/python3/taos/subscription.py +++ /dev/null @@ -1,57 +0,0 @@ -from .cinterface import CTaosInterface -from .error import * - - -class TDengineSubscription(object): - """TDengine subscription object - """ - - def __init__(self, sub): - self._sub = sub - - def consume(self): - """Consume rows of a subscription - """ - if self._sub is None: - raise OperationalError("Invalid use of consume") - - result, fields = CTaosInterface.consume(self._sub) - buffer = [[] for i in range(len(fields))] - while True: - block, num_of_fields = CTaosInterface.fetchBlock(result, fields) - if num_of_fields == 0: - break - for i in range(len(fields)): - buffer[i].extend(block[i]) - - self.fields = fields - return list(map(tuple, zip(*buffer))) - - def close(self, keepProgress=True): - """Close the Subscription. - """ - if self._sub is None: - return False - - CTaosInterface.unsubscribe(self._sub, keepProgress) - return True - - -if __name__ == '__main__': - from .connection import TDengineConnection - conn = TDengineConnection( - host="127.0.0.1", - user="root", - password="taosdata", - database="test") - - # Generate a cursor object to run SQL commands - sub = conn.subscribe(True, "test", "select * from meters;", 1000) - - for i in range(0, 10): - data = sub.consume() - for d in data: - print(d) - - sub.close() - conn.close() diff --git a/tests/How-To-Run-Test-And-How-To-Add-New-Test-Case.md b/tests/How-To-Run-Test-And-How-To-Add-New-Test-Case.md index b476c118a7..6845d091b5 100644 --- a/tests/How-To-Run-Test-And-How-To-Add-New-Test-Case.md +++ b/tests/How-To-Run-Test-And-How-To-Add-New-Test-Case.md @@ -8,8 +8,8 @@ 3. mkdir debug; cd debug; cmake ..; make ; sudo make install -4. pip install ../src/connector/python/linux/python2 ; pip3 install - ../src/connector/python/linux/python3 +4. pip install ../src/connector/python ; pip3 install + ../src/connector/python 5. pip install numpy; pip3 install numpy (numpy is required only if you need to run querySort.py) diff --git a/tests/Jenkinsfile b/tests/Jenkinsfile index e785c8e807..74cbc35208 100644 --- a/tests/Jenkinsfile +++ b/tests/Jenkinsfile @@ -21,7 +21,7 @@ def pre_test(){ cmake .. > /dev/null make > /dev/null make install > /dev/null - pip3 install ${WKC}/src/connector/python/linux/python3/ + pip3 install ${WKC}/src/connector/python ''' return 1 } diff --git a/tests/pytest/concurrent_inquiry.sh b/tests/pytest/concurrent_inquiry.sh index e5918792f4..6ac15fb46f 100755 --- a/tests/pytest/concurrent_inquiry.sh +++ b/tests/pytest/concurrent_inquiry.sh @@ -48,7 +48,7 @@ fi PYTHON_EXEC=python3.8 # First we need to set up a path for Python to find our own TAOS modules, so that "import" can work. -export PYTHONPATH=$(pwd)/../../src/connector/python/linux/python3:$(pwd) +export PYTHONPATH=$(pwd)/../../src/connector/python:$(pwd) # Then let us set up the library path so that our compiled SO file can be loaded by Python export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$LIB_DIR diff --git a/tests/pytest/crash_gen.sh b/tests/pytest/crash_gen.sh index 0af09634df..127e13c5be 100755 --- a/tests/pytest/crash_gen.sh +++ b/tests/pytest/crash_gen.sh @@ -48,7 +48,7 @@ fi PYTHON_EXEC=python3.8 # First we need to set up a path for Python to find our own TAOS modules, so that "import" can work. 
-export PYTHONPATH=$(pwd)/../../src/connector/python/linux/python3:$(pwd) +export PYTHONPATH=$(pwd)/../../src/connector/python:$(pwd) # Then let us set up the library path so that our compiled SO file can be loaded by Python export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$LIB_DIR diff --git a/tests/pytest/hivemq-extension-test.py b/tests/pytest/hivemq-extension-test.py index 3d0b1ef83f..9d293ea5ed 100644 --- a/tests/pytest/hivemq-extension-test.py +++ b/tests/pytest/hivemq-extension-test.py @@ -10,7 +10,7 @@ # ################################################################### # install pip -# pip install src/connector/python/linux/python2/ +# pip install src/connector/python/ import sys import os import os.path diff --git a/tests/pytest/perf_gen.sh b/tests/pytest/perf_gen.sh index fcedd2d407..d28b5422f8 100755 --- a/tests/pytest/perf_gen.sh +++ b/tests/pytest/perf_gen.sh @@ -48,7 +48,7 @@ fi PYTHON_EXEC=python3.8 # First we need to set up a path for Python to find our own TAOS modules, so that "import" can work. -export PYTHONPATH=$(pwd)/../../src/connector/python/linux/python3:$(pwd) +export PYTHONPATH=$(pwd)/../../src/connector/python:$(pwd) # Then let us set up the library path so that our compiled SO file can be loaded by Python export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$LIB_DIR diff --git a/tests/pytest/simpletest_no_sudo.sh b/tests/pytest/simpletest_no_sudo.sh index 61faf3df52..36edfc027f 100755 --- a/tests/pytest/simpletest_no_sudo.sh +++ b/tests/pytest/simpletest_no_sudo.sh @@ -4,7 +4,7 @@ # 2. No files are needed outside the development tree, everything is done in the local source code directory # First we need to set up a path for Python to find our own TAOS modules, so that "import" can work. -export PYTHONPATH=$(pwd)/../../src/connector/python/linux/python3 +export PYTHONPATH=$(pwd)/../../src/connector/python # Then let us set up the library path so that our compiled SO file can be loaded by Python export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$(pwd)/../../build/build/lib diff --git a/tests/pytest/test.py b/tests/pytest/test.py index c7781f2087..65abd3ef93 100644 --- a/tests/pytest/test.py +++ b/tests/pytest/test.py @@ -10,7 +10,7 @@ # ################################################################### # install pip -# pip install src/connector/python/linux/python2/ +# pip install src/connector/python/ # -*- coding: utf-8 -*- import sys diff --git a/tests/pytest/test.sh b/tests/pytest/test.sh index fbb9ba9879..4e74341f70 100755 --- a/tests/pytest/test.sh +++ b/tests/pytest/test.sh @@ -13,7 +13,7 @@ else fi TAOSD_DIR=`find $TAOS_DIR -name "taosd"|grep bin|head -n1` LIB_DIR=`echo $TAOSD_DIR|rev|cut -d '/' -f 3,4,5,6|rev`/lib -export PYTHONPATH=$(pwd)/../../src/connector/python/linux/python3 +export PYTHONPATH=$(pwd)/../../src/connector/python export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$LIB_DIR if [[ "$1" == *"test.py"* ]]; then diff --git a/tests/pytest/testCompress.py b/tests/pytest/testCompress.py index 0f5d9ef3b1..1ad032f05f 100644 --- a/tests/pytest/testCompress.py +++ b/tests/pytest/testCompress.py @@ -10,7 +10,7 @@ # ################################################################### # install pip -# pip install src/connector/python/linux/python2/ +# pip install src/connector/python/ # -*- coding: utf-8 -*- import sys diff --git a/tests/pytest/testMinTablesPerVnode.py b/tests/pytest/testMinTablesPerVnode.py index 91cea833e7..a111113c07 100644 --- a/tests/pytest/testMinTablesPerVnode.py +++ b/tests/pytest/testMinTablesPerVnode.py @@ -10,7 +10,7 @@ # 
################################################################### # install pip -# pip install src/connector/python/linux/python2/ +# pip install src/connector/python/ # -*- coding: utf-8 -*- import sys diff --git a/tests/pytest/testNoCompress.py b/tests/pytest/testNoCompress.py index e3b40b4426..d41055c755 100644 --- a/tests/pytest/testNoCompress.py +++ b/tests/pytest/testNoCompress.py @@ -10,7 +10,7 @@ # ################################################################### # install pip -# pip install src/connector/python/linux/python2/ +# pip install src/connector/python/ # -*- coding: utf-8 -*- import sys From f8f091b5c7025839541f6228c7a637ab92f43552 Mon Sep 17 00:00:00 2001 From: bryanchang0603 Date: Fri, 28 May 2021 10:12:46 +0800 Subject: [PATCH 20/82] [TD-4376] adding test case --- tests/pytest/fulltest.sh | 2 +- tests/pytest/tag_lite/drop_auto_create.py | 47 +++++++++++++++++++++++ 2 files changed, 48 insertions(+), 1 deletion(-) create mode 100644 tests/pytest/tag_lite/drop_auto_create.py diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh index d8e2a31e70..a7cbbf42c1 100755 --- a/tests/pytest/fulltest.sh +++ b/tests/pytest/fulltest.sh @@ -332,5 +332,5 @@ python3 ./test.py -f tag_lite/alter_tag.py python3 test.py -f tools/taosdemoAllTest/taosdemoTestInsertWithJson.py python3 test.py -f tools/taosdemoAllTest/taosdemoTestQueryWithJson.py - +python3 ./test.py -f tag_lite/drop_auto_create.py #======================p4-end=============== diff --git a/tests/pytest/tag_lite/drop_auto_create.py b/tests/pytest/tag_lite/drop_auto_create.py new file mode 100644 index 0000000000..86ec65914a --- /dev/null +++ b/tests/pytest/tag_lite/drop_auto_create.py @@ -0,0 +1,47 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +from util.log import * +from util.cases import * +from util.sql import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def run(self): + tdSql.prepare() + + tdSql.execute('create table m1(ts timestamp, k int) tags(a binary(12), b int, c double);') + tdSql.execute('insert into tm0 using m1(b,c) tags(1, 99) values(now, 1);') + tdSql.execute('insert into tm1 using m1(b,c) tags(2, 100) values(now, 2);') + tdLog.info("2 rows inserted") + tdSql.query('select * from m1;') + tdSql.checkRows(2) + tdSql.execute('select *,tbname from m1;') + tdSql.execute("drop table tm0; ") + tdSql.query('select * from m1') + tdSql.checkRows(1) + + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) From 6bd6001fe96ff80f12add4246fc4d31293f1731f Mon Sep 17 00:00:00 2001 From: bryanchang0603 Date: Fri, 28 May 2021 10:43:15 +0800 Subject: [PATCH 21/82] [TD-4376] modify execute to query for select --- tests/pytest/tag_lite/drop_auto_create.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/pytest/tag_lite/drop_auto_create.py b/tests/pytest/tag_lite/drop_auto_create.py index 86ec65914a..f89b41008b 100644 --- a/tests/pytest/tag_lite/drop_auto_create.py +++ b/tests/pytest/tag_lite/drop_auto_create.py @@ -31,7 +31,7 @@ class TDTestCase: tdLog.info("2 rows inserted") tdSql.query('select * from m1;') tdSql.checkRows(2) - tdSql.execute('select *,tbname from m1;') + tdSql.query('select *,tbname from m1;') tdSql.execute("drop table tm0; ") tdSql.query('select * from m1') tdSql.checkRows(1) From afb0d66d0368e0dc996d3f521192be1393406d8b Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Fri, 28 May 2021 10:47:06 +0800 Subject: [PATCH 22/82] [TD-4382]: taosdemo query result file is null. (#6259) * [TD-4382]: taosdemo query result file is null. 
* modify CI Co-authored-by: Shuduo Sang Co-authored-by: liuyq-617 --- Jenkinsfile | 2 +- src/kit/taosdemo/taosdemo.c | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index b48dca0241..6b6ef420e6 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -94,7 +94,7 @@ def pre_test(){ make > /dev/null make install > /dev/null cd ${WKC}/tests - pip3 install ${WKC}/src/connector/python + pip3 install ${WKC}/src/connector/python/ ''' return 1 } diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 29a1f7f1f9..3c528ab26f 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -6261,7 +6261,7 @@ static void *specifiedTableQuery(void *sarg) { uint64_t lastPrintTime = taosGetTimestampMs(); uint64_t startTs = taosGetTimestampMs(); - if (g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq] != NULL) { + if (g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq][0] != '\0') { sprintf(pThreadInfo->filePath, "%s-%d", g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq], pThreadInfo->threadID); @@ -6362,7 +6362,7 @@ static void *superTableQuery(void *sarg) { for (int j = 0; j < g_queryInfo.superQueryInfo.sqlCount; j++) { memset(sqlstr,0,sizeof(sqlstr)); replaceChildTblName(g_queryInfo.superQueryInfo.sql[j], sqlstr, i); - if (g_queryInfo.superQueryInfo.result[j] != NULL) { + if (g_queryInfo.superQueryInfo.result[j][0] != '\0') { sprintf(pThreadInfo->filePath, "%s-%d", g_queryInfo.superQueryInfo.result[j], pThreadInfo->threadID); @@ -6796,7 +6796,7 @@ static void *specifiedSubscribe(void *sarg) { "taosdemo-subscribe-%"PRIu64"-%d", pThreadInfo->querySeq, pThreadInfo->threadID); - if (g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq] != NULL) { + if (g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq][0] != '\0') { sprintf(pThreadInfo->filePath, "%s-%d", g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq], pThreadInfo->threadID); From 398397bcaaa082b004b0b359fcdbfe849e57b7de Mon Sep 17 00:00:00 2001 From: tickduan <417921451@qq.com> Date: Fri, 28 May 2021 11:10:20 +0800 Subject: [PATCH 23/82] change to asynchrous call mode with support last time query --- src/client/src/tscStream.c | 127 +++++++++++++++++++++---------------- src/cq/src/cqMain.c | 7 +- 2 files changed, 79 insertions(+), 55 deletions(-) diff --git a/src/client/src/tscStream.c b/src/client/src/tscStream.c index 9094f95dfc..7e6132b7c8 100644 --- a/src/client/src/tscStream.c +++ b/src/client/src/tscStream.c @@ -25,6 +25,7 @@ #include "tutil.h" #include "tscProfile.h" +#include "tscSubquery.h" static void tscProcessStreamQueryCallback(void *param, TAOS_RES *tres, int numOfRows); static void tscProcessStreamRetrieveResult(void *param, TAOS_RES *res, int numOfRows); @@ -538,31 +539,7 @@ static int64_t tscGetLaunchTimestamp(const SSqlStream *pStream) { return (pStream->precision == TSDB_TIME_PRECISION_MICRO) ? timer / 1000L : timer; } -///* -// -// get tableName last row time, if have error return zero. 
-// -static int64_t tscGetStreamTableLastTime(SSqlObj* pSql, SSqlStream* pStream, const char* tableName) { - int64_t last_time = 0; - char sql[128] = ""; - sprintf(sql, "select last_row(*) from %s;", tableName); - // query sql - TAOS_RES* res = taos_query(pSql->pTscObj, sql); - if(res == NULL) - return 0; - - // only fetch one row - TAOS_ROW row = taos_fetch_row(res); - if( row && row[0] ) { - last_time = *((int64_t*)row[0]); - } - - // free and return - taos_free_result(res); - return last_time; -} -//*/ static void tscCreateStream(void *param, TAOS_RES *res, int code) { SSqlStream* pStream = (SSqlStream*)param; SSqlObj* pSql = pStream->pSql; @@ -596,15 +573,12 @@ static void tscCreateStream(void *param, TAOS_RES *res, int code) { pStream->stime = tscGetStreamStartTimestamp(pSql, pStream, pStream->stime); - // set output table last record time to stime if have, why do this, because continue with last brea + // set stime with ltime if ltime > stime const char* dstTable = pStream->dstTable? pStream->dstTable: ""; - int64_t last_time = tscGetStreamTableLastTime(pSql, pStream, dstTable); - pStream->ltime = last_time; - tscDebug(" CQ get table=%s lasttime=%"PRId64" end.", dstTable, last_time); - if(last_time > 0 && last_time > pStream->stime) { - // can replace stime with last row time - tscDebug(" CQ set table %s stime=%"PRId64" with lasttime=%"PRId64" ", dstTable, pStream->stime, last_time); - pStream->stime = last_time; + tscDebug(" CQ table=%s ltime is %"PRId64, dstTable, pStream->ltime); + if(pStream->ltime > 0 && pStream->ltime > pStream->stime) { + tscWarn(" CQ set stream %s stime=%"PRId64" replace with ltime=%"PRId64" ", dstTable, pStream->stime, pStream->ltime); + pStream->stime = pStream->ltime; } int64_t starttime = tscGetLaunchTimestamp(pStream); @@ -622,25 +596,66 @@ void tscSetStreamDestTable(SSqlStream* pStream, const char* dstTable) { pStream->dstTable = dstTable; } -// already run on another thread -void tscCreateStreamThread(SSchedMsg* pMsg) { - tscDebug(" new thread Sched call tscCreateStream begin..."); - tscCreateStream(pMsg->ahandle, NULL, 0); - tscDebug(" new thread Sched call tscCreateStream end."); +// fetchFp call back +void fetchFpStreamLastRow(void* param ,TAOS_RES* res, int num) { + SSqlStream* pStream = (SSqlStream*)param; + SSqlObj* pSql = res; + + // get row data set to ltime + tscSetSqlOwner(pSql); + TAOS_ROW row = doSetResultRowData(pSql); + if( row && row[0] ) { + pStream->ltime = *((int64_t*)row[0]); + const char* dstTable = pStream->dstTable? 
pStream->dstTable: ""; + tscDebug(" CQ stream table=%s last row time=%"PRId64" .", dstTable, pStream->ltime); + } + tscClearSqlOwner(pSql); + + // no condition call + tscCreateStream(param, pStream->pSql, TSDB_CODE_SUCCESS); + taos_free_result(res); +} + +// fp callback +void fpStreamLastRow(void* param ,TAOS_RES* res, int code) { + // check result successful + if (code != TSDB_CODE_SUCCESS) { + tscCreateStream(param, res, TSDB_CODE_SUCCESS); + taos_free_result(res); + return ; + } + + // asynchronous fetch last row data + taos_fetch_rows_a(res, fetchFpStreamLastRow, param); +} + +void cbParseSql(void* param, TAOS_RES* res, int code) { + // check result successful + SSqlStream* pStream = (SSqlStream*)param; + SSqlObj* pSql = pStream->pSql; + SSqlCmd* pCmd = &pSql->cmd; + if (code != TSDB_CODE_SUCCESS) { + pSql->res.code = code; + tscDebug("0x%"PRIx64" open stream parse sql failed, sql:%s, reason:%s, code:%s", pSql->self, pSql->sqlstr, pCmd->payload, tstrerror(code)); + pStream->fp(pStream->param, NULL, NULL); + return; + } + + // check dstTable valid + if(pStream->dstTable == NULL || strlen(pStream->dstTable) == 0) { + tscDebug(" cbParseSql dstTable is empty."); + tscCreateStream(param, res, code); + return ; + } + + // query stream last row time async + char sql[128] = ""; + sprintf(sql, "select last_row(*) from %s;", pStream->dstTable); + taos_query_a(pSql->pTscObj, sql, fpStreamLastRow, param); return ; } -// parsesql async response return and change run thread -void tsParseSqlRet(void* param, TAOS_RES* res, int code) { - SSchedMsg schedMsg = { 0 }; - schedMsg.fp = tscCreateStreamThread; - schedMsg.ahandle = param; - schedMsg.thandle = res; - schedMsg.msg = NULL; - taosScheduleTask(tscQhandle, &schedMsg); -} - -TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sqlstr, void (*fp)(void *param, TAOS_RES *, TAOS_ROW row), +TAOS_STREAM *taos_open_stream_withname(TAOS *taos, const char* dstTable, const char *sqlstr, void (*fp)(void *param, TAOS_RES *, TAOS_ROW row), int64_t stime, void *param, void (*callback)(void *)) { STscObj *pObj = (STscObj *)taos; if (pObj == NULL || pObj->signature != pObj) return NULL; @@ -671,6 +686,7 @@ TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sqlstr, void (*fp)(void *p pSql->pStream = pStream; pSql->param = pStream; pSql->maxRetry = TSDB_MAX_REPLICA; + tscSetStreamDestTable(pStream, dstTable); pSql->sqlstr = calloc(1, strlen(sqlstr) + 1); if (pSql->sqlstr == NULL) { @@ -685,16 +701,16 @@ TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sqlstr, void (*fp)(void *p tscDebugL("%p SQL: %s", pSql, pSql->sqlstr); tsem_init(&pSql->rspSem, 0, 0); - pSql->fp = tsParseSqlRet; - pSql->fetchFp = tsParseSqlRet; + pSql->fp = cbParseSql; + pSql->fetchFp = cbParseSql; registerSqlObj(pSql); - + int32_t code = tsParseSql(pSql, true); if (code == TSDB_CODE_SUCCESS) { - tscCreateStream(pStream, pSql, code); + cbParseSql(pStream, pSql, code); } else if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) { - tscDebug(" cq parseSql IN Process pass. "); + tscDebug(" CQ taso_open_stream IN Process. 
sql=%s", sqlstr); } else { tscError("0x%"PRIx64" open stream failed, sql:%s, code:%s", pSql->self, sqlstr, tstrerror(code)); taosReleaseRef(tscObjRef, pSql->self); @@ -705,6 +721,11 @@ TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sqlstr, void (*fp)(void *p return pStream; } +TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sqlstr, void (*fp)(void *param, TAOS_RES *, TAOS_ROW row), + int64_t stime, void *param, void (*callback)(void *)) { + return taos_open_stream_withname(taos, "", sqlstr, fp, stime, param, callback); +} + void taos_close_stream(TAOS_STREAM *handle) { SSqlStream *pStream = (SSqlStream *)handle; diff --git a/src/cq/src/cqMain.c b/src/cq/src/cqMain.c index 5d5d5f339e..ee4be02b90 100644 --- a/src/cq/src/cqMain.c +++ b/src/cq/src/cqMain.c @@ -437,6 +437,10 @@ static void cqProcessCreateTimer(void *param, void *tmrId) { taosReleaseRef(cqObjRef, (int64_t)param); } +// inner implement in tscStream.c +TAOS_STREAM *taos_open_stream_withname(TAOS *taos, const char* desName, const char *sqlstr, void (*fp)(void *param, TAOS_RES *, TAOS_ROW row), + int64_t stime, void *param, void (*callback)(void *)); + static void cqCreateStream(SCqContext *pContext, SCqObj *pObj) { pObj->pContext = pContext; @@ -449,11 +453,10 @@ static void cqCreateStream(SCqContext *pContext, SCqObj *pObj) { pObj->tmrId = 0; if (pObj->pStream == NULL) { - pObj->pStream = taos_open_stream(pContext->dbConn, pObj->sqlStr, cqProcessStreamRes, INT64_MIN, (void *)pObj->rid, NULL); + pObj->pStream = taos_open_stream_withname(pContext->dbConn, pObj->dstTable, pObj->sqlStr, cqProcessStreamRes, INT64_MIN, (void *)pObj->rid, NULL); // TODO the pObj->pStream may be released if error happens if (pObj->pStream) { - tscSetStreamDestTable(pObj->pStream, pObj->dstTable); pContext->num++; cDebug("vgId:%d, id:%d CQ:%s is opened", pContext->vgId, pObj->tid, pObj->sqlStr); } else { From c7764d44780c89e7b1f46c80e4e96dfeea3068f2 Mon Sep 17 00:00:00 2001 From: tickduan <417921451@qq.com> Date: Fri, 28 May 2021 13:00:12 +0800 Subject: [PATCH 24/82] ltime > stime to do replace --- src/client/src/tscStream.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/client/src/tscStream.c b/src/client/src/tscStream.c index 7e6132b7c8..2226c3d95d 100644 --- a/src/client/src/tscStream.c +++ b/src/client/src/tscStream.c @@ -576,7 +576,7 @@ static void tscCreateStream(void *param, TAOS_RES *res, int code) { // set stime with ltime if ltime > stime const char* dstTable = pStream->dstTable? 
pStream->dstTable: ""; tscDebug(" CQ table=%s ltime is %"PRId64, dstTable, pStream->ltime); - if(pStream->ltime > 0 && pStream->ltime > pStream->stime) { + if(pStream->ltime > pStream->stime) { tscWarn(" CQ set stream %s stime=%"PRId64" replace with ltime=%"PRId64" ", dstTable, pStream->stime, pStream->ltime); pStream->stime = pStream->ltime; } From 5bc050ccdf122e48d9915ace7a357e898a58be03 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Fri, 28 May 2021 14:19:16 +0800 Subject: [PATCH 25/82] TD-4393 --- src/vnode/src/vnodeMgmt.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/vnode/src/vnodeMgmt.c b/src/vnode/src/vnodeMgmt.c index 32f9532138..7e6022fc87 100644 --- a/src/vnode/src/vnodeMgmt.c +++ b/src/vnode/src/vnodeMgmt.c @@ -91,18 +91,18 @@ static void vnodeIncRef(void *ptNode) { } void *vnodeAcquire(int32_t vgId) { - SVnodeObj **ppVnode = NULL; + SVnodeObj *pVnode = NULL; if (tsVnodesHash != NULL) { - ppVnode = taosHashGetClone(tsVnodesHash, &vgId, sizeof(int32_t), vnodeIncRef, NULL, sizeof(void *)); + taosHashGetClone(tsVnodesHash, &vgId, sizeof(int32_t), vnodeIncRef, &pVnode, sizeof(void *)); } - if (ppVnode == NULL || *ppVnode == NULL) { + if (pVnode == NULL) { terrno = TSDB_CODE_VND_INVALID_VGROUP_ID; vDebug("vgId:%d, not exist", vgId); return NULL; } - return *ppVnode; + return pVnode; } void vnodeRelease(void *vparam) { From f382d2b9dd18f296807c35b1e86fef095db92d1c Mon Sep 17 00:00:00 2001 From: dapan1121 <89396746@qq.com> Date: Fri, 28 May 2021 15:16:20 +0800 Subject: [PATCH 26/82] fix bug --- src/client/src/tscParseInsert.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/client/src/tscParseInsert.c b/src/client/src/tscParseInsert.c index f54237306c..f1dd8975dc 100644 --- a/src/client/src/tscParseInsert.c +++ b/src/client/src/tscParseInsert.c @@ -769,6 +769,10 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC index = 0; sToken = tStrGetToken(sql, &index, false); + if (sToken.type == TK_ILLEGAL) { + return tscSQLSyntaxErrMsg(pCmd->payload, "unrecognized token", sToken.z); + } + if (sToken.type == TK_RP) { break; } From 1465fd9cfc7d35125401e96a75ffcd63f988f12e Mon Sep 17 00:00:00 2001 From: Ping Xiao Date: Fri, 28 May 2021 16:50:43 +0800 Subject: [PATCH 27/82] Fix test failure --- tests/pytest/table/tablename-boundary.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/tests/pytest/table/tablename-boundary.py b/tests/pytest/table/tablename-boundary.py index 8766a9d4a9..dc22c3343b 100644 --- a/tests/pytest/table/tablename-boundary.py +++ b/tests/pytest/table/tablename-boundary.py @@ -74,9 +74,7 @@ class TDTestCase: tdSql.execute("insert into %s.%s using %s.%s tags(2) values(%d, 1)(%d, 2)(%d, 3)" % (db_name, tb_name2, db_name, stb_name, self.ts, self.ts + 1, self.ts + 2)) tdSql.query("show %s.tables" % db_name) - tdSql.checkRows(2) - tdSql.checkData(0, 0, tb_name1) - tdSql.checkData(1, 0, tb_name2) + tdSql.checkRows(2) tdSql.query("select * from %s.%s" % (db_name, stb_name)) tdSql.checkRows(6) From 72e2c66d86a6f37904d552422b310f23c456d115 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Fri, 28 May 2021 17:59:32 +0800 Subject: [PATCH 28/82] [TD-4068]: taosdemo support stmt. (#6270) * [TD-4068]: taosdemo support stmt. for easy merge purpose. disabled in master. * fix clang compile error. 
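A minimal standalone sketch of how the new insert-interface selection is meant to behave
(the helper name iface_from_string is illustrative only; the patch itself performs this
mapping inline in parse_args() for the -I option and in the JSON parser for the
"insert_mode" field):

    #include <stdio.h>
    #include <strings.h>

    /* interface enum as introduced by this patch in taosdemo.c */
    enum enum_TAOS_INTERFACE {
        TAOSC_IFACE,
        REST_IFACE,
        STMT_IFACE,
        INTERFACE_BUT
    };

    /* hypothetical helper mirroring the string-to-enum mapping done in the patch */
    static int iface_from_string(const char *s) {
        if (0 == strcasecmp(s, "taosc")) return TAOSC_IFACE;
        if (0 == strcasecmp(s, "rest"))  return REST_IFACE;
        if (0 == strcasecmp(s, "stmt"))  return STMT_IFACE;
        return INTERFACE_BUT; /* unrecognized values are rejected by parse_args() */
    }

    int main(void) {
        /* e.g. `taosdemo -I stmt`, or "insert_mode": "stmt" in the insert JSON */
        printf("stmt maps to %d\n", iface_from_string("stmt")); /* prints 2 */
        return 0;
    }

Note that STMT_IFACE_ENABLED is defined to 0 in this patch, so the stmt code path
stays disabled in master, as stated above.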
--- src/kit/taosdemo/taosdemo.c | 1469 ++++++++++++++++++++++++----------- 1 file changed, 1020 insertions(+), 449 deletions(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 3c528ab26f..70ddbacf44 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -19,6 +19,7 @@ */ #include +#include #define _GNU_SOURCE #define CURL_STATICLIB @@ -52,6 +53,8 @@ #include "taoserror.h" #include "tutil.h" +#define STMT_IFACE_ENABLED 0 + #define REQ_EXTRA_BUF_LEN 1024 #define RESP_BUF_LEN 4096 @@ -61,6 +64,8 @@ extern char configDir[]; #define QUERY_JSON_NAME "query.json" #define SUBSCRIBE_JSON_NAME "subscribe.json" +#define STR_INSERT_INTO "INSERT INTO " + enum TEST_MODE { INSERT_TEST, // 0 QUERY_TEST, // 1 @@ -70,6 +75,8 @@ enum TEST_MODE { #define MAX_RECORDS_PER_REQ 32766 +#define HEAD_BUFF_LEN 1024*24 // 16*1024 + (192+32)*2 + insert into .. + #define MAX_SQL_SIZE 65536 #define BUFFER_SIZE (65536*2) #define COND_BUF_LEN BUFFER_SIZE - 30 @@ -120,17 +127,24 @@ enum enumSYNC_MODE { MODE_BUT }; +enum enum_TAOS_INTERFACE { + TAOSC_IFACE, + REST_IFACE, + STMT_IFACE, + INTERFACE_BUT +}; + typedef enum enumQUERY_CLASS { SPECIFIED_CLASS, STABLE_CLASS, CLASS_BUT } QUERY_CLASS; -typedef enum enum_INSERT_MODE { +typedef enum enum_PROGRESSIVE_OR_INTERLACE { PROGRESSIVE_INSERT_MODE, INTERLACE_INSERT_MODE, INVALID_INSERT_MODE -} INSERT_MODE; +} PROG_OR_INTERLACE_MODE; typedef enum enumQUERY_TYPE { NO_INSERT_TYPE, @@ -196,6 +210,7 @@ typedef struct SArguments_S { uint32_t test_mode; char * host; uint16_t port; + uint16_t iface; char * user; char * password; char * database; @@ -217,13 +232,13 @@ typedef struct SArguments_S { uint32_t num_of_threads; uint64_t insert_interval; int64_t query_times; - uint64_t interlace_rows; - uint64_t num_of_RPR; // num_of_records_per_req + uint32_t interlace_rows; + uint32_t num_of_RPR; // num_of_records_per_req uint64_t max_sql_len; int64_t num_of_tables; int64_t num_of_DPT; int abort; - int disorderRatio; // 0: no disorder, >0: x% + uint32_t disorderRatio; // 0: no disorder, >0: x% int disorderRange; // ms or us by database precision uint32_t method_of_delete; char ** arg_list; @@ -246,12 +261,12 @@ typedef struct SSuperTable_S { uint8_t autoCreateTable; // 0: create sub table, 1: auto create sub table char childTblPrefix[MAX_TB_NAME_SIZE]; char dataSource[MAX_TB_NAME_SIZE+1]; // rand_gen or sample - char insertMode[MAX_TB_NAME_SIZE]; // taosc, rest + uint16_t iface; // 0: taosc, 1: rest, 2: stmt int64_t childTblLimit; uint64_t childTblOffset; // int multiThreadWriteOneTbl; // 0: no, 1: yes - uint64_t interlaceRows; // + uint32_t interlaceRows; // int disorderRatio; // 0: no disorder, >0: x% int disorderRange; // ms or us by database precision uint64_t maxSqlLen; // @@ -364,7 +379,7 @@ typedef struct SDbs_S { typedef struct SpecifiedQueryInfo_S { uint64_t queryInterval; // 0: unlimit > 0 loop/s uint32_t concurrent; - int sqlCount; + uint64_t sqlCount; uint32_t asyncMode; // 0: sync, 1: async uint64_t subscribeInterval; // ms uint64_t queryTimes; @@ -392,7 +407,7 @@ typedef struct SuperQueryInfo_S { uint64_t queryTimes; int64_t childTblCount; char childTblPrefix[MAX_TB_NAME_SIZE]; - int sqlCount; + uint64_t sqlCount; char sql[MAX_QUERY_SQL_COUNT][MAX_QUERY_SQL_LENGTH+1]; char result[MAX_QUERY_SQL_COUNT][MAX_FILE_NAME_LEN+1]; int resubAfterConsume; @@ -420,6 +435,7 @@ typedef struct SQueryMetaInfo_S { typedef struct SThreadInfo_S { TAOS * taos; + TAOS_STMT *stmt; int threadID; char db_name[MAX_DB_NAME_SIZE+1]; uint32_t 
time_precision; @@ -434,6 +450,7 @@ typedef struct SThreadInfo_S { char* cols; bool use_metric; SSuperTable* superTblInfo; + char *buffer; // sql cmd buffer // for async insert tsem_t lock_sem; @@ -541,7 +558,7 @@ static int queryDbExec(TAOS *taos, char *command, QUERY_TYPE type, bool quiet); static int postProceSql(char *host, struct sockaddr_in *pServAddr, uint16_t port, char* sqlstr, threadInfo *pThreadInfo); static int64_t getTSRandTail(int64_t timeStampStep, int32_t seq, - int disorderRatio, int disorderRange); + int disorderRatio, int disorderRange); /* ************ Global variables ************ */ @@ -557,6 +574,7 @@ SArguments g_args = { 0, // test_mode "127.0.0.1", // host 6030, // port + TAOSC_IFACE, // iface "root", // user #ifdef _TD_POWER_ "powerdb", // password @@ -673,6 +691,8 @@ static void printHelp() { "The host to connect to TDengine. Default is localhost."); printf("%s%s%s%s\n", indent, "-p", indent, "The TCP/IP port number to use for the connection. Default is 0."); + printf("%s%s%s%s\n", indent, "-I", indent, + "The interface (taosc, rest, and stmt) taosdemo uses. Default is 'taosc'."); printf("%s%s%s%s\n", indent, "-d", indent, "Destination database. Default is 'test'."); printf("%s%s%s%s\n", indent, "-a", indent, @@ -761,6 +781,23 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { exit(EXIT_FAILURE); } arguments->port = atoi(argv[++i]); + } else if (strcmp(argv[i], "-I") == 0) { + if (argc == i+1) { + printHelp(); + errorPrint("%s", "\n\t-I need a valid string following!\n"); + exit(EXIT_FAILURE); + } + ++i; + if (0 == strcasecmp(argv[i], "taosc")) { + arguments->iface = TAOSC_IFACE; + } else if (0 == strcasecmp(argv[i], "rest")) { + arguments->iface = REST_IFACE; + } else if (0 == strcasecmp(argv[i], "stmt")) { + arguments->iface = STMT_IFACE; + } else { + errorPrint("%s", "\n\t-I need a valid string following!\n"); + exit(EXIT_FAILURE); + } } else if (strcmp(argv[i], "-u") == 0) { if (argc == i+1) { printHelp(); @@ -897,6 +934,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { && strcasecmp(argv[i], "BIGINT") && strcasecmp(argv[i], "DOUBLE") && strcasecmp(argv[i], "BINARY") + && strcasecmp(argv[i], "TIMESTAMP") && strcasecmp(argv[i], "NCHAR")) { printHelp(); errorPrint("%s", "-b: Invalid data_type!\n"); @@ -918,6 +956,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { && strcasecmp(token, "BIGINT") && strcasecmp(token, "DOUBLE") && strcasecmp(token, "BINARY") + && strcasecmp(token, "TIMESTAMP") && strcasecmp(token, "NCHAR")) { printHelp(); free(g_dupstr); @@ -1019,6 +1058,19 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { } } + int columnCount; + for (columnCount = 0; columnCount < MAX_NUM_DATATYPE; columnCount ++) { + if (g_args.datatype[columnCount] == NULL) { + break; + } + } + + if (0 == columnCount) { + perror("data type error!"); + exit(-1); + } + g_args.num_of_CPR = columnCount; + if (((arguments->debug_print) && (arguments->metaFile == NULL)) || arguments->verbose_print) { printf("###################################################################\n"); @@ -1028,7 +1080,8 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { arguments->port ); printf("# User: %s\n", arguments->user); printf("# Password: %s\n", arguments->password); - printf("# Use metric: %s\n", arguments->use_metric ? "true" : "false"); + printf("# Use metric: %s\n", + arguments->use_metric ? 
"true" : "false"); if (*(arguments->datatype)) { printf("# Specified data type: "); for (int i = 0; i < MAX_NUM_DATATYPE; i++) @@ -1040,7 +1093,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { } printf("# Insertion interval: %"PRIu64"\n", arguments->insert_interval); - printf("# Number of records per req: %"PRIu64"\n", + printf("# Number of records per req: %u\n", arguments->num_of_RPR); printf("# Max SQL length: %"PRIu64"\n", arguments->max_sql_len); @@ -1068,8 +1121,6 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { } static bool getInfoFromJsonFile(char* file); -//static int generateOneRowDataForStb(SSuperTable* stbInfo); -//static int getDataIntoMemForStb(SSuperTable* stbInfo); static void init_rand_data(); static void tmfclose(FILE *fp) { if (NULL != fp) { @@ -1088,7 +1139,7 @@ static int queryDbExec(TAOS *taos, char *command, QUERY_TYPE type, bool quiet) { TAOS_RES *res = NULL; int32_t code = -1; - for (i = 0; i < 5; i++) { + for (i = 0; i < 5 /* retry */; i++) { if (NULL != res) { taos_free_result(res); res = NULL; @@ -1104,7 +1155,8 @@ static int queryDbExec(TAOS *taos, char *command, QUERY_TYPE type, bool quiet) { verbosePrint("%s() LN%d - command: %s\n", __func__, __LINE__, command); if (code != 0) { if (!quiet) { - errorPrint("Failed to execute %s, reason: %s\n", command, taos_errstr(res)); + errorPrint("Failed to execute %s, reason: %s\n", + command, taos_errstr(res)); } taos_free_result(res); //taos_close(taos); @@ -1320,6 +1372,8 @@ static void init_rand_data() { static int printfInsertMeta() { SHOW_PARSE_RESULT_START(); + printf("interface: \033[33m%s\033[0m\n", + (g_args.iface==TAOSC_IFACE)?"taosc":(g_args.iface==REST_IFACE)?"rest":"stmt"); printf("host: \033[33m%s:%u\033[0m\n", g_Dbs.host, g_Dbs.port); printf("user: \033[33m%s\033[0m\n", g_Dbs.user); @@ -1331,7 +1385,7 @@ static int printfInsertMeta() { g_Dbs.threadCountByCreateTbl); printf("top insert interval: \033[33m%"PRIu64"\033[0m\n", g_args.insert_interval); - printf("number of records per req: \033[33m%"PRIu64"\033[0m\n", + printf("number of records per req: \033[33m%u\033[0m\n", g_args.num_of_RPR); printf("max sql length: \033[33m%"PRIu64"\033[0m\n", g_args.max_sql_len); @@ -1437,8 +1491,9 @@ static int printfInsertMeta() { g_Dbs.db[i].superTbls[j].childTblPrefix); printf(" dataSource: \033[33m%s\033[0m\n", g_Dbs.db[i].superTbls[j].dataSource); - printf(" insertMode: \033[33m%s\033[0m\n", - g_Dbs.db[i].superTbls[j].insertMode); + printf(" iface: \033[33m%s\033[0m\n", + (g_Dbs.db[i].superTbls[j].iface==TAOSC_IFACE)?"taosc": + (g_Dbs.db[i].superTbls[j].iface==REST_IFACE)?"rest":"stmt"); if (g_Dbs.db[i].superTbls[j].childTblLimit > 0) { printf(" childTblLimit: \033[33m%"PRId64"\033[0m\n", g_Dbs.db[i].superTbls[j].childTblLimit); @@ -1456,7 +1511,7 @@ static int printfInsertMeta() { printf(" multiThreadWriteOneTbl: \033[33myes\033[0m\n"); } */ - printf(" interlaceRows: \033[33m%"PRIu64"\033[0m\n", + printf(" interlaceRows: \033[33m%u\033[0m\n", g_Dbs.db[i].superTbls[j].interlaceRows); if (g_Dbs.db[i].superTbls[j].interlaceRows > 0) { @@ -1534,7 +1589,7 @@ static void printfInsertMetaToFile(FILE* fp) { fprintf(fp, "resultFile: %s\n", g_Dbs.resultFile); fprintf(fp, "thread num of insert data: %d\n", g_Dbs.threadCount); fprintf(fp, "thread num of create table: %d\n", g_Dbs.threadCountByCreateTbl); - fprintf(fp, "number of records per req: %"PRIu64"\n", g_args.num_of_RPR); + fprintf(fp, "number of records per req: %u\n", g_args.num_of_RPR); fprintf(fp, "max sql length: 
%"PRIu64"\n", g_args.max_sql_len); fprintf(fp, "database count: %d\n", g_Dbs.dbCount); @@ -1626,11 +1681,12 @@ static void printfInsertMetaToFile(FILE* fp) { g_Dbs.db[i].superTbls[j].childTblPrefix); fprintf(fp, " dataSource: %s\n", g_Dbs.db[i].superTbls[j].dataSource); - fprintf(fp, " insertMode: %s\n", - g_Dbs.db[i].superTbls[j].insertMode); + fprintf(fp, " iface: %s\n", + (g_Dbs.db[i].superTbls[j].iface==TAOSC_IFACE)?"taosc": + (g_Dbs.db[i].superTbls[j].iface==REST_IFACE)?"rest":"stmt"); fprintf(fp, " insertRows: %"PRId64"\n", g_Dbs.db[i].superTbls[j].insertRows); - fprintf(fp, " interlace rows: %"PRIu64"\n", + fprintf(fp, " interlace rows: %u\n", g_Dbs.db[i].superTbls[j].interlaceRows); if (g_Dbs.db[i].superTbls[j].interlaceRows > 0) { fprintf(fp, " stable insert interval: %"PRIu64"\n", @@ -1643,7 +1699,7 @@ static void printfInsertMetaToFile(FILE* fp) { fprintf(fp, " multiThreadWriteOneTbl: yes\n"); } */ - fprintf(fp, " interlaceRows: %"PRIu64"\n", + fprintf(fp, " interlaceRows: %u\n", g_Dbs.db[i].superTbls[j].interlaceRows); fprintf(fp, " disorderRange: %d\n", g_Dbs.db[i].superTbls[j].disorderRange); @@ -1719,7 +1775,7 @@ static void printfQueryMeta() { if ((SUBSCRIBE_TEST == g_args.test_mode) || (QUERY_TEST == g_args.test_mode)) { printf("specified table query info: \n"); - printf("sqlCount: \033[33m%d\033[0m\n", + printf("sqlCount: \033[33m%"PRIu64"\033[0m\n", g_queryInfo.specifiedQueryInfo.sqlCount); if (g_queryInfo.specifiedQueryInfo.sqlCount > 0) { printf("specified tbl query times:\n"); @@ -1739,15 +1795,15 @@ static void printfQueryMeta() { printf("keepProgress: \033[33m%d\033[0m\n", g_queryInfo.specifiedQueryInfo.subscribeKeepProgress); - for (int i = 0; i < g_queryInfo.specifiedQueryInfo.sqlCount; i++) { - printf(" sql[%d]: \033[33m%s\033[0m\n", + for (uint64_t i = 0; i < g_queryInfo.specifiedQueryInfo.sqlCount; i++) { + printf(" sql[%"PRIu64"]: \033[33m%s\033[0m\n", i, g_queryInfo.specifiedQueryInfo.sql[i]); } printf("\n"); } printf("super table query info:\n"); - printf("sqlCount: \033[33m%d\033[0m\n", + printf("sqlCount: \033[33m%"PRIu64"\033[0m\n", g_queryInfo.superQueryInfo.sqlCount); if (g_queryInfo.superQueryInfo.sqlCount > 0) { @@ -2803,7 +2859,7 @@ static int createDatabasesAndStables() { int validStbCount = 0; - for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) { + for (uint64_t j = 0; j < g_Dbs.db[i].superTblCount; j++) { sprintf(command, "describe %s.%s;", g_Dbs.db[i].dbName, g_Dbs.db[i].superTbls[j].sTblName); ret = queryDbExec(taos, command, NO_INSERT_TYPE, true); @@ -2813,7 +2869,7 @@ static int createDatabasesAndStables() { &g_Dbs.db[i].superTbls[j]); if (0 != ret) { - errorPrint("create super table %d failed!\n\n", j); + errorPrint("create super table %"PRIu64" failed!\n\n", j); continue; } } @@ -2841,7 +2897,7 @@ static void* createTable(void *sarg) threadInfo *pThreadInfo = (threadInfo *)sarg; SSuperTable* superTblInfo = pThreadInfo->superTblInfo; - int64_t lastPrintTime = taosGetTimestampMs(); + uint64_t lastPrintTime = taosGetTimestampMs(); int buff_len; buff_len = BUFFER_SIZE / 8; @@ -2915,7 +2971,7 @@ static void* createTable(void *sarg) return NULL; } - int64_t currentPrintTime = taosGetTimestampMs(); + uint64_t currentPrintTime = taosGetTimestampMs(); if (currentPrintTime - lastPrintTime > 30*1000) { printf("thread[%d] already create %"PRIu64" - %"PRIu64" tables\n", pThreadInfo->threadID, pThreadInfo->start_table_from, i); @@ -2934,11 +2990,11 @@ static void* createTable(void *sarg) } static int startMultiThreadCreateChildTable( - char* cols, int 
threads, uint64_t startFrom, int64_t ntables, + char* cols, int threads, uint64_t tableFrom, int64_t ntables, char* db_name, SSuperTable* superTblInfo) { pthread_t *pids = malloc(threads * sizeof(pthread_t)); - threadInfo *infos = malloc(threads * sizeof(threadInfo)); + threadInfo *infos = calloc(1, threads * sizeof(threadInfo)); if ((NULL == pids) || (NULL == infos)) { printf("malloc failed\n"); @@ -2978,10 +3034,10 @@ static int startMultiThreadCreateChildTable( return -1; } - pThreadInfo->start_table_from = startFrom; + pThreadInfo->start_table_from = tableFrom; pThreadInfo->ntables = iend_table_to = i < b ? startFrom + a : startFrom + a - 1; - startFrom = pThreadInfo->end_table_to + 1; + pThreadInfo->end_table_to = i < b ? tableFrom + a : tableFrom + a - 1; + tableFrom = pThreadInfo->end_table_to + 1; pThreadInfo->use_metric = true; pThreadInfo->cols = cols; pThreadInfo->minDelay = UINT64_MAX; @@ -3011,7 +3067,7 @@ static void createChildTables() { if (g_Dbs.use_metric) { if (g_Dbs.db[i].superTblCount > 0) { // with super table - for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) { + for (uint64_t j = 0; j < g_Dbs.db[i].superTblCount; j++) { if ((AUTO_CREATE_SUBTBL == g_Dbs.db[i].superTbls[j].autoCreateTable) || (TBL_ALREADY_EXISTS == g_Dbs.db[i].superTbls[j].childTblExists)) { continue; @@ -3019,15 +3075,15 @@ static void createChildTables() { verbosePrint("%s() LN%d: %s\n", __func__, __LINE__, g_Dbs.db[i].superTbls[j].colsOfCreateChildTable); - uint64_t startFrom = 0; + uint64_t tableFrom = 0; g_totalChildTables += g_Dbs.db[i].superTbls[j].childTblCount; verbosePrint("%s() LN%d: create %"PRId64" child tables from %"PRIu64"\n", - __func__, __LINE__, g_totalChildTables, startFrom); + __func__, __LINE__, g_totalChildTables, tableFrom); startMultiThreadCreateChildTable( g_Dbs.db[i].superTbls[j].colsOfCreateChildTable, g_Dbs.threadCountByCreateTbl, - startFrom, + tableFrom, g_Dbs.db[i].superTbls[j].childTblCount, g_Dbs.db[i].dbName, &(g_Dbs.db[i].superTbls[j])); } @@ -3036,11 +3092,14 @@ static void createChildTables() { // normal table len = snprintf(tblColsBuf, MAX_SQL_SIZE, "(TS TIMESTAMP"); for (int j = 0; j < g_args.num_of_CPR; j++) { - if ((strncasecmp(g_args.datatype[j], "BINARY", strlen("BINARY")) == 0) + if (g_args.datatype[j] + && ((strncasecmp(g_args.datatype[j], + "BINARY", strlen("BINARY")) == 0) || (strncasecmp(g_args.datatype[j], - "NCHAR", strlen("NCHAR")) == 0)) { + "NCHAR", strlen("NCHAR")) == 0))) { snprintf(tblColsBuf + len, MAX_SQL_SIZE - len, - ", COL%d %s(%d)", j, g_args.datatype[j], g_args.len_of_binary); + ", COL%d %s(%d)", j, g_args.datatype[j], + g_args.len_of_binary); } else { snprintf(tblColsBuf + len, MAX_SQL_SIZE - len, ", COL%d %s", j, g_args.datatype[j]); @@ -3132,10 +3191,12 @@ static int readTagFromCsvFileToMem(SSuperTable * superTblInfo) { return 0; } +#if 0 int readSampleFromJsonFileToMem(SSuperTable * superTblInfo) { // TODO return 0; } +#endif /* Read 10000 lines at most. 
If more than 10000 lines, continue to read after using @@ -3520,9 +3581,9 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { // rows per table need be less than insert batch if (g_args.interlace_rows > g_args.num_of_RPR) { - printf("NOTICE: interlace rows value %"PRIu64" > num_of_records_per_req %"PRIu64"\n\n", + printf("NOTICE: interlace rows value %u > num_of_records_per_req %u\n\n", g_args.interlace_rows, g_args.num_of_RPR); - printf(" interlace rows value will be set to num_of_records_per_req %"PRIu64"\n\n", + printf(" interlace rows value will be set to num_of_records_per_req %u\n\n", g_args.num_of_RPR); prompt(); g_args.interlace_rows = g_args.num_of_RPR; @@ -3837,15 +3898,24 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { goto PARSE_OVER; } - cJSON *insertMode = cJSON_GetObjectItem(stbInfo, "insert_mode"); // taosc , rest - if (insertMode && insertMode->type == cJSON_String - && insertMode->valuestring != NULL) { - tstrncpy(g_Dbs.db[i].superTbls[j].insertMode, - insertMode->valuestring, MAX_DB_NAME_SIZE); - } else if (!insertMode) { - tstrncpy(g_Dbs.db[i].superTbls[j].insertMode, "taosc", MAX_DB_NAME_SIZE); + cJSON *stbIface = cJSON_GetObjectItem(stbInfo, "insert_mode"); // taosc , rest, stmt + if (stbIface && stbIface->type == cJSON_String + && stbIface->valuestring != NULL) { + if (0 == strcasecmp(stbIface->valuestring, "taosc")) { + g_Dbs.db[i].superTbls[j].iface= TAOSC_IFACE; + } else if (0 == strcasecmp(stbIface->valuestring, "rest")) { + g_Dbs.db[i].superTbls[j].iface= REST_IFACE; + } else if (0 == strcasecmp(stbIface->valuestring, "stmt")) { + g_Dbs.db[i].superTbls[j].iface= STMT_IFACE; + } else { + errorPrint("%s() LN%d, failed to read json, insert_mode %s not recognized\n", + __func__, __LINE__, stbIface->valuestring); + goto PARSE_OVER; + } + } else if (!stbIface) { + g_Dbs.db[i].superTbls[j].iface = TAOSC_IFACE; } else { - printf("ERROR: failed to read json, insert_mode not found\n"); + errorPrint("%s", "failed to read json, insert_mode not found\n"); goto PARSE_OVER; } @@ -3936,9 +4006,9 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { goto PARSE_OVER; } - cJSON* maxSqlLen = cJSON_GetObjectItem(stbInfo, "max_sql_len"); - if (maxSqlLen && maxSqlLen->type == cJSON_Number) { - int32_t len = maxSqlLen->valueint; + cJSON* stbMaxSqlLen = cJSON_GetObjectItem(stbInfo, "max_sql_len"); + if (stbMaxSqlLen && stbMaxSqlLen->type == cJSON_Number) { + int32_t len = stbMaxSqlLen->valueint; if (len > TSDB_MAX_ALLOWED_SQL_LEN) { len = TSDB_MAX_ALLOWED_SQL_LEN; } else if (len < 5) { @@ -3948,7 +4018,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { } else if (!maxSqlLen) { g_Dbs.db[i].superTbls[j].maxSqlLen = g_args.max_sql_len; } else { - errorPrint("%s() LN%d, failed to read json, maxSqlLen input mistake\n", + errorPrint("%s() LN%d, failed to read json, stbMaxSqlLen input mistake\n", __func__, __LINE__); goto PARSE_OVER; } @@ -3970,24 +4040,25 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { goto PARSE_OVER; } */ - cJSON* interlaceRows = cJSON_GetObjectItem(stbInfo, "interlace_rows"); - if (interlaceRows && interlaceRows->type == cJSON_Number) { - if (interlaceRows->valueint < 0) { + cJSON* stbInterlaceRows = cJSON_GetObjectItem(stbInfo, "interlace_rows"); + if (stbInterlaceRows && stbInterlaceRows->type == cJSON_Number) { + if (stbInterlaceRows->valueint < 0) { errorPrint("%s() LN%d, failed to read json, interlace rows input mistake\n", __func__, __LINE__); goto PARSE_OVER; } - g_Dbs.db[i].superTbls[j].interlaceRows = interlaceRows->valueint; + 
g_Dbs.db[i].superTbls[j].interlaceRows = stbInterlaceRows->valueint; // rows per table need be less than insert batch if (g_Dbs.db[i].superTbls[j].interlaceRows > g_args.num_of_RPR) { - printf("NOTICE: db[%d].superTbl[%d]'s interlace rows value %"PRIu64" > num_of_records_per_req %"PRIu64"\n\n", - i, j, g_Dbs.db[i].superTbls[j].interlaceRows, g_args.num_of_RPR); - printf(" interlace rows value will be set to num_of_records_per_req %"PRIu64"\n\n", + printf("NOTICE: db[%d].superTbl[%d]'s interlace rows value %u > num_of_records_per_req %u\n\n", + i, j, g_Dbs.db[i].superTbls[j].interlaceRows, + g_args.num_of_RPR); + printf(" interlace rows value will be set to num_of_records_per_req %u\n\n", g_args.num_of_RPR); prompt(); g_Dbs.db[i].superTbls[j].interlaceRows = g_args.num_of_RPR; } - } else if (!interlaceRows) { + } else if (!stbInterlaceRows) { g_Dbs.db[i].superTbls[j].interlaceRows = 0; // 0 means progressive mode, > 0 mean interlace mode. max value is less or equ num_of_records_per_req } else { errorPrint( @@ -4199,7 +4270,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { if (concurrent && concurrent->type == cJSON_Number) { if (concurrent->valueint <= 0) { errorPrint( - "%s() LN%d, query sqlCount %d or concurrent %d is not correct.\n", + "%s() LN%d, query sqlCount %"PRIu64" or concurrent %d is not correct.\n", __func__, __LINE__, g_queryInfo.specifiedQueryInfo.sqlCount, g_queryInfo.specifiedQueryInfo.concurrent); @@ -4613,7 +4684,7 @@ static void prepareSampleData() { static void postFreeResource() { tmfclose(g_fpOfInsertResult); for (int i = 0; i < g_Dbs.dbCount; i++) { - for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) { + for (uint64_t j = 0; j < g_Dbs.db[i].superTblCount; j++) { if (0 != g_Dbs.db[i].superTbls[j].colsOfCreateChildTable) { free(g_Dbs.db[i].superTbls[j].colsOfCreateChildTable); g_Dbs.db[i].superTbls[j].colsOfCreateChildTable = NULL; @@ -4661,16 +4732,22 @@ static int getRowDataFromSample( return dataLen; } -static int64_t generateRowData(char* recBuf, int64_t timestamp, SSuperTable* stbInfo) { +static int64_t generateStbRowData( + SSuperTable* stbInfo, + char* recBuf, int64_t timestamp) +{ int64_t dataLen = 0; char *pstr = recBuf; int64_t maxLen = MAX_DATA_SIZE; - dataLen += snprintf(pstr + dataLen, maxLen - dataLen, "(%" PRId64 ",", timestamp); + dataLen += snprintf(pstr + dataLen, maxLen - dataLen, + "(%" PRId64 ",", timestamp); for (int i = 0; i < stbInfo->columnCount; i++) { - if ((0 == strncasecmp(stbInfo->columns[i].dataType, "BINARY", strlen("BINARY"))) - || (0 == strncasecmp(stbInfo->columns[i].dataType, "NCHAR", strlen("NCHAR")))) { + if ((0 == strncasecmp(stbInfo->columns[i].dataType, + "BINARY", strlen("BINARY"))) + || (0 == strncasecmp(stbInfo->columns[i].dataType, + "NCHAR", strlen("NCHAR")))) { if (stbInfo->columns[i].dataLen > TSDB_MAX_BINARY_LEN) { errorPrint( "binary or nchar length overflow, max size:%u\n", (uint32_t)TSDB_MAX_BINARY_LEN); @@ -4686,23 +4763,23 @@ static int64_t generateRowData(char* recBuf, int64_t timestamp, SSuperTable* stb dataLen += snprintf(pstr + dataLen, maxLen - dataLen, "\'%s\',", buf); tmfree(buf); } else if (0 == strncasecmp(stbInfo->columns[i].dataType, - "INT", 3)) { + "INT", strlen("INT"))) { dataLen += snprintf(pstr + dataLen, maxLen - dataLen, "%d,", rand_int()); } else if (0 == strncasecmp(stbInfo->columns[i].dataType, - "BIGINT", 6)) { + "BIGINT", strlen("BIGINT"))) { dataLen += snprintf(pstr + dataLen, maxLen - dataLen, "%"PRId64",", rand_bigint()); } else if (0 == strncasecmp(stbInfo->columns[i].dataType, - 
"FLOAT", 5)) { + "FLOAT", strlen("FLOAT"))) { dataLen += snprintf(pstr + dataLen, maxLen - dataLen, "%f,", rand_float()); } else if (0 == strncasecmp(stbInfo->columns[i].dataType, - "DOUBLE", 6)) { + "DOUBLE", strlen("DOUBLE"))) { dataLen += snprintf(pstr + dataLen, maxLen - dataLen, "%f,", rand_double()); } else if (0 == strncasecmp(stbInfo->columns[i].dataType, - "SMALLINT", 8)) { + "SMALLINT", strlen("SMALLINT"))) { dataLen += snprintf(pstr + dataLen, maxLen - dataLen, "%d,", rand_smallint()); } else if (0 == strncasecmp(stbInfo->columns[i].dataType, @@ -4732,46 +4809,38 @@ static int64_t generateRowData(char* recBuf, int64_t timestamp, SSuperTable* stb } static int64_t generateData(char *recBuf, char **data_type, - int num_of_cols, int64_t timestamp, int lenOfBinary) { + int64_t timestamp, int lenOfBinary) { memset(recBuf, 0, MAX_DATA_SIZE); char *pstr = recBuf; pstr += sprintf(pstr, "(%" PRId64, timestamp); - int c = 0; - for (; c < MAX_NUM_DATATYPE; c++) { - if (data_type[c] == NULL) { - break; - } - } + int columnCount = g_args.num_of_CPR; - if (0 == c) { - perror("data type error!"); - exit(-1); - } - - for (int i = 0; i < c; i++) { - if (strcasecmp(data_type[i % c], "TINYINT") == 0) { + for (int i = 0; i < columnCount; i++) { + if (strcasecmp(data_type[i % columnCount], "TINYINT") == 0) { pstr += sprintf(pstr, ",%d", rand_tinyint() ); - } else if (strcasecmp(data_type[i % c], "SMALLINT") == 0) { + } else if (strcasecmp(data_type[i % columnCount], "SMALLINT") == 0) { pstr += sprintf(pstr, ",%d", rand_smallint()); - } else if (strcasecmp(data_type[i % c], "INT") == 0) { + } else if (strcasecmp(data_type[i % columnCount], "INT") == 0) { pstr += sprintf(pstr, ",%d", rand_int()); - } else if (strcasecmp(data_type[i % c], "BIGINT") == 0) { + } else if (strcasecmp(data_type[i % columnCount], "BIGINT") == 0) { pstr += sprintf(pstr, ",%" PRId64, rand_bigint()); - } else if (strcasecmp(data_type[i % c], "FLOAT") == 0) { + } else if (strcasecmp(data_type[i % columnCount], "TIMESTAMP") == 0) { + pstr += sprintf(pstr, ",%" PRId64, rand_bigint()); + } else if (strcasecmp(data_type[i % columnCount], "FLOAT") == 0) { pstr += sprintf(pstr, ",%10.4f", rand_float()); - } else if (strcasecmp(data_type[i % c], "DOUBLE") == 0) { + } else if (strcasecmp(data_type[i % columnCount], "DOUBLE") == 0) { double t = rand_double(); pstr += sprintf(pstr, ",%20.8f", t); - } else if (strcasecmp(data_type[i % c], "BOOL") == 0) { + } else if (strcasecmp(data_type[i % columnCount], "BOOL") == 0) { bool b = rand_bool() & 1; pstr += sprintf(pstr, ",%s", b ? 
"true" : "false"); - } else if (strcasecmp(data_type[i % c], "BINARY") == 0) { + } else if (strcasecmp(data_type[i % columnCount], "BINARY") == 0) { char *s = malloc(lenOfBinary); rand_string(s, lenOfBinary); pstr += sprintf(pstr, ",\"%s\"", s); free(s); - } else if (strcasecmp(data_type[i % c], "NCHAR") == 0) { + } else if (strcasecmp(data_type[i % columnCount], "NCHAR") == 0) { char *s = malloc(lenOfBinary); rand_string(s, lenOfBinary); pstr += sprintf(pstr, ",\"%s\"", s); @@ -4818,35 +4887,60 @@ static int prepareSampleDataForSTable(SSuperTable *superTblInfo) { return 0; } -static int64_t execInsert(threadInfo *pThreadInfo, char *buffer, uint64_t k) +static int32_t execInsert(threadInfo *pThreadInfo, uint32_t k) { - int affectedRows; - SSuperTable* superTblInfo = pThreadInfo->superTblInfo; + int32_t affectedRows; + SSuperTable* superTblInfo = pThreadInfo->superTblInfo; - verbosePrint("[%d] %s() LN%d %s\n", pThreadInfo->threadID, - __func__, __LINE__, buffer); - if (superTblInfo) { - if (0 == strncasecmp(superTblInfo->insertMode, "taosc", strlen("taosc"))) { - affectedRows = queryDbExec(pThreadInfo->taos, buffer, INSERT_TYPE, false); - } else if (0 == strncasecmp(superTblInfo->insertMode, "rest", strlen("rest"))) { - if (0 != postProceSql(g_Dbs.host, &g_Dbs.serv_addr, g_Dbs.port, - buffer, NULL /* not set result file */)) { - affectedRows = -1; - printf("========restful return fail, threadID[%d]\n", - pThreadInfo->threadID); - } else { - affectedRows = k; - } - } else { - errorPrint("%s() LN%d: unknown insert mode: %s\n", - __func__, __LINE__, superTblInfo->insertMode); - affectedRows = 0; + verbosePrint("[%d] %s() LN%d %s\n", pThreadInfo->threadID, + __func__, __LINE__, pThreadInfo->buffer); + + uint16_t iface; + if (superTblInfo) + iface = superTblInfo->iface; + else + iface = g_args.iface; + + debugPrint("[%d] %s() LN%d %s\n", pThreadInfo->threadID, + __func__, __LINE__, + (g_args.iface==TAOSC_IFACE)? 
+ "taosc":(g_args.iface==REST_IFACE)?"rest":"stmt"); + + switch(iface) { + case TAOSC_IFACE: + affectedRows = queryDbExec( + pThreadInfo->taos, + pThreadInfo->buffer, INSERT_TYPE, false); + break; + + case REST_IFACE: + if (0 != postProceSql(g_Dbs.host, &g_Dbs.serv_addr, g_Dbs.port, + pThreadInfo->buffer, NULL /* not set result file */)) { + affectedRows = -1; + printf("========restful return fail, threadID[%d]\n", + pThreadInfo->threadID); + } else { + affectedRows = k; + } + break; + + case STMT_IFACE: + debugPrint("%s() LN%d, stmt=%p", __func__, __LINE__, pThreadInfo->stmt); + if (0 != taos_stmt_execute(pThreadInfo->stmt)) { + errorPrint("%s() LN%d, failied to execute insert statement\n", + __func__, __LINE__); + exit(-1); + } + affectedRows = k; + break; + + default: + errorPrint("%s() LN%d: unknown insert mode: %d\n", + __func__, __LINE__, superTblInfo->iface); + affectedRows = 0; } - } else { - affectedRows = queryDbExec(pThreadInfo->taos, buffer, INSERT_TYPE, false); - } - return affectedRows; + return affectedRows; } static void getTableName(char *pTblName, @@ -4874,90 +4968,48 @@ static void getTableName(char *pTblName, } } -static int64_t generateDataTail( - SSuperTable* superTblInfo, - uint64_t batch, char* buffer, int64_t remainderBufLen, int64_t insertRows, - uint64_t startFrom, int64_t startTime, int64_t *pSamplePos, int64_t *dataLen) { - uint64_t len = 0; - uint32_t ncols_per_record = 1; // count first col ts +static int32_t generateDataTailWithoutStb( + uint32_t batch, char* buffer, + int64_t remainderBufLen, int64_t insertRows, + uint64_t recordFrom, int64_t startTime, + /* int64_t *pSamplePos, */int64_t *dataLen) { + uint64_t len = 0; char *pstr = buffer; - if (superTblInfo == NULL) { - uint32_t datatypeSeq = 0; - while(g_args.datatype[datatypeSeq]) { - datatypeSeq ++; - ncols_per_record ++; - } - } + verbosePrint("%s() LN%d batch=%d\n", __func__, __LINE__, batch); - verbosePrint("%s() LN%d batch=%"PRIu64"\n", __func__, __LINE__, batch); - - bool tsRand; - if ((superTblInfo) && (0 == strncasecmp(superTblInfo->dataSource, - "rand", strlen("rand")))) { - tsRand = true; - } else { - tsRand = false; - } - - uint64_t k = 0; + int32_t k = 0; for (k = 0; k < batch;) { char data[MAX_DATA_SIZE]; memset(data, 0, MAX_DATA_SIZE); int64_t retLen = 0; - if (superTblInfo) { - if (tsRand) { - retLen = generateRowData( - data, - startTime + getTSRandTail( - superTblInfo->timeStampStep, k, - superTblInfo->disorderRatio, - superTblInfo->disorderRange), - superTblInfo); - } else { - retLen = getRowDataFromSample( - data, - remainderBufLen, - startTime + superTblInfo->timeStampStep * k, - superTblInfo, - pSamplePos); - } - if (retLen > remainderBufLen) { - break; - } + char **data_type = g_args.datatype; + int lenOfBinary = g_args.len_of_binary; - pstr += snprintf(pstr , retLen + 1, "%s", data); - k++; - len += retLen; - remainderBufLen -= retLen; - } else { - char **data_type = g_args.datatype; - int lenOfBinary = g_args.len_of_binary; - retLen = generateData(data, data_type, - ncols_per_record, - startTime + getTSRandTail( - DEFAULT_TIMESTAMP_STEP, k, - g_args.disorderRatio, - g_args.disorderRange), - lenOfBinary); - if (len > remainderBufLen) - break; + retLen = generateData(data, data_type, + startTime + getTSRandTail( + (int64_t) DEFAULT_TIMESTAMP_STEP, k, + g_args.disorderRatio, + g_args.disorderRange), + lenOfBinary); - pstr += sprintf(pstr, "%s", data); - k++; - len += retLen; - remainderBufLen -= retLen; - } + if (len > remainderBufLen) + break; - verbosePrint("%s() LN%d 
len=%"PRIu64" k=%"PRIu64" \nbuffer=%s\n", + pstr += sprintf(pstr, "%s", data); + k++; + len += retLen; + remainderBufLen -= retLen; + + verbosePrint("%s() LN%d len=%"PRIu64" k=%d \nbuffer=%s\n", __func__, __LINE__, len, k, buffer); - startFrom ++; + recordFrom ++; - if (startFrom >= insertRows) { + if (recordFrom >= insertRows) { break; } } @@ -4966,17 +5018,121 @@ static int64_t generateDataTail( return k; } -static int generateSQLHead(char *tableName, int32_t tableSeq, - threadInfo* pThreadInfo, SSuperTable* superTblInfo, +static int64_t getTSRandTail(int64_t timeStampStep, int32_t seq, + int disorderRatio, int disorderRange) +{ + int64_t randTail = timeStampStep * seq; + if (disorderRatio > 0) { + int rand_num = taosRandom() % 100; + if(rand_num < disorderRatio) { + randTail = (randTail + + (taosRandom() % disorderRange + 1)) * (-1); + debugPrint("rand data generated, back %"PRId64"\n", randTail); + } + } + + return randTail; +} + +static int32_t generateStbDataTail( + SSuperTable* superTblInfo, + uint32_t batch, char* buffer, + int64_t remainderBufLen, int64_t insertRows, + uint64_t recordFrom, int64_t startTime, + int64_t *pSamplePos, int64_t *dataLen) { + uint64_t len = 0; + + char *pstr = buffer; + + bool tsRand; + if (0 == strncasecmp(superTblInfo->dataSource, "rand", strlen("rand"))) { + tsRand = true; + } else { + tsRand = false; + } + verbosePrint("%s() LN%d batch=%u\n", __func__, __LINE__, batch); + + int32_t k = 0; + for (k = 0; k < batch;) { + char data[MAX_DATA_SIZE]; + memset(data, 0, MAX_DATA_SIZE); + + int64_t retLen = 0; + + if (tsRand) { + retLen = generateStbRowData(superTblInfo, data, + startTime + getTSRandTail( + superTblInfo->timeStampStep, k, + superTblInfo->disorderRatio, + superTblInfo->disorderRange) + ); + } else { + retLen = getRowDataFromSample( + data, + remainderBufLen, + startTime + superTblInfo->timeStampStep * k, + superTblInfo, + pSamplePos); + } + + if (retLen > remainderBufLen) { + break; + } + + pstr += snprintf(pstr , retLen + 1, "%s", data); + k++; + len += retLen; + remainderBufLen -= retLen; + + verbosePrint("%s() LN%d len=%"PRIu64" k=%u \nbuffer=%s\n", + __func__, __LINE__, len, k, buffer); + + recordFrom ++; + + if (recordFrom >= insertRows) { + break; + } + } + + *dataLen = len; + return k; +} + + +static int generateSQLHeadWithoutStb(char *tableName, + char *dbName, char *buffer, int remainderBufLen) { int len; -#define HEAD_BUFF_LEN 1024*24 // 16*1024 + (192+32)*2 + insert into .. 
char headBuf[HEAD_BUFF_LEN]; - if (superTblInfo) { - if (AUTO_CREATE_SUBTBL == superTblInfo->autoCreateTable) { + len = snprintf( + headBuf, + HEAD_BUFF_LEN, + "%s.%s values", + dbName, + tableName); + + if (len > remainderBufLen) + return -1; + + tstrncpy(buffer, headBuf, len + 1); + + return len; +} + +static int generateStbSQLHead( + SSuperTable* superTblInfo, + char *tableName, int32_t tableSeq, + char *dbName, + char *buffer, int remainderBufLen) +{ + int len; + + char headBuf[HEAD_BUFF_LEN]; + + if (AUTO_CREATE_SUBTBL == superTblInfo->autoCreateTable) { char* tagsValBuf = NULL; if (0 == superTblInfo->tagSource) { tagsValBuf = generateTagVaulesForStb(superTblInfo, tableSeq); @@ -4995,9 +5151,9 @@ static int generateSQLHead(char *tableName, int32_t tableSeq, headBuf, HEAD_BUFF_LEN, "%s.%s using %s.%s tags %s values", - pThreadInfo->db_name, + dbName, tableName, - pThreadInfo->db_name, + dbName, superTblInfo->sTblName, tagsValBuf); tmfree(tagsValBuf); @@ -5006,22 +5162,14 @@ static int generateSQLHead(char *tableName, int32_t tableSeq, headBuf, HEAD_BUFF_LEN, "%s.%s values", - pThreadInfo->db_name, + dbName, tableName); } else { len = snprintf( headBuf, HEAD_BUFF_LEN, "%s.%s values", - pThreadInfo->db_name, - tableName); - } - } else { - len = snprintf( - headBuf, - HEAD_BUFF_LEN, - "%s.%s values", - pThreadInfo->db_name, + dbName, tableName); } @@ -5033,8 +5181,11 @@ static int generateSQLHead(char *tableName, int32_t tableSeq, return len; } -static int64_t generateInterlaceDataBuffer( - char *tableName, uint64_t batchPerTbl, uint64_t i, uint64_t batchPerTblTimes, +static int32_t generateStbInterlaceData( + SSuperTable *superTblInfo, + char *tableName, uint32_t batchPerTbl, + uint64_t i, + uint32_t batchPerTblTimes, uint64_t tableSeq, threadInfo *pThreadInfo, char *buffer, int64_t insertRows, @@ -5043,10 +5194,11 @@ static int64_t generateInterlaceDataBuffer( { assert(buffer); char *pstr = buffer; - SSuperTable* superTblInfo = pThreadInfo->superTblInfo; - int headLen = generateSQLHead(tableName, tableSeq, pThreadInfo, - superTblInfo, pstr, *pRemainderBufLen); + int headLen = generateStbSQLHead( + superTblInfo, + tableName, tableSeq, pThreadInfo->db_name, + pstr, *pRemainderBufLen); if (headLen <= 0) { return 0; @@ -5060,29 +5212,25 @@ static int64_t generateInterlaceDataBuffer( int64_t dataLen = 0; - verbosePrint("[%d] %s() LN%d i=%"PRIu64" batchPerTblTimes=%"PRIu64" batchPerTbl = %"PRIu64"\n", + verbosePrint("[%d] %s() LN%d i=%"PRIu64" batchPerTblTimes=%u batchPerTbl = %u\n", pThreadInfo->threadID, __func__, __LINE__, i, batchPerTblTimes, batchPerTbl); - if (superTblInfo) { - if (0 == strncasecmp(superTblInfo->startTimestamp, "now", 3)) { + if (0 == strncasecmp(superTblInfo->startTimestamp, "now", 3)) { startTime = taosGetTimestamp(pThreadInfo->time_precision); - } - } else { - startTime = 1500000000000; } - int64_t k = generateDataTail( - superTblInfo, - batchPerTbl, pstr, *pRemainderBufLen, insertRows, 0, - startTime, - &(pThreadInfo->samplePos), &dataLen); + int32_t k = generateStbDataTail( + superTblInfo, + batchPerTbl, pstr, *pRemainderBufLen, insertRows, 0, + startTime, + &(pThreadInfo->samplePos), &dataLen); if (k == batchPerTbl) { pstr += dataLen; *pRemainderBufLen -= dataLen; } else { - debugPrint("%s() LN%d, generated data tail: %"PRIu64", not equal batch per table: %"PRIu64"\n", + debugPrint("%s() LN%d, generated data tail: %u, not equal batch per table: %u\n", __func__, __LINE__, k, batchPerTbl); pstr -= headLen; pstr[0] = '\0'; @@ -5092,50 +5240,361 @@ static int64_t 
generateInterlaceDataBuffer( return k; } -static int64_t getTSRandTail(int64_t timeStampStep, int32_t seq, - int disorderRatio, int disorderRange) +static int64_t generateInterlaceDataWithoutStb( + char *tableName, uint32_t batch, + uint64_t tableSeq, + char *dbName, char *buffer, + int64_t insertRows, + int64_t startTime, + uint64_t *pRemainderBufLen) { - int64_t randTail = timeStampStep * seq; - if (disorderRatio > 0) { - int rand_num = taosRandom() % 100; - if(rand_num < disorderRatio) { - randTail = (randTail + - (taosRandom() % disorderRange + 1)) * (-1); - debugPrint("rand data generated, back %"PRId64"\n", randTail); + assert(buffer); + char *pstr = buffer; + + int headLen = generateSQLHeadWithoutStb( + tableName, dbName, + pstr, *pRemainderBufLen); + + if (headLen <= 0) { + return 0; + } + + pstr += headLen; + *pRemainderBufLen -= headLen; + + int64_t dataLen = 0; + + int32_t k = generateDataTailWithoutStb( + batch, pstr, *pRemainderBufLen, insertRows, 0, + startTime, + &dataLen); + + if (k == batch) { + pstr += dataLen; + *pRemainderBufLen -= dataLen; + } else { + debugPrint("%s() LN%d, generated data tail: %d, not equal batch per table: %u\n", + __func__, __LINE__, k, batch); + pstr -= headLen; + pstr[0] = '\0'; + k = 0; + } + + return k; +} + +#if STMT_IFACE_ENABLED == 1 +static int32_t prepareStmtBindArrayByType(TAOS_BIND *bind, + char *dataType, int32_t dataLen, char **ptr) +{ + if (0 == strncasecmp(dataType, + "BINARY", strlen("BINARY"))) { + if (dataLen > TSDB_MAX_BINARY_LEN) { + errorPrint( "binary length overflow, max size:%u\n", + (uint32_t)TSDB_MAX_BINARY_LEN); + return -1; + } + char *bind_binary = (char *)*ptr; + rand_string(bind_binary, dataLen); + + bind->buffer_type = TSDB_DATA_TYPE_BINARY; + bind->buffer_length = dataLen; + bind->buffer = bind_binary; + bind->length = &bind->buffer_length; + bind->is_null = NULL; + + *ptr += bind->buffer_length; + } else if (0 == strncasecmp(dataType, + "NCHAR", strlen("NCHAR"))) { + if (dataLen > TSDB_MAX_BINARY_LEN) { + errorPrint( "nchar length overflow, max size:%u\n", + (uint32_t)TSDB_MAX_BINARY_LEN); + return -1; + } + char *bind_nchar = (char *)*ptr; + rand_string(bind_nchar, dataLen); + + bind->buffer_type = TSDB_DATA_TYPE_NCHAR; + bind->buffer_length = strlen(bind_nchar); + bind->buffer = bind_nchar; + bind->length = &bind->buffer_length; + bind->is_null = NULL; + + *ptr += bind->buffer_length; + } else if (0 == strncasecmp(dataType, + "INT", strlen("INT"))) { + int32_t *bind_int = (int32_t *)*ptr; + + *bind_int = rand_int(); + bind->buffer_type = TSDB_DATA_TYPE_INT; + bind->buffer_length = sizeof(int32_t); + bind->buffer = bind_int; + bind->length = &bind->buffer_length; + bind->is_null = NULL; + + *ptr += bind->buffer_length; + } else if (0 == strncasecmp(dataType, + "BIGINT", strlen("BIGINT"))) { + int64_t *bind_bigint = (int64_t *)*ptr; + + *bind_bigint = rand_bigint(); + bind->buffer_type = TSDB_DATA_TYPE_BIGINT; + bind->buffer_length = sizeof(int64_t); + bind->buffer = bind_bigint; + bind->length = &bind->buffer_length; + bind->is_null = NULL; + + *ptr += bind->buffer_length; + } else if (0 == strncasecmp(dataType, + "FLOAT", strlen("FLOAT"))) { + float *bind_float = (float *) *ptr; + + *bind_float = rand_float(); + bind->buffer_type = TSDB_DATA_TYPE_FLOAT; + bind->buffer_length = sizeof(float); + bind->buffer = bind_float; + bind->length = &bind->buffer_length; + bind->is_null = NULL; + + *ptr += bind->buffer_length; + } else if (0 == strncasecmp(dataType, + "DOUBLE", strlen("DOUBLE"))) { + double *bind_double = 
(double *)*ptr; + + *bind_double = rand_double(); + bind->buffer_type = TSDB_DATA_TYPE_DOUBLE; + bind->buffer_length = sizeof(double); + bind->buffer = bind_double; + bind->length = &bind->buffer_length; + bind->is_null = NULL; + + *ptr += bind->buffer_length; + } else if (0 == strncasecmp(dataType, + "SMALLINT", strlen("SMALLINT"))) { + int16_t *bind_smallint = (int16_t *)*ptr; + + *bind_smallint = rand_smallint(); + bind->buffer_type = TSDB_DATA_TYPE_SMALLINT; + bind->buffer_length = sizeof(int16_t); + bind->buffer = bind_smallint; + bind->length = &bind->buffer_length; + bind->is_null = NULL; + + *ptr += bind->buffer_length; + } else if (0 == strncasecmp(dataType, + "TINYINT", strlen("TINYINT"))) { + int8_t *bind_tinyint = (int8_t *)*ptr; + + *bind_tinyint = rand_tinyint(); + bind->buffer_type = TSDB_DATA_TYPE_TINYINT; + bind->buffer_length = sizeof(int8_t); + bind->buffer = bind_tinyint; + bind->length = &bind->buffer_length; + bind->is_null = NULL; + *ptr += bind->buffer_length; + } else if (0 == strncasecmp(dataType, + "BOOL", strlen("BOOL"))) { + int8_t *bind_bool = (int8_t *)*ptr; + + *bind_bool = rand_bool(); + bind->buffer_type = TSDB_DATA_TYPE_BOOL; + bind->buffer_length = sizeof(int8_t); + bind->buffer = bind_bool; + bind->length = &bind->buffer_length; + bind->is_null = NULL; + + *ptr += bind->buffer_length; + } else if (0 == strncasecmp(dataType, + "TIMESTAMP", strlen("TIMESTAMP"))) { + int64_t *bind_ts2 = (int64_t *) *ptr; + + *bind_ts2 = rand_bigint(); + bind->buffer_type = TSDB_DATA_TYPE_TIMESTAMP; + bind->buffer_length = sizeof(int64_t); + bind->buffer = bind_ts2; + bind->length = &bind->buffer_length; + bind->is_null = NULL; + + *ptr += bind->buffer_length; + } else { + errorPrint( "No support data type: %s\n", + dataType); + return -1; + } + + return 0; +} + +static int32_t prepareStmtWithoutStb( + TAOS_STMT *stmt, + char *tableName, + uint32_t batch, + int64_t insertRows, + int64_t recordFrom, + int64_t startTime) +{ + int ret = taos_stmt_set_tbname(stmt, tableName); + if (ret != 0) { + errorPrint("failed to execute taos_stmt_set_tbname(%s). return 0x%x. 
reason: %s\n", + tableName, ret, taos_errstr(NULL)); + return ret; + } + + char **data_type = g_args.datatype; + + char *bindArray = malloc(sizeof(TAOS_BIND) * (g_args.num_of_CPR + 1)); + if (bindArray == NULL) { + errorPrint("Failed to allocate %d bind params\n", + (g_args.num_of_CPR + 1)); + return -1; + } + + int32_t k = 0; + for (k = 0; k < batch;) { + /* columnCount + 1 (ts) */ + char data[MAX_DATA_SIZE]; + memset(data, 0, MAX_DATA_SIZE); + + char *ptr = data; + TAOS_BIND *bind = (TAOS_BIND *)(bindArray + 0); + + int64_t *bind_ts; + + bind_ts = (int64_t *)ptr; + bind->buffer_type = TSDB_DATA_TYPE_TIMESTAMP; + *bind_ts = startTime + getTSRandTail( + (int64_t)DEFAULT_TIMESTAMP_STEP, k, + g_args.disorderRatio, + g_args.disorderRange); + bind->buffer_length = sizeof(int64_t); + bind->buffer = bind_ts; + bind->length = &bind->buffer_length; + bind->is_null = NULL; + + ptr += bind->buffer_length; + + for (int i = 0; i < g_args.num_of_CPR; i ++) { + bind = (TAOS_BIND *)((char *)bindArray + (sizeof(TAOS_BIND) * (i + 1))); + if ( -1 == prepareStmtBindArrayByType( + bind, + data_type[i], + g_args.len_of_binary, + &ptr)) { + return -1; + } + } + taos_stmt_bind_param(stmt, (TAOS_BIND *)bindArray); + // if msg > 3MB, break + taos_stmt_add_batch(stmt); + + k++; + recordFrom ++; + if (recordFrom >= insertRows) { + break; } } - return randTail; + return k; } -static int64_t generateProgressiveDataBuffer( +static int32_t prepareStbStmt(SSuperTable *stbInfo, + TAOS_STMT *stmt, + char *tableName, uint32_t batch, + uint64_t insertRows, + uint64_t recordFrom, + int64_t startTime, char *buffer) +{ + int ret = taos_stmt_set_tbname(stmt, tableName); + if (ret != 0) { + errorPrint("failed to execute taos_stmt_set_tbname(%s). return 0x%x. reason: %s\n", + tableName, ret, taos_errstr(NULL)); + return ret; + } + + char *bindArray = malloc(sizeof(TAOS_BIND) * (stbInfo->columnCount + 1)); + if (bindArray == NULL) { + errorPrint("Failed to allocate %d bind params\n", + (stbInfo->columnCount + 1)); + return -1; + } + + bool tsRand; + if (0 == strncasecmp(stbInfo->dataSource, "rand", strlen("rand"))) { + tsRand = true; + } else { + tsRand = false; + } + + uint32_t k; + for (k = 0; k < batch;) { + /* columnCount + 1 (ts) */ + char data[MAX_DATA_SIZE]; + memset(data, 0, MAX_DATA_SIZE); + + char *ptr = data; + TAOS_BIND *bind = (TAOS_BIND *)(bindArray + 0); + + int64_t *bind_ts; + + bind_ts = (int64_t *)ptr; + bind->buffer_type = TSDB_DATA_TYPE_TIMESTAMP; + if (tsRand) { + *bind_ts = startTime + getTSRandTail( + stbInfo->timeStampStep, k, + stbInfo->disorderRatio, + stbInfo->disorderRange); + } else { + *bind_ts = startTime + stbInfo->timeStampStep * k; + } + bind->buffer_length = sizeof(int64_t); + bind->buffer = bind_ts; + bind->length = &bind->buffer_length; + bind->is_null = NULL; + + ptr += bind->buffer_length; + + for (int i = 0; i < stbInfo->columnCount; i ++) { + bind = (TAOS_BIND *)((char *)bindArray + (sizeof(TAOS_BIND) * (i + 1))); + if ( -1 == prepareStmtBindArrayByType( + bind, + stbInfo->columns[i].dataType, + stbInfo->columns[i].dataLen, + &ptr)) { + return -1; + } + } + taos_stmt_bind_param(stmt, (TAOS_BIND *)bindArray); + // if msg > 3MB, break + taos_stmt_add_batch(stmt); + + k++; + recordFrom ++; + if (recordFrom >= insertRows) { + break; + } + } + + return k; +} +#endif + +static int32_t generateStbProgressiveData( + SSuperTable *superTblInfo, char *tableName, int64_t tableSeq, - threadInfo *pThreadInfo, char *buffer, + char *dbName, char *buffer, int64_t insertRows, - uint64_t startFrom, int64_t 
startTime, int64_t *pSamplePos, + uint64_t recordFrom, int64_t startTime, int64_t *pSamplePos, int64_t *pRemainderBufLen) { - SSuperTable* superTblInfo = pThreadInfo->superTblInfo; - - int ncols_per_record = 1; // count first col ts - - if (superTblInfo == NULL) { - int datatypeSeq = 0; - while(g_args.datatype[datatypeSeq]) { - datatypeSeq ++; - ncols_per_record ++; - } - } - assert(buffer != NULL); char *pstr = buffer; - int64_t k = 0; - memset(buffer, 0, *pRemainderBufLen); - int64_t headLen = generateSQLHead(tableName, tableSeq, pThreadInfo, superTblInfo, + int64_t headLen = generateStbSQLHead( + superTblInfo, + tableName, tableSeq, dbName, buffer, *pRemainderBufLen); if (headLen <= 0) { @@ -5145,29 +5604,64 @@ static int64_t generateProgressiveDataBuffer( *pRemainderBufLen -= headLen; int64_t dataLen; - k = generateDataTail(superTblInfo, - g_args.num_of_RPR, pstr, *pRemainderBufLen, insertRows, startFrom, + + return generateStbDataTail(superTblInfo, + g_args.num_of_RPR, pstr, *pRemainderBufLen, + insertRows, recordFrom, startTime, pSamplePos, &dataLen); +} - return k; +static int32_t generateProgressiveDataWithoutStb( + char *tableName, + /* int64_t tableSeq, */ + threadInfo *pThreadInfo, char *buffer, + int64_t insertRows, + uint64_t recordFrom, int64_t startTime, /*int64_t *pSamplePos, */ + int64_t *pRemainderBufLen) +{ + assert(buffer != NULL); + char *pstr = buffer; + + memset(buffer, 0, *pRemainderBufLen); + + int64_t headLen = generateSQLHeadWithoutStb( + tableName, pThreadInfo->db_name, + buffer, *pRemainderBufLen); + + if (headLen <= 0) { + return 0; + } + pstr += headLen; + *pRemainderBufLen -= headLen; + + int64_t dataLen; + + return generateDataTailWithoutStb( + g_args.num_of_RPR, pstr, *pRemainderBufLen, insertRows, recordFrom, + startTime, + /*pSamplePos, */&dataLen); } static void printStatPerThread(threadInfo *pThreadInfo) { - fprintf(stderr, "====thread[%d] completed total inserted rows: %"PRIu64 ", total affected rows: %"PRIu64". %.2f records/second====\n", + fprintf(stderr, "====thread[%d] completed total inserted rows: %"PRIu64 ", total affected rows: %"PRIu64". 
%.2f records/second====\n", pThreadInfo->threadID, pThreadInfo->totalInsertRows, pThreadInfo->totalAffectedRows, (double)(pThreadInfo->totalAffectedRows / (pThreadInfo->totalDelay/1000.0))); } +// sync write interlace data static void* syncWriteInterlace(threadInfo *pThreadInfo) { debugPrint("[%d] %s() LN%d: ### interlace write\n", pThreadInfo->threadID, __func__, __LINE__); int64_t insertRows; - uint64_t interlaceRows; + uint32_t interlaceRows; + uint64_t maxSqlLen; + int64_t nTimeStampStep; + uint64_t insert_interval; SSuperTable* superTblInfo = pThreadInfo->superTblInfo; @@ -5180,62 +5674,30 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { } else { interlaceRows = superTblInfo->interlaceRows; } + maxSqlLen = superTblInfo->maxSqlLen; + nTimeStampStep = superTblInfo->timeStampStep; + insert_interval = superTblInfo->insertInterval; } else { insertRows = g_args.num_of_DPT; interlaceRows = g_args.interlace_rows; + maxSqlLen = g_args.max_sql_len; + nTimeStampStep = DEFAULT_TIMESTAMP_STEP; + insert_interval = g_args.insert_interval; } + debugPrint("[%d] %s() LN%d: start_table_from=%"PRIu64" ntables=%"PRId64" insertRows=%"PRIu64"\n", + pThreadInfo->threadID, __func__, __LINE__, + pThreadInfo->start_table_from, + pThreadInfo->ntables, insertRows); + if (interlaceRows > insertRows) interlaceRows = insertRows; if (interlaceRows > g_args.num_of_RPR) interlaceRows = g_args.num_of_RPR; - int insertMode; - - if (interlaceRows > 0) { - insertMode = INTERLACE_INSERT_MODE; - } else { - insertMode = PROGRESSIVE_INSERT_MODE; - } - - // TODO: prompt tbl count multple interlace rows and batch - // - - uint64_t maxSqlLen = superTblInfo?superTblInfo->maxSqlLen:g_args.max_sql_len; - char* buffer = calloc(maxSqlLen, 1); - if (NULL == buffer) { - errorPrint( "%s() LN%d, Failed to alloc %"PRIu64" Bytes, reason:%s\n", - __func__, __LINE__, maxSqlLen, strerror(errno)); - return NULL; - } - - char tableName[TSDB_TABLE_NAME_LEN]; - - pThreadInfo->totalInsertRows = 0; - pThreadInfo->totalAffectedRows = 0; - - int64_t nTimeStampStep = superTblInfo?superTblInfo->timeStampStep:DEFAULT_TIMESTAMP_STEP; - - uint64_t insert_interval = - superTblInfo?superTblInfo->insertInterval:g_args.insert_interval; - uint64_t st = 0; - uint64_t et = UINT64_MAX; - - uint64_t lastPrintTime = taosGetTimestampMs(); - uint64_t startTs = taosGetTimestampMs(); - uint64_t endTs; - - uint64_t tableSeq = pThreadInfo->start_table_from; - - debugPrint("[%d] %s() LN%d: start_table_from=%"PRIu64" ntables=%"PRId64" insertRows=%"PRIu64"\n", - pThreadInfo->threadID, __func__, __LINE__, pThreadInfo->start_table_from, - pThreadInfo->ntables, insertRows); - - int64_t startTime = pThreadInfo->start_time; - - uint64_t batchPerTbl = interlaceRows; - uint64_t batchPerTblTimes; + uint32_t batchPerTbl = interlaceRows; + uint32_t batchPerTblTimes; if ((interlaceRows > 0) && (pThreadInfo->ntables > 1)) { batchPerTblTimes = @@ -5244,52 +5706,116 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { batchPerTblTimes = 1; } + pThreadInfo->buffer = calloc(maxSqlLen, 1); + if (NULL == pThreadInfo->buffer) { + errorPrint( "%s() LN%d, Failed to alloc %"PRIu64" Bytes, reason:%s\n", + __func__, __LINE__, maxSqlLen, strerror(errno)); + return NULL; + } + + pThreadInfo->totalInsertRows = 0; + pThreadInfo->totalAffectedRows = 0; + + uint64_t st = 0; + uint64_t et = UINT64_MAX; + + uint64_t lastPrintTime = taosGetTimestampMs(); + uint64_t startTs = taosGetTimestampMs(); + uint64_t endTs; + + uint64_t tableSeq = pThreadInfo->start_table_from; + int64_t 
startTime = pThreadInfo->start_time; + uint64_t generatedRecPerTbl = 0; bool flagSleep = true; uint64_t sleepTimeTotal = 0; - char *strInsertInto = "insert into "; - int nInsertBufLen = strlen(strInsertInto); - while(pThreadInfo->totalInsertRows < pThreadInfo->ntables * insertRows) { if ((flagSleep) && (insert_interval)) { st = taosGetTimestampMs(); flagSleep = false; } // generate data - memset(buffer, 0, maxSqlLen); + memset(pThreadInfo->buffer, 0, maxSqlLen); uint64_t remainderBufLen = maxSqlLen; - char *pstr = buffer; + char *pstr = pThreadInfo->buffer; - int len = snprintf(pstr, nInsertBufLen + 1, "%s", strInsertInto); + int len = snprintf(pstr, + strlen(STR_INSERT_INTO) + 1, "%s", STR_INSERT_INTO); pstr += len; remainderBufLen -= len; - uint64_t recOfBatch = 0; + uint32_t recOfBatch = 0; + + for (uint32_t i = 0; i < batchPerTblTimes; i ++) { + char tableName[TSDB_TABLE_NAME_LEN]; - for (uint64_t i = 0; i < batchPerTblTimes; i ++) { getTableName(tableName, pThreadInfo, tableSeq); if (0 == strlen(tableName)) { errorPrint("[%d] %s() LN%d, getTableName return null\n", pThreadInfo->threadID, __func__, __LINE__); - free(buffer); + free(pThreadInfo->buffer); return NULL; } uint64_t oldRemainderLen = remainderBufLen; - int64_t generated = generateInterlaceDataBuffer( - tableName, batchPerTbl, i, batchPerTblTimes, - tableSeq, - pThreadInfo, pstr, - insertRows, - startTime, - &remainderBufLen); - debugPrint("[%d] %s() LN%d, generated records is %"PRId64"\n", + int32_t generated; + if (superTblInfo) { + if (superTblInfo->iface == STMT_IFACE) { +#if STMT_IFACE_ENABLED == 1 + generated = prepareStbStmt(superTblInfo, + pThreadInfo->stmt, + tableName, + batchPerTbl, + insertRows, i, + startTime, + pThreadInfo->buffer); +#else + generated = -1; +#endif + } else { + generated = generateStbInterlaceData( + superTblInfo, + tableName, batchPerTbl, i, + batchPerTblTimes, + tableSeq, + pThreadInfo, pstr, + insertRows, + startTime, + &remainderBufLen); + } + } else { + if (g_args.iface == STMT_IFACE) { + debugPrint("[%d] %s() LN%d, tableName:%s, batch:%d startTime:%"PRId64"\n", + pThreadInfo->threadID, + __func__, __LINE__, + tableName, batchPerTbl, startTime); +#if STMT_IFACE_ENABLED == 1 + generated = prepareStmtWithoutStb( + pThreadInfo->stmt, tableName, + batchPerTbl, + insertRows, i, + startTime); +#else + generated = -1; +#endif + } else { + generated = generateInterlaceDataWithoutStb( + tableName, batchPerTbl, + tableSeq, + pThreadInfo->db_name, pstr, + insertRows, + startTime, + &remainderBufLen); + } + } + + debugPrint("[%d] %s() LN%d, generated records is %d\n", pThreadInfo->threadID, __func__, __LINE__, generated); if (generated < 0) { - errorPrint("[%d] %s() LN%d, generated records is %"PRId64"\n", + errorPrint("[%d] %s() LN%d, generated records is %d\n", pThreadInfo->threadID, __func__, __LINE__, generated); goto free_of_interlace; } else if (generated == 0) { @@ -5298,15 +5824,15 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { tableSeq ++; recOfBatch += batchPerTbl; + pstr += (oldRemainderLen - remainderBufLen); -// startTime += batchPerTbl * superTblInfo->timeStampStep; pThreadInfo->totalInsertRows += batchPerTbl; - verbosePrint("[%d] %s() LN%d batchPerTbl=%"PRId64" recOfBatch=%"PRId64"\n", + + verbosePrint("[%d] %s() LN%d batchPerTbl=%d recOfBatch=%d\n", pThreadInfo->threadID, __func__, __LINE__, batchPerTbl, recOfBatch); - if (insertMode == INTERLACE_INSERT_MODE) { - if (tableSeq == pThreadInfo->start_table_from + pThreadInfo->ntables) { + if (tableSeq == 
pThreadInfo->start_table_from + pThreadInfo->ntables) { // turn to first table tableSeq = pThreadInfo->start_table_from; generatedRecPerTbl += batchPerTbl; @@ -5318,13 +5844,12 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { if (generatedRecPerTbl >= insertRows) break; - int remainRows = insertRows - generatedRecPerTbl; + int64_t remainRows = insertRows - generatedRecPerTbl; if ((remainRows > 0) && (batchPerTbl > remainRows)) batchPerTbl = remainRows; if (pThreadInfo->ntables * batchPerTbl < g_args.num_of_RPR) break; - } } verbosePrint("[%d] %s() LN%d generatedRecPerTbl=%"PRId64" insertRows=%"PRId64"\n", @@ -5335,22 +5860,22 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { break; } - verbosePrint("[%d] %s() LN%d recOfBatch=%"PRIu64" totalInsertRows=%"PRIu64"\n", + verbosePrint("[%d] %s() LN%d recOfBatch=%d totalInsertRows=%"PRIu64"\n", pThreadInfo->threadID, __func__, __LINE__, recOfBatch, pThreadInfo->totalInsertRows); verbosePrint("[%d] %s() LN%d, buffer=%s\n", - pThreadInfo->threadID, __func__, __LINE__, buffer); + pThreadInfo->threadID, __func__, __LINE__, pThreadInfo->buffer); startTs = taosGetTimestampMs(); if (recOfBatch == 0) { - errorPrint("[%d] %s() LN%d try inserting records of batch is %"PRIu64"\n", + errorPrint("[%d] %s() LN%d try inserting records of batch is %d\n", pThreadInfo->threadID, __func__, __LINE__, recOfBatch); errorPrint("%s\n", "\tPlease check if the batch or the buffer length is proper value!\n"); goto free_of_interlace; } - int64_t affectedRows = execInsert(pThreadInfo, buffer, recOfBatch); + int64_t affectedRows = execInsert(pThreadInfo, recOfBatch); endTs = taosGetTimestampMs(); uint64_t delay = endTs - startTs; @@ -5366,9 +5891,9 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { pThreadInfo->totalDelay += delay; if (recOfBatch != affectedRows) { - errorPrint("[%d] %s() LN%d execInsert insert %"PRIu64", affected rows: %"PRId64"\n%s\n", + errorPrint("[%d] %s() LN%d execInsert insert %d, affected rows: %"PRId64"\n%s\n", pThreadInfo->threadID, __func__, __LINE__, - recOfBatch, affectedRows, buffer); + recOfBatch, affectedRows, pThreadInfo->buffer); goto free_of_interlace; } @@ -5387,8 +5912,8 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { et = taosGetTimestampMs(); if (insert_interval > (et - st) ) { - int sleepTime = insert_interval - (et -st); - performancePrint("%s() LN%d sleep: %d ms for insert interval\n", + uint64_t sleepTime = insert_interval - (et -st); + performancePrint("%s() LN%d sleep: %"PRId64" ms for insert interval\n", __func__, __LINE__, sleepTime); taosMsleep(sleepTime); // ms sleepTimeTotal += insert_interval; @@ -5397,27 +5922,26 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { } free_of_interlace: - tmfree(buffer); + tmfree(pThreadInfo->buffer); printStatPerThread(pThreadInfo); return NULL; } -// sync insertion -/* - 1 thread: 100 tables * 2000 rows/s - 1 thread: 10 tables * 20000 rows/s - 6 thread: 300 tables * 2000 rows/s - - 2 taosinsertdata , 1 thread: 10 tables * 20000 rows/s -*/ +// sync insertion progressive data static void* syncWriteProgressive(threadInfo *pThreadInfo) { debugPrint("%s() LN%d: ### progressive write\n", __func__, __LINE__); SSuperTable* superTblInfo = pThreadInfo->superTblInfo; uint64_t maxSqlLen = superTblInfo?superTblInfo->maxSqlLen:g_args.max_sql_len; + int64_t timeStampStep = + superTblInfo?superTblInfo->timeStampStep:DEFAULT_TIMESTAMP_STEP; + int64_t insertRows = + (superTblInfo)?superTblInfo->insertRows:g_args.num_of_DPT; + verbosePrint("%s() LN%d 
insertRows=%"PRId64"\n", + __func__, __LINE__, insertRows); - char* buffer = calloc(maxSqlLen, 1); - if (NULL == buffer) { + pThreadInfo->buffer = calloc(maxSqlLen, 1); + if (NULL == pThreadInfo->buffer) { errorPrint( "Failed to alloc %"PRIu64" Bytes, reason:%s\n", maxSqlLen, strerror(errno)); @@ -5428,35 +5952,17 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) { uint64_t startTs = taosGetTimestampMs(); uint64_t endTs; - int64_t timeStampStep = - superTblInfo?superTblInfo->timeStampStep:DEFAULT_TIMESTAMP_STEP; -/* int insert_interval = - superTblInfo?superTblInfo->insertInterval:g_args.insert_interval; - uint64_t st = 0; - uint64_t et = 0xffffffff; - */ - pThreadInfo->totalInsertRows = 0; pThreadInfo->totalAffectedRows = 0; pThreadInfo->samplePos = 0; - for (uint64_t tableSeq = - pThreadInfo->start_table_from; tableSeq <= pThreadInfo->end_table_to; - tableSeq ++) { + for (uint64_t tableSeq = pThreadInfo->start_table_from; + tableSeq <= pThreadInfo->end_table_to; + tableSeq ++) { int64_t start_time = pThreadInfo->start_time; - int64_t insertRows = (superTblInfo)?superTblInfo->insertRows:g_args.num_of_DPT; - - verbosePrint("%s() LN%d insertRows=%"PRId64"\n", __func__, __LINE__, insertRows); - for (uint64_t i = 0; i < insertRows;) { - /* - if (insert_interval) { - st = taosGetTimestampMs(); - } - */ - char tableName[TSDB_TABLE_NAME_LEN]; getTableName(tableName, pThreadInfo, tableSeq); verbosePrint("%s() LN%d: tid=%d seq=%"PRId64" tableName=%s\n", @@ -5464,19 +5970,57 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) { pThreadInfo->threadID, tableSeq, tableName); int64_t remainderBufLen = maxSqlLen; - char *pstr = buffer; - int nInsertBufLen = strlen("insert into "); + char *pstr = pThreadInfo->buffer; - int len = snprintf(pstr, nInsertBufLen + 1, "%s", "insert into "); + int len = snprintf(pstr, + strlen(STR_INSERT_INTO) + 1, "%s", STR_INSERT_INTO); pstr += len; remainderBufLen -= len; - int64_t generated = generateProgressiveDataBuffer( - tableName, tableSeq, pThreadInfo, pstr, insertRows, - i, start_time, - &(pThreadInfo->samplePos), - &remainderBufLen); + int32_t generated; + if (superTblInfo) { + if (superTblInfo->iface == STMT_IFACE) { +#if STMT_IFACE_ENABLED == 1 + generated = prepareStbStmt( + superTblInfo, + pThreadInfo->stmt, + tableName, + g_args.num_of_RPR, + insertRows, i, start_time, pstr); +#else + generated = -1; +#endif + } else { + generated = generateStbProgressiveData( + superTblInfo, + tableName, tableSeq, pThreadInfo->db_name, pstr, + insertRows, i, start_time, + &(pThreadInfo->samplePos), + &remainderBufLen); + } + } else { + if (g_args.iface == STMT_IFACE) { +#if STMT_IFACE_ENABLED == 1 + generated = prepareStmtWithoutStb( + pThreadInfo->stmt, + tableName, + g_args.num_of_RPR, + insertRows, i, + start_time); +#else + generated = -1; +#endif + } else { + generated = generateProgressiveDataWithoutStb( + tableName, + /* tableSeq, */ + pThreadInfo, pstr, insertRows, + i, start_time, + /* &(pThreadInfo->samplePos), */ + &remainderBufLen); + } + } if (generated > 0) i += generated; else @@ -5487,13 +6031,13 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) { startTs = taosGetTimestampMs(); - int64_t affectedRows = execInsert(pThreadInfo, buffer, generated); + int32_t affectedRows = execInsert(pThreadInfo, generated); endTs = taosGetTimestampMs(); uint64_t delay = endTs - startTs; performancePrint("%s() LN%d, insert execution time is %"PRId64"ms\n", __func__, __LINE__, delay); - verbosePrint("[%d] %s() LN%d affectedRows=%"PRId64"\n", + 
verbosePrint("[%d] %s() LN%d affectedRows=%d\n", pThreadInfo->threadID, __func__, __LINE__, affectedRows); @@ -5503,7 +6047,7 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) { pThreadInfo->totalDelay += delay; if (affectedRows < 0) { - errorPrint("%s() LN%d, affected rows: %"PRId64"\n", + errorPrint("%s() LN%d, affected rows: %d\n", __func__, __LINE__, affectedRows); goto free_of_progressive; } @@ -5521,32 +6065,19 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) { if (i >= insertRows) break; -/* - if (insert_interval) { - et = taosGetTimestampMs(); - - if (insert_interval > ((et - st)) ) { - int sleep_time = insert_interval - (et -st); - performancePrint("%s() LN%d sleep: %d ms for insert interval\n", - __func__, __LINE__, sleep_time); - taosMsleep(sleep_time); // ms - } - } - */ } // num_of_DPT - if (g_args.verbose_print) { - if ((tableSeq == pThreadInfo->ntables - 1) && superTblInfo && + if ((g_args.verbose_print) && + (tableSeq == pThreadInfo->ntables - 1) && (superTblInfo) && (0 == strncasecmp( superTblInfo->dataSource, "sample", strlen("sample")))) { verbosePrint("%s() LN%d samplePos=%"PRId64"\n", __func__, __LINE__, pThreadInfo->samplePos); - } } } // tableSeq free_of_progressive: - tmfree(buffer); + tmfree(pThreadInfo->buffer); printStatPerThread(pThreadInfo); return NULL; } @@ -5556,7 +6087,7 @@ static void* syncWrite(void *sarg) { threadInfo *pThreadInfo = (threadInfo *)sarg; SSuperTable* superTblInfo = pThreadInfo->superTblInfo; - int interlaceRows; + uint32_t interlaceRows; if (superTblInfo) { if ((superTblInfo->interlaceRows == 0) @@ -5576,7 +6107,6 @@ static void* syncWrite(void *sarg) { // progressive mode return syncWriteProgressive(pThreadInfo); } - } static void callBack(void *param, TAOS_RES *res, int code) { @@ -5616,9 +6146,10 @@ static void callBack(void *param, TAOS_RES *res, int code) { && rand_num < pThreadInfo->superTblInfo->disorderRatio) { int64_t d = pThreadInfo->lastTs - (taosRandom() % pThreadInfo->superTblInfo->disorderRange + 1); - generateRowData(data, d, pThreadInfo->superTblInfo); + generateStbRowData(pThreadInfo->superTblInfo, data, d); } else { - generateRowData(data, pThreadInfo->lastTs += 1000, pThreadInfo->superTblInfo); + generateStbRowData(pThreadInfo->superTblInfo, + data, pThreadInfo->lastTs += 1000); } pstr += sprintf(pstr, "%s", data); pThreadInfo->counter++; @@ -5686,24 +6217,6 @@ static int convertHostToServAddr(char *host, uint16_t port, struct sockaddr_in * static void startMultiThreadInsertData(int threads, char* db_name, char* precision,SSuperTable* superTblInfo) { - pthread_t *pids = malloc(threads * sizeof(pthread_t)); - assert(pids != NULL); - - threadInfo *infos = malloc(threads * sizeof(threadInfo)); - assert(infos != NULL); - - memset(pids, 0, threads * sizeof(pthread_t)); - memset(infos, 0, threads * sizeof(threadInfo)); - - //TAOS* taos; - //if (0 == strncasecmp(superTblInfo->insertMode, "taosc", 5)) { - // taos = taos_connect(g_Dbs.host, g_Dbs.user, g_Dbs.password, db_name, g_Dbs.port); - // if (NULL == taos) { - // printf("connect to server fail, reason: %s\n", taos_errstr(NULL)); - // exit(-1); - // } - //} - int32_t timePrec = TSDB_TIME_PRECISION_MILLI; if (0 != precision[0]) { if (0 == strncasecmp(precision, "ms", 2)) { @@ -5755,17 +6268,17 @@ static void startMultiThreadInsertData(int threads, char* db_name, } } - TAOS* taos = taos_connect( + TAOS* taos0 = taos_connect( g_Dbs.host, g_Dbs.user, g_Dbs.password, db_name, g_Dbs.port); - if (NULL == taos) { + if (NULL == taos0) { errorPrint("%s() LN%d, 
connect to server fail , reason: %s\n", __func__, __LINE__, taos_errstr(NULL)); exit(-1); } int64_t ntables = 0; - uint64_t startFrom; + uint64_t tableFrom; if (superTblInfo) { int64_t limit; @@ -5792,7 +6305,7 @@ static void startMultiThreadInsertData(int threads, char* db_name, } ntables = limit; - startFrom = offset; + tableFrom = offset; if ((superTblInfo->childTblExists != TBL_NO_EXISTS) && ((superTblInfo->childTblOffset + superTblInfo->childTblLimit ) @@ -5811,23 +6324,23 @@ static void startMultiThreadInsertData(int threads, char* db_name, limit * TSDB_TABLE_NAME_LEN); if (superTblInfo->childTblName == NULL) { errorPrint("%s() LN%d, alloc memory failed!\n", __func__, __LINE__); - taos_close(taos); + taos_close(taos0); exit(-1); } int64_t childTblCount; getChildNameOfSuperTableWithLimitAndOffset( - taos, + taos0, db_name, superTblInfo->sTblName, &superTblInfo->childTblName, &childTblCount, limit, offset); } else { ntables = g_args.num_of_tables; - startFrom = 0; + tableFrom = 0; } - taos_close(taos); + taos_close(taos0); int64_t a = ntables / threads; if (a < 1) { @@ -5841,11 +6354,22 @@ static void startMultiThreadInsertData(int threads, char* db_name, } if ((superTblInfo) - && (0 == strncasecmp(superTblInfo->insertMode, "rest", strlen("rest")))) { - if (convertHostToServAddr(g_Dbs.host, g_Dbs.port, &(g_Dbs.serv_addr)) != 0) - exit(-1); + && (superTblInfo->iface == REST_IFACE)) { + if (convertHostToServAddr( + g_Dbs.host, g_Dbs.port, &(g_Dbs.serv_addr)) != 0) { + exit(-1); + } } + pthread_t *pids = malloc(threads * sizeof(pthread_t)); + assert(pids != NULL); + + threadInfo *infos = calloc(1, threads * sizeof(threadInfo)); + assert(infos != NULL); + + memset(pids, 0, threads * sizeof(pthread_t)); + memset(infos, 0, threads * sizeof(threadInfo)); + for (int i = 0; i < threads; i++) { threadInfo *pThreadInfo = infos + i; pThreadInfo->threadID = i; @@ -5857,17 +6381,59 @@ static void startMultiThreadInsertData(int threads, char* db_name, pThreadInfo->minDelay = UINT64_MAX; if ((NULL == superTblInfo) || - (0 == strncasecmp(superTblInfo->insertMode, "taosc", 5))) { - //pThreadInfo->taos = taos; + (superTblInfo->iface != REST_IFACE)) { + //t_info->taos = taos; pThreadInfo->taos = taos_connect( g_Dbs.host, g_Dbs.user, g_Dbs.password, db_name, g_Dbs.port); if (NULL == pThreadInfo->taos) { errorPrint( - "connect to server fail from insert sub thread, reason: %s\n", + "%s() LN%d, connect to server fail from insert sub thread, reason: %s\n", + __func__, __LINE__, taos_errstr(NULL)); + free(infos); exit(-1); } + + if ((g_args.iface == STMT_IFACE) + || ((superTblInfo) && (superTblInfo->iface == STMT_IFACE))) { + + int columnCount; + if (superTblInfo) { + columnCount = superTblInfo->columnCount; + } else { + columnCount = g_args.num_of_CPR; + } + + pThreadInfo->stmt = taos_stmt_init(pThreadInfo->taos); + if (NULL == pThreadInfo->stmt) { + errorPrint( + "%s() LN%d, failed init stmt, reason: %s\n", + __func__, __LINE__, + taos_errstr(NULL)); + free(pids); + free(infos); + exit(-1); + } + + char buffer[3000]; + char *pstr = buffer; + pstr += sprintf(pstr, "INSERT INTO ? values(?"); + + for (int col = 0; col < columnCount; col ++) { + pstr += sprintf(pstr, ",?"); + } + pstr += sprintf(pstr, ")"); + + int ret = taos_stmt_prepare(pThreadInfo->stmt, buffer, 0); + if (ret != 0){ + errorPrint("failed to execute taos_stmt_prepare. return 0x%x. 
reason: %s\n", + ret, taos_errstr(NULL)); + free(pids); + free(infos); + exit(-1); + } + } } else { pThreadInfo->taos = NULL; } @@ -5875,10 +6441,10 @@ static void startMultiThreadInsertData(int threads, char* db_name, /* if ((NULL == superTblInfo) || (0 == superTblInfo->multiThreadWriteOneTbl)) { */ - pThreadInfo->start_table_from = startFrom; + pThreadInfo->start_table_from = tableFrom; pThreadInfo->ntables = iend_table_to = i < b ? startFrom + a : startFrom + a - 1; - startFrom = pThreadInfo->end_table_to + 1; + pThreadInfo->end_table_to = i < b ? tableFrom + a : tableFrom + a - 1; + tableFrom = pThreadInfo->end_table_to + 1; /* } else { pThreadInfo->start_table_from = 0; pThreadInfo->ntables = superTblInfo->childTblCount; @@ -5906,6 +6472,11 @@ static void startMultiThreadInsertData(int threads, char* db_name, for (int i = 0; i < threads; i++) { threadInfo *pThreadInfo = infos + i; + tsem_destroy(&(pThreadInfo->lock_sem)); + + if (pThreadInfo->stmt) { + taos_stmt_close(pThreadInfo->stmt); + } tsem_destroy(&(pThreadInfo->lock_sem)); taos_close(pThreadInfo->taos); @@ -6182,13 +6753,13 @@ static int insertTestProcess() { } } - taosMsleep(1000); + // taosMsleep(1000); // create sub threads for inserting data //start = taosGetTimestampMs(); for (int i = 0; i < g_Dbs.dbCount; i++) { if (g_Dbs.use_metric) { if (g_Dbs.db[i].superTblCount > 0) { - for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) { + for (uint64_t j = 0; j < g_Dbs.db[i].superTblCount; j++) { SSuperTable* superTblInfo = &g_Dbs.db[i].superTbls[j]; @@ -6512,15 +7083,15 @@ static int queryTestProcess() { b = ntables % threads; } - uint64_t startFrom = 0; + uint64_t tableFrom = 0; for (int i = 0; i < threads; i++) { threadInfo *pThreadInfo = infosOfSub + i; pThreadInfo->threadID = i; - pThreadInfo->start_table_from = startFrom; + pThreadInfo->start_table_from = tableFrom; pThreadInfo->ntables = iend_table_to = i < b ? startFrom + a : startFrom + a - 1; - startFrom = pThreadInfo->end_table_to + 1; + pThreadInfo->end_table_to = i < b ? 
tableFrom + a : tableFrom + a - 1; + tableFrom = pThreadInfo->end_table_to + 1; pThreadInfo->taos = NULL; // TODO: workaround to use separate taos connection; pthread_create(pidsOfSub + i, NULL, superTableQuery, pThreadInfo); } @@ -6901,12 +7472,12 @@ static int subscribeTestProcess() { //==== create threads for query for specified table if (g_queryInfo.specifiedQueryInfo.sqlCount <= 0) { - debugPrint("%s() LN%d, sepcified query sqlCount %d.\n", + debugPrint("%s() LN%d, sepcified query sqlCount %"PRIu64".\n", __func__, __LINE__, g_queryInfo.specifiedQueryInfo.sqlCount); } else { if (g_queryInfo.specifiedQueryInfo.concurrent <= 0) { - errorPrint("%s() LN%d, sepcified query sqlCount %d.\n", + errorPrint("%s() LN%d, sepcified query sqlCount %"PRIu64".\n", __func__, __LINE__, g_queryInfo.specifiedQueryInfo.sqlCount); exit(-1); @@ -6939,7 +7510,7 @@ static int subscribeTestProcess() { //==== create threads for super table query if (g_queryInfo.superQueryInfo.sqlCount <= 0) { - debugPrint("%s() LN%d, super table query sqlCount %d.\n", + debugPrint("%s() LN%d, super table query sqlCount %"PRIu64".\n", __func__, __LINE__, g_queryInfo.superQueryInfo.sqlCount); } else { @@ -6975,17 +7546,17 @@ static int subscribeTestProcess() { } for (uint64_t i = 0; i < g_queryInfo.superQueryInfo.sqlCount; i++) { - uint64_t startFrom = 0; + uint64_t tableFrom = 0; for (int j = 0; j < threads; j++) { uint64_t seq = i * threads + j; threadInfo *pThreadInfo = infosOfStable + seq; pThreadInfo->threadID = seq; pThreadInfo->querySeq = i; - pThreadInfo->start_table_from = startFrom; + pThreadInfo->start_table_from = tableFrom; pThreadInfo->ntables = jend_table_to = jend_table_to + 1; + pThreadInfo->end_table_to = jend_table_to + 1; pThreadInfo->taos = NULL; // TODO: workaround to use separate taos connection; pthread_create(pidsOfStable + seq, NULL, superSubscribe, pThreadInfo); @@ -7104,7 +7675,7 @@ static void setParaFromArg(){ tstrncpy(g_Dbs.db[0].superTbls[0].childTblPrefix, g_args.tb_prefix, MAX_TB_NAME_SIZE); tstrncpy(g_Dbs.db[0].superTbls[0].dataSource, "rand", MAX_TB_NAME_SIZE); - tstrncpy(g_Dbs.db[0].superTbls[0].insertMode, "taosc", MAX_TB_NAME_SIZE); + g_Dbs.db[0].superTbls[0].iface = g_args.iface; tstrncpy(g_Dbs.db[0].superTbls[0].startTimestamp, "2017-07-14 10:40:00.000", MAX_TB_NAME_SIZE); g_Dbs.db[0].superTbls[0].timeStampStep = DEFAULT_TIMESTAMP_STEP; From ef58f09931fe7cffbf16894596b3e5097f7bd7c1 Mon Sep 17 00:00:00 2001 From: dapan1121 <89396746@qq.com> Date: Fri, 28 May 2021 19:09:55 +0800 Subject: [PATCH 29/82] support alter column length --- src/client/src/tscSQLParser.c | 32 +- src/inc/ttokendef.h | 100 +- src/mnode/src/mnodeTable.c | 12 +- src/query/inc/sql.y | 14 +- src/query/src/qSqlParser.c | 2 +- src/query/src/sql.c | 1664 ++++++++++++++++++++------------- src/util/src/ttokenizer.c | 3 +- 7 files changed, 1110 insertions(+), 717 deletions(-) diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index 61b659f96c..d5141fad10 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -5074,6 +5074,8 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { const char* msg18 = "primary timestamp column cannot be dropped"; const char* msg19 = "invalid new tag name"; const char* msg20 = "table is not super table"; + const char* msg21 = "only binary/nchar column length could be altered"; + const char* msg22 = "invalid column length"; int32_t code = TSDB_CODE_SUCCESS; @@ -5110,7 +5112,7 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* 
pInfo) { } } else if ((pAlterSQL->type == TSDB_ALTER_TABLE_UPDATE_TAG_VAL) && (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo))) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4); - } else if ((pAlterSQL->type == TSDB_ALTER_TABLE_ADD_COLUMN || pAlterSQL->type == TSDB_ALTER_TABLE_DROP_COLUMN) && + } else if ((pAlterSQL->type == TSDB_ALTER_TABLE_ADD_COLUMN || pAlterSQL->type == TSDB_ALTER_TABLE_DROP_COLUMN || pAlterSQL->type == TSDB_ALTER_TABLE_CHANGE_COLUMN) && UTIL_TABLE_IS_CHILD_TABLE(pTableMetaInfo)) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg6); } @@ -5326,6 +5328,34 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { tstrncpy(name1, pItem->pVar.pz, sizeof(name1)); TAOS_FIELD f = tscCreateField(TSDB_DATA_TYPE_INT, name1, tDataTypes[TSDB_DATA_TYPE_INT].bytes); tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f); + } else if (pAlterSQL->type == TSDB_ALTER_TABLE_CHANGE_COLUMN) { + if (taosArrayGetSize(pAlterSQL->pAddColumns) != 2) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), NULL); + } + + tVariantListItem* pItem = taosArrayGet(pAlterSQL->pAddColumns, 0); + + SColumnIndex columnIndex = COLUMN_INDEX_INITIALIZER; + SStrToken name = {.type = TK_STRING, .z = pItem->pVar.pz, .n = pItem->pVar.nLen}; + if (getColumnIndexByName(pCmd, &name, pQueryInfo, &columnIndex) != TSDB_CODE_SUCCESS) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg17); + } + + SSchema* pColSchema = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, columnIndex.columnIndex); + + if (pColSchema->type != TSDB_DATA_TYPE_BINARY && pColSchema->type != TSDB_DATA_TYPE_NCHAR) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg21); + } + + pItem = taosArrayGet(pAlterSQL->pAddColumns, 1); + int64_t nlen = 0; + + if (tVariantDump(&pItem->pVar, (char *)&nlen, TSDB_DATA_TYPE_BIGINT, false) < 0 || nlen <= 0) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg22); + } + + TAOS_FIELD f = tscCreateField(pColSchema->type, name.z, nlen); + tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f); } return TSDB_CODE_SUCCESS; diff --git a/src/inc/ttokendef.h b/src/inc/ttokendef.h index ef3f8ed1fb..51272e15f5 100644 --- a/src/inc/ttokendef.h +++ b/src/inc/ttokendef.h @@ -155,54 +155,58 @@ #define TK_SYNCDB 136 #define TK_ADD 137 #define TK_COLUMN 138 -#define TK_TAG 139 -#define TK_CHANGE 140 -#define TK_SET 141 -#define TK_KILL 142 -#define TK_CONNECTION 143 -#define TK_STREAM 144 -#define TK_COLON 145 -#define TK_ABORT 146 -#define TK_AFTER 147 -#define TK_ATTACH 148 -#define TK_BEFORE 149 -#define TK_BEGIN 150 -#define TK_CASCADE 151 -#define TK_CLUSTER 152 -#define TK_CONFLICT 153 -#define TK_COPY 154 -#define TK_DEFERRED 155 -#define TK_DELIMITERS 156 -#define TK_DETACH 157 -#define TK_EACH 158 -#define TK_END 159 -#define TK_EXPLAIN 160 -#define TK_FAIL 161 -#define TK_FOR 162 -#define TK_IGNORE 163 -#define TK_IMMEDIATE 164 -#define TK_INITIALLY 165 -#define TK_INSTEAD 166 -#define TK_MATCH 167 -#define TK_KEY 168 -#define TK_OF 169 -#define TK_RAISE 170 -#define TK_REPLACE 171 -#define TK_RESTRICT 172 -#define TK_ROW 173 -#define TK_STATEMENT 174 -#define TK_TRIGGER 175 -#define TK_VIEW 176 -#define TK_SEMI 177 -#define TK_NONE 178 -#define TK_PREV 179 -#define TK_LINEAR 180 -#define TK_IMPORT 181 -#define TK_TBNAME 182 -#define TK_JOIN 183 -#define TK_INSERT 184 -#define TK_INTO 185 -#define TK_VALUES 186 +#define TK_LENGTH 139 +#define TK_TAG 140 +#define TK_CHANGE 141 +#define TK_SET 142 +#define TK_KILL 143 +#define TK_CONNECTION 144 +#define TK_STREAM 145 +#define 
TK_COLON 146 +#define TK_ABORT 147 +#define TK_AFTER 148 +#define TK_ATTACH 149 +#define TK_BEFORE 150 +#define TK_BEGIN 151 +#define TK_CASCADE 152 +#define TK_CLUSTER 153 +#define TK_CONFLICT 154 +#define TK_COPY 155 +#define TK_DEFERRED 156 +#define TK_DELIMITERS 157 +#define TK_DETACH 158 +#define TK_EACH 159 +#define TK_END 160 +#define TK_EXPLAIN 161 +#define TK_FAIL 162 +#define TK_FOR 163 +#define TK_IGNORE 164 +#define TK_IMMEDIATE 165 +#define TK_INITIALLY 166 +#define TK_INSTEAD 167 +#define TK_MATCH 168 +#define TK_KEY 169 +#define TK_OF 170 +#define TK_RAISE 171 +#define TK_REPLACE 172 +#define TK_RESTRICT 173 +#define TK_ROW 174 +#define TK_STATEMENT 175 +#define TK_TRIGGER 176 +#define TK_VIEW 177 +#define TK_SEMI 178 +#define TK_NONE 179 +#define TK_PREV 180 +#define TK_LINEAR 181 +#define TK_IMPORT 182 +#define TK_TBNAME 183 +#define TK_JOIN 184 +#define TK_INSERT 185 +#define TK_INTO 186 +#define TK_VALUES 187 + + + #define TK_SPACE 300 diff --git a/src/mnode/src/mnodeTable.c b/src/mnode/src/mnodeTable.c index 4e879537e4..89d13a4c12 100644 --- a/src/mnode/src/mnodeTable.c +++ b/src/mnode/src/mnodeTable.c @@ -3100,7 +3100,10 @@ static int32_t mnodeProcessAlterTableMsg(SMnodeMsg *pMsg) { } else if (pAlter->type == TSDB_ALTER_TABLE_DROP_COLUMN) { code = mnodeDropSuperTableColumn(pMsg, pAlter->schema[0].name); } else if (pAlter->type == TSDB_ALTER_TABLE_CHANGE_COLUMN) { - code = mnodeChangeSuperTableColumn(pMsg, pAlter->schema[0].name, pAlter->schema[1].name); + //code = mnodeChangeSuperTableColumn(pMsg, pAlter->schema[0].name, pAlter->schema[1].name); + (void)mnodeChangeSuperTableColumn; + mError("change table[%s] column[%s] length to [%d] is not processed", pAlter->tableFname, pAlter->schema[0].name, pAlter->schema[0].bytes); + code = TSDB_CODE_SUCCESS; } else { } } else { @@ -3112,7 +3115,10 @@ static int32_t mnodeProcessAlterTableMsg(SMnodeMsg *pMsg) { } else if (pAlter->type == TSDB_ALTER_TABLE_DROP_COLUMN) { code = mnodeDropNormalTableColumn(pMsg, pAlter->schema[0].name); } else if (pAlter->type == TSDB_ALTER_TABLE_CHANGE_COLUMN) { - code = mnodeChangeNormalTableColumn(pMsg, pAlter->schema[0].name, pAlter->schema[1].name); + //code = mnodeChangeNormalTableColumn(pMsg, pAlter->schema[0].name, pAlter->schema[1].name); + (void)mnodeChangeNormalTableColumn; + mError("change table[%s] column[%s] length to [%d] is not processed", pAlter->tableFname, pAlter->schema[0].name, pAlter->schema[0].bytes); + code = TSDB_CODE_SUCCESS; } else { } } @@ -3303,4 +3309,4 @@ int32_t mnodeCompactTables() { mnodeCompactChildTables(); return 0; -} \ No newline at end of file +} diff --git a/src/query/inc/sql.y b/src/query/inc/sql.y index 8ef8ef0e2b..62f7da9f9c 100644 --- a/src/query/inc/sql.y +++ b/src/query/inc/sql.y @@ -28,7 +28,7 @@ #include #include "qSqlparser.h" #include "tcmdtype.h" -#include "tstoken.h" +#include "ttoken.h" #include "ttokendef.h" #include "tutil.h" #include "tvariant.h" @@ -748,6 +748,18 @@ cmd ::= ALTER TABLE ids(X) cpxName(F) DROP COLUMN ids(A). { setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } +cmd ::= ALTER TABLE ids(X) cpxName(F) ALTER COLUMN LENGTH ids(A) INTEGER(Z). 
{ + X.n += F.n; + + toTSDBType(A.type); + SArray* K = tVariantListAppendToken(NULL, &A, -1); + toTSDBType(Z.type); + K = tVariantListAppendToken(K, &Z, -1); + + SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&X, K, NULL, TSDB_ALTER_TABLE_CHANGE_COLUMN, -1); + setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); +} + //////////////////////////////////ALTER TAGS statement///////////////////////////////////// cmd ::= ALTER TABLE ids(X) cpxName(Y) ADD TAG columnlist(A). { X.n += Y.n; diff --git a/src/query/src/qSqlParser.c b/src/query/src/qSqlParser.c index 2459439b7b..959de7530d 100644 --- a/src/query/src/qSqlParser.c +++ b/src/query/src/qSqlParser.c @@ -885,7 +885,7 @@ SAlterTableInfo *tSetAlterTableInfo(SStrToken *pTableName, SArray *pCols, SArray pAlterTable->type = type; pAlterTable->tableType = tableType; - if (type == TSDB_ALTER_TABLE_ADD_COLUMN || type == TSDB_ALTER_TABLE_ADD_TAG_COLUMN) { + if (type == TSDB_ALTER_TABLE_ADD_COLUMN || type == TSDB_ALTER_TABLE_ADD_TAG_COLUMN || type == TSDB_ALTER_TABLE_CHANGE_COLUMN) { pAlterTable->pAddColumns = pCols; assert(pVals == NULL); } else { diff --git a/src/query/src/sql.c b/src/query/src/sql.c index 5a5038be79..ca840ea4dc 100644 --- a/src/query/src/sql.c +++ b/src/query/src/sql.c @@ -23,13 +23,14 @@ ** input grammar file: */ #include +#include /************ Begin %include sections from the grammar ************************/ -#include -#include #include #include #include +#include +#include #include "qSqlparser.h" #include "tcmdtype.h" #include "ttoken.h" @@ -76,8 +77,10 @@ ** zero the stack is dynamically sized using realloc() ** ParseARG_SDECL A static variable declaration for the %extra_argument ** ParseARG_PDECL A parameter declaration for the %extra_argument +** ParseARG_PARAM Code to pass %extra_argument as a subroutine parameter ** ParseARG_STORE Code to store %extra_argument into yypParser ** ParseARG_FETCH Code to extract %extra_argument from yypParser +** ParseCTX_* As ParseARG_ except for %extra_context ** YYERRORSYMBOL is the code number of the error symbol. If not ** defined, then do no error processing. ** YYNSTATE the combined number of states. 
@@ -97,7 +100,7 @@ #endif /************* Begin control #defines *****************************************/ #define YYCODETYPE unsigned short int -#define YYNOCODE 264 +#define YYNOCODE 263 #define YYACTIONTYPE unsigned short int #define ParseTOKENTYPE SStrToken typedef union { @@ -124,21 +127,29 @@ typedef union { #endif #define ParseARG_SDECL SSqlInfo* pInfo; #define ParseARG_PDECL ,SSqlInfo* pInfo -#define ParseARG_FETCH SSqlInfo* pInfo = yypParser->pInfo -#define ParseARG_STORE yypParser->pInfo = pInfo +#define ParseARG_PARAM ,pInfo +#define ParseARG_FETCH SSqlInfo* pInfo=yypParser->pInfo; +#define ParseARG_STORE yypParser->pInfo=pInfo; +#define ParseCTX_SDECL +#define ParseCTX_PDECL +#define ParseCTX_PARAM +#define ParseCTX_FETCH +#define ParseCTX_STORE #define YYFALLBACK 1 -#define YYNSTATE 317 -#define YYNRULE 270 -#define YYNTOKEN 187 -#define YY_MAX_SHIFT 316 -#define YY_MIN_SHIFTREDUCE 511 -#define YY_MAX_SHIFTREDUCE 780 -#define YY_ERROR_ACTION 781 -#define YY_ACCEPT_ACTION 782 -#define YY_NO_ACTION 783 -#define YY_MIN_REDUCE 784 -#define YY_MAX_REDUCE 1053 +#define YYNSTATE 321 +#define YYNRULE 271 +#define YYNRULE_WITH_ACTION 271 +#define YYNTOKEN 188 +#define YY_MAX_SHIFT 320 +#define YY_MIN_SHIFTREDUCE 516 +#define YY_MAX_SHIFTREDUCE 786 +#define YY_ERROR_ACTION 787 +#define YY_ACCEPT_ACTION 788 +#define YY_NO_ACTION 789 +#define YY_MIN_REDUCE 790 +#define YY_MAX_REDUCE 1060 /************* End control #defines *******************************************/ +#define YY_NLOOKAHEAD ((int)(sizeof(yy_lookahead)/sizeof(yy_lookahead[0]))) /* Define the yytestcase() macro to be a no-op if is not already defined ** otherwise. @@ -205,145 +216,145 @@ typedef union { *********** Begin parsing tables **********************************************/ #define YY_ACTTAB_COUNT (685) static const YYACTIONTYPE yy_action[] = { - /* 0 */ 925, 559, 206, 314, 211, 141, 952, 3, 168, 560, - /* 10 */ 782, 316, 134, 47, 48, 141, 51, 52, 30, 183, - /* 20 */ 217, 41, 183, 50, 264, 55, 53, 57, 54, 1034, - /* 30 */ 931, 214, 1035, 46, 45, 17, 183, 44, 43, 42, - /* 40 */ 47, 48, 223, 51, 52, 213, 1035, 217, 41, 559, - /* 50 */ 50, 264, 55, 53, 57, 54, 943, 560, 181, 208, - /* 60 */ 46, 45, 928, 222, 44, 43, 42, 48, 949, 51, - /* 70 */ 52, 244, 983, 217, 41, 249, 50, 264, 55, 53, - /* 80 */ 57, 54, 984, 638, 259, 85, 46, 45, 280, 931, - /* 90 */ 44, 43, 42, 512, 513, 514, 515, 516, 517, 518, - /* 100 */ 519, 520, 521, 522, 523, 524, 315, 943, 187, 207, - /* 110 */ 70, 290, 289, 47, 48, 30, 51, 52, 300, 919, - /* 120 */ 217, 41, 209, 50, 264, 55, 53, 57, 54, 44, - /* 130 */ 43, 42, 724, 46, 45, 674, 224, 44, 43, 42, - /* 140 */ 47, 49, 24, 51, 52, 228, 141, 217, 41, 559, - /* 150 */ 50, 264, 55, 53, 57, 54, 220, 560, 105, 928, - /* 160 */ 46, 45, 931, 300, 44, 43, 42, 23, 278, 309, - /* 170 */ 308, 277, 276, 275, 307, 274, 306, 305, 304, 273, - /* 180 */ 303, 302, 891, 30, 879, 880, 881, 882, 883, 884, - /* 190 */ 885, 886, 887, 888, 889, 890, 892, 893, 51, 52, - /* 200 */ 830, 1031, 217, 41, 167, 50, 264, 55, 53, 57, - /* 210 */ 54, 261, 18, 78, 230, 46, 45, 287, 286, 44, - /* 220 */ 43, 42, 216, 739, 221, 30, 728, 928, 731, 192, - /* 230 */ 734, 216, 739, 310, 1030, 728, 193, 731, 236, 734, - /* 240 */ 30, 118, 117, 191, 677, 559, 240, 239, 55, 53, - /* 250 */ 57, 54, 25, 560, 202, 203, 46, 45, 263, 931, - /* 260 */ 44, 43, 42, 202, 203, 74, 283, 61, 23, 928, - /* 270 */ 309, 308, 74, 36, 730, 307, 733, 306, 305, 304, - /* 280 */ 36, 303, 302, 899, 927, 662, 897, 898, 659, 62, - /* 290 */ 660, 900, 661, 902, 
903, 901, 82, 904, 905, 103, - /* 300 */ 97, 108, 243, 917, 68, 30, 107, 113, 116, 106, - /* 310 */ 199, 5, 33, 157, 141, 110, 231, 232, 156, 92, - /* 320 */ 87, 91, 681, 226, 30, 56, 30, 914, 915, 29, - /* 330 */ 918, 729, 740, 732, 56, 175, 173, 171, 736, 1, - /* 340 */ 155, 740, 170, 121, 120, 119, 284, 736, 229, 928, - /* 350 */ 265, 46, 45, 69, 735, 44, 43, 42, 839, 666, - /* 360 */ 12, 667, 167, 735, 84, 288, 81, 292, 928, 215, - /* 370 */ 928, 313, 312, 126, 132, 130, 129, 80, 705, 706, - /* 380 */ 831, 79, 280, 929, 167, 916, 737, 245, 726, 684, - /* 390 */ 71, 31, 227, 994, 663, 282, 690, 247, 696, 697, - /* 400 */ 136, 760, 60, 20, 741, 19, 64, 648, 19, 241, - /* 410 */ 267, 31, 650, 6, 31, 269, 60, 1029, 649, 83, - /* 420 */ 28, 200, 60, 270, 727, 201, 65, 96, 95, 185, - /* 430 */ 14, 13, 993, 102, 101, 67, 218, 637, 16, 15, - /* 440 */ 664, 186, 665, 738, 115, 114, 743, 188, 182, 189, - /* 450 */ 190, 196, 197, 195, 180, 194, 184, 133, 1045, 990, - /* 460 */ 930, 989, 219, 291, 39, 951, 959, 944, 961, 135, - /* 470 */ 139, 976, 248, 975, 926, 131, 152, 151, 924, 153, - /* 480 */ 250, 154, 689, 210, 252, 150, 257, 145, 142, 842, - /* 490 */ 941, 143, 272, 144, 262, 37, 146, 66, 58, 178, - /* 500 */ 63, 260, 34, 258, 256, 281, 838, 147, 1050, 254, - /* 510 */ 93, 1049, 1047, 158, 285, 1044, 99, 148, 1043, 1041, - /* 520 */ 159, 860, 251, 35, 32, 38, 149, 179, 827, 109, - /* 530 */ 825, 111, 112, 823, 822, 233, 169, 820, 819, 818, - /* 540 */ 817, 816, 815, 172, 174, 40, 812, 810, 808, 806, - /* 550 */ 176, 803, 177, 301, 246, 72, 75, 104, 253, 977, - /* 560 */ 293, 294, 295, 296, 297, 204, 225, 298, 271, 299, - /* 570 */ 311, 780, 205, 198, 234, 88, 89, 235, 779, 237, - /* 580 */ 238, 778, 766, 765, 242, 247, 821, 814, 162, 266, - /* 590 */ 122, 861, 160, 165, 161, 164, 163, 166, 123, 124, - /* 600 */ 813, 805, 895, 125, 804, 2, 8, 73, 4, 669, - /* 610 */ 76, 691, 137, 212, 694, 86, 138, 77, 907, 255, - /* 620 */ 9, 698, 140, 26, 742, 7, 27, 11, 10, 21, - /* 630 */ 84, 744, 22, 268, 601, 597, 595, 594, 593, 590, - /* 640 */ 563, 279, 94, 90, 31, 59, 640, 639, 636, 585, - /* 650 */ 583, 98, 575, 581, 577, 579, 573, 571, 604, 603, - /* 660 */ 602, 600, 599, 100, 598, 596, 592, 591, 60, 561, - /* 670 */ 528, 784, 526, 783, 783, 783, 783, 783, 783, 127, - /* 680 */ 783, 783, 783, 783, 128, + /* 0 */ 135, 564, 207, 318, 212, 142, 958, 230, 142, 565, + /* 10 */ 788, 320, 17, 47, 48, 142, 51, 52, 30, 184, + /* 20 */ 218, 41, 184, 50, 265, 55, 53, 57, 54, 1040, + /* 30 */ 937, 215, 1041, 46, 45, 182, 184, 44, 43, 42, + /* 40 */ 47, 48, 935, 51, 52, 214, 1041, 218, 41, 564, + /* 50 */ 50, 265, 55, 53, 57, 54, 949, 565, 188, 209, + /* 60 */ 46, 45, 934, 250, 44, 43, 42, 48, 955, 51, + /* 70 */ 52, 245, 989, 218, 41, 79, 50, 265, 55, 53, + /* 80 */ 57, 54, 990, 106, 260, 281, 46, 45, 304, 227, + /* 90 */ 44, 43, 42, 517, 518, 519, 520, 521, 522, 523, + /* 100 */ 524, 525, 526, 527, 528, 529, 319, 643, 85, 208, + /* 110 */ 70, 564, 304, 47, 48, 30, 51, 52, 1037, 565, + /* 120 */ 218, 41, 923, 50, 265, 55, 53, 57, 54, 44, + /* 130 */ 43, 42, 729, 46, 45, 294, 293, 44, 43, 42, + /* 140 */ 47, 49, 925, 51, 52, 1036, 142, 218, 41, 564, + /* 150 */ 50, 265, 55, 53, 57, 54, 221, 565, 228, 934, + /* 160 */ 46, 45, 283, 1052, 44, 43, 42, 23, 279, 313, + /* 170 */ 312, 278, 277, 276, 311, 275, 310, 309, 308, 274, + /* 180 */ 307, 306, 897, 30, 885, 886, 887, 888, 889, 890, + /* 190 */ 891, 892, 893, 894, 895, 896, 898, 899, 51, 52, + /* 200 */ 836, 281, 218, 41, 168, 50, 265, 55, 53, 57, + /* 210 */ 
54, 262, 18, 78, 25, 46, 45, 1, 156, 44, + /* 220 */ 43, 42, 217, 744, 222, 30, 733, 934, 736, 193, + /* 230 */ 739, 217, 744, 223, 12, 733, 194, 736, 84, 739, + /* 240 */ 81, 119, 118, 192, 317, 316, 127, 225, 55, 53, + /* 250 */ 57, 54, 949, 731, 203, 204, 46, 45, 264, 937, + /* 260 */ 44, 43, 42, 203, 204, 1035, 284, 210, 23, 934, + /* 270 */ 313, 312, 74, 937, 735, 311, 738, 310, 309, 308, + /* 280 */ 36, 307, 306, 905, 201, 667, 903, 904, 664, 732, + /* 290 */ 665, 906, 666, 908, 909, 907, 82, 910, 911, 104, + /* 300 */ 97, 109, 244, 202, 68, 30, 108, 114, 117, 107, + /* 310 */ 74, 200, 5, 33, 158, 111, 232, 233, 36, 157, + /* 320 */ 92, 87, 91, 682, 229, 56, 30, 920, 921, 29, + /* 330 */ 924, 291, 745, 30, 56, 176, 174, 172, 741, 314, + /* 340 */ 931, 745, 171, 122, 121, 120, 285, 741, 30, 934, + /* 350 */ 237, 46, 45, 69, 740, 44, 43, 42, 689, 241, + /* 360 */ 240, 266, 734, 740, 737, 937, 248, 292, 80, 61, + /* 370 */ 934, 133, 131, 130, 296, 3, 169, 934, 186, 845, + /* 380 */ 837, 71, 224, 168, 168, 922, 742, 710, 711, 679, + /* 390 */ 216, 62, 933, 231, 668, 187, 24, 288, 287, 246, + /* 400 */ 695, 686, 701, 31, 137, 702, 60, 765, 746, 20, + /* 410 */ 64, 19, 19, 653, 189, 268, 655, 31, 270, 31, + /* 420 */ 60, 654, 83, 28, 183, 60, 271, 96, 190, 95, + /* 430 */ 65, 14, 1000, 13, 6, 67, 103, 642, 102, 671, + /* 440 */ 669, 672, 670, 16, 191, 15, 116, 115, 197, 198, + /* 450 */ 196, 181, 195, 185, 936, 999, 219, 748, 996, 995, + /* 460 */ 220, 295, 242, 134, 39, 957, 965, 967, 136, 950, + /* 470 */ 249, 982, 140, 981, 743, 932, 152, 132, 153, 930, + /* 480 */ 251, 154, 155, 848, 694, 305, 147, 273, 947, 143, + /* 490 */ 37, 263, 144, 145, 211, 179, 66, 34, 282, 253, + /* 500 */ 258, 63, 58, 844, 261, 1057, 93, 259, 1056, 146, + /* 510 */ 257, 1054, 159, 286, 1051, 99, 289, 1050, 1047, 160, + /* 520 */ 866, 148, 255, 35, 32, 38, 180, 833, 110, 831, + /* 530 */ 112, 113, 829, 828, 234, 170, 826, 252, 825, 824, + /* 540 */ 823, 822, 821, 173, 175, 818, 816, 814, 40, 812, + /* 550 */ 177, 809, 178, 105, 247, 72, 75, 254, 983, 297, + /* 560 */ 298, 299, 300, 301, 302, 303, 315, 786, 205, 226, + /* 570 */ 235, 272, 236, 785, 238, 239, 206, 199, 88, 784, + /* 580 */ 89, 771, 770, 243, 248, 76, 827, 674, 267, 8, + /* 590 */ 123, 696, 124, 163, 162, 867, 161, 164, 165, 167, + /* 600 */ 166, 820, 2, 125, 819, 901, 126, 811, 4, 73, + /* 610 */ 810, 138, 151, 149, 150, 139, 699, 77, 913, 213, + /* 620 */ 256, 26, 703, 141, 9, 10, 747, 27, 7, 11, + /* 630 */ 21, 749, 22, 86, 269, 606, 602, 84, 600, 599, + /* 640 */ 598, 595, 280, 568, 94, 90, 31, 774, 59, 645, + /* 650 */ 644, 641, 590, 588, 98, 100, 580, 586, 582, 584, + /* 660 */ 578, 101, 576, 609, 608, 607, 605, 604, 290, 603, + /* 670 */ 601, 597, 596, 60, 566, 533, 531, 128, 790, 789, + /* 680 */ 789, 789, 789, 789, 129, }; static const YYCODETYPE yy_lookahead[] = { - /* 0 */ 191, 1, 190, 191, 210, 191, 191, 194, 195, 9, - /* 10 */ 188, 189, 191, 13, 14, 191, 16, 17, 191, 252, + /* 0 */ 191, 1, 190, 191, 210, 191, 191, 191, 191, 9, + /* 10 */ 188, 189, 252, 13, 14, 191, 16, 17, 191, 252, /* 20 */ 20, 21, 252, 23, 24, 25, 26, 27, 28, 262, /* 30 */ 236, 261, 262, 33, 34, 252, 252, 37, 38, 39, - /* 40 */ 13, 14, 233, 16, 17, 261, 262, 20, 21, 1, + /* 40 */ 13, 14, 226, 16, 17, 261, 262, 20, 21, 1, /* 50 */ 23, 24, 25, 26, 27, 28, 234, 9, 252, 232, - /* 60 */ 33, 34, 235, 210, 37, 38, 39, 14, 253, 16, - /* 70 */ 17, 249, 258, 20, 21, 254, 23, 24, 25, 26, - /* 80 */ 27, 28, 258, 5, 260, 197, 33, 34, 79, 236, + /* 60 */ 33, 34, 235, 254, 37, 
38, 39, 14, 253, 16, + /* 70 */ 17, 249, 258, 20, 21, 258, 23, 24, 25, 26, + /* 80 */ 27, 28, 258, 76, 260, 79, 33, 34, 81, 68, /* 90 */ 37, 38, 39, 45, 46, 47, 48, 49, 50, 51, - /* 100 */ 52, 53, 54, 55, 56, 57, 58, 234, 252, 61, - /* 110 */ 110, 33, 34, 13, 14, 191, 16, 17, 81, 231, - /* 120 */ 20, 21, 249, 23, 24, 25, 26, 27, 28, 37, - /* 130 */ 38, 39, 105, 33, 34, 109, 210, 37, 38, 39, - /* 140 */ 13, 14, 116, 16, 17, 68, 191, 20, 21, 1, - /* 150 */ 23, 24, 25, 26, 27, 28, 232, 9, 76, 235, - /* 160 */ 33, 34, 236, 81, 37, 38, 39, 88, 89, 90, + /* 100 */ 52, 53, 54, 55, 56, 57, 58, 5, 197, 61, + /* 110 */ 110, 1, 81, 13, 14, 191, 16, 17, 252, 9, + /* 120 */ 20, 21, 0, 23, 24, 25, 26, 27, 28, 37, + /* 130 */ 38, 39, 105, 33, 34, 33, 34, 37, 38, 39, + /* 140 */ 13, 14, 231, 16, 17, 252, 191, 20, 21, 1, + /* 150 */ 23, 24, 25, 26, 27, 28, 232, 9, 137, 235, + /* 160 */ 33, 34, 141, 236, 37, 38, 39, 88, 89, 90, /* 170 */ 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, /* 180 */ 101, 102, 209, 191, 211, 212, 213, 214, 215, 216, /* 190 */ 217, 218, 219, 220, 221, 222, 223, 224, 16, 17, - /* 200 */ 196, 252, 20, 21, 200, 23, 24, 25, 26, 27, - /* 210 */ 28, 256, 44, 258, 137, 33, 34, 140, 141, 37, + /* 200 */ 196, 79, 20, 21, 200, 23, 24, 25, 26, 27, + /* 210 */ 28, 256, 44, 258, 104, 33, 34, 198, 199, 37, /* 220 */ 38, 39, 1, 2, 232, 191, 5, 235, 7, 61, - /* 230 */ 9, 1, 2, 210, 252, 5, 68, 7, 135, 9, - /* 240 */ 191, 73, 74, 75, 37, 1, 143, 144, 25, 26, - /* 250 */ 27, 28, 104, 9, 33, 34, 33, 34, 37, 236, - /* 260 */ 37, 38, 39, 33, 34, 104, 232, 109, 88, 235, - /* 270 */ 90, 91, 104, 112, 5, 95, 7, 97, 98, 99, - /* 280 */ 112, 101, 102, 209, 235, 2, 212, 213, 5, 131, + /* 230 */ 9, 1, 2, 210, 104, 5, 68, 7, 108, 9, + /* 240 */ 110, 73, 74, 75, 65, 66, 67, 210, 25, 26, + /* 250 */ 27, 28, 234, 1, 33, 34, 33, 34, 37, 236, + /* 260 */ 37, 38, 39, 33, 34, 252, 232, 249, 88, 235, + /* 270 */ 90, 91, 104, 236, 5, 95, 7, 97, 98, 99, + /* 280 */ 112, 101, 102, 209, 252, 2, 212, 213, 5, 37, /* 290 */ 7, 217, 9, 219, 220, 221, 197, 223, 224, 62, - /* 300 */ 63, 64, 134, 0, 136, 191, 69, 70, 71, 72, - /* 310 */ 142, 62, 63, 64, 191, 78, 33, 34, 69, 70, - /* 320 */ 71, 72, 115, 68, 191, 104, 191, 228, 229, 230, - /* 330 */ 231, 5, 111, 7, 104, 62, 63, 64, 117, 198, - /* 340 */ 199, 111, 69, 70, 71, 72, 232, 117, 191, 235, - /* 350 */ 15, 33, 34, 197, 133, 37, 38, 39, 196, 5, - /* 360 */ 104, 7, 200, 133, 108, 232, 110, 232, 235, 60, - /* 370 */ 235, 65, 66, 67, 62, 63, 64, 237, 124, 125, - /* 380 */ 196, 258, 79, 226, 200, 229, 117, 105, 1, 105, - /* 390 */ 250, 109, 137, 227, 111, 140, 105, 113, 105, 105, - /* 400 */ 109, 105, 109, 109, 105, 109, 109, 105, 109, 191, - /* 410 */ 105, 109, 105, 104, 109, 105, 109, 252, 105, 109, - /* 420 */ 104, 252, 109, 107, 37, 252, 129, 138, 139, 252, - /* 430 */ 138, 139, 227, 138, 139, 104, 227, 106, 138, 139, - /* 440 */ 5, 252, 7, 117, 76, 77, 111, 252, 252, 252, - /* 450 */ 252, 252, 252, 252, 252, 252, 252, 191, 236, 227, - /* 460 */ 236, 227, 227, 227, 251, 191, 191, 234, 191, 191, - /* 470 */ 191, 259, 234, 259, 234, 60, 191, 238, 191, 191, - /* 480 */ 255, 191, 117, 255, 255, 239, 255, 244, 247, 191, - /* 490 */ 248, 246, 191, 245, 122, 191, 243, 128, 127, 191, - /* 500 */ 130, 126, 191, 121, 120, 191, 191, 242, 191, 119, - /* 510 */ 191, 191, 191, 191, 191, 191, 191, 241, 191, 191, - /* 520 */ 191, 191, 118, 191, 191, 191, 240, 191, 191, 191, - /* 530 */ 191, 191, 191, 191, 191, 191, 191, 191, 191, 191, - /* 540 */ 191, 191, 191, 191, 191, 132, 191, 191, 191, 191, 
- /* 550 */ 191, 191, 191, 103, 192, 192, 192, 87, 192, 192, - /* 560 */ 86, 50, 83, 85, 54, 192, 192, 84, 192, 82, - /* 570 */ 79, 5, 192, 192, 145, 197, 197, 5, 5, 145, - /* 580 */ 5, 5, 90, 89, 135, 113, 192, 192, 202, 107, - /* 590 */ 193, 208, 207, 204, 206, 203, 205, 201, 193, 193, - /* 600 */ 192, 192, 225, 193, 192, 198, 104, 114, 194, 105, - /* 610 */ 109, 105, 104, 1, 105, 76, 109, 104, 225, 104, - /* 620 */ 123, 105, 104, 109, 105, 104, 109, 104, 123, 104, - /* 630 */ 108, 111, 104, 107, 9, 5, 5, 5, 5, 5, - /* 640 */ 80, 15, 139, 76, 109, 16, 5, 5, 105, 5, - /* 650 */ 5, 139, 5, 5, 5, 5, 5, 5, 5, 5, - /* 660 */ 5, 5, 5, 139, 5, 5, 5, 5, 109, 80, - /* 670 */ 60, 0, 59, 263, 263, 263, 263, 263, 263, 21, + /* 300 */ 63, 64, 134, 252, 136, 191, 69, 70, 71, 72, + /* 310 */ 104, 143, 62, 63, 64, 78, 33, 34, 112, 69, + /* 320 */ 70, 71, 72, 37, 68, 104, 191, 228, 229, 230, + /* 330 */ 231, 75, 111, 191, 104, 62, 63, 64, 117, 210, + /* 340 */ 191, 111, 69, 70, 71, 72, 232, 117, 191, 235, + /* 350 */ 135, 33, 34, 197, 133, 37, 38, 39, 105, 144, + /* 360 */ 145, 15, 5, 133, 7, 236, 113, 232, 237, 109, + /* 370 */ 235, 62, 63, 64, 232, 194, 195, 235, 252, 196, + /* 380 */ 196, 250, 233, 200, 200, 229, 117, 124, 125, 109, + /* 390 */ 60, 131, 235, 137, 111, 252, 116, 141, 142, 105, + /* 400 */ 105, 115, 105, 109, 109, 105, 109, 105, 105, 109, + /* 410 */ 109, 109, 109, 105, 252, 105, 105, 109, 105, 109, + /* 420 */ 109, 105, 109, 104, 252, 109, 107, 138, 252, 140, + /* 430 */ 129, 138, 227, 140, 104, 104, 138, 106, 140, 5, + /* 440 */ 5, 7, 7, 138, 252, 140, 76, 77, 252, 252, + /* 450 */ 252, 252, 252, 252, 236, 227, 227, 111, 227, 227, + /* 460 */ 227, 227, 191, 191, 251, 191, 191, 191, 191, 234, + /* 470 */ 234, 259, 191, 259, 117, 234, 238, 60, 191, 191, + /* 480 */ 255, 191, 191, 191, 117, 103, 243, 191, 248, 247, + /* 490 */ 191, 122, 246, 245, 255, 191, 128, 191, 191, 255, + /* 500 */ 255, 130, 127, 191, 126, 191, 191, 121, 191, 244, + /* 510 */ 120, 191, 191, 191, 191, 191, 191, 191, 191, 191, + /* 520 */ 191, 242, 119, 191, 191, 191, 191, 191, 191, 191, + /* 530 */ 191, 191, 191, 191, 191, 191, 191, 118, 191, 191, + /* 540 */ 191, 191, 191, 191, 191, 191, 191, 191, 132, 191, + /* 550 */ 191, 191, 191, 87, 192, 192, 192, 192, 192, 86, + /* 560 */ 50, 83, 85, 54, 84, 82, 79, 5, 192, 192, + /* 570 */ 146, 192, 5, 5, 146, 5, 192, 192, 197, 5, + /* 580 */ 197, 90, 89, 135, 113, 109, 192, 105, 107, 104, + /* 590 */ 193, 105, 193, 202, 206, 208, 207, 205, 203, 201, + /* 600 */ 204, 192, 198, 193, 192, 225, 193, 192, 194, 114, + /* 610 */ 192, 104, 239, 241, 240, 109, 105, 104, 225, 1, + /* 620 */ 104, 109, 105, 104, 123, 123, 105, 109, 104, 104, + /* 630 */ 104, 111, 104, 76, 107, 9, 5, 108, 5, 5, + /* 640 */ 5, 5, 15, 80, 140, 76, 109, 5, 16, 5, + /* 650 */ 5, 105, 5, 5, 140, 140, 5, 5, 5, 5, + /* 660 */ 5, 139, 5, 5, 5, 5, 5, 5, 138, 5, + /* 670 */ 5, 5, 5, 109, 80, 60, 59, 21, 0, 263, /* 680 */ 263, 263, 263, 263, 21, 263, 263, 263, 263, 263, /* 690 */ 263, 263, 263, 263, 263, 263, 263, 263, 263, 263, /* 700 */ 263, 263, 263, 263, 263, 263, 263, 263, 263, 263, @@ -363,101 +374,104 @@ static const YYCODETYPE yy_lookahead[] = { /* 840 */ 263, 263, 263, 263, 263, 263, 263, 263, 263, 263, /* 850 */ 263, 263, 263, 263, 263, 263, 263, 263, 263, 263, /* 860 */ 263, 263, 263, 263, 263, 263, 263, 263, 263, 263, - /* 870 */ 263, 263, + /* 870 */ 263, 263, 263, }; -#define YY_SHIFT_COUNT (316) +#define YY_SHIFT_COUNT (320) #define YY_SHIFT_MIN (0) -#define YY_SHIFT_MAX (671) +#define YY_SHIFT_MAX 
(678) static const unsigned short int yy_shift_ofst[] = { - /* 0 */ 168, 79, 79, 180, 180, 9, 221, 230, 244, 244, - /* 10 */ 244, 244, 244, 244, 244, 244, 244, 0, 48, 230, - /* 20 */ 283, 283, 283, 283, 148, 161, 244, 244, 244, 303, - /* 30 */ 244, 244, 82, 9, 37, 37, 685, 685, 685, 230, + /* 0 */ 168, 79, 79, 180, 180, 6, 221, 230, 148, 148, + /* 10 */ 148, 148, 148, 148, 148, 148, 148, 0, 48, 230, + /* 20 */ 283, 283, 283, 283, 110, 206, 148, 148, 148, 122, + /* 30 */ 148, 148, 7, 6, 31, 31, 685, 685, 685, 230, /* 40 */ 230, 230, 230, 230, 230, 230, 230, 230, 230, 230, /* 50 */ 230, 230, 230, 230, 230, 230, 230, 230, 230, 283, - /* 60 */ 283, 78, 78, 78, 78, 78, 78, 78, 244, 244, - /* 70 */ 244, 207, 244, 161, 161, 244, 244, 244, 254, 254, - /* 80 */ 26, 161, 244, 244, 244, 244, 244, 244, 244, 244, - /* 90 */ 244, 244, 244, 244, 244, 244, 244, 244, 244, 244, - /* 100 */ 244, 244, 244, 244, 244, 244, 244, 244, 244, 244, - /* 110 */ 244, 244, 244, 244, 244, 244, 244, 244, 244, 244, - /* 120 */ 244, 244, 244, 244, 244, 244, 244, 244, 244, 244, - /* 130 */ 244, 244, 244, 415, 415, 415, 365, 365, 365, 415, - /* 140 */ 365, 415, 369, 370, 371, 372, 375, 382, 384, 390, - /* 150 */ 404, 413, 415, 415, 415, 450, 9, 9, 415, 415, - /* 160 */ 470, 474, 511, 479, 478, 510, 483, 487, 450, 415, - /* 170 */ 491, 491, 415, 491, 415, 491, 415, 415, 685, 685, - /* 180 */ 27, 100, 127, 100, 100, 53, 182, 223, 223, 223, - /* 190 */ 223, 237, 249, 273, 318, 318, 318, 318, 77, 103, - /* 200 */ 92, 92, 269, 326, 256, 255, 306, 312, 282, 284, - /* 210 */ 291, 293, 294, 296, 299, 387, 309, 335, 158, 297, - /* 220 */ 302, 305, 307, 310, 313, 316, 289, 292, 295, 331, - /* 230 */ 300, 354, 435, 368, 566, 429, 572, 573, 434, 575, - /* 240 */ 576, 492, 494, 449, 472, 482, 502, 493, 504, 501, - /* 250 */ 506, 508, 509, 507, 513, 612, 515, 516, 518, 514, - /* 260 */ 497, 517, 505, 519, 521, 520, 523, 482, 525, 526, - /* 270 */ 528, 522, 539, 625, 630, 631, 632, 633, 634, 560, - /* 280 */ 626, 567, 503, 535, 535, 629, 512, 524, 535, 641, - /* 290 */ 642, 543, 535, 644, 645, 647, 648, 649, 650, 651, - /* 300 */ 652, 653, 654, 655, 656, 657, 659, 660, 661, 662, - /* 310 */ 559, 589, 658, 663, 610, 613, 671, + /* 60 */ 283, 102, 102, 102, 102, 102, 102, 102, 148, 148, + /* 70 */ 148, 286, 148, 206, 206, 148, 148, 148, 263, 263, + /* 80 */ 280, 206, 148, 148, 148, 148, 148, 148, 148, 148, + /* 90 */ 148, 148, 148, 148, 148, 148, 148, 148, 148, 148, + /* 100 */ 148, 148, 148, 148, 148, 148, 148, 148, 148, 148, + /* 110 */ 148, 148, 148, 148, 148, 148, 148, 148, 148, 148, + /* 120 */ 148, 148, 148, 148, 148, 148, 148, 148, 148, 148, + /* 130 */ 148, 148, 148, 148, 417, 417, 417, 367, 367, 367, + /* 140 */ 417, 367, 417, 368, 371, 375, 369, 378, 386, 390, + /* 150 */ 403, 419, 416, 417, 417, 417, 382, 6, 6, 417, + /* 160 */ 417, 466, 473, 510, 478, 477, 509, 480, 483, 382, + /* 170 */ 417, 487, 487, 417, 487, 417, 487, 417, 417, 685, + /* 180 */ 685, 27, 100, 127, 100, 100, 53, 182, 223, 223, + /* 190 */ 223, 223, 237, 250, 273, 318, 318, 318, 318, 256, + /* 200 */ 215, 92, 92, 269, 357, 130, 21, 179, 309, 294, + /* 210 */ 253, 295, 297, 300, 302, 303, 252, 330, 346, 260, + /* 220 */ 301, 308, 310, 311, 313, 316, 319, 289, 293, 298, + /* 230 */ 331, 305, 434, 435, 370, 562, 424, 567, 568, 428, + /* 240 */ 570, 574, 491, 493, 448, 471, 481, 485, 495, 482, + /* 250 */ 476, 486, 507, 511, 506, 513, 618, 516, 517, 519, + /* 260 */ 512, 501, 518, 502, 521, 524, 520, 525, 481, 526, + /* 270 */ 527, 528, 529, 557, 626, 631, 633, 634, 
635, 636, + /* 280 */ 563, 627, 569, 504, 537, 537, 632, 514, 515, 642, + /* 290 */ 522, 530, 537, 644, 645, 546, 537, 647, 648, 651, + /* 300 */ 652, 653, 654, 655, 657, 658, 659, 660, 661, 662, + /* 310 */ 664, 665, 666, 667, 564, 594, 656, 663, 615, 617, + /* 320 */ 678, }; -#define YY_REDUCE_COUNT (179) -#define YY_REDUCE_MIN (-233) -#define YY_REDUCE_MAX (414) +#define YY_REDUCE_COUNT (180) +#define YY_REDUCE_MIN (-240) +#define YY_REDUCE_MAX (418) static const short yy_reduce_ofst[] = { /* 0 */ -178, -27, -27, 74, 74, 99, -230, -216, -173, -176, - /* 10 */ -45, -76, -8, 34, 114, 133, 135, -185, -188, -233, - /* 20 */ -206, -147, -74, 23, -179, -127, -186, 123, -191, -112, - /* 30 */ 157, 49, 4, 156, 162, 184, 140, 141, -187, -217, - /* 40 */ -194, -144, -51, -18, 165, 169, 173, 177, 189, 195, - /* 50 */ 196, 197, 198, 199, 200, 201, 202, 203, 204, 222, - /* 60 */ 224, 166, 205, 209, 232, 234, 235, 236, 218, 266, - /* 70 */ 274, 213, 275, 233, 238, 277, 278, 279, 212, 214, - /* 80 */ 239, 240, 285, 287, 288, 290, 298, 301, 304, 308, - /* 90 */ 311, 314, 315, 317, 319, 320, 321, 322, 323, 324, - /* 100 */ 325, 327, 328, 329, 330, 332, 333, 334, 336, 337, - /* 110 */ 338, 339, 340, 341, 342, 343, 344, 345, 346, 347, - /* 120 */ 348, 349, 350, 351, 352, 353, 355, 356, 357, 358, - /* 130 */ 359, 360, 361, 362, 363, 364, 225, 228, 229, 366, - /* 140 */ 231, 367, 242, 241, 245, 248, 243, 253, 265, 276, - /* 150 */ 286, 246, 373, 374, 376, 377, 378, 379, 380, 381, - /* 160 */ 383, 385, 388, 386, 391, 392, 389, 396, 393, 394, - /* 170 */ 397, 405, 395, 406, 408, 410, 409, 412, 407, 414, + /* 10 */ -45, -76, -8, 34, 114, 135, 142, -185, -188, -233, + /* 20 */ -206, 23, 37, 129, -191, 18, -186, -183, 149, -89, + /* 30 */ -184, 157, 4, 156, 183, 184, 131, 19, 181, -240, + /* 40 */ -217, -194, -134, -107, 13, 32, 51, 126, 143, 162, + /* 50 */ 172, 176, 192, 196, 197, 198, 199, 200, 201, -73, + /* 60 */ 218, 205, 228, 229, 231, 232, 233, 234, 271, 272, + /* 70 */ 274, 213, 275, 235, 236, 276, 277, 281, 212, 214, + /* 80 */ 238, 241, 287, 288, 290, 291, 292, 296, 299, 304, + /* 90 */ 306, 307, 312, 314, 315, 317, 320, 321, 322, 323, + /* 100 */ 324, 325, 326, 327, 328, 329, 332, 333, 334, 335, + /* 110 */ 336, 337, 338, 339, 340, 341, 342, 343, 344, 345, + /* 120 */ 347, 348, 349, 350, 351, 352, 353, 354, 355, 356, + /* 130 */ 358, 359, 360, 361, 362, 363, 364, 225, 239, 244, + /* 140 */ 365, 245, 366, 240, 242, 246, 248, 265, 243, 279, + /* 150 */ 372, 374, 373, 376, 377, 379, 380, 381, 383, 384, + /* 160 */ 385, 387, 389, 388, 391, 392, 395, 396, 398, 393, + /* 170 */ 394, 397, 399, 409, 410, 412, 413, 415, 418, 404, + /* 180 */ 414, }; static const YYACTIONTYPE yy_default[] = { - /* 0 */ 781, 894, 840, 906, 828, 837, 1037, 1037, 781, 781, - /* 10 */ 781, 781, 781, 781, 781, 781, 781, 953, 800, 1037, - /* 20 */ 781, 781, 781, 781, 781, 781, 781, 781, 781, 837, - /* 30 */ 781, 781, 843, 837, 843, 843, 948, 878, 896, 781, - /* 40 */ 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, - /* 50 */ 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, - /* 60 */ 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, - /* 70 */ 781, 955, 958, 781, 781, 960, 781, 781, 980, 980, - /* 80 */ 946, 781, 781, 781, 781, 781, 781, 781, 781, 781, - /* 90 */ 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, - /* 100 */ 781, 781, 781, 781, 781, 781, 781, 781, 781, 826, - /* 110 */ 781, 824, 781, 781, 781, 781, 781, 781, 781, 781, - /* 120 */ 781, 781, 781, 781, 781, 781, 811, 781, 781, 781, - /* 130 */ 781, 781, 781, 802, 
802, 802, 781, 781, 781, 802, - /* 140 */ 781, 802, 987, 991, 985, 973, 981, 972, 968, 966, - /* 150 */ 965, 995, 802, 802, 802, 841, 837, 837, 802, 802, - /* 160 */ 859, 857, 855, 847, 853, 849, 851, 845, 829, 802, - /* 170 */ 835, 835, 802, 835, 802, 835, 802, 802, 878, 896, - /* 180 */ 781, 996, 781, 1036, 986, 1026, 1025, 1032, 1024, 1023, - /* 190 */ 1022, 781, 781, 781, 1018, 1019, 1021, 1020, 781, 781, - /* 200 */ 1028, 1027, 781, 781, 781, 781, 781, 781, 781, 781, - /* 210 */ 781, 781, 781, 781, 781, 781, 998, 781, 992, 988, - /* 220 */ 781, 781, 781, 781, 781, 781, 781, 781, 781, 908, - /* 230 */ 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, - /* 240 */ 781, 781, 781, 781, 945, 781, 781, 781, 781, 956, - /* 250 */ 781, 781, 781, 781, 781, 781, 781, 781, 781, 982, - /* 260 */ 781, 974, 781, 781, 781, 781, 781, 920, 781, 781, - /* 270 */ 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, - /* 280 */ 781, 781, 781, 1048, 1046, 781, 781, 781, 1042, 781, - /* 290 */ 781, 781, 1040, 781, 781, 781, 781, 781, 781, 781, - /* 300 */ 781, 781, 781, 781, 781, 781, 781, 781, 781, 781, - /* 310 */ 862, 781, 809, 807, 781, 798, 781, + /* 0 */ 787, 900, 846, 912, 834, 843, 1043, 1043, 787, 787, + /* 10 */ 787, 787, 787, 787, 787, 787, 787, 959, 806, 1043, + /* 20 */ 787, 787, 787, 787, 787, 787, 787, 787, 787, 843, + /* 30 */ 787, 787, 849, 843, 849, 849, 954, 884, 902, 787, + /* 40 */ 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, + /* 50 */ 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, + /* 60 */ 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, + /* 70 */ 787, 961, 964, 787, 787, 966, 787, 787, 986, 986, + /* 80 */ 952, 787, 787, 787, 787, 787, 787, 787, 787, 787, + /* 90 */ 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, + /* 100 */ 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, + /* 110 */ 832, 787, 830, 787, 787, 787, 787, 787, 787, 787, + /* 120 */ 787, 787, 787, 787, 787, 787, 787, 817, 787, 787, + /* 130 */ 787, 787, 787, 787, 808, 808, 808, 787, 787, 787, + /* 140 */ 808, 787, 808, 993, 997, 991, 979, 987, 978, 974, + /* 150 */ 972, 971, 1001, 808, 808, 808, 847, 843, 843, 808, + /* 160 */ 808, 865, 863, 861, 853, 859, 855, 857, 851, 835, + /* 170 */ 808, 841, 841, 808, 841, 808, 841, 808, 808, 884, + /* 180 */ 902, 787, 1002, 787, 1042, 992, 1032, 1031, 1038, 1030, + /* 190 */ 1029, 1028, 787, 787, 787, 1024, 1025, 1027, 1026, 787, + /* 200 */ 787, 1034, 1033, 787, 787, 787, 787, 787, 787, 787, + /* 210 */ 787, 787, 787, 787, 787, 787, 787, 1004, 787, 998, + /* 220 */ 994, 787, 787, 787, 787, 787, 787, 787, 787, 787, + /* 230 */ 914, 787, 787, 787, 787, 787, 787, 787, 787, 787, + /* 240 */ 787, 787, 787, 787, 787, 951, 787, 787, 787, 787, + /* 250 */ 962, 787, 787, 787, 787, 787, 787, 787, 787, 787, + /* 260 */ 988, 787, 980, 787, 787, 787, 787, 787, 926, 787, + /* 270 */ 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, + /* 280 */ 787, 787, 787, 787, 1055, 1053, 787, 787, 787, 787, + /* 290 */ 787, 787, 1049, 787, 787, 787, 1046, 787, 787, 787, + /* 300 */ 787, 787, 787, 787, 787, 787, 787, 787, 787, 787, + /* 310 */ 787, 787, 787, 787, 868, 787, 815, 813, 787, 804, + /* 320 */ 787, }; /********** End of lemon-generated parsing tables *****************************/ @@ -616,6 +630,7 @@ static const YYCODETYPE yyFallback[] = { 0, /* SYNCDB => nothing */ 0, /* ADD => nothing */ 0, /* COLUMN => nothing */ + 0, /* LENGTH => nothing */ 0, /* TAG => nothing */ 0, /* CHANGE => nothing */ 0, /* SET => nothing */ @@ -703,6 +718,7 @@ struct yyParser { int yyerrcnt; /* Shifts left 
before out of the error */ #endif ParseARG_SDECL /* A place to hold %extra_argument */ + ParseCTX_SDECL /* A place to hold %extra_context */ #if YYSTACKDEPTH<=0 int yystksz; /* Current side of the stack */ yyStackEntry *yystack; /* The parser's stack */ @@ -889,55 +905,55 @@ static const char *const yyTokenName[] = { /* 136 */ "SYNCDB", /* 137 */ "ADD", /* 138 */ "COLUMN", - /* 139 */ "TAG", - /* 140 */ "CHANGE", - /* 141 */ "SET", - /* 142 */ "KILL", - /* 143 */ "CONNECTION", - /* 144 */ "STREAM", - /* 145 */ "COLON", - /* 146 */ "ABORT", - /* 147 */ "AFTER", - /* 148 */ "ATTACH", - /* 149 */ "BEFORE", - /* 150 */ "BEGIN", - /* 151 */ "CASCADE", - /* 152 */ "CLUSTER", - /* 153 */ "CONFLICT", - /* 154 */ "COPY", - /* 155 */ "DEFERRED", - /* 156 */ "DELIMITERS", - /* 157 */ "DETACH", - /* 158 */ "EACH", - /* 159 */ "END", - /* 160 */ "EXPLAIN", - /* 161 */ "FAIL", - /* 162 */ "FOR", - /* 163 */ "IGNORE", - /* 164 */ "IMMEDIATE", - /* 165 */ "INITIALLY", - /* 166 */ "INSTEAD", - /* 167 */ "MATCH", - /* 168 */ "KEY", - /* 169 */ "OF", - /* 170 */ "RAISE", - /* 171 */ "REPLACE", - /* 172 */ "RESTRICT", - /* 173 */ "ROW", - /* 174 */ "STATEMENT", - /* 175 */ "TRIGGER", - /* 176 */ "VIEW", - /* 177 */ "SEMI", - /* 178 */ "NONE", - /* 179 */ "PREV", - /* 180 */ "LINEAR", - /* 181 */ "IMPORT", - /* 182 */ "TBNAME", - /* 183 */ "JOIN", - /* 184 */ "INSERT", - /* 185 */ "INTO", - /* 186 */ "VALUES", - /* 187 */ "error", + /* 139 */ "LENGTH", + /* 140 */ "TAG", + /* 141 */ "CHANGE", + /* 142 */ "SET", + /* 143 */ "KILL", + /* 144 */ "CONNECTION", + /* 145 */ "STREAM", + /* 146 */ "COLON", + /* 147 */ "ABORT", + /* 148 */ "AFTER", + /* 149 */ "ATTACH", + /* 150 */ "BEFORE", + /* 151 */ "BEGIN", + /* 152 */ "CASCADE", + /* 153 */ "CLUSTER", + /* 154 */ "CONFLICT", + /* 155 */ "COPY", + /* 156 */ "DEFERRED", + /* 157 */ "DELIMITERS", + /* 158 */ "DETACH", + /* 159 */ "EACH", + /* 160 */ "END", + /* 161 */ "EXPLAIN", + /* 162 */ "FAIL", + /* 163 */ "FOR", + /* 164 */ "IGNORE", + /* 165 */ "IMMEDIATE", + /* 166 */ "INITIALLY", + /* 167 */ "INSTEAD", + /* 168 */ "MATCH", + /* 169 */ "KEY", + /* 170 */ "OF", + /* 171 */ "RAISE", + /* 172 */ "REPLACE", + /* 173 */ "RESTRICT", + /* 174 */ "ROW", + /* 175 */ "STATEMENT", + /* 176 */ "TRIGGER", + /* 177 */ "VIEW", + /* 178 */ "SEMI", + /* 179 */ "NONE", + /* 180 */ "PREV", + /* 181 */ "LINEAR", + /* 182 */ "IMPORT", + /* 183 */ "TBNAME", + /* 184 */ "JOIN", + /* 185 */ "INSERT", + /* 186 */ "INTO", + /* 187 */ "VALUES", /* 188 */ "program", /* 189 */ "cmd", /* 190 */ "dbPrefix", @@ -1278,18 +1294,19 @@ static const char *const yyRuleName[] = { /* 255 */ "cmd ::= SYNCDB ids REPLICA", /* 256 */ "cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist", /* 257 */ "cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids", - /* 258 */ "cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist", - /* 259 */ "cmd ::= ALTER TABLE ids cpxName DROP TAG ids", - /* 260 */ "cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids", - /* 261 */ "cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem", - /* 262 */ "cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist", - /* 263 */ "cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids", - /* 264 */ "cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist", - /* 265 */ "cmd ::= ALTER STABLE ids cpxName DROP TAG ids", - /* 266 */ "cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids", - /* 267 */ "cmd ::= KILL CONNECTION INTEGER", - /* 268 */ "cmd ::= KILL STREAM INTEGER COLON INTEGER", - /* 269 */ "cmd ::= KILL QUERY INTEGER COLON INTEGER", + /* 258 */ "cmd 
::= ALTER TABLE ids cpxName ALTER COLUMN LENGTH ids INTEGER", + /* 259 */ "cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist", + /* 260 */ "cmd ::= ALTER TABLE ids cpxName DROP TAG ids", + /* 261 */ "cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids", + /* 262 */ "cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem", + /* 263 */ "cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist", + /* 264 */ "cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids", + /* 265 */ "cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist", + /* 266 */ "cmd ::= ALTER STABLE ids cpxName DROP TAG ids", + /* 267 */ "cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids", + /* 268 */ "cmd ::= KILL CONNECTION INTEGER", + /* 269 */ "cmd ::= KILL STREAM INTEGER COLON INTEGER", + /* 270 */ "cmd ::= KILL QUERY INTEGER COLON INTEGER", }; #endif /* NDEBUG */ @@ -1338,28 +1355,29 @@ static int yyGrowStack(yyParser *p){ /* Initialize a new parser that has already been allocated. */ -void ParseInit(void *yypParser){ - yyParser *pParser = (yyParser*)yypParser; +void ParseInit(void *yypRawParser ParseCTX_PDECL){ + yyParser *yypParser = (yyParser*)yypRawParser; + ParseCTX_STORE #ifdef YYTRACKMAXSTACKDEPTH - pParser->yyhwm = 0; + yypParser->yyhwm = 0; #endif #if YYSTACKDEPTH<=0 - pParser->yytos = NULL; - pParser->yystack = NULL; - pParser->yystksz = 0; - if( yyGrowStack(pParser) ){ - pParser->yystack = &pParser->yystk0; - pParser->yystksz = 1; + yypParser->yytos = NULL; + yypParser->yystack = NULL; + yypParser->yystksz = 0; + if( yyGrowStack(yypParser) ){ + yypParser->yystack = &yypParser->yystk0; + yypParser->yystksz = 1; } #endif #ifndef YYNOERRORRECOVERY - pParser->yyerrcnt = -1; + yypParser->yyerrcnt = -1; #endif - pParser->yytos = pParser->yystack; - pParser->yystack[0].stateno = 0; - pParser->yystack[0].major = 0; + yypParser->yytos = yypParser->yystack; + yypParser->yystack[0].stateno = 0; + yypParser->yystack[0].major = 0; #if YYSTACKDEPTH>0 - pParser->yystackEnd = &pParser->yystack[YYSTACKDEPTH-1]; + yypParser->yystackEnd = &yypParser->yystack[YYSTACKDEPTH-1]; #endif } @@ -1376,11 +1394,14 @@ void ParseInit(void *yypParser){ ** A pointer to a parser. This pointer is used in subsequent calls ** to Parse and ParseFree. */ -void *ParseAlloc(void *(*mallocProc)(YYMALLOCARGTYPE)){ - yyParser *pParser; - pParser = (yyParser*)(*mallocProc)( (YYMALLOCARGTYPE)sizeof(yyParser) ); - if( pParser ) ParseInit(pParser); - return pParser; +void *ParseAlloc(void *(*mallocProc)(YYMALLOCARGTYPE) ParseCTX_PDECL){ + yyParser *yypParser; + yypParser = (yyParser*)(*mallocProc)( (YYMALLOCARGTYPE)sizeof(yyParser) ); + if( yypParser ){ + ParseCTX_STORE + ParseInit(yypParser ParseCTX_PARAM); + } + return (void*)yypParser; } #endif /* Parse_ENGINEALWAYSONSTACK */ @@ -1397,7 +1418,8 @@ static void yy_destructor( YYCODETYPE yymajor, /* Type code for object to destroy */ YYMINORTYPE *yypminor /* The object to be destroyed */ ){ - ParseARG_FETCH; + ParseARG_FETCH + ParseCTX_FETCH switch( yymajor ){ /* Here is inserted the actions which take place when a ** terminal or non-terminal is destroyed. This can happen @@ -1573,13 +1595,12 @@ int ParseCoverage(FILE *out){ ** Find the appropriate action for a parser given the terminal ** look-ahead token iLookAhead. 
*/ -static unsigned int yy_find_shift_action( - yyParser *pParser, /* The parser */ - YYCODETYPE iLookAhead /* The look-ahead token */ +static YYACTIONTYPE yy_find_shift_action( + YYCODETYPE iLookAhead, /* The look-ahead token */ + YYACTIONTYPE stateno /* Current state number */ ){ int i; - int stateno = pParser->yytos->stateno; - + if( stateno>YY_MAX_SHIFT ) return stateno; assert( stateno <= YY_SHIFT_COUNT ); #if defined(YYCOVERAGE) @@ -1587,15 +1608,19 @@ static unsigned int yy_find_shift_action( #endif do{ i = yy_shift_ofst[stateno]; - assert( i>=0 && i+YYNTOKEN<=sizeof(yy_lookahead)/sizeof(yy_lookahead[0]) ); + assert( i>=0 ); + assert( i<=YY_ACTTAB_COUNT ); + assert( i+YYNTOKEN<=(int)YY_NLOOKAHEAD ); assert( iLookAhead!=YYNOCODE ); assert( iLookAhead < YYNTOKEN ); i += iLookAhead; + assert( i<(int)YY_NLOOKAHEAD ); if( yy_lookahead[i]!=iLookAhead ){ #ifdef YYFALLBACK YYCODETYPE iFallback; /* Fallback token */ - if( iLookAhead %s\n", @@ -1610,15 +1635,8 @@ static unsigned int yy_find_shift_action( #ifdef YYWILDCARD { int j = i - iLookAhead + YYWILDCARD; - if( -#if YY_SHIFT_MIN+YYWILDCARD<0 - j>=0 && -#endif -#if YY_SHIFT_MAX+YYWILDCARD>=YY_ACTTAB_COUNT - j0 - ){ + assert( j<(int)(sizeof(yy_lookahead)/sizeof(yy_lookahead[0])) ); + if( yy_lookahead[j]==YYWILDCARD && iLookAhead>0 ){ #ifndef NDEBUG if( yyTraceFILE ){ fprintf(yyTraceFILE, "%sWILDCARD %s => %s\n", @@ -1632,6 +1650,7 @@ static unsigned int yy_find_shift_action( #endif /* YYWILDCARD */ return yy_default[stateno]; }else{ + assert( i>=0 && iyytos; - yytos->stateno = (YYACTIONTYPE)yyNewState; - yytos->major = (YYCODETYPE)yyMajor; + yytos->stateno = yyNewState; + yytos->major = yyMajor; yytos->minor.yy0 = yyMinor; yyTraceShift(yypParser, yyNewState, "Shift"); } -/* The following table contains information about every rule that -** is used during the reduce. 
-*/ -static const struct { - YYCODETYPE lhs; /* Symbol on the left-hand side of the rule */ - signed char nrhs; /* Negative of the number of RHS symbols in the rule */ -} yyRuleInfo[] = { - { 188, -1 }, /* (0) program ::= cmd */ - { 189, -2 }, /* (1) cmd ::= SHOW DATABASES */ - { 189, -2 }, /* (2) cmd ::= SHOW TOPICS */ - { 189, -2 }, /* (3) cmd ::= SHOW MNODES */ - { 189, -2 }, /* (4) cmd ::= SHOW DNODES */ - { 189, -2 }, /* (5) cmd ::= SHOW ACCOUNTS */ - { 189, -2 }, /* (6) cmd ::= SHOW USERS */ - { 189, -2 }, /* (7) cmd ::= SHOW MODULES */ - { 189, -2 }, /* (8) cmd ::= SHOW QUERIES */ - { 189, -2 }, /* (9) cmd ::= SHOW CONNECTIONS */ - { 189, -2 }, /* (10) cmd ::= SHOW STREAMS */ - { 189, -2 }, /* (11) cmd ::= SHOW VARIABLES */ - { 189, -2 }, /* (12) cmd ::= SHOW SCORES */ - { 189, -2 }, /* (13) cmd ::= SHOW GRANTS */ - { 189, -2 }, /* (14) cmd ::= SHOW VNODES */ - { 189, -3 }, /* (15) cmd ::= SHOW VNODES IPTOKEN */ - { 190, 0 }, /* (16) dbPrefix ::= */ - { 190, -2 }, /* (17) dbPrefix ::= ids DOT */ - { 192, 0 }, /* (18) cpxName ::= */ - { 192, -2 }, /* (19) cpxName ::= DOT ids */ - { 189, -5 }, /* (20) cmd ::= SHOW CREATE TABLE ids cpxName */ - { 189, -5 }, /* (21) cmd ::= SHOW CREATE STABLE ids cpxName */ - { 189, -4 }, /* (22) cmd ::= SHOW CREATE DATABASE ids */ - { 189, -3 }, /* (23) cmd ::= SHOW dbPrefix TABLES */ - { 189, -5 }, /* (24) cmd ::= SHOW dbPrefix TABLES LIKE ids */ - { 189, -3 }, /* (25) cmd ::= SHOW dbPrefix STABLES */ - { 189, -5 }, /* (26) cmd ::= SHOW dbPrefix STABLES LIKE ids */ - { 189, -3 }, /* (27) cmd ::= SHOW dbPrefix VGROUPS */ - { 189, -4 }, /* (28) cmd ::= SHOW dbPrefix VGROUPS ids */ - { 189, -5 }, /* (29) cmd ::= DROP TABLE ifexists ids cpxName */ - { 189, -5 }, /* (30) cmd ::= DROP STABLE ifexists ids cpxName */ - { 189, -4 }, /* (31) cmd ::= DROP DATABASE ifexists ids */ - { 189, -4 }, /* (32) cmd ::= DROP TOPIC ifexists ids */ - { 189, -3 }, /* (33) cmd ::= DROP DNODE ids */ - { 189, -3 }, /* (34) cmd ::= DROP USER ids */ - { 189, -3 }, /* (35) cmd ::= DROP ACCOUNT ids */ - { 189, -2 }, /* (36) cmd ::= USE ids */ - { 189, -3 }, /* (37) cmd ::= DESCRIBE ids cpxName */ - { 189, -5 }, /* (38) cmd ::= ALTER USER ids PASS ids */ - { 189, -5 }, /* (39) cmd ::= ALTER USER ids PRIVILEGE ids */ - { 189, -4 }, /* (40) cmd ::= ALTER DNODE ids ids */ - { 189, -5 }, /* (41) cmd ::= ALTER DNODE ids ids ids */ - { 189, -3 }, /* (42) cmd ::= ALTER LOCAL ids */ - { 189, -4 }, /* (43) cmd ::= ALTER LOCAL ids ids */ - { 189, -4 }, /* (44) cmd ::= ALTER DATABASE ids alter_db_optr */ - { 189, -4 }, /* (45) cmd ::= ALTER TOPIC ids alter_topic_optr */ - { 189, -4 }, /* (46) cmd ::= ALTER ACCOUNT ids acct_optr */ - { 189, -6 }, /* (47) cmd ::= ALTER ACCOUNT ids PASS ids acct_optr */ - { 191, -1 }, /* (48) ids ::= ID */ - { 191, -1 }, /* (49) ids ::= STRING */ - { 193, -2 }, /* (50) ifexists ::= IF EXISTS */ - { 193, 0 }, /* (51) ifexists ::= */ - { 197, -3 }, /* (52) ifnotexists ::= IF NOT EXISTS */ - { 197, 0 }, /* (53) ifnotexists ::= */ - { 189, -3 }, /* (54) cmd ::= CREATE DNODE ids */ - { 189, -6 }, /* (55) cmd ::= CREATE ACCOUNT ids PASS ids acct_optr */ - { 189, -5 }, /* (56) cmd ::= CREATE DATABASE ifnotexists ids db_optr */ - { 189, -5 }, /* (57) cmd ::= CREATE TOPIC ifnotexists ids topic_optr */ - { 189, -5 }, /* (58) cmd ::= CREATE USER ids PASS ids */ - { 200, 0 }, /* (59) pps ::= */ - { 200, -2 }, /* (60) pps ::= PPS INTEGER */ - { 201, 0 }, /* (61) tseries ::= */ - { 201, -2 }, /* (62) tseries ::= TSERIES INTEGER */ - { 202, 0 }, /* (63) dbs ::= */ - { 202, -2 
}, /* (64) dbs ::= DBS INTEGER */ - { 203, 0 }, /* (65) streams ::= */ - { 203, -2 }, /* (66) streams ::= STREAMS INTEGER */ - { 204, 0 }, /* (67) storage ::= */ - { 204, -2 }, /* (68) storage ::= STORAGE INTEGER */ - { 205, 0 }, /* (69) qtime ::= */ - { 205, -2 }, /* (70) qtime ::= QTIME INTEGER */ - { 206, 0 }, /* (71) users ::= */ - { 206, -2 }, /* (72) users ::= USERS INTEGER */ - { 207, 0 }, /* (73) conns ::= */ - { 207, -2 }, /* (74) conns ::= CONNS INTEGER */ - { 208, 0 }, /* (75) state ::= */ - { 208, -2 }, /* (76) state ::= STATE ids */ - { 196, -9 }, /* (77) acct_optr ::= pps tseries storage streams qtime dbs users conns state */ - { 209, -2 }, /* (78) keep ::= KEEP tagitemlist */ - { 211, -2 }, /* (79) cache ::= CACHE INTEGER */ - { 212, -2 }, /* (80) replica ::= REPLICA INTEGER */ - { 213, -2 }, /* (81) quorum ::= QUORUM INTEGER */ - { 214, -2 }, /* (82) days ::= DAYS INTEGER */ - { 215, -2 }, /* (83) minrows ::= MINROWS INTEGER */ - { 216, -2 }, /* (84) maxrows ::= MAXROWS INTEGER */ - { 217, -2 }, /* (85) blocks ::= BLOCKS INTEGER */ - { 218, -2 }, /* (86) ctime ::= CTIME INTEGER */ - { 219, -2 }, /* (87) wal ::= WAL INTEGER */ - { 220, -2 }, /* (88) fsync ::= FSYNC INTEGER */ - { 221, -2 }, /* (89) comp ::= COMP INTEGER */ - { 222, -2 }, /* (90) prec ::= PRECISION STRING */ - { 223, -2 }, /* (91) update ::= UPDATE INTEGER */ - { 224, -2 }, /* (92) cachelast ::= CACHELAST INTEGER */ - { 225, -2 }, /* (93) partitions ::= PARTITIONS INTEGER */ - { 198, 0 }, /* (94) db_optr ::= */ - { 198, -2 }, /* (95) db_optr ::= db_optr cache */ - { 198, -2 }, /* (96) db_optr ::= db_optr replica */ - { 198, -2 }, /* (97) db_optr ::= db_optr quorum */ - { 198, -2 }, /* (98) db_optr ::= db_optr days */ - { 198, -2 }, /* (99) db_optr ::= db_optr minrows */ - { 198, -2 }, /* (100) db_optr ::= db_optr maxrows */ - { 198, -2 }, /* (101) db_optr ::= db_optr blocks */ - { 198, -2 }, /* (102) db_optr ::= db_optr ctime */ - { 198, -2 }, /* (103) db_optr ::= db_optr wal */ - { 198, -2 }, /* (104) db_optr ::= db_optr fsync */ - { 198, -2 }, /* (105) db_optr ::= db_optr comp */ - { 198, -2 }, /* (106) db_optr ::= db_optr prec */ - { 198, -2 }, /* (107) db_optr ::= db_optr keep */ - { 198, -2 }, /* (108) db_optr ::= db_optr update */ - { 198, -2 }, /* (109) db_optr ::= db_optr cachelast */ - { 199, -1 }, /* (110) topic_optr ::= db_optr */ - { 199, -2 }, /* (111) topic_optr ::= topic_optr partitions */ - { 194, 0 }, /* (112) alter_db_optr ::= */ - { 194, -2 }, /* (113) alter_db_optr ::= alter_db_optr replica */ - { 194, -2 }, /* (114) alter_db_optr ::= alter_db_optr quorum */ - { 194, -2 }, /* (115) alter_db_optr ::= alter_db_optr keep */ - { 194, -2 }, /* (116) alter_db_optr ::= alter_db_optr blocks */ - { 194, -2 }, /* (117) alter_db_optr ::= alter_db_optr comp */ - { 194, -2 }, /* (118) alter_db_optr ::= alter_db_optr wal */ - { 194, -2 }, /* (119) alter_db_optr ::= alter_db_optr fsync */ - { 194, -2 }, /* (120) alter_db_optr ::= alter_db_optr update */ - { 194, -2 }, /* (121) alter_db_optr ::= alter_db_optr cachelast */ - { 195, -1 }, /* (122) alter_topic_optr ::= alter_db_optr */ - { 195, -2 }, /* (123) alter_topic_optr ::= alter_topic_optr partitions */ - { 226, -1 }, /* (124) typename ::= ids */ - { 226, -4 }, /* (125) typename ::= ids LP signed RP */ - { 226, -2 }, /* (126) typename ::= ids UNSIGNED */ - { 227, -1 }, /* (127) signed ::= INTEGER */ - { 227, -2 }, /* (128) signed ::= PLUS INTEGER */ - { 227, -2 }, /* (129) signed ::= MINUS INTEGER */ - { 189, -3 }, /* (130) cmd ::= CREATE TABLE 
create_table_args */ - { 189, -3 }, /* (131) cmd ::= CREATE TABLE create_stable_args */ - { 189, -3 }, /* (132) cmd ::= CREATE STABLE create_stable_args */ - { 189, -3 }, /* (133) cmd ::= CREATE TABLE create_table_list */ - { 230, -1 }, /* (134) create_table_list ::= create_from_stable */ - { 230, -2 }, /* (135) create_table_list ::= create_table_list create_from_stable */ - { 228, -6 }, /* (136) create_table_args ::= ifnotexists ids cpxName LP columnlist RP */ - { 229, -10 }, /* (137) create_stable_args ::= ifnotexists ids cpxName LP columnlist RP TAGS LP columnlist RP */ - { 231, -10 }, /* (138) create_from_stable ::= ifnotexists ids cpxName USING ids cpxName TAGS LP tagitemlist RP */ - { 231, -13 }, /* (139) create_from_stable ::= ifnotexists ids cpxName USING ids cpxName LP tagNamelist RP TAGS LP tagitemlist RP */ - { 233, -3 }, /* (140) tagNamelist ::= tagNamelist COMMA ids */ - { 233, -1 }, /* (141) tagNamelist ::= ids */ - { 228, -5 }, /* (142) create_table_args ::= ifnotexists ids cpxName AS select */ - { 232, -3 }, /* (143) columnlist ::= columnlist COMMA column */ - { 232, -1 }, /* (144) columnlist ::= column */ - { 235, -2 }, /* (145) column ::= ids typename */ - { 210, -3 }, /* (146) tagitemlist ::= tagitemlist COMMA tagitem */ - { 210, -1 }, /* (147) tagitemlist ::= tagitem */ - { 236, -1 }, /* (148) tagitem ::= INTEGER */ - { 236, -1 }, /* (149) tagitem ::= FLOAT */ - { 236, -1 }, /* (150) tagitem ::= STRING */ - { 236, -1 }, /* (151) tagitem ::= BOOL */ - { 236, -1 }, /* (152) tagitem ::= NULL */ - { 236, -2 }, /* (153) tagitem ::= MINUS INTEGER */ - { 236, -2 }, /* (154) tagitem ::= MINUS FLOAT */ - { 236, -2 }, /* (155) tagitem ::= PLUS INTEGER */ - { 236, -2 }, /* (156) tagitem ::= PLUS FLOAT */ - { 234, -13 }, /* (157) select ::= SELECT selcollist from where_opt interval_opt session_option fill_opt sliding_opt groupby_opt orderby_opt having_opt slimit_opt limit_opt */ - { 234, -3 }, /* (158) select ::= LP select RP */ - { 249, -1 }, /* (159) union ::= select */ - { 249, -4 }, /* (160) union ::= union UNION ALL select */ - { 189, -1 }, /* (161) cmd ::= union */ - { 234, -2 }, /* (162) select ::= SELECT selcollist */ - { 250, -2 }, /* (163) sclp ::= selcollist COMMA */ - { 250, 0 }, /* (164) sclp ::= */ - { 237, -4 }, /* (165) selcollist ::= sclp distinct expr as */ - { 237, -2 }, /* (166) selcollist ::= sclp STAR */ - { 253, -2 }, /* (167) as ::= AS ids */ - { 253, -1 }, /* (168) as ::= ids */ - { 253, 0 }, /* (169) as ::= */ - { 251, -1 }, /* (170) distinct ::= DISTINCT */ - { 251, 0 }, /* (171) distinct ::= */ - { 238, -2 }, /* (172) from ::= FROM tablelist */ - { 238, -4 }, /* (173) from ::= FROM LP union RP */ - { 254, -2 }, /* (174) tablelist ::= ids cpxName */ - { 254, -3 }, /* (175) tablelist ::= ids cpxName ids */ - { 254, -4 }, /* (176) tablelist ::= tablelist COMMA ids cpxName */ - { 254, -5 }, /* (177) tablelist ::= tablelist COMMA ids cpxName ids */ - { 255, -1 }, /* (178) tmvar ::= VARIABLE */ - { 240, -4 }, /* (179) interval_opt ::= INTERVAL LP tmvar RP */ - { 240, -6 }, /* (180) interval_opt ::= INTERVAL LP tmvar COMMA tmvar RP */ - { 240, 0 }, /* (181) interval_opt ::= */ - { 241, 0 }, /* (182) session_option ::= */ - { 241, -7 }, /* (183) session_option ::= SESSION LP ids cpxName COMMA tmvar RP */ - { 242, 0 }, /* (184) fill_opt ::= */ - { 242, -6 }, /* (185) fill_opt ::= FILL LP ID COMMA tagitemlist RP */ - { 242, -4 }, /* (186) fill_opt ::= FILL LP ID RP */ - { 243, -4 }, /* (187) sliding_opt ::= SLIDING LP tmvar RP */ - { 243, 0 }, /* (188) sliding_opt 
::= */ - { 245, 0 }, /* (189) orderby_opt ::= */ - { 245, -3 }, /* (190) orderby_opt ::= ORDER BY sortlist */ - { 256, -4 }, /* (191) sortlist ::= sortlist COMMA item sortorder */ - { 256, -2 }, /* (192) sortlist ::= item sortorder */ - { 258, -2 }, /* (193) item ::= ids cpxName */ - { 259, -1 }, /* (194) sortorder ::= ASC */ - { 259, -1 }, /* (195) sortorder ::= DESC */ - { 259, 0 }, /* (196) sortorder ::= */ - { 244, 0 }, /* (197) groupby_opt ::= */ - { 244, -3 }, /* (198) groupby_opt ::= GROUP BY grouplist */ - { 260, -3 }, /* (199) grouplist ::= grouplist COMMA item */ - { 260, -1 }, /* (200) grouplist ::= item */ - { 246, 0 }, /* (201) having_opt ::= */ - { 246, -2 }, /* (202) having_opt ::= HAVING expr */ - { 248, 0 }, /* (203) limit_opt ::= */ - { 248, -2 }, /* (204) limit_opt ::= LIMIT signed */ - { 248, -4 }, /* (205) limit_opt ::= LIMIT signed OFFSET signed */ - { 248, -4 }, /* (206) limit_opt ::= LIMIT signed COMMA signed */ - { 247, 0 }, /* (207) slimit_opt ::= */ - { 247, -2 }, /* (208) slimit_opt ::= SLIMIT signed */ - { 247, -4 }, /* (209) slimit_opt ::= SLIMIT signed SOFFSET signed */ - { 247, -4 }, /* (210) slimit_opt ::= SLIMIT signed COMMA signed */ - { 239, 0 }, /* (211) where_opt ::= */ - { 239, -2 }, /* (212) where_opt ::= WHERE expr */ - { 252, -3 }, /* (213) expr ::= LP expr RP */ - { 252, -1 }, /* (214) expr ::= ID */ - { 252, -3 }, /* (215) expr ::= ID DOT ID */ - { 252, -3 }, /* (216) expr ::= ID DOT STAR */ - { 252, -1 }, /* (217) expr ::= INTEGER */ - { 252, -2 }, /* (218) expr ::= MINUS INTEGER */ - { 252, -2 }, /* (219) expr ::= PLUS INTEGER */ - { 252, -1 }, /* (220) expr ::= FLOAT */ - { 252, -2 }, /* (221) expr ::= MINUS FLOAT */ - { 252, -2 }, /* (222) expr ::= PLUS FLOAT */ - { 252, -1 }, /* (223) expr ::= STRING */ - { 252, -1 }, /* (224) expr ::= NOW */ - { 252, -1 }, /* (225) expr ::= VARIABLE */ - { 252, -2 }, /* (226) expr ::= PLUS VARIABLE */ - { 252, -2 }, /* (227) expr ::= MINUS VARIABLE */ - { 252, -1 }, /* (228) expr ::= BOOL */ - { 252, -1 }, /* (229) expr ::= NULL */ - { 252, -4 }, /* (230) expr ::= ID LP exprlist RP */ - { 252, -4 }, /* (231) expr ::= ID LP STAR RP */ - { 252, -3 }, /* (232) expr ::= expr IS NULL */ - { 252, -4 }, /* (233) expr ::= expr IS NOT NULL */ - { 252, -3 }, /* (234) expr ::= expr LT expr */ - { 252, -3 }, /* (235) expr ::= expr GT expr */ - { 252, -3 }, /* (236) expr ::= expr LE expr */ - { 252, -3 }, /* (237) expr ::= expr GE expr */ - { 252, -3 }, /* (238) expr ::= expr NE expr */ - { 252, -3 }, /* (239) expr ::= expr EQ expr */ - { 252, -5 }, /* (240) expr ::= expr BETWEEN expr AND expr */ - { 252, -3 }, /* (241) expr ::= expr AND expr */ - { 252, -3 }, /* (242) expr ::= expr OR expr */ - { 252, -3 }, /* (243) expr ::= expr PLUS expr */ - { 252, -3 }, /* (244) expr ::= expr MINUS expr */ - { 252, -3 }, /* (245) expr ::= expr STAR expr */ - { 252, -3 }, /* (246) expr ::= expr SLASH expr */ - { 252, -3 }, /* (247) expr ::= expr REM expr */ - { 252, -3 }, /* (248) expr ::= expr LIKE expr */ - { 252, -5 }, /* (249) expr ::= expr IN LP exprlist RP */ - { 261, -3 }, /* (250) exprlist ::= exprlist COMMA expritem */ - { 261, -1 }, /* (251) exprlist ::= expritem */ - { 262, -1 }, /* (252) expritem ::= expr */ - { 262, 0 }, /* (253) expritem ::= */ - { 189, -3 }, /* (254) cmd ::= RESET QUERY CACHE */ - { 189, -3 }, /* (255) cmd ::= SYNCDB ids REPLICA */ - { 189, -7 }, /* (256) cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist */ - { 189, -7 }, /* (257) cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids */ - { 189, -7 }, 
/* (258) cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist */ - { 189, -7 }, /* (259) cmd ::= ALTER TABLE ids cpxName DROP TAG ids */ - { 189, -8 }, /* (260) cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids */ - { 189, -9 }, /* (261) cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem */ - { 189, -7 }, /* (262) cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist */ - { 189, -7 }, /* (263) cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids */ - { 189, -7 }, /* (264) cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist */ - { 189, -7 }, /* (265) cmd ::= ALTER STABLE ids cpxName DROP TAG ids */ - { 189, -8 }, /* (266) cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids */ - { 189, -3 }, /* (267) cmd ::= KILL CONNECTION INTEGER */ - { 189, -5 }, /* (268) cmd ::= KILL STREAM INTEGER COLON INTEGER */ - { 189, -5 }, /* (269) cmd ::= KILL QUERY INTEGER COLON INTEGER */ +/* For rule J, yyRuleInfoLhs[J] contains the symbol on the left-hand side +** of that rule */ +static const YYCODETYPE yyRuleInfoLhs[] = { + 188, /* (0) program ::= cmd */ + 189, /* (1) cmd ::= SHOW DATABASES */ + 189, /* (2) cmd ::= SHOW TOPICS */ + 189, /* (3) cmd ::= SHOW MNODES */ + 189, /* (4) cmd ::= SHOW DNODES */ + 189, /* (5) cmd ::= SHOW ACCOUNTS */ + 189, /* (6) cmd ::= SHOW USERS */ + 189, /* (7) cmd ::= SHOW MODULES */ + 189, /* (8) cmd ::= SHOW QUERIES */ + 189, /* (9) cmd ::= SHOW CONNECTIONS */ + 189, /* (10) cmd ::= SHOW STREAMS */ + 189, /* (11) cmd ::= SHOW VARIABLES */ + 189, /* (12) cmd ::= SHOW SCORES */ + 189, /* (13) cmd ::= SHOW GRANTS */ + 189, /* (14) cmd ::= SHOW VNODES */ + 189, /* (15) cmd ::= SHOW VNODES IPTOKEN */ + 190, /* (16) dbPrefix ::= */ + 190, /* (17) dbPrefix ::= ids DOT */ + 192, /* (18) cpxName ::= */ + 192, /* (19) cpxName ::= DOT ids */ + 189, /* (20) cmd ::= SHOW CREATE TABLE ids cpxName */ + 189, /* (21) cmd ::= SHOW CREATE STABLE ids cpxName */ + 189, /* (22) cmd ::= SHOW CREATE DATABASE ids */ + 189, /* (23) cmd ::= SHOW dbPrefix TABLES */ + 189, /* (24) cmd ::= SHOW dbPrefix TABLES LIKE ids */ + 189, /* (25) cmd ::= SHOW dbPrefix STABLES */ + 189, /* (26) cmd ::= SHOW dbPrefix STABLES LIKE ids */ + 189, /* (27) cmd ::= SHOW dbPrefix VGROUPS */ + 189, /* (28) cmd ::= SHOW dbPrefix VGROUPS ids */ + 189, /* (29) cmd ::= DROP TABLE ifexists ids cpxName */ + 189, /* (30) cmd ::= DROP STABLE ifexists ids cpxName */ + 189, /* (31) cmd ::= DROP DATABASE ifexists ids */ + 189, /* (32) cmd ::= DROP TOPIC ifexists ids */ + 189, /* (33) cmd ::= DROP DNODE ids */ + 189, /* (34) cmd ::= DROP USER ids */ + 189, /* (35) cmd ::= DROP ACCOUNT ids */ + 189, /* (36) cmd ::= USE ids */ + 189, /* (37) cmd ::= DESCRIBE ids cpxName */ + 189, /* (38) cmd ::= ALTER USER ids PASS ids */ + 189, /* (39) cmd ::= ALTER USER ids PRIVILEGE ids */ + 189, /* (40) cmd ::= ALTER DNODE ids ids */ + 189, /* (41) cmd ::= ALTER DNODE ids ids ids */ + 189, /* (42) cmd ::= ALTER LOCAL ids */ + 189, /* (43) cmd ::= ALTER LOCAL ids ids */ + 189, /* (44) cmd ::= ALTER DATABASE ids alter_db_optr */ + 189, /* (45) cmd ::= ALTER TOPIC ids alter_topic_optr */ + 189, /* (46) cmd ::= ALTER ACCOUNT ids acct_optr */ + 189, /* (47) cmd ::= ALTER ACCOUNT ids PASS ids acct_optr */ + 191, /* (48) ids ::= ID */ + 191, /* (49) ids ::= STRING */ + 193, /* (50) ifexists ::= IF EXISTS */ + 193, /* (51) ifexists ::= */ + 197, /* (52) ifnotexists ::= IF NOT EXISTS */ + 197, /* (53) ifnotexists ::= */ + 189, /* (54) cmd ::= CREATE DNODE ids */ + 189, /* (55) cmd ::= CREATE ACCOUNT ids PASS ids acct_optr */ + 189, /* (56) cmd ::= CREATE 
DATABASE ifnotexists ids db_optr */ + 189, /* (57) cmd ::= CREATE TOPIC ifnotexists ids topic_optr */ + 189, /* (58) cmd ::= CREATE USER ids PASS ids */ + 200, /* (59) pps ::= */ + 200, /* (60) pps ::= PPS INTEGER */ + 201, /* (61) tseries ::= */ + 201, /* (62) tseries ::= TSERIES INTEGER */ + 202, /* (63) dbs ::= */ + 202, /* (64) dbs ::= DBS INTEGER */ + 203, /* (65) streams ::= */ + 203, /* (66) streams ::= STREAMS INTEGER */ + 204, /* (67) storage ::= */ + 204, /* (68) storage ::= STORAGE INTEGER */ + 205, /* (69) qtime ::= */ + 205, /* (70) qtime ::= QTIME INTEGER */ + 206, /* (71) users ::= */ + 206, /* (72) users ::= USERS INTEGER */ + 207, /* (73) conns ::= */ + 207, /* (74) conns ::= CONNS INTEGER */ + 208, /* (75) state ::= */ + 208, /* (76) state ::= STATE ids */ + 196, /* (77) acct_optr ::= pps tseries storage streams qtime dbs users conns state */ + 209, /* (78) keep ::= KEEP tagitemlist */ + 211, /* (79) cache ::= CACHE INTEGER */ + 212, /* (80) replica ::= REPLICA INTEGER */ + 213, /* (81) quorum ::= QUORUM INTEGER */ + 214, /* (82) days ::= DAYS INTEGER */ + 215, /* (83) minrows ::= MINROWS INTEGER */ + 216, /* (84) maxrows ::= MAXROWS INTEGER */ + 217, /* (85) blocks ::= BLOCKS INTEGER */ + 218, /* (86) ctime ::= CTIME INTEGER */ + 219, /* (87) wal ::= WAL INTEGER */ + 220, /* (88) fsync ::= FSYNC INTEGER */ + 221, /* (89) comp ::= COMP INTEGER */ + 222, /* (90) prec ::= PRECISION STRING */ + 223, /* (91) update ::= UPDATE INTEGER */ + 224, /* (92) cachelast ::= CACHELAST INTEGER */ + 225, /* (93) partitions ::= PARTITIONS INTEGER */ + 198, /* (94) db_optr ::= */ + 198, /* (95) db_optr ::= db_optr cache */ + 198, /* (96) db_optr ::= db_optr replica */ + 198, /* (97) db_optr ::= db_optr quorum */ + 198, /* (98) db_optr ::= db_optr days */ + 198, /* (99) db_optr ::= db_optr minrows */ + 198, /* (100) db_optr ::= db_optr maxrows */ + 198, /* (101) db_optr ::= db_optr blocks */ + 198, /* (102) db_optr ::= db_optr ctime */ + 198, /* (103) db_optr ::= db_optr wal */ + 198, /* (104) db_optr ::= db_optr fsync */ + 198, /* (105) db_optr ::= db_optr comp */ + 198, /* (106) db_optr ::= db_optr prec */ + 198, /* (107) db_optr ::= db_optr keep */ + 198, /* (108) db_optr ::= db_optr update */ + 198, /* (109) db_optr ::= db_optr cachelast */ + 199, /* (110) topic_optr ::= db_optr */ + 199, /* (111) topic_optr ::= topic_optr partitions */ + 194, /* (112) alter_db_optr ::= */ + 194, /* (113) alter_db_optr ::= alter_db_optr replica */ + 194, /* (114) alter_db_optr ::= alter_db_optr quorum */ + 194, /* (115) alter_db_optr ::= alter_db_optr keep */ + 194, /* (116) alter_db_optr ::= alter_db_optr blocks */ + 194, /* (117) alter_db_optr ::= alter_db_optr comp */ + 194, /* (118) alter_db_optr ::= alter_db_optr wal */ + 194, /* (119) alter_db_optr ::= alter_db_optr fsync */ + 194, /* (120) alter_db_optr ::= alter_db_optr update */ + 194, /* (121) alter_db_optr ::= alter_db_optr cachelast */ + 195, /* (122) alter_topic_optr ::= alter_db_optr */ + 195, /* (123) alter_topic_optr ::= alter_topic_optr partitions */ + 226, /* (124) typename ::= ids */ + 226, /* (125) typename ::= ids LP signed RP */ + 226, /* (126) typename ::= ids UNSIGNED */ + 227, /* (127) signed ::= INTEGER */ + 227, /* (128) signed ::= PLUS INTEGER */ + 227, /* (129) signed ::= MINUS INTEGER */ + 189, /* (130) cmd ::= CREATE TABLE create_table_args */ + 189, /* (131) cmd ::= CREATE TABLE create_stable_args */ + 189, /* (132) cmd ::= CREATE STABLE create_stable_args */ + 189, /* (133) cmd ::= CREATE TABLE create_table_list */ + 
230, /* (134) create_table_list ::= create_from_stable */ + 230, /* (135) create_table_list ::= create_table_list create_from_stable */ + 228, /* (136) create_table_args ::= ifnotexists ids cpxName LP columnlist RP */ + 229, /* (137) create_stable_args ::= ifnotexists ids cpxName LP columnlist RP TAGS LP columnlist RP */ + 231, /* (138) create_from_stable ::= ifnotexists ids cpxName USING ids cpxName TAGS LP tagitemlist RP */ + 231, /* (139) create_from_stable ::= ifnotexists ids cpxName USING ids cpxName LP tagNamelist RP TAGS LP tagitemlist RP */ + 233, /* (140) tagNamelist ::= tagNamelist COMMA ids */ + 233, /* (141) tagNamelist ::= ids */ + 228, /* (142) create_table_args ::= ifnotexists ids cpxName AS select */ + 232, /* (143) columnlist ::= columnlist COMMA column */ + 232, /* (144) columnlist ::= column */ + 235, /* (145) column ::= ids typename */ + 210, /* (146) tagitemlist ::= tagitemlist COMMA tagitem */ + 210, /* (147) tagitemlist ::= tagitem */ + 236, /* (148) tagitem ::= INTEGER */ + 236, /* (149) tagitem ::= FLOAT */ + 236, /* (150) tagitem ::= STRING */ + 236, /* (151) tagitem ::= BOOL */ + 236, /* (152) tagitem ::= NULL */ + 236, /* (153) tagitem ::= MINUS INTEGER */ + 236, /* (154) tagitem ::= MINUS FLOAT */ + 236, /* (155) tagitem ::= PLUS INTEGER */ + 236, /* (156) tagitem ::= PLUS FLOAT */ + 234, /* (157) select ::= SELECT selcollist from where_opt interval_opt session_option fill_opt sliding_opt groupby_opt orderby_opt having_opt slimit_opt limit_opt */ + 234, /* (158) select ::= LP select RP */ + 249, /* (159) union ::= select */ + 249, /* (160) union ::= union UNION ALL select */ + 189, /* (161) cmd ::= union */ + 234, /* (162) select ::= SELECT selcollist */ + 250, /* (163) sclp ::= selcollist COMMA */ + 250, /* (164) sclp ::= */ + 237, /* (165) selcollist ::= sclp distinct expr as */ + 237, /* (166) selcollist ::= sclp STAR */ + 253, /* (167) as ::= AS ids */ + 253, /* (168) as ::= ids */ + 253, /* (169) as ::= */ + 251, /* (170) distinct ::= DISTINCT */ + 251, /* (171) distinct ::= */ + 238, /* (172) from ::= FROM tablelist */ + 238, /* (173) from ::= FROM LP union RP */ + 254, /* (174) tablelist ::= ids cpxName */ + 254, /* (175) tablelist ::= ids cpxName ids */ + 254, /* (176) tablelist ::= tablelist COMMA ids cpxName */ + 254, /* (177) tablelist ::= tablelist COMMA ids cpxName ids */ + 255, /* (178) tmvar ::= VARIABLE */ + 240, /* (179) interval_opt ::= INTERVAL LP tmvar RP */ + 240, /* (180) interval_opt ::= INTERVAL LP tmvar COMMA tmvar RP */ + 240, /* (181) interval_opt ::= */ + 241, /* (182) session_option ::= */ + 241, /* (183) session_option ::= SESSION LP ids cpxName COMMA tmvar RP */ + 242, /* (184) fill_opt ::= */ + 242, /* (185) fill_opt ::= FILL LP ID COMMA tagitemlist RP */ + 242, /* (186) fill_opt ::= FILL LP ID RP */ + 243, /* (187) sliding_opt ::= SLIDING LP tmvar RP */ + 243, /* (188) sliding_opt ::= */ + 245, /* (189) orderby_opt ::= */ + 245, /* (190) orderby_opt ::= ORDER BY sortlist */ + 256, /* (191) sortlist ::= sortlist COMMA item sortorder */ + 256, /* (192) sortlist ::= item sortorder */ + 258, /* (193) item ::= ids cpxName */ + 259, /* (194) sortorder ::= ASC */ + 259, /* (195) sortorder ::= DESC */ + 259, /* (196) sortorder ::= */ + 244, /* (197) groupby_opt ::= */ + 244, /* (198) groupby_opt ::= GROUP BY grouplist */ + 260, /* (199) grouplist ::= grouplist COMMA item */ + 260, /* (200) grouplist ::= item */ + 246, /* (201) having_opt ::= */ + 246, /* (202) having_opt ::= HAVING expr */ + 248, /* (203) limit_opt ::= */ + 248, /* 
(204) limit_opt ::= LIMIT signed */ + 248, /* (205) limit_opt ::= LIMIT signed OFFSET signed */ + 248, /* (206) limit_opt ::= LIMIT signed COMMA signed */ + 247, /* (207) slimit_opt ::= */ + 247, /* (208) slimit_opt ::= SLIMIT signed */ + 247, /* (209) slimit_opt ::= SLIMIT signed SOFFSET signed */ + 247, /* (210) slimit_opt ::= SLIMIT signed COMMA signed */ + 239, /* (211) where_opt ::= */ + 239, /* (212) where_opt ::= WHERE expr */ + 252, /* (213) expr ::= LP expr RP */ + 252, /* (214) expr ::= ID */ + 252, /* (215) expr ::= ID DOT ID */ + 252, /* (216) expr ::= ID DOT STAR */ + 252, /* (217) expr ::= INTEGER */ + 252, /* (218) expr ::= MINUS INTEGER */ + 252, /* (219) expr ::= PLUS INTEGER */ + 252, /* (220) expr ::= FLOAT */ + 252, /* (221) expr ::= MINUS FLOAT */ + 252, /* (222) expr ::= PLUS FLOAT */ + 252, /* (223) expr ::= STRING */ + 252, /* (224) expr ::= NOW */ + 252, /* (225) expr ::= VARIABLE */ + 252, /* (226) expr ::= PLUS VARIABLE */ + 252, /* (227) expr ::= MINUS VARIABLE */ + 252, /* (228) expr ::= BOOL */ + 252, /* (229) expr ::= NULL */ + 252, /* (230) expr ::= ID LP exprlist RP */ + 252, /* (231) expr ::= ID LP STAR RP */ + 252, /* (232) expr ::= expr IS NULL */ + 252, /* (233) expr ::= expr IS NOT NULL */ + 252, /* (234) expr ::= expr LT expr */ + 252, /* (235) expr ::= expr GT expr */ + 252, /* (236) expr ::= expr LE expr */ + 252, /* (237) expr ::= expr GE expr */ + 252, /* (238) expr ::= expr NE expr */ + 252, /* (239) expr ::= expr EQ expr */ + 252, /* (240) expr ::= expr BETWEEN expr AND expr */ + 252, /* (241) expr ::= expr AND expr */ + 252, /* (242) expr ::= expr OR expr */ + 252, /* (243) expr ::= expr PLUS expr */ + 252, /* (244) expr ::= expr MINUS expr */ + 252, /* (245) expr ::= expr STAR expr */ + 252, /* (246) expr ::= expr SLASH expr */ + 252, /* (247) expr ::= expr REM expr */ + 252, /* (248) expr ::= expr LIKE expr */ + 252, /* (249) expr ::= expr IN LP exprlist RP */ + 261, /* (250) exprlist ::= exprlist COMMA expritem */ + 261, /* (251) exprlist ::= expritem */ + 262, /* (252) expritem ::= expr */ + 262, /* (253) expritem ::= */ + 189, /* (254) cmd ::= RESET QUERY CACHE */ + 189, /* (255) cmd ::= SYNCDB ids REPLICA */ + 189, /* (256) cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist */ + 189, /* (257) cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids */ + 189, /* (258) cmd ::= ALTER TABLE ids cpxName ALTER COLUMN LENGTH ids INTEGER */ + 189, /* (259) cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist */ + 189, /* (260) cmd ::= ALTER TABLE ids cpxName DROP TAG ids */ + 189, /* (261) cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids */ + 189, /* (262) cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem */ + 189, /* (263) cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist */ + 189, /* (264) cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids */ + 189, /* (265) cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist */ + 189, /* (266) cmd ::= ALTER STABLE ids cpxName DROP TAG ids */ + 189, /* (267) cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids */ + 189, /* (268) cmd ::= KILL CONNECTION INTEGER */ + 189, /* (269) cmd ::= KILL STREAM INTEGER COLON INTEGER */ + 189, /* (270) cmd ::= KILL QUERY INTEGER COLON INTEGER */ +}; + +/* For rule J, yyRuleInfoNRhs[J] contains the negative of the number +** of symbols on the right-hand side of that rule. 
*/ +static const signed char yyRuleInfoNRhs[] = { + -1, /* (0) program ::= cmd */ + -2, /* (1) cmd ::= SHOW DATABASES */ + -2, /* (2) cmd ::= SHOW TOPICS */ + -2, /* (3) cmd ::= SHOW MNODES */ + -2, /* (4) cmd ::= SHOW DNODES */ + -2, /* (5) cmd ::= SHOW ACCOUNTS */ + -2, /* (6) cmd ::= SHOW USERS */ + -2, /* (7) cmd ::= SHOW MODULES */ + -2, /* (8) cmd ::= SHOW QUERIES */ + -2, /* (9) cmd ::= SHOW CONNECTIONS */ + -2, /* (10) cmd ::= SHOW STREAMS */ + -2, /* (11) cmd ::= SHOW VARIABLES */ + -2, /* (12) cmd ::= SHOW SCORES */ + -2, /* (13) cmd ::= SHOW GRANTS */ + -2, /* (14) cmd ::= SHOW VNODES */ + -3, /* (15) cmd ::= SHOW VNODES IPTOKEN */ + 0, /* (16) dbPrefix ::= */ + -2, /* (17) dbPrefix ::= ids DOT */ + 0, /* (18) cpxName ::= */ + -2, /* (19) cpxName ::= DOT ids */ + -5, /* (20) cmd ::= SHOW CREATE TABLE ids cpxName */ + -5, /* (21) cmd ::= SHOW CREATE STABLE ids cpxName */ + -4, /* (22) cmd ::= SHOW CREATE DATABASE ids */ + -3, /* (23) cmd ::= SHOW dbPrefix TABLES */ + -5, /* (24) cmd ::= SHOW dbPrefix TABLES LIKE ids */ + -3, /* (25) cmd ::= SHOW dbPrefix STABLES */ + -5, /* (26) cmd ::= SHOW dbPrefix STABLES LIKE ids */ + -3, /* (27) cmd ::= SHOW dbPrefix VGROUPS */ + -4, /* (28) cmd ::= SHOW dbPrefix VGROUPS ids */ + -5, /* (29) cmd ::= DROP TABLE ifexists ids cpxName */ + -5, /* (30) cmd ::= DROP STABLE ifexists ids cpxName */ + -4, /* (31) cmd ::= DROP DATABASE ifexists ids */ + -4, /* (32) cmd ::= DROP TOPIC ifexists ids */ + -3, /* (33) cmd ::= DROP DNODE ids */ + -3, /* (34) cmd ::= DROP USER ids */ + -3, /* (35) cmd ::= DROP ACCOUNT ids */ + -2, /* (36) cmd ::= USE ids */ + -3, /* (37) cmd ::= DESCRIBE ids cpxName */ + -5, /* (38) cmd ::= ALTER USER ids PASS ids */ + -5, /* (39) cmd ::= ALTER USER ids PRIVILEGE ids */ + -4, /* (40) cmd ::= ALTER DNODE ids ids */ + -5, /* (41) cmd ::= ALTER DNODE ids ids ids */ + -3, /* (42) cmd ::= ALTER LOCAL ids */ + -4, /* (43) cmd ::= ALTER LOCAL ids ids */ + -4, /* (44) cmd ::= ALTER DATABASE ids alter_db_optr */ + -4, /* (45) cmd ::= ALTER TOPIC ids alter_topic_optr */ + -4, /* (46) cmd ::= ALTER ACCOUNT ids acct_optr */ + -6, /* (47) cmd ::= ALTER ACCOUNT ids PASS ids acct_optr */ + -1, /* (48) ids ::= ID */ + -1, /* (49) ids ::= STRING */ + -2, /* (50) ifexists ::= IF EXISTS */ + 0, /* (51) ifexists ::= */ + -3, /* (52) ifnotexists ::= IF NOT EXISTS */ + 0, /* (53) ifnotexists ::= */ + -3, /* (54) cmd ::= CREATE DNODE ids */ + -6, /* (55) cmd ::= CREATE ACCOUNT ids PASS ids acct_optr */ + -5, /* (56) cmd ::= CREATE DATABASE ifnotexists ids db_optr */ + -5, /* (57) cmd ::= CREATE TOPIC ifnotexists ids topic_optr */ + -5, /* (58) cmd ::= CREATE USER ids PASS ids */ + 0, /* (59) pps ::= */ + -2, /* (60) pps ::= PPS INTEGER */ + 0, /* (61) tseries ::= */ + -2, /* (62) tseries ::= TSERIES INTEGER */ + 0, /* (63) dbs ::= */ + -2, /* (64) dbs ::= DBS INTEGER */ + 0, /* (65) streams ::= */ + -2, /* (66) streams ::= STREAMS INTEGER */ + 0, /* (67) storage ::= */ + -2, /* (68) storage ::= STORAGE INTEGER */ + 0, /* (69) qtime ::= */ + -2, /* (70) qtime ::= QTIME INTEGER */ + 0, /* (71) users ::= */ + -2, /* (72) users ::= USERS INTEGER */ + 0, /* (73) conns ::= */ + -2, /* (74) conns ::= CONNS INTEGER */ + 0, /* (75) state ::= */ + -2, /* (76) state ::= STATE ids */ + -9, /* (77) acct_optr ::= pps tseries storage streams qtime dbs users conns state */ + -2, /* (78) keep ::= KEEP tagitemlist */ + -2, /* (79) cache ::= CACHE INTEGER */ + -2, /* (80) replica ::= REPLICA INTEGER */ + -2, /* (81) quorum ::= QUORUM INTEGER */ + -2, /* (82) days 
::= DAYS INTEGER */ + -2, /* (83) minrows ::= MINROWS INTEGER */ + -2, /* (84) maxrows ::= MAXROWS INTEGER */ + -2, /* (85) blocks ::= BLOCKS INTEGER */ + -2, /* (86) ctime ::= CTIME INTEGER */ + -2, /* (87) wal ::= WAL INTEGER */ + -2, /* (88) fsync ::= FSYNC INTEGER */ + -2, /* (89) comp ::= COMP INTEGER */ + -2, /* (90) prec ::= PRECISION STRING */ + -2, /* (91) update ::= UPDATE INTEGER */ + -2, /* (92) cachelast ::= CACHELAST INTEGER */ + -2, /* (93) partitions ::= PARTITIONS INTEGER */ + 0, /* (94) db_optr ::= */ + -2, /* (95) db_optr ::= db_optr cache */ + -2, /* (96) db_optr ::= db_optr replica */ + -2, /* (97) db_optr ::= db_optr quorum */ + -2, /* (98) db_optr ::= db_optr days */ + -2, /* (99) db_optr ::= db_optr minrows */ + -2, /* (100) db_optr ::= db_optr maxrows */ + -2, /* (101) db_optr ::= db_optr blocks */ + -2, /* (102) db_optr ::= db_optr ctime */ + -2, /* (103) db_optr ::= db_optr wal */ + -2, /* (104) db_optr ::= db_optr fsync */ + -2, /* (105) db_optr ::= db_optr comp */ + -2, /* (106) db_optr ::= db_optr prec */ + -2, /* (107) db_optr ::= db_optr keep */ + -2, /* (108) db_optr ::= db_optr update */ + -2, /* (109) db_optr ::= db_optr cachelast */ + -1, /* (110) topic_optr ::= db_optr */ + -2, /* (111) topic_optr ::= topic_optr partitions */ + 0, /* (112) alter_db_optr ::= */ + -2, /* (113) alter_db_optr ::= alter_db_optr replica */ + -2, /* (114) alter_db_optr ::= alter_db_optr quorum */ + -2, /* (115) alter_db_optr ::= alter_db_optr keep */ + -2, /* (116) alter_db_optr ::= alter_db_optr blocks */ + -2, /* (117) alter_db_optr ::= alter_db_optr comp */ + -2, /* (118) alter_db_optr ::= alter_db_optr wal */ + -2, /* (119) alter_db_optr ::= alter_db_optr fsync */ + -2, /* (120) alter_db_optr ::= alter_db_optr update */ + -2, /* (121) alter_db_optr ::= alter_db_optr cachelast */ + -1, /* (122) alter_topic_optr ::= alter_db_optr */ + -2, /* (123) alter_topic_optr ::= alter_topic_optr partitions */ + -1, /* (124) typename ::= ids */ + -4, /* (125) typename ::= ids LP signed RP */ + -2, /* (126) typename ::= ids UNSIGNED */ + -1, /* (127) signed ::= INTEGER */ + -2, /* (128) signed ::= PLUS INTEGER */ + -2, /* (129) signed ::= MINUS INTEGER */ + -3, /* (130) cmd ::= CREATE TABLE create_table_args */ + -3, /* (131) cmd ::= CREATE TABLE create_stable_args */ + -3, /* (132) cmd ::= CREATE STABLE create_stable_args */ + -3, /* (133) cmd ::= CREATE TABLE create_table_list */ + -1, /* (134) create_table_list ::= create_from_stable */ + -2, /* (135) create_table_list ::= create_table_list create_from_stable */ + -6, /* (136) create_table_args ::= ifnotexists ids cpxName LP columnlist RP */ + -10, /* (137) create_stable_args ::= ifnotexists ids cpxName LP columnlist RP TAGS LP columnlist RP */ + -10, /* (138) create_from_stable ::= ifnotexists ids cpxName USING ids cpxName TAGS LP tagitemlist RP */ + -13, /* (139) create_from_stable ::= ifnotexists ids cpxName USING ids cpxName LP tagNamelist RP TAGS LP tagitemlist RP */ + -3, /* (140) tagNamelist ::= tagNamelist COMMA ids */ + -1, /* (141) tagNamelist ::= ids */ + -5, /* (142) create_table_args ::= ifnotexists ids cpxName AS select */ + -3, /* (143) columnlist ::= columnlist COMMA column */ + -1, /* (144) columnlist ::= column */ + -2, /* (145) column ::= ids typename */ + -3, /* (146) tagitemlist ::= tagitemlist COMMA tagitem */ + -1, /* (147) tagitemlist ::= tagitem */ + -1, /* (148) tagitem ::= INTEGER */ + -1, /* (149) tagitem ::= FLOAT */ + -1, /* (150) tagitem ::= STRING */ + -1, /* (151) tagitem ::= BOOL */ + -1, /* (152) 
tagitem ::= NULL */ + -2, /* (153) tagitem ::= MINUS INTEGER */ + -2, /* (154) tagitem ::= MINUS FLOAT */ + -2, /* (155) tagitem ::= PLUS INTEGER */ + -2, /* (156) tagitem ::= PLUS FLOAT */ + -13, /* (157) select ::= SELECT selcollist from where_opt interval_opt session_option fill_opt sliding_opt groupby_opt orderby_opt having_opt slimit_opt limit_opt */ + -3, /* (158) select ::= LP select RP */ + -1, /* (159) union ::= select */ + -4, /* (160) union ::= union UNION ALL select */ + -1, /* (161) cmd ::= union */ + -2, /* (162) select ::= SELECT selcollist */ + -2, /* (163) sclp ::= selcollist COMMA */ + 0, /* (164) sclp ::= */ + -4, /* (165) selcollist ::= sclp distinct expr as */ + -2, /* (166) selcollist ::= sclp STAR */ + -2, /* (167) as ::= AS ids */ + -1, /* (168) as ::= ids */ + 0, /* (169) as ::= */ + -1, /* (170) distinct ::= DISTINCT */ + 0, /* (171) distinct ::= */ + -2, /* (172) from ::= FROM tablelist */ + -4, /* (173) from ::= FROM LP union RP */ + -2, /* (174) tablelist ::= ids cpxName */ + -3, /* (175) tablelist ::= ids cpxName ids */ + -4, /* (176) tablelist ::= tablelist COMMA ids cpxName */ + -5, /* (177) tablelist ::= tablelist COMMA ids cpxName ids */ + -1, /* (178) tmvar ::= VARIABLE */ + -4, /* (179) interval_opt ::= INTERVAL LP tmvar RP */ + -6, /* (180) interval_opt ::= INTERVAL LP tmvar COMMA tmvar RP */ + 0, /* (181) interval_opt ::= */ + 0, /* (182) session_option ::= */ + -7, /* (183) session_option ::= SESSION LP ids cpxName COMMA tmvar RP */ + 0, /* (184) fill_opt ::= */ + -6, /* (185) fill_opt ::= FILL LP ID COMMA tagitemlist RP */ + -4, /* (186) fill_opt ::= FILL LP ID RP */ + -4, /* (187) sliding_opt ::= SLIDING LP tmvar RP */ + 0, /* (188) sliding_opt ::= */ + 0, /* (189) orderby_opt ::= */ + -3, /* (190) orderby_opt ::= ORDER BY sortlist */ + -4, /* (191) sortlist ::= sortlist COMMA item sortorder */ + -2, /* (192) sortlist ::= item sortorder */ + -2, /* (193) item ::= ids cpxName */ + -1, /* (194) sortorder ::= ASC */ + -1, /* (195) sortorder ::= DESC */ + 0, /* (196) sortorder ::= */ + 0, /* (197) groupby_opt ::= */ + -3, /* (198) groupby_opt ::= GROUP BY grouplist */ + -3, /* (199) grouplist ::= grouplist COMMA item */ + -1, /* (200) grouplist ::= item */ + 0, /* (201) having_opt ::= */ + -2, /* (202) having_opt ::= HAVING expr */ + 0, /* (203) limit_opt ::= */ + -2, /* (204) limit_opt ::= LIMIT signed */ + -4, /* (205) limit_opt ::= LIMIT signed OFFSET signed */ + -4, /* (206) limit_opt ::= LIMIT signed COMMA signed */ + 0, /* (207) slimit_opt ::= */ + -2, /* (208) slimit_opt ::= SLIMIT signed */ + -4, /* (209) slimit_opt ::= SLIMIT signed SOFFSET signed */ + -4, /* (210) slimit_opt ::= SLIMIT signed COMMA signed */ + 0, /* (211) where_opt ::= */ + -2, /* (212) where_opt ::= WHERE expr */ + -3, /* (213) expr ::= LP expr RP */ + -1, /* (214) expr ::= ID */ + -3, /* (215) expr ::= ID DOT ID */ + -3, /* (216) expr ::= ID DOT STAR */ + -1, /* (217) expr ::= INTEGER */ + -2, /* (218) expr ::= MINUS INTEGER */ + -2, /* (219) expr ::= PLUS INTEGER */ + -1, /* (220) expr ::= FLOAT */ + -2, /* (221) expr ::= MINUS FLOAT */ + -2, /* (222) expr ::= PLUS FLOAT */ + -1, /* (223) expr ::= STRING */ + -1, /* (224) expr ::= NOW */ + -1, /* (225) expr ::= VARIABLE */ + -2, /* (226) expr ::= PLUS VARIABLE */ + -2, /* (227) expr ::= MINUS VARIABLE */ + -1, /* (228) expr ::= BOOL */ + -1, /* (229) expr ::= NULL */ + -4, /* (230) expr ::= ID LP exprlist RP */ + -4, /* (231) expr ::= ID LP STAR RP */ + -3, /* (232) expr ::= expr IS NULL */ + -4, /* (233) expr ::= expr IS 
NOT NULL */ + -3, /* (234) expr ::= expr LT expr */ + -3, /* (235) expr ::= expr GT expr */ + -3, /* (236) expr ::= expr LE expr */ + -3, /* (237) expr ::= expr GE expr */ + -3, /* (238) expr ::= expr NE expr */ + -3, /* (239) expr ::= expr EQ expr */ + -5, /* (240) expr ::= expr BETWEEN expr AND expr */ + -3, /* (241) expr ::= expr AND expr */ + -3, /* (242) expr ::= expr OR expr */ + -3, /* (243) expr ::= expr PLUS expr */ + -3, /* (244) expr ::= expr MINUS expr */ + -3, /* (245) expr ::= expr STAR expr */ + -3, /* (246) expr ::= expr SLASH expr */ + -3, /* (247) expr ::= expr REM expr */ + -3, /* (248) expr ::= expr LIKE expr */ + -5, /* (249) expr ::= expr IN LP exprlist RP */ + -3, /* (250) exprlist ::= exprlist COMMA expritem */ + -1, /* (251) exprlist ::= expritem */ + -1, /* (252) expritem ::= expr */ + 0, /* (253) expritem ::= */ + -3, /* (254) cmd ::= RESET QUERY CACHE */ + -3, /* (255) cmd ::= SYNCDB ids REPLICA */ + -7, /* (256) cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist */ + -7, /* (257) cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids */ + -9, /* (258) cmd ::= ALTER TABLE ids cpxName ALTER COLUMN LENGTH ids INTEGER */ + -7, /* (259) cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist */ + -7, /* (260) cmd ::= ALTER TABLE ids cpxName DROP TAG ids */ + -8, /* (261) cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids */ + -9, /* (262) cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem */ + -7, /* (263) cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist */ + -7, /* (264) cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids */ + -7, /* (265) cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist */ + -7, /* (266) cmd ::= ALTER STABLE ids cpxName DROP TAG ids */ + -8, /* (267) cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids */ + -3, /* (268) cmd ::= KILL CONNECTION INTEGER */ + -5, /* (269) cmd ::= KILL STREAM INTEGER COLON INTEGER */ + -5, /* (270) cmd ::= KILL QUERY INTEGER COLON INTEGER */ }; static void yy_accept(yyParser*); /* Forward Declaration */ @@ -2039,30 +2333,34 @@ static void yy_accept(yyParser*); /* Forward Declaration */ ** only called from one place, optimizing compilers will in-line it, which ** means that the extra parameters have no performance impact. 
*/ -static void yy_reduce( +static YYACTIONTYPE yy_reduce( yyParser *yypParser, /* The parser */ unsigned int yyruleno, /* Number of the rule by which to reduce */ int yyLookahead, /* Lookahead token, or YYNOCODE if none */ ParseTOKENTYPE yyLookaheadToken /* Value of the lookahead token */ + ParseCTX_PDECL /* %extra_context */ ){ int yygoto; /* The next state */ - int yyact; /* The next action */ + YYACTIONTYPE yyact; /* The next action */ yyStackEntry *yymsp; /* The top of the parser's stack */ int yysize; /* Amount to pop the stack */ - ParseARG_FETCH; + ParseARG_FETCH (void)yyLookahead; (void)yyLookaheadToken; yymsp = yypParser->yytos; #ifndef NDEBUG if( yyTraceFILE && yyruleno<(int)(sizeof(yyRuleName)/sizeof(yyRuleName[0])) ){ - yysize = yyRuleInfo[yyruleno].nrhs; + yysize = yyRuleInfoNRhs[yyruleno]; if( yysize ){ - fprintf(yyTraceFILE, "%sReduce %d [%s], go to state %d.\n", + fprintf(yyTraceFILE, "%sReduce %d [%s]%s, pop back to state %d.\n", yyTracePrompt, - yyruleno, yyRuleName[yyruleno], yymsp[yysize].stateno); + yyruleno, yyRuleName[yyruleno], + yyrulenoyytos - yypParser->yystack)>yypParser->yyhwm ){ yypParser->yyhwm++; @@ -2080,13 +2378,19 @@ static void yy_reduce( #if YYSTACKDEPTH>0 if( yypParser->yytos>=yypParser->yystackEnd ){ yyStackOverflow(yypParser); - return; + /* The call to yyStackOverflow() above pops the stack until it is + ** empty, causing the main parser loop to exit. So the return value + ** is never used and does not matter. */ + return 0; } #else if( yypParser->yytos>=&yypParser->yystack[yypParser->yystksz-1] ){ if( yyGrowStack(yypParser) ){ yyStackOverflow(yypParser); - return; + /* The call to yyStackOverflow() above pops the stack until it is + ** empty, causing the main parser loop to exit. So the return value + ** is never used and does not matter. 
*/ + return 0; } yymsp = yypParser->yytos; } @@ -3010,14 +3314,27 @@ static void yy_reduce( setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 258: /* cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist */ + case 258: /* cmd ::= ALTER TABLE ids cpxName ALTER COLUMN LENGTH ids INTEGER */ +{ + yymsp[-6].minor.yy0.n += yymsp[-5].minor.yy0.n; + + toTSDBType(yymsp[-1].minor.yy0.type); + SArray* K = tVariantListAppendToken(NULL, &yymsp[-1].minor.yy0, -1); + toTSDBType(yymsp[0].minor.yy0.type); + K = tVariantListAppendToken(K, &yymsp[0].minor.yy0, -1); + + SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-6].minor.yy0, K, NULL, TSDB_ALTER_TABLE_CHANGE_COLUMN, -1); + setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); +} + break; + case 259: /* cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy159, NULL, TSDB_ALTER_TABLE_ADD_TAG_COLUMN, -1); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 259: /* cmd ::= ALTER TABLE ids cpxName DROP TAG ids */ + case 260: /* cmd ::= ALTER TABLE ids cpxName DROP TAG ids */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; @@ -3028,7 +3345,7 @@ static void yy_reduce( setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 260: /* cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids */ + case 261: /* cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids */ { yymsp[-5].minor.yy0.n += yymsp[-4].minor.yy0.n; @@ -3042,7 +3359,7 @@ static void yy_reduce( setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 261: /* cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem */ + case 262: /* cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem */ { yymsp[-6].minor.yy0.n += yymsp[-5].minor.yy0.n; @@ -3054,14 +3371,14 @@ static void yy_reduce( setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 262: /* cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist */ + case 263: /* cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy159, NULL, TSDB_ALTER_TABLE_ADD_COLUMN, TSDB_SUPER_TABLE); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 263: /* cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids */ + case 264: /* cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; @@ -3072,14 +3389,14 @@ static void yy_reduce( setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 264: /* cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist */ + case 265: /* cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy159, NULL, TSDB_ALTER_TABLE_ADD_TAG_COLUMN, TSDB_SUPER_TABLE); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 265: /* cmd ::= ALTER STABLE ids cpxName DROP TAG ids */ + case 266: /* cmd ::= ALTER STABLE ids cpxName DROP TAG ids */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; @@ -3090,7 +3407,7 @@ static void yy_reduce( setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 266: /* cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids */ + case 267: /* cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids 
*/ { yymsp[-5].minor.yy0.n += yymsp[-4].minor.yy0.n; @@ -3104,22 +3421,22 @@ static void yy_reduce( setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 267: /* cmd ::= KILL CONNECTION INTEGER */ + case 268: /* cmd ::= KILL CONNECTION INTEGER */ {setKillSql(pInfo, TSDB_SQL_KILL_CONNECTION, &yymsp[0].minor.yy0);} break; - case 268: /* cmd ::= KILL STREAM INTEGER COLON INTEGER */ + case 269: /* cmd ::= KILL STREAM INTEGER COLON INTEGER */ {yymsp[-2].minor.yy0.n += (yymsp[-1].minor.yy0.n + yymsp[0].minor.yy0.n); setKillSql(pInfo, TSDB_SQL_KILL_STREAM, &yymsp[-2].minor.yy0);} break; - case 269: /* cmd ::= KILL QUERY INTEGER COLON INTEGER */ + case 270: /* cmd ::= KILL QUERY INTEGER COLON INTEGER */ {yymsp[-2].minor.yy0.n += (yymsp[-1].minor.yy0.n + yymsp[0].minor.yy0.n); setKillSql(pInfo, TSDB_SQL_KILL_QUERY, &yymsp[-2].minor.yy0);} break; default: break; /********** End reduce actions ************************************************/ }; - assert( yyrulenostateno = (YYACTIONTYPE)yyact; yymsp->major = (YYCODETYPE)yygoto; yyTraceShift(yypParser, yyact, "... then shift"); + return yyact; } /* @@ -3143,7 +3461,8 @@ static void yy_reduce( static void yy_parse_failed( yyParser *yypParser /* The parser */ ){ - ParseARG_FETCH; + ParseARG_FETCH + ParseCTX_FETCH #ifndef NDEBUG if( yyTraceFILE ){ fprintf(yyTraceFILE,"%sFail!\n",yyTracePrompt); @@ -3154,7 +3473,8 @@ static void yy_parse_failed( ** parser fails */ /************ Begin %parse_failure code ***************************************/ /************ End %parse_failure code *****************************************/ - ParseARG_STORE; /* Suppress warning about unused %extra_argument variable */ + ParseARG_STORE /* Suppress warning about unused %extra_argument variable */ + ParseCTX_STORE } #endif /* YYNOERRORRECOVERY */ @@ -3166,7 +3486,8 @@ static void yy_syntax_error( int yymajor, /* The major type of the error token */ ParseTOKENTYPE yyminor /* The minor type of the error token */ ){ - ParseARG_FETCH; + ParseARG_FETCH + ParseCTX_FETCH #define TOKEN yyminor /************ Begin %syntax_error code ****************************************/ @@ -3192,7 +3513,8 @@ static void yy_syntax_error( assert(len <= outputBufLen); /************ End %syntax_error code ******************************************/ - ParseARG_STORE; /* Suppress warning about unused %extra_argument variable */ + ParseARG_STORE /* Suppress warning about unused %extra_argument variable */ + ParseCTX_STORE } /* @@ -3201,7 +3523,8 @@ static void yy_syntax_error( static void yy_accept( yyParser *yypParser /* The parser */ ){ - ParseARG_FETCH; + ParseARG_FETCH + ParseCTX_FETCH #ifndef NDEBUG if( yyTraceFILE ){ fprintf(yyTraceFILE,"%sAccept!\n",yyTracePrompt); @@ -3216,7 +3539,8 @@ static void yy_accept( /*********** Begin %parse_accept code *****************************************/ /*********** End %parse_accept code *******************************************/ - ParseARG_STORE; /* Suppress warning about unused %extra_argument variable */ + ParseARG_STORE /* Suppress warning about unused %extra_argument variable */ + ParseCTX_STORE } /* The main parser program. @@ -3245,45 +3569,47 @@ void Parse( ParseARG_PDECL /* Optional %extra_argument parameter */ ){ YYMINORTYPE yyminorunion; - unsigned int yyact; /* The parser action. */ + YYACTIONTYPE yyact; /* The parser action. 
*/ #if !defined(YYERRORSYMBOL) && !defined(YYNOERRORRECOVERY) int yyendofinput; /* True if we are at the end of input */ #endif #ifdef YYERRORSYMBOL int yyerrorhit = 0; /* True if yymajor has invoked an error */ #endif - yyParser *yypParser; /* The parser */ + yyParser *yypParser = (yyParser*)yyp; /* The parser */ + ParseCTX_FETCH + ParseARG_STORE - yypParser = (yyParser*)yyp; assert( yypParser->yytos!=0 ); #if !defined(YYERRORSYMBOL) && !defined(YYNOERRORRECOVERY) yyendofinput = (yymajor==0); #endif - ParseARG_STORE; + yyact = yypParser->yytos->stateno; #ifndef NDEBUG if( yyTraceFILE ){ - int stateno = yypParser->yytos->stateno; - if( stateno < YY_MIN_REDUCE ){ + if( yyact < YY_MIN_REDUCE ){ fprintf(yyTraceFILE,"%sInput '%s' in state %d\n", - yyTracePrompt,yyTokenName[yymajor],stateno); + yyTracePrompt,yyTokenName[yymajor],yyact); }else{ fprintf(yyTraceFILE,"%sInput '%s' with pending reduce %d\n", - yyTracePrompt,yyTokenName[yymajor],stateno-YY_MIN_REDUCE); + yyTracePrompt,yyTokenName[yymajor],yyact-YY_MIN_REDUCE); } } #endif do{ - yyact = yy_find_shift_action(yypParser,(YYCODETYPE)yymajor); + assert( yyact==yypParser->yytos->stateno ); + yyact = yy_find_shift_action((YYCODETYPE)yymajor,yyact); if( yyact >= YY_MIN_REDUCE ){ - yy_reduce(yypParser,yyact-YY_MIN_REDUCE,yymajor,yyminor); + yyact = yy_reduce(yypParser,yyact-YY_MIN_REDUCE,yymajor, + yyminor ParseCTX_PARAM); }else if( yyact <= YY_MAX_SHIFTREDUCE ){ - yy_shift(yypParser,yyact,yymajor,yyminor); + yy_shift(yypParser,yyact,(YYCODETYPE)yymajor,yyminor); #ifndef YYNOERRORRECOVERY yypParser->yyerrcnt--; #endif - yymajor = YYNOCODE; + break; }else if( yyact==YY_ACCEPT_ACTION ){ yypParser->yytos--; yy_accept(yypParser); @@ -3334,10 +3660,9 @@ void Parse( yymajor = YYNOCODE; }else{ while( yypParser->yytos >= yypParser->yystack - && yymx != YYERRORSYMBOL && (yyact = yy_find_reduce_action( yypParser->yytos->stateno, - YYERRORSYMBOL)) >= YY_MIN_REDUCE + YYERRORSYMBOL)) > YY_MAX_SHIFTREDUCE ){ yy_pop_parser_stack(yypParser); } @@ -3354,6 +3679,8 @@ void Parse( } yypParser->yyerrcnt = 3; yyerrorhit = 1; + if( yymajor==YYNOCODE ) break; + yyact = yypParser->yytos->stateno; #elif defined(YYNOERRORRECOVERY) /* If the YYNOERRORRECOVERY macro is defined, then do not attempt to ** do any kind of error recovery. Instead, simply invoke the syntax @@ -3364,8 +3691,7 @@ void Parse( */ yy_syntax_error(yypParser,yymajor, yyminor); yy_destructor(yypParser,(YYCODETYPE)yymajor,&yyminorunion); - yymajor = YYNOCODE; - + break; #else /* YYERRORSYMBOL is not defined */ /* This is what we do if the grammar does not define ERROR: ** @@ -3387,10 +3713,10 @@ void Parse( yypParser->yyerrcnt = -1; #endif } - yymajor = YYNOCODE; + break; #endif } - }while( yymajor!=YYNOCODE && yypParser->yytos>yypParser->yystack ); + }while( yypParser->yytos>yypParser->yystack ); #ifndef NDEBUG if( yyTraceFILE ){ yyStackEntry *i; @@ -3405,3 +3731,17 @@ void Parse( #endif return; } + +/* +** Return the fallback token corresponding to canonical token iToken, or +** 0 if iToken has no fallback. 
+*/ +int ParseFallback(int iToken){ +#ifdef YYFALLBACK + assert( iToken<(int)(sizeof(yyFallback)/sizeof(yyFallback[0])) ); + return yyFallback[iToken]; +#else + (void)iToken; + return 0; +#endif +} diff --git a/src/util/src/ttokenizer.c b/src/util/src/ttokenizer.c index 93d4570ea8..3d917f2393 100644 --- a/src/util/src/ttokenizer.c +++ b/src/util/src/ttokenizer.c @@ -217,7 +217,8 @@ static SKeyword keywordTable[] = { {"DISTINCT", TK_DISTINCT}, {"PARTITIONS", TK_PARTITIONS}, {"TOPIC", TK_TOPIC}, - {"TOPICS", TK_TOPICS} + {"TOPICS", TK_TOPICS}, + {"LENGTH", TK_LENGTH} }; static const char isIdChar[] = { From 24cf44583e7f9802164ee415532c963d4e38b618 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Fri, 28 May 2021 19:41:47 +0800 Subject: [PATCH 30/82] [TD-4417]: return database not ready if vnode status & role not ready --- src/vnode/src/vnodeWrite.c | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/src/vnode/src/vnodeWrite.c b/src/vnode/src/vnodeWrite.c index 16089c8e91..555eda6d13 100644 --- a/src/vnode/src/vnodeWrite.c +++ b/src/vnode/src/vnodeWrite.c @@ -303,6 +303,17 @@ static int32_t vnodeWriteToWQueueImp(SVWriteMsg *pWrite) { } int32_t vnodeWriteToWQueue(void *vparam, void *wparam, int32_t qtype, void *rparam) { + SVnodeObj *pVnode = vparam; + if (qtype == TAOS_QTYPE_RPC) { + if (!vnodeInReadyStatus(pVnode)) { + return TSDB_CODE_APP_NOT_READY; // it may be in deleting or closing state + } + + if (pVnode->role != TAOS_SYNC_ROLE_MASTER) { + return TSDB_CODE_APP_NOT_READY; + } + } + SVWriteMsg *pWrite = vnodeBuildVWriteMsg(vparam, wparam, qtype, rparam); if (pWrite == NULL) { assert(terrno != 0); From 921b36c7b1a66c1eee8780a8a7bdd7b640828b85 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Sat, 29 May 2021 07:31:35 +0800 Subject: [PATCH 31/82] [TD-4406]: taosdemo auto create table. (#6281) * [TD-4406]: taosdemo auto create table. * fix child table exists or auto create logic. Co-authored-by: Shuduo Sang --- src/kit/taosdemo/taosdemo.c | 230 +++++++++++++++++++----------------- 1 file changed, 123 insertions(+), 107 deletions(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 70ddbacf44..406544b306 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -255,12 +255,13 @@ typedef struct SColumn_S { typedef struct SSuperTable_S { char sTblName[MAX_TB_NAME_SIZE+1]; + char dataSource[MAX_TB_NAME_SIZE+1]; // rand_gen or sample + char childTblPrefix[MAX_TB_NAME_SIZE]; + char insertMode[MAX_TB_NAME_SIZE]; // taosc, rest + uint16_t childTblExists; int64_t childTblCount; - bool childTblExists; // 0: no, 1: yes uint64_t batchCreateTableNum; // 0: no batch, > 0: batch table number in one sql uint8_t autoCreateTable; // 0: create sub table, 1: auto create sub table - char childTblPrefix[MAX_TB_NAME_SIZE]; - char dataSource[MAX_TB_NAME_SIZE+1]; // rand_gen or sample uint16_t iface; // 0: taosc, 1: rest, 2: stmt int64_t childTblLimit; uint64_t childTblOffset; @@ -830,7 +831,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { if ((argc == i+1) || (!isStringNumber(argv[i+1]))) { printHelp(); - errorPrint("%s", "\n\t-q need a number following!\nQuery mode -- 0: SYNC, 1: ASYNC. Default is SYNC.\n"); + errorPrint("%s", "\n\t-q need a number following!\nQuery mode -- 0: SYNC, not-0: ASYNC. 
Default is SYNC.\n"); exit(EXIT_FAILURE); } arguments->async_mode = atoi(argv[++i]); @@ -1471,7 +1472,8 @@ static int printfInsertMeta() { if (PRE_CREATE_SUBTBL == g_Dbs.db[i].superTbls[j].autoCreateTable) { printf(" autoCreateTable: \033[33m%s\033[0m\n", "no"); - } else if (AUTO_CREATE_SUBTBL == g_Dbs.db[i].superTbls[j].autoCreateTable) { + } else if (AUTO_CREATE_SUBTBL == + g_Dbs.db[i].superTbls[j].autoCreateTable) { printf(" autoCreateTable: \033[33m%s\033[0m\n", "yes"); } else { printf(" autoCreateTable: \033[33m%s\033[0m\n", "error"); @@ -3063,64 +3065,61 @@ static void createChildTables() { char tblColsBuf[MAX_SQL_SIZE]; int len; - for (int i = 0; i < g_Dbs.dbCount; i++) { - if (g_Dbs.use_metric) { - if (g_Dbs.db[i].superTblCount > 0) { - // with super table - for (uint64_t j = 0; j < g_Dbs.db[i].superTblCount; j++) { - if ((AUTO_CREATE_SUBTBL == g_Dbs.db[i].superTbls[j].autoCreateTable) - || (TBL_ALREADY_EXISTS == g_Dbs.db[i].superTbls[j].childTblExists)) { - continue; - } + for (int i = 0; i < g_Dbs.dbCount; i++) { + if (g_Dbs.use_metric) { + if (g_Dbs.db[i].superTblCount > 0) { + // with super table + for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) { + if ((AUTO_CREATE_SUBTBL == g_Dbs.db[i].superTbls[j].autoCreateTable) + || (TBL_ALREADY_EXISTS == g_Dbs.db[i].superTbls[j].childTblExists)) { + continue; + } + verbosePrint("%s() LN%d: %s\n", __func__, __LINE__, + g_Dbs.db[i].superTbls[j].colsOfCreateChildTable); + uint64_t startFrom = 0; + g_totalChildTables += g_Dbs.db[i].superTbls[j].childTblCount; - verbosePrint("%s() LN%d: %s\n", __func__, __LINE__, - g_Dbs.db[i].superTbls[j].colsOfCreateChildTable); - uint64_t tableFrom = 0; - g_totalChildTables += g_Dbs.db[i].superTbls[j].childTblCount; + verbosePrint("%s() LN%d: create %"PRId64" child tables from %"PRIu64"\n", + __func__, __LINE__, g_totalChildTables, startFrom); - verbosePrint("%s() LN%d: create %"PRId64" child tables from %"PRIu64"\n", - __func__, __LINE__, g_totalChildTables, tableFrom); - startMultiThreadCreateChildTable( - g_Dbs.db[i].superTbls[j].colsOfCreateChildTable, - g_Dbs.threadCountByCreateTbl, - tableFrom, - g_Dbs.db[i].superTbls[j].childTblCount, - g_Dbs.db[i].dbName, &(g_Dbs.db[i].superTbls[j])); + startMultiThreadCreateChildTable( + g_Dbs.db[i].superTbls[j].colsOfCreateChildTable, + g_Dbs.threadCountByCreateTbl, + startFrom, + g_Dbs.db[i].superTbls[j].childTblCount, + g_Dbs.db[i].dbName, &(g_Dbs.db[i].superTbls[j])); + } + } + } else { + // normal table + len = snprintf(tblColsBuf, MAX_SQL_SIZE, "(TS TIMESTAMP"); + for (int j = 0; j < g_args.num_of_CPR; j++) { + if ((strncasecmp(g_args.datatype[j], "BINARY", strlen("BINARY")) == 0) + || (strncasecmp(g_args.datatype[j], + "NCHAR", strlen("NCHAR")) == 0)) { + snprintf(tblColsBuf + len, MAX_SQL_SIZE - len, + ", COL%d %s(%d)", j, g_args.datatype[j], g_args.len_of_binary); + } else { + snprintf(tblColsBuf + len, MAX_SQL_SIZE - len, + ", COL%d %s", j, g_args.datatype[j]); + } + len = strlen(tblColsBuf); + } + + snprintf(tblColsBuf + len, MAX_SQL_SIZE - len, ")"); + + verbosePrint("%s() LN%d: dbName: %s num of tb: %"PRId64" schema: %s\n", + __func__, __LINE__, + g_Dbs.db[i].dbName, g_args.num_of_tables, tblColsBuf); + startMultiThreadCreateChildTable( + tblColsBuf, + g_Dbs.threadCountByCreateTbl, + 0, + g_args.num_of_tables, + g_Dbs.db[i].dbName, + NULL); } - } - } else { - // normal table - len = snprintf(tblColsBuf, MAX_SQL_SIZE, "(TS TIMESTAMP"); - for (int j = 0; j < g_args.num_of_CPR; j++) { - if (g_args.datatype[j] - && ((strncasecmp(g_args.datatype[j], - 
"BINARY", strlen("BINARY")) == 0) - || (strncasecmp(g_args.datatype[j], - "NCHAR", strlen("NCHAR")) == 0))) { - snprintf(tblColsBuf + len, MAX_SQL_SIZE - len, - ", COL%d %s(%d)", j, g_args.datatype[j], - g_args.len_of_binary); - } else { - snprintf(tblColsBuf + len, MAX_SQL_SIZE - len, - ", COL%d %s", j, g_args.datatype[j]); - } - len = strlen(tblColsBuf); - } - - snprintf(tblColsBuf + len, MAX_SQL_SIZE - len, ")"); - - verbosePrint("%s() LN%d: dbName: %s num of tb: %"PRId64" schema: %s\n", - __func__, __LINE__, - g_Dbs.db[i].dbName, g_args.num_of_tables, tblColsBuf); - startMultiThreadCreateChildTable( - tblColsBuf, - g_Dbs.threadCountByCreateTbl, - 0, - g_args.num_of_tables, - g_Dbs.db[i].dbName, - NULL); } - } } /* @@ -3814,36 +3813,40 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { // dbinfo cJSON *stbName = cJSON_GetObjectItem(stbInfo, "name"); - if (!stbName || stbName->type != cJSON_String || stbName->valuestring == NULL) { + if (!stbName || stbName->type != cJSON_String + || stbName->valuestring == NULL) { errorPrint("%s() LN%d, failed to read json, stb name not found\n", __func__, __LINE__); goto PARSE_OVER; } - tstrncpy(g_Dbs.db[i].superTbls[j].sTblName, stbName->valuestring, MAX_TB_NAME_SIZE); + tstrncpy(g_Dbs.db[i].superTbls[j].sTblName, stbName->valuestring, + MAX_TB_NAME_SIZE); cJSON *prefix = cJSON_GetObjectItem(stbInfo, "childtable_prefix"); if (!prefix || prefix->type != cJSON_String || prefix->valuestring == NULL) { printf("ERROR: failed to read json, childtable_prefix not found\n"); goto PARSE_OVER; } - tstrncpy(g_Dbs.db[i].superTbls[j].childTblPrefix, prefix->valuestring, MAX_DB_NAME_SIZE); + tstrncpy(g_Dbs.db[i].superTbls[j].childTblPrefix, prefix->valuestring, + MAX_DB_NAME_SIZE); - cJSON *autoCreateTbl = cJSON_GetObjectItem(stbInfo, "auto_create_table"); // yes, no, null + cJSON *autoCreateTbl = cJSON_GetObjectItem(stbInfo, "auto_create_table"); if (autoCreateTbl && autoCreateTbl->type == cJSON_String && autoCreateTbl->valuestring != NULL) { - if (0 == strncasecmp(autoCreateTbl->valuestring, "yes", 3)) { - g_Dbs.db[i].superTbls[j].autoCreateTable = AUTO_CREATE_SUBTBL; - } else if (0 == strncasecmp(autoCreateTbl->valuestring, "no", 2)) { - g_Dbs.db[i].superTbls[j].autoCreateTable = PRE_CREATE_SUBTBL; - } else { - g_Dbs.db[i].superTbls[j].autoCreateTable = PRE_CREATE_SUBTBL; - } + if ((0 == strncasecmp(autoCreateTbl->valuestring, "yes", 3)) + && (TBL_ALREADY_EXISTS != g_Dbs.db[i].superTbls[j].childTblExists)) { + g_Dbs.db[i].superTbls[j].autoCreateTable = AUTO_CREATE_SUBTBL; + } else if (0 == strncasecmp(autoCreateTbl->valuestring, "no", 2)) { + g_Dbs.db[i].superTbls[j].autoCreateTable = PRE_CREATE_SUBTBL; + } else { + g_Dbs.db[i].superTbls[j].autoCreateTable = PRE_CREATE_SUBTBL; + } } else if (!autoCreateTbl) { - g_Dbs.db[i].superTbls[j].autoCreateTable = PRE_CREATE_SUBTBL; + g_Dbs.db[i].superTbls[j].autoCreateTable = PRE_CREATE_SUBTBL; } else { - printf("ERROR: failed to read json, auto_create_table not found\n"); - goto PARSE_OVER; + printf("ERROR: failed to read json, auto_create_table not found\n"); + goto PARSE_OVER; } cJSON* batchCreateTbl = cJSON_GetObjectItem(stbInfo, "batch_create_tbl_num"); @@ -3877,6 +3880,10 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { goto PARSE_OVER; } + if (TBL_ALREADY_EXISTS == g_Dbs.db[i].superTbls[j].childTblExists) { + g_Dbs.db[i].superTbls[j].autoCreateTable = PRE_CREATE_SUBTBL; + } + cJSON* count = cJSON_GetObjectItem(stbInfo, "childtable_count"); if (!count || count->type != cJSON_Number || 0 >= 
count->valueint) { errorPrint("%s() LN%d, failed to read json, childtable_count input mistake\n", @@ -3934,7 +3941,8 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { cJSON* childTbl_offset = cJSON_GetObjectItem(stbInfo, "childtable_offset"); if ((childTbl_offset) && (g_Dbs.db[i].drop != true) && (g_Dbs.db[i].superTbls[j].childTblExists == TBL_ALREADY_EXISTS)) { - if (childTbl_offset->type != cJSON_Number || 0 > childTbl_offset->valueint) { + if ((childTbl_offset->type != cJSON_Number) + || (0 > childTbl_offset->valueint)) { printf("ERROR: failed to read json, childtable_offset\n"); goto PARSE_OVER; } @@ -3990,7 +3998,8 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { } cJSON *tagsFile = cJSON_GetObjectItem(stbInfo, "tags_file"); - if (tagsFile && tagsFile->type == cJSON_String && tagsFile->valuestring != NULL) { + if ((tagsFile && tagsFile->type == cJSON_String) + && (tagsFile->valuestring != NULL)) { tstrncpy(g_Dbs.db[i].superTbls[j].tagsFile, tagsFile->valuestring, MAX_FILE_NAME_LEN); if (0 == g_Dbs.db[i].superTbls[j].tagsFile[0]) { @@ -4946,26 +4955,29 @@ static int32_t execInsert(threadInfo *pThreadInfo, uint32_t k) static void getTableName(char *pTblName, threadInfo* pThreadInfo, uint64_t tableSeq) { - SSuperTable* superTblInfo = pThreadInfo->superTblInfo; - if ((superTblInfo) - && (AUTO_CREATE_SUBTBL != superTblInfo->autoCreateTable)) { - if (superTblInfo->childTblLimit > 0) { - snprintf(pTblName, TSDB_TABLE_NAME_LEN, "%s", - superTblInfo->childTblName + - (tableSeq - superTblInfo->childTblOffset) * TSDB_TABLE_NAME_LEN); + SSuperTable* superTblInfo = pThreadInfo->superTblInfo; + if (superTblInfo) { + if (AUTO_CREATE_SUBTBL != superTblInfo->autoCreateTable) { + if (superTblInfo->childTblLimit > 0) { + snprintf(pTblName, TSDB_TABLE_NAME_LEN, "%s", + superTblInfo->childTblName + + (tableSeq - superTblInfo->childTblOffset) * TSDB_TABLE_NAME_LEN); + } else { + verbosePrint("[%d] %s() LN%d: from=%"PRIu64" count=%"PRId64" seq=%"PRIu64"\n", + pThreadInfo->threadID, __func__, __LINE__, + pThreadInfo->start_table_from, + pThreadInfo->ntables, tableSeq); + snprintf(pTblName, TSDB_TABLE_NAME_LEN, "%s", + superTblInfo->childTblName + tableSeq * TSDB_TABLE_NAME_LEN); + } + } else { + snprintf(pTblName, TSDB_TABLE_NAME_LEN, "%s%"PRIu64"", + superTblInfo->childTblPrefix, tableSeq); + } } else { - - verbosePrint("[%d] %s() LN%d: from=%"PRIu64" count=%"PRId64" seq=%"PRIu64"\n", - pThreadInfo->threadID, __func__, __LINE__, - pThreadInfo->start_table_from, - pThreadInfo->ntables, tableSeq); - snprintf(pTblName, TSDB_TABLE_NAME_LEN, "%s", - superTblInfo->childTblName + tableSeq * TSDB_TABLE_NAME_LEN); + snprintf(pTblName, TSDB_TABLE_NAME_LEN, "%s%"PRIu64"", + g_args.tb_prefix, tableSeq); } - } else { - snprintf(pTblName, TSDB_TABLE_NAME_LEN, "%s%"PRIu64"", - g_args.tb_prefix, tableSeq); - } } static int32_t generateDataTailWithoutStb( @@ -5132,7 +5144,8 @@ static int generateStbSQLHead( char headBuf[HEAD_BUFF_LEN]; - if (AUTO_CREATE_SUBTBL == superTblInfo->autoCreateTable) { + if ((AUTO_CREATE_SUBTBL == superTblInfo->autoCreateTable) + && (TBL_ALREADY_EXISTS != superTblInfo->childTblExists)) { char* tagsValBuf = NULL; if (0 == superTblInfo->tagSource) { tagsValBuf = generateTagVaulesForStb(superTblInfo, tableSeq); @@ -6753,7 +6766,6 @@ static int insertTestProcess() { } } - // taosMsleep(1000); // create sub threads for inserting data //start = taosGetTimestampMs(); for (int i = 0; i < g_Dbs.dbCount; i++) { @@ -7397,11 +7409,14 @@ static void *specifiedSubscribe(void *sarg) { 
g_queryInfo.specifiedQueryInfo.res[pThreadInfo->threadID] = taos_consume( g_queryInfo.specifiedQueryInfo.tsub[pThreadInfo->threadID]); if (g_queryInfo.specifiedQueryInfo.res[pThreadInfo->threadID]) { - if (g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq][0] != 0) { + if (g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq][0] + != 0) { sprintf(pThreadInfo->filePath, "%s-%d", g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq], pThreadInfo->threadID); - fetchResult(g_queryInfo.specifiedQueryInfo.res[pThreadInfo->threadID], pThreadInfo); + fetchResult( + g_queryInfo.specifiedQueryInfo.res[pThreadInfo->threadID], + pThreadInfo); } g_queryInfo.specifiedQueryInfo.consumed[pThreadInfo->threadID] ++; @@ -7414,16 +7429,17 @@ static void *specifiedSubscribe(void *sarg) { g_queryInfo.specifiedQueryInfo.consumed[pThreadInfo->threadID] = 0; taos_unsubscribe(g_queryInfo.specifiedQueryInfo.tsub[pThreadInfo->threadID], g_queryInfo.specifiedQueryInfo.subscribeKeepProgress); - g_queryInfo.specifiedQueryInfo.tsub[pThreadInfo->threadID] = subscribeImpl( - SPECIFIED_CLASS, - pThreadInfo, - g_queryInfo.specifiedQueryInfo.sql[pThreadInfo->querySeq], - g_queryInfo.specifiedQueryInfo.topic[pThreadInfo->threadID], - g_queryInfo.specifiedQueryInfo.subscribeRestart, - g_queryInfo.specifiedQueryInfo.subscribeInterval); + g_queryInfo.specifiedQueryInfo.tsub[pThreadInfo->threadID] = + subscribeImpl( + SPECIFIED_CLASS, + pThreadInfo, + g_queryInfo.specifiedQueryInfo.sql[pThreadInfo->querySeq], + g_queryInfo.specifiedQueryInfo.topic[pThreadInfo->threadID], + g_queryInfo.specifiedQueryInfo.subscribeRestart, + g_queryInfo.specifiedQueryInfo.subscribeInterval); if (NULL == g_queryInfo.specifiedQueryInfo.tsub[pThreadInfo->threadID]) { - taos_close(pThreadInfo->taos); - return NULL; + taos_close(pThreadInfo->taos); + return NULL; } } } @@ -7636,7 +7652,7 @@ static void setParaFromArg(){ g_Dbs.threadCountByCreateTbl = g_args.num_of_threads; g_Dbs.dbCount = 1; - g_Dbs.db[0].drop = 1; + g_Dbs.db[0].drop = true; tstrncpy(g_Dbs.db[0].dbName, g_args.database, MAX_DB_NAME_SIZE); g_Dbs.db[0].dbCfg.replica = g_args.replica; From 5694f03f1afd2abc60409b656c33df8444c3355e Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Sat, 29 May 2021 08:21:57 +0800 Subject: [PATCH 32/82] Hotfix/sangshuduo/td 3913 mips compile support (#6279) * [TD-3913]: mips compile support. * [TD-3912]: support mips64 compile. add mips64 header file and modify tcrc32c.c for mips64. * [TD-3913]: mips compile support. verified on real loongson machine. * fix cmake. * fix gcc 4.8 compile error. * fix clang compile error. 
Co-authored-by: Shuduo Sang --- src/os/src/detail/osTime.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/os/src/detail/osTime.c b/src/os/src/detail/osTime.c index d9d070218e..67e0c2642e 100644 --- a/src/os/src/detail/osTime.c +++ b/src/os/src/detail/osTime.c @@ -87,12 +87,12 @@ static int32_t (*parseLocaltimeFp[]) (char* timestr, int64_t* time, int32_t time int32_t taosGetTimestampSec() { return (int32_t)time(NULL); } -int32_t taosParseTime(char* timestr, int64_t* time, int32_t len, int32_t timePrec, int8_t daylight) { +int32_t taosParseTime(char* timestr, int64_t* time, int32_t len, int32_t timePrec, int8_t day_light) { /* parse datatime string in with tz */ if (strnchr(timestr, 'T', len, false) != NULL) { return parseTimeWithTz(timestr, time, timePrec); } else { - return (*parseLocaltimeFp[daylight])(timestr, time, timePrec); + return (*parseLocaltimeFp[day_light])(timestr, time, timePrec); } } From 5456f3a0090005365262080e17c75a0f4cc4935b Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Sat, 29 May 2021 09:12:19 +0800 Subject: [PATCH 33/82] Revert "[TD-4533]: taosdemo resub if resubAfterConsume != -1 (#6243)" (#6289) This reverts commit 0c4075e09fa0c2e40efae77b9ded0a314ad657e2. --- src/kit/taosdemo/taosdemo.c | 52 +++++++++---------------------------- 1 file changed, 12 insertions(+), 40 deletions(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 406544b306..a8af72d36a 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -389,7 +389,6 @@ typedef struct SpecifiedQueryInfo_S { char sql[MAX_QUERY_SQL_COUNT][MAX_QUERY_SQL_LENGTH+1]; char result[MAX_QUERY_SQL_COUNT][MAX_FILE_NAME_LEN+1]; int resubAfterConsume[MAX_QUERY_SQL_COUNT]; - int endAfterConsume[MAX_QUERY_SQL_COUNT]; TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT]; char topic[MAX_QUERY_SQL_COUNT][32]; int consumed[MAX_QUERY_SQL_COUNT]; @@ -412,7 +411,6 @@ typedef struct SuperQueryInfo_S { char sql[MAX_QUERY_SQL_COUNT][MAX_QUERY_SQL_LENGTH+1]; char result[MAX_QUERY_SQL_COUNT][MAX_FILE_NAME_LEN+1]; int resubAfterConsume; - int endAfterConsume; TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT]; char* childTblName; @@ -4378,17 +4376,6 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { tstrncpy(g_queryInfo.specifiedQueryInfo.sql[j], sqlStr->valuestring, MAX_QUERY_SQL_LENGTH); - cJSON* endAfterConsume = - cJSON_GetObjectItem(specifiedQuery, "endAfterConsume"); - if (endAfterConsume - && endAfterConsume->type == cJSON_Number) { - g_queryInfo.specifiedQueryInfo.endAfterConsume[j] - = endAfterConsume->valueint; - } else if (!endAfterConsume) { - // default value is -1, which mean infinite loop - g_queryInfo.specifiedQueryInfo.endAfterConsume[j] = -1; - } - cJSON* resubAfterConsume = cJSON_GetObjectItem(specifiedQuery, "resubAfterConsume"); if (resubAfterConsume @@ -4396,8 +4383,9 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { g_queryInfo.specifiedQueryInfo.resubAfterConsume[j] = resubAfterConsume->valueint; } else if (!resubAfterConsume) { - // default value is -1, which mean do not resub - g_queryInfo.specifiedQueryInfo.resubAfterConsume[j] = -1; + //printf("failed to read json, subscribe interval no found\n"); + //goto PARSE_OVER; + g_queryInfo.specifiedQueryInfo.resubAfterConsume[j] = 1; } cJSON *result = cJSON_GetObjectItem(sql, "result"); @@ -4541,26 +4529,16 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { g_queryInfo.superQueryInfo.subscribeKeepProgress = 0; } - cJSON* superEndAfterConsume = - cJSON_GetObjectItem(superQuery, "endAfterConsume"); - if 
(superEndAfterConsume - && superEndAfterConsume->type == cJSON_Number) { - g_queryInfo.superQueryInfo.endAfterConsume = - superEndAfterConsume->valueint; - } else if (!superEndAfterConsume) { - // default value is -1, which mean do not resub - g_queryInfo.superQueryInfo.endAfterConsume = -1; - } - cJSON* superResubAfterConsume = - cJSON_GetObjectItem(superQuery, "endAfterConsume"); + cJSON_GetObjectItem(superQuery, "resubAfterConsume"); if (superResubAfterConsume && superResubAfterConsume->type == cJSON_Number) { - g_queryInfo.superQueryInfo.endAfterConsume = + g_queryInfo.superQueryInfo.resubAfterConsume = superResubAfterConsume->valueint; } else if (!superResubAfterConsume) { - // default value is -1, which mean do not resub - g_queryInfo.superQueryInfo.endAfterConsume = -1; + //printf("failed to read json, subscribe interval no found\n"); + ////goto PARSE_OVER; + g_queryInfo.superQueryInfo.resubAfterConsume = 1; } // supert table sqls @@ -7284,10 +7262,7 @@ static void *superSubscribe(void *sarg) { uint64_t st = 0, et = 0; - while ((g_queryInfo.superQueryInfo.endAfterConsume == -1) - || (g_queryInfo.superQueryInfo.endAfterConsume < - consumed[pThreadInfo->end_table_to - pThreadInfo->start_table_from])) { - + while(1) { for (uint64_t i = pThreadInfo->start_table_from; i <= pThreadInfo->end_table_to; i++) { tsubSeq = i - pThreadInfo->start_table_from; @@ -7316,7 +7291,7 @@ static void *superSubscribe(void *sarg) { } consumed[tsubSeq] ++; - if ((g_queryInfo.superQueryInfo.resubAfterConsume != -1) + if ((g_queryInfo.superQueryInfo.subscribeKeepProgress) && (consumed[tsubSeq] >= g_queryInfo.superQueryInfo.resubAfterConsume)) { printf("keepProgress:%d, resub super table query: %"PRIu64"\n", @@ -7398,10 +7373,7 @@ static void *specifiedSubscribe(void *sarg) { // start loop to consume result g_queryInfo.specifiedQueryInfo.consumed[pThreadInfo->threadID] = 0; - while((g_queryInfo.specifiedQueryInfo.endAfterConsume[pThreadInfo->querySeq] == -1) - || (g_queryInfo.specifiedQueryInfo.consumed[pThreadInfo->threadID] < - g_queryInfo.specifiedQueryInfo.endAfterConsume[pThreadInfo->querySeq])) { - + while(1) { if (ASYNC_MODE == g_queryInfo.specifiedQueryInfo.asyncMode) { continue; } @@ -7420,7 +7392,7 @@ static void *specifiedSubscribe(void *sarg) { } g_queryInfo.specifiedQueryInfo.consumed[pThreadInfo->threadID] ++; - if ((g_queryInfo.specifiedQueryInfo.resubAfterConsume[pThreadInfo->querySeq] != -1) + if ((g_queryInfo.specifiedQueryInfo.subscribeKeepProgress) && (g_queryInfo.specifiedQueryInfo.consumed[pThreadInfo->threadID] >= g_queryInfo.specifiedQueryInfo.resubAfterConsume[pThreadInfo->querySeq])) { printf("keepProgress:%d, resub specified query: %"PRIu64"\n", From af7658b00245e239c2943f8d5370023fad35fed1 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Sat, 29 May 2021 09:32:22 +0800 Subject: [PATCH 34/82] [TD-4353]: taosdemo resub if resubAfterConsume != -1 (#6287) Co-authored-by: Shuduo Sang --- src/kit/taosdemo/taosdemo.c | 72 +++++++++++++++++++++++++------------ 1 file changed, 50 insertions(+), 22 deletions(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index a8af72d36a..e7b2d4f16b 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -380,7 +380,7 @@ typedef struct SDbs_S { typedef struct SpecifiedQueryInfo_S { uint64_t queryInterval; // 0: unlimit > 0 loop/s uint32_t concurrent; - uint64_t sqlCount; + int sqlCount; uint32_t asyncMode; // 0: sync, 1: async uint64_t subscribeInterval; // ms uint64_t queryTimes; @@ -389,6 +389,7 @@ typedef 
struct SpecifiedQueryInfo_S { char sql[MAX_QUERY_SQL_COUNT][MAX_QUERY_SQL_LENGTH+1]; char result[MAX_QUERY_SQL_COUNT][MAX_FILE_NAME_LEN+1]; int resubAfterConsume[MAX_QUERY_SQL_COUNT]; + int endAfterConsume[MAX_QUERY_SQL_COUNT]; TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT]; char topic[MAX_QUERY_SQL_COUNT][32]; int consumed[MAX_QUERY_SQL_COUNT]; @@ -407,10 +408,11 @@ typedef struct SuperQueryInfo_S { uint64_t queryTimes; int64_t childTblCount; char childTblPrefix[MAX_TB_NAME_SIZE]; - uint64_t sqlCount; + int sqlCount; char sql[MAX_QUERY_SQL_COUNT][MAX_QUERY_SQL_LENGTH+1]; char result[MAX_QUERY_SQL_COUNT][MAX_FILE_NAME_LEN+1]; int resubAfterConsume; + int endAfterConsume; TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT]; char* childTblName; @@ -1775,7 +1777,7 @@ static void printfQueryMeta() { if ((SUBSCRIBE_TEST == g_args.test_mode) || (QUERY_TEST == g_args.test_mode)) { printf("specified table query info: \n"); - printf("sqlCount: \033[33m%"PRIu64"\033[0m\n", + printf("sqlCount: \033[33m%d\033[0m\n", g_queryInfo.specifiedQueryInfo.sqlCount); if (g_queryInfo.specifiedQueryInfo.sqlCount > 0) { printf("specified tbl query times:\n"); @@ -1795,15 +1797,15 @@ static void printfQueryMeta() { printf("keepProgress: \033[33m%d\033[0m\n", g_queryInfo.specifiedQueryInfo.subscribeKeepProgress); - for (uint64_t i = 0; i < g_queryInfo.specifiedQueryInfo.sqlCount; i++) { - printf(" sql[%"PRIu64"]: \033[33m%s\033[0m\n", + for (int i = 0; i < g_queryInfo.specifiedQueryInfo.sqlCount; i++) { + printf(" sql[%d]: \033[33m%s\033[0m\n", i, g_queryInfo.specifiedQueryInfo.sql[i]); } printf("\n"); } printf("super table query info:\n"); - printf("sqlCount: \033[33m%"PRIu64"\033[0m\n", + printf("sqlCount: \033[33m%d\033[0m\n", g_queryInfo.superQueryInfo.sqlCount); if (g_queryInfo.superQueryInfo.sqlCount > 0) { @@ -4277,7 +4279,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { if (concurrent && concurrent->type == cJSON_Number) { if (concurrent->valueint <= 0) { errorPrint( - "%s() LN%d, query sqlCount %"PRIu64" or concurrent %d is not correct.\n", + "%s() LN%d, query sqlCount %d or concurrent %d is not correct.\n", __func__, __LINE__, g_queryInfo.specifiedQueryInfo.sqlCount, g_queryInfo.specifiedQueryInfo.concurrent); @@ -4376,6 +4378,17 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { tstrncpy(g_queryInfo.specifiedQueryInfo.sql[j], sqlStr->valuestring, MAX_QUERY_SQL_LENGTH); + cJSON* endAfterConsume = + cJSON_GetObjectItem(specifiedQuery, "endAfterConsume"); + if (endAfterConsume + && endAfterConsume->type == cJSON_Number) { + g_queryInfo.specifiedQueryInfo.endAfterConsume[j] + = endAfterConsume->valueint; + } else if (!endAfterConsume) { + // default value is -1, which mean infinite loop + g_queryInfo.specifiedQueryInfo.endAfterConsume[j] = -1; + } + cJSON* resubAfterConsume = cJSON_GetObjectItem(specifiedQuery, "resubAfterConsume"); if (resubAfterConsume @@ -4383,9 +4396,8 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { g_queryInfo.specifiedQueryInfo.resubAfterConsume[j] = resubAfterConsume->valueint; } else if (!resubAfterConsume) { - //printf("failed to read json, subscribe interval no found\n"); - //goto PARSE_OVER; - g_queryInfo.specifiedQueryInfo.resubAfterConsume[j] = 1; + // default value is -1, which mean do not resub + g_queryInfo.specifiedQueryInfo.resubAfterConsume[j] = -1; } cJSON *result = cJSON_GetObjectItem(sql, "result"); @@ -4529,16 +4541,26 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { g_queryInfo.superQueryInfo.subscribeKeepProgress = 0; } + cJSON* superEndAfterConsume = + 
cJSON_GetObjectItem(superQuery, "endAfterConsume"); + if (superEndAfterConsume + && superEndAfterConsume->type == cJSON_Number) { + g_queryInfo.superQueryInfo.endAfterConsume = + superEndAfterConsume->valueint; + } else if (!superEndAfterConsume) { + // default value is -1, which mean do not resub + g_queryInfo.superQueryInfo.endAfterConsume = -1; + } + cJSON* superResubAfterConsume = - cJSON_GetObjectItem(superQuery, "resubAfterConsume"); + cJSON_GetObjectItem(superQuery, "endAfterConsume"); if (superResubAfterConsume && superResubAfterConsume->type == cJSON_Number) { - g_queryInfo.superQueryInfo.resubAfterConsume = + g_queryInfo.superQueryInfo.endAfterConsume = superResubAfterConsume->valueint; } else if (!superResubAfterConsume) { - //printf("failed to read json, subscribe interval no found\n"); - ////goto PARSE_OVER; - g_queryInfo.superQueryInfo.resubAfterConsume = 1; + // default value is -1, which mean do not resub + g_queryInfo.superQueryInfo.endAfterConsume = -1; } // supert table sqls @@ -7262,7 +7284,10 @@ static void *superSubscribe(void *sarg) { uint64_t st = 0, et = 0; - while(1) { + while ((g_queryInfo.superQueryInfo.endAfterConsume == -1) + || (g_queryInfo.superQueryInfo.endAfterConsume < + consumed[pThreadInfo->end_table_to - pThreadInfo->start_table_from])) { + for (uint64_t i = pThreadInfo->start_table_from; i <= pThreadInfo->end_table_to; i++) { tsubSeq = i - pThreadInfo->start_table_from; @@ -7291,7 +7316,7 @@ static void *superSubscribe(void *sarg) { } consumed[tsubSeq] ++; - if ((g_queryInfo.superQueryInfo.subscribeKeepProgress) + if ((g_queryInfo.superQueryInfo.resubAfterConsume != -1) && (consumed[tsubSeq] >= g_queryInfo.superQueryInfo.resubAfterConsume)) { printf("keepProgress:%d, resub super table query: %"PRIu64"\n", @@ -7373,7 +7398,10 @@ static void *specifiedSubscribe(void *sarg) { // start loop to consume result g_queryInfo.specifiedQueryInfo.consumed[pThreadInfo->threadID] = 0; - while(1) { + while((g_queryInfo.specifiedQueryInfo.endAfterConsume[pThreadInfo->querySeq] == -1) + || (g_queryInfo.specifiedQueryInfo.consumed[pThreadInfo->threadID] < + g_queryInfo.specifiedQueryInfo.endAfterConsume[pThreadInfo->querySeq])) { + if (ASYNC_MODE == g_queryInfo.specifiedQueryInfo.asyncMode) { continue; } @@ -7392,7 +7420,7 @@ static void *specifiedSubscribe(void *sarg) { } g_queryInfo.specifiedQueryInfo.consumed[pThreadInfo->threadID] ++; - if ((g_queryInfo.specifiedQueryInfo.subscribeKeepProgress) + if ((g_queryInfo.specifiedQueryInfo.resubAfterConsume[pThreadInfo->querySeq] != -1) && (g_queryInfo.specifiedQueryInfo.consumed[pThreadInfo->threadID] >= g_queryInfo.specifiedQueryInfo.resubAfterConsume[pThreadInfo->querySeq])) { printf("keepProgress:%d, resub specified query: %"PRIu64"\n", @@ -7460,12 +7488,12 @@ static int subscribeTestProcess() { //==== create threads for query for specified table if (g_queryInfo.specifiedQueryInfo.sqlCount <= 0) { - debugPrint("%s() LN%d, sepcified query sqlCount %"PRIu64".\n", + debugPrint("%s() LN%d, sepcified query sqlCount %d.\n", __func__, __LINE__, g_queryInfo.specifiedQueryInfo.sqlCount); } else { if (g_queryInfo.specifiedQueryInfo.concurrent <= 0) { - errorPrint("%s() LN%d, sepcified query sqlCount %"PRIu64".\n", + errorPrint("%s() LN%d, sepcified query sqlCount %d.\n", __func__, __LINE__, g_queryInfo.specifiedQueryInfo.sqlCount); exit(-1); @@ -7498,7 +7526,7 @@ static int subscribeTestProcess() { //==== create threads for super table query if (g_queryInfo.superQueryInfo.sqlCount <= 0) { - debugPrint("%s() LN%d, super table 
query sqlCount %"PRIu64".\n", + debugPrint("%s() LN%d, super table query sqlCount %d.\n", __func__, __LINE__, g_queryInfo.superQueryInfo.sqlCount); } else { From 458405cc6381c0436fb4c6c4f4c0eab2c6431481 Mon Sep 17 00:00:00 2001 From: Ping Xiao Date: Sat, 29 May 2021 18:52:56 +0800 Subject: [PATCH 35/82] [TD-4423]: add test case --- tests/pytest/insert/nchar.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tests/pytest/insert/nchar.py b/tests/pytest/insert/nchar.py index 3319aa3c56..5ad52b96a1 100644 --- a/tests/pytest/insert/nchar.py +++ b/tests/pytest/insert/nchar.py @@ -36,6 +36,10 @@ class TDTestCase: tdSql.checkData(1, 1, '涛思数据') tdSql.error("insert into tb values (now, 'taosdata001')") + + tdSql.error("insert into tb(now, 😀)") + tdSql.query("select * from tb") + tdSql.checkRows(2) def stop(self): tdSql.close() From daa52050521df5726cd55edc5513c1b1db9d8dd5 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Sun, 30 May 2021 07:02:30 +0800 Subject: [PATCH 36/82] Hotfix/sangshuduo/td 4406 taosdemo auto create tables (#6294) * [TD-4406]: taosdemo auto create table. * fix child table exists or auto create logic. * reduce redundant conditions. Co-authored-by: Shuduo Sang --- src/kit/taosdemo/taosdemo.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index e7b2d4f16b..c072045d56 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -5144,8 +5144,7 @@ static int generateStbSQLHead( char headBuf[HEAD_BUFF_LEN]; - if ((AUTO_CREATE_SUBTBL == superTblInfo->autoCreateTable) - && (TBL_ALREADY_EXISTS != superTblInfo->childTblExists)) { + if (AUTO_CREATE_SUBTBL == superTblInfo->autoCreateTable) { char* tagsValBuf = NULL; if (0 == superTblInfo->tagSource) { tagsValBuf = generateTagVaulesForStb(superTblInfo, tableSeq); From 7946d97e3310884c173ed1532df9ee1c8a4c6004 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Sun, 30 May 2021 07:03:23 +0800 Subject: [PATCH 37/82] Feature/sangshuduo/td 4068 taosdemo stmt for master (#6296) * [TD-4068]: taosdemo support stmt. for easy merge purpose. disabled in master. * fix clang compile error. * fix memory leak, add more macros. change sqlcount to int Co-authored-by: Shuduo Sang --- src/kit/taosdemo/taosdemo.c | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index c072045d56..f0b8cf7ca9 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -693,7 +693,11 @@ static void printHelp() { printf("%s%s%s%s\n", indent, "-p", indent, "The TCP/IP port number to use for the connection. Default is 0."); printf("%s%s%s%s\n", indent, "-I", indent, +#if STMT_IFACE_ENABLED == 1 "The interface (taosc, rest, and stmt) taosdemo uses. Default is 'taosc'."); +#else + "The interface (taosc, rest) taosdemo uses. Default is 'taosc'."); +#endif printf("%s%s%s%s\n", indent, "-d", indent, "Destination database. 
Default is 'test'."); printf("%s%s%s%s\n", indent, "-a", indent, @@ -793,8 +797,10 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { arguments->iface = TAOSC_IFACE; } else if (0 == strcasecmp(argv[i], "rest")) { arguments->iface = REST_IFACE; +#if STMT_IFACE_ENABLED == 1 } else if (0 == strcasecmp(argv[i], "stmt")) { arguments->iface = STMT_IFACE; +#endif } else { errorPrint("%s", "\n\t-I need a valid string following!\n"); exit(EXIT_FAILURE); @@ -3912,8 +3918,10 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { g_Dbs.db[i].superTbls[j].iface= TAOSC_IFACE; } else if (0 == strcasecmp(stbIface->valuestring, "rest")) { g_Dbs.db[i].superTbls[j].iface= REST_IFACE; +#if STMT_IFACE_ENABLED == 1 } else if (0 == strcasecmp(stbIface->valuestring, "stmt")) { g_Dbs.db[i].superTbls[j].iface= STMT_IFACE; +#endif } else { errorPrint("%s() LN%d, failed to read json, insert_mode %s not recognized\n", __func__, __LINE__, stbIface->valuestring); @@ -4933,6 +4941,7 @@ static int32_t execInsert(threadInfo *pThreadInfo, uint32_t k) } break; +#if STMT_IFACE_ENABLED == 1 case STMT_IFACE: debugPrint("%s() LN%d, stmt=%p", __func__, __LINE__, pThreadInfo->stmt); if (0 != taos_stmt_execute(pThreadInfo->stmt)) { @@ -4942,6 +4951,7 @@ static int32_t execInsert(threadInfo *pThreadInfo, uint32_t k) } affectedRows = k; break; +#endif default: errorPrint("%s() LN%d: unknown insert mode: %d\n", @@ -5506,6 +5516,7 @@ static int32_t prepareStmtWithoutStb( } } + free(bindArray); return k; } @@ -5586,6 +5597,7 @@ static int32_t prepareStbStmt(SSuperTable *stbInfo, } } + free(bindArray); return k; } #endif @@ -6407,6 +6419,7 @@ static void startMultiThreadInsertData(int threads, char* db_name, exit(-1); } +#if STMT_IFACE_ENABLED == 1 if ((g_args.iface == STMT_IFACE) || ((superTblInfo) && (superTblInfo->iface == STMT_IFACE))) { @@ -6446,6 +6459,7 @@ static void startMultiThreadInsertData(int threads, char* db_name, exit(-1); } } +#endif } else { pThreadInfo->taos = NULL; } @@ -6486,9 +6500,11 @@ static void startMultiThreadInsertData(int threads, char* db_name, tsem_destroy(&(pThreadInfo->lock_sem)); +#if STMT_IFACE_ENABLED == 1 if (pThreadInfo->stmt) { taos_stmt_close(pThreadInfo->stmt); } +#endif tsem_destroy(&(pThreadInfo->lock_sem)); taos_close(pThreadInfo->taos); From c60cf34575c3569f789b555490b7ddac8a875d77 Mon Sep 17 00:00:00 2001 From: shenglian zhou Date: Thu, 27 May 2021 16:19:13 +0800 Subject: [PATCH 38/82] [TD-4243]:fix coredump when select _block_dist() from table --- src/query/src/qExecutor.c | 2 + src/tsdb/src/tsdbRead.c | 47 +++++------ tests/script/fullGeneralSuite.sim | 1 + tests/script/general/compute/block_dist.sim | 94 +++++++++++++++++++++ tests/script/general/compute/testSuite.sim | 1 + tests/script/regressionSuite.sim | 1 + 6 files changed, 122 insertions(+), 24 deletions(-) create mode 100644 tests/script/general/compute/block_dist.sim diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index eca2a25a35..f97a0c4a74 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -2845,6 +2845,8 @@ static void doSetTagValueInParam(void* pTable, int32_t tagColId, tVariant *tag, if (tagColId == TSDB_TBNAME_COLUMN_INDEX) { val = tsdbGetTableName(pTable); assert(val != NULL); + } else if (tagColId == TSDB_BLOCK_DIST_COLUMN_INDEX) { + val = NULL; } else { val = tsdbGetTableTagVal(pTable, tagColId, type, bytes); } diff --git a/src/tsdb/src/tsdbRead.c b/src/tsdb/src/tsdbRead.c index 1b24405952..1cc0780b9d 100644 --- a/src/tsdb/src/tsdbRead.c +++ 
b/src/tsdb/src/tsdbRead.c @@ -367,40 +367,39 @@ static STsdbQueryHandle* tsdbQueryTablesImpl(STsdbRepo* tsdb, STsdbQueryCond* pC goto out_of_memory; } - assert(pCond != NULL && pCond->numOfCols > 0 && pMemRef != NULL); + assert(pCond != NULL && pMemRef != NULL); if (ASCENDING_TRAVERSE(pCond->order)) { assert(pQueryHandle->window.skey <= pQueryHandle->window.ekey); } else { assert(pQueryHandle->window.skey >= pQueryHandle->window.ekey); } - - // allocate buffer in order to load data blocks from file - pQueryHandle->statis = calloc(pCond->numOfCols, sizeof(SDataStatis)); - if (pQueryHandle->statis == NULL) { - goto out_of_memory; - } - - pQueryHandle->pColumns = taosArrayInit(pCond->numOfCols, sizeof(SColumnInfoData)); // todo: use list instead of array? - if (pQueryHandle->pColumns == NULL) { - goto out_of_memory; - } - - for (int32_t i = 0; i < pCond->numOfCols; ++i) { - SColumnInfoData colInfo = {{0}, 0}; - - colInfo.info = pCond->colList[i]; - colInfo.pData = calloc(1, EXTRA_BYTES + pQueryHandle->outputCapacity * pCond->colList[i].bytes); - if (colInfo.pData == NULL) { + if (pCond->numOfCols > 0) { + // allocate buffer in order to load data blocks from file + pQueryHandle->statis = calloc(pCond->numOfCols, sizeof(SDataStatis)); + if (pQueryHandle->statis == NULL) { goto out_of_memory; } - taosArrayPush(pQueryHandle->pColumns, &colInfo); - pQueryHandle->statis[i].colId = colInfo.info.colId; - } - if (pCond->numOfCols > 0) { + pQueryHandle->pColumns = + taosArrayInit(pCond->numOfCols, sizeof(SColumnInfoData)); // todo: use list instead of array? + if (pQueryHandle->pColumns == NULL) { + goto out_of_memory; + } + + for (int32_t i = 0; i < pCond->numOfCols; ++i) { + SColumnInfoData colInfo = {{0}, 0}; + + colInfo.info = pCond->colList[i]; + colInfo.pData = calloc(1, EXTRA_BYTES + pQueryHandle->outputCapacity * pCond->colList[i].bytes); + if (colInfo.pData == NULL) { + goto out_of_memory; + } + taosArrayPush(pQueryHandle->pColumns, &colInfo); + pQueryHandle->statis[i].colId = colInfo.info.colId; + } + pQueryHandle->defaultLoadColumn = getDefaultLoadColumns(pQueryHandle, true); } - STsdbMeta* pMeta = tsdbGetMeta(tsdb); assert(pMeta != NULL); diff --git a/tests/script/fullGeneralSuite.sim b/tests/script/fullGeneralSuite.sim index cde51ebdbf..2cd2236200 100644 --- a/tests/script/fullGeneralSuite.sim +++ b/tests/script/fullGeneralSuite.sim @@ -33,6 +33,7 @@ run general/compute/percentile.sim run general/compute/stddev.sim run general/compute/sum.sim run general/compute/top.sim +run general/compute/block_dist.sim run general/db/alter_option.sim run general/db/alter_tables_d2.sim run general/db/alter_tables_v1.sim diff --git a/tests/script/general/compute/block_dist.sim b/tests/script/general/compute/block_dist.sim new file mode 100644 index 0000000000..51cf903654 --- /dev/null +++ b/tests/script/general/compute/block_dist.sim @@ -0,0 +1,94 @@ +system sh/stop_dnodes.sh + +system sh/deploy.sh -n dnode1 -i 1 +system sh/cfg.sh -n dnode1 -c walLevel -v 1 +system sh/exec.sh -n dnode1 -s start +sleep 2000 +sql connect + +$dbPrefix = m_di_db +$tbPrefix = m_di_tb +$mtPrefix = m_di_mt +$ntPrefix = m_di_nt +$tbNum = 1 +$rowNum = 2000 + +print =============== step1 +$i = 0 +$db = $dbPrefix . $i +$mt = $mtPrefix . $i +$nt = $ntPrefix . $i + +sql drop database $db -x step1 +step1: +sql create database $db +sql use $db +sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol int) + +$i = 0 +while $i < $tbNum + $tb = $tbPrefix . 
$i + sql create table $tb using $mt tags( $i ) + + $x = 0 + while $x < $rowNum + $cc = $x * 60000 + $ms = 1601481600000 + $cc + sql insert into $tb values ($ms , $x ) + $x = $x + 1 + endw + + $i = $i + 1 +endw + +sql create table $nt (ts timestamp, tbcol int) +$x = 0 +while $x < $rowNum + $cc = $x * 60000 + $ms = 1601481600000 + $cc + sql insert into $nt values ($ms , $x ) + $x = $x + 1 +endw + +sleep 100 + +print =============== step2 +$i = 0 +$tb = $tbPrefix . $i + +sql select _block_dist() from $tb + +if $rows != 1 then + print expect 1, actual:$rows + return -1 +endi + +print =============== step3 +$i = 0 +$mt = $mtPrefix . $i +sql select _block_dist() from $mt + +if $rows != 1 then + print expect 1, actual:$rows + return -1 +endi + +print =============== step4 +$i = 0 +$nt = $ntPrefix . $i + +sql select _block_dist() from $nt + +if $rows != 1 then + print expect 1, actual:$rows + return -1 +endi + +print =============== clear +sql drop database $db +sql show databases +if $rows != 0 then + return -1 +endi + +system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/general/compute/testSuite.sim b/tests/script/general/compute/testSuite.sim index 6cd6badaee..91bf4bf0cd 100644 --- a/tests/script/general/compute/testSuite.sim +++ b/tests/script/general/compute/testSuite.sim @@ -14,3 +14,4 @@ run general/compute/percentile.sim run general/compute/stddev.sim run general/compute/sum.sim run general/compute/top.sim +run general/compute/block_dist.sim diff --git a/tests/script/regressionSuite.sim b/tests/script/regressionSuite.sim index e5e2194e87..d5742cd98f 100644 --- a/tests/script/regressionSuite.sim +++ b/tests/script/regressionSuite.sim @@ -32,6 +32,7 @@ run general/compute/percentile.sim run general/compute/stddev.sim run general/compute/sum.sim run general/compute/top.sim +run general/compute/block_dist.sim run general/db/alter_option.sim run general/db/alter_tables_d2.sim run general/db/alter_tables_v1.sim From 9291d422c549e4a54f74886aa204c15b94c7edf7 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Sun, 30 May 2021 23:34:12 +0800 Subject: [PATCH 39/82] [TD-4360]: Change the log output level from error to info --- src/tsdb/src/tsdbMeta.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/tsdb/src/tsdbMeta.c b/src/tsdb/src/tsdbMeta.c index 0a4ea5e153..88293cfbbb 100644 --- a/src/tsdb/src/tsdbMeta.c +++ b/src/tsdb/src/tsdbMeta.c @@ -68,7 +68,7 @@ int tsdbCreateTable(STsdbRepo *repo, STableCfg *pCfg) { TABLE_CHAR_NAME(pMeta->tables[tid]), TABLE_TID(pMeta->tables[tid]), TABLE_UID(pMeta->tables[tid])); return 0; } else { - tsdbError("vgId:%d table %s at tid %d uid %" PRIu64 + tsdbInfo("vgId:%d table %s at tid %d uid %" PRIu64 " exists, replace it with new table, this can be not reasonable", REPO_ID(pRepo), TABLE_CHAR_NAME(pMeta->tables[tid]), TABLE_TID(pMeta->tables[tid]), TABLE_UID(pMeta->tables[tid])); From ca4289388c76c7737513b8586c395dcfa299d921 Mon Sep 17 00:00:00 2001 From: dapan1121 <89396746@qq.com> Date: Mon, 31 May 2021 09:28:19 +0800 Subject: [PATCH 40/82] add test case --- tests/script/general/parser/alter_column.sim | 43 ++++++++++++++++++++ 1 file changed, 43 insertions(+) create mode 100644 tests/script/general/parser/alter_column.sim diff --git a/tests/script/general/parser/alter_column.sim b/tests/script/general/parser/alter_column.sim new file mode 100644 index 0000000000..47e59d76d2 --- /dev/null +++ b/tests/script/general/parser/alter_column.sim @@ -0,0 +1,43 @@ +system sh/stop_dnodes.sh + +system 
sh/deploy.sh -n dnode1 -i 1 +system sh/cfg.sh -n dnode1 -c walLevel -v 1 +system sh/exec.sh -n dnode1 -s start +sleep 100 +sql connect + +$dbPrefix = m_alt_db +$tbPrefix = m_alt_tb +$mtPrefix = m_alt_mt +$tbNum = 10 +$rowNum = 5 +$totalNum = $tbNum * $rowNum +$ts0 = 1537146000000 +$delta = 600000 +print ========== alter.sim +$i = 0 +$db = $dbPrefix . $i +$mt = $mtPrefix . $i + +sql drop database if exists $db +sql create database $db +sql use $db +##### alter table test, simeplest case +sql create table tb (ts timestamp, c1 int, c2 binary(10), c3 nchar(10)) +sql insert into tb values (now, 1, "1", "1") +sql alter table tb alter column length c2 20; +if $rows != 0 then + return -1 +endi +sql alter table tb alter column length c3 20; +if $rows != 0 then + return -1 +endi + +##### ILLEGAL OPERATIONS + +# try dropping columns that are defined in metric +sql_error alter table tb alter column length c1 10; + + +system sh/exec.sh -n dnode1 -s stop -x SIGINT From 0bb4e921c8e1e6656438ed5baca22feb92a140f9 Mon Sep 17 00:00:00 2001 From: dapan1121 <89396746@qq.com> Date: Mon, 31 May 2021 11:22:50 +0800 Subject: [PATCH 41/82] support stable --- src/client/src/tscSQLParser.c | 8 +- src/inc/ttokendef.h | 1 + src/query/inc/sql.y | 12 + src/query/src/sql.c | 476 ++++++++++--------- tests/script/general/parser/alter_column.sim | 4 +- 5 files changed, 267 insertions(+), 234 deletions(-) diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index ae235ebdda..b0ffab1298 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -5335,7 +5335,7 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f); } else if (pAlterSQL->type == TSDB_ALTER_TABLE_CHANGE_COLUMN) { if (taosArrayGetSize(pAlterSQL->pAddColumns) != 2) { - return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), NULL); + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), NULL); } tVariantListItem* pItem = taosArrayGet(pAlterSQL->pAddColumns, 0); @@ -5343,20 +5343,20 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { SColumnIndex columnIndex = COLUMN_INDEX_INITIALIZER; SStrToken name = {.type = TK_STRING, .z = pItem->pVar.pz, .n = pItem->pVar.nLen}; if (getColumnIndexByName(pCmd, &name, pQueryInfo, &columnIndex) != TSDB_CODE_SUCCESS) { - return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg17); + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg17); } SSchema* pColSchema = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, columnIndex.columnIndex); if (pColSchema->type != TSDB_DATA_TYPE_BINARY && pColSchema->type != TSDB_DATA_TYPE_NCHAR) { - return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg21); + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg21); } pItem = taosArrayGet(pAlterSQL->pAddColumns, 1); int64_t nlen = 0; if (tVariantDump(&pItem->pVar, (char *)&nlen, TSDB_DATA_TYPE_BIGINT, false) < 0 || nlen <= 0) { - return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg22); + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg22); } TAOS_FIELD f = tscCreateField(pColSchema->type, name.z, nlen); diff --git a/src/inc/ttokendef.h b/src/inc/ttokendef.h index 795779a297..e5f1472317 100644 --- a/src/inc/ttokendef.h +++ b/src/inc/ttokendef.h @@ -210,6 +210,7 @@ + #define TK_SPACE 300 #define TK_COMMENT 301 #define TK_ILLEGAL 302 diff --git a/src/query/inc/sql.y b/src/query/inc/sql.y index d1327c43c2..3a6e1c0cc0 100644 --- a/src/query/inc/sql.y +++ b/src/query/inc/sql.y @@ -824,6 +824,18 @@ cmd ::= 
ALTER STABLE ids(X) cpxName(F) DROP COLUMN ids(A). { setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } +cmd ::= ALTER STABLE ids(X) cpxName(F) ALTER COLUMN LENGTH ids(A) INTEGER(Z). { + X.n += F.n; + + toTSDBType(A.type); + SArray* K = tVariantListAppendToken(NULL, &A, -1); + toTSDBType(Z.type); + K = tVariantListAppendToken(K, &Z, -1); + + SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&X, K, NULL, TSDB_ALTER_TABLE_CHANGE_COLUMN, TSDB_SUPER_TABLE); + setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); +} + //////////////////////////////////ALTER TAGS statement///////////////////////////////////// cmd ::= ALTER STABLE ids(X) cpxName(Y) ADD TAG columnlist(A). { X.n += Y.n; diff --git a/src/query/src/sql.c b/src/query/src/sql.c index 79b919869d..560e499228 100644 --- a/src/query/src/sql.c +++ b/src/query/src/sql.c @@ -136,18 +136,18 @@ typedef union { #define ParseCTX_FETCH #define ParseCTX_STORE #define YYFALLBACK 1 -#define YYNSTATE 327 -#define YYNRULE 274 -#define YYNRULE_WITH_ACTION 274 +#define YYNSTATE 331 +#define YYNRULE 275 +#define YYNRULE_WITH_ACTION 275 #define YYNTOKEN 188 -#define YY_MAX_SHIFT 326 -#define YY_MIN_SHIFTREDUCE 523 -#define YY_MAX_SHIFTREDUCE 796 -#define YY_ERROR_ACTION 797 -#define YY_ACCEPT_ACTION 798 -#define YY_NO_ACTION 799 -#define YY_MIN_REDUCE 800 -#define YY_MAX_REDUCE 1073 +#define YY_MAX_SHIFT 330 +#define YY_MIN_SHIFTREDUCE 528 +#define YY_MAX_SHIFTREDUCE 802 +#define YY_ERROR_ACTION 803 +#define YY_ACCEPT_ACTION 804 +#define YY_NO_ACTION 805 +#define YY_MIN_REDUCE 806 +#define YY_MAX_REDUCE 1080 /************* End control #defines *******************************************/ #define YY_NLOOKAHEAD ((int)(sizeof(yy_lookahead)/sizeof(yy_lookahead[0]))) @@ -214,78 +214,78 @@ typedef union { ** yy_default[] Default action for each state. 
** *********** Begin parsing tables **********************************************/ -#define YY_ACTTAB_COUNT (694) +#define YY_ACTTAB_COUNT (697) static const YYACTIONTYPE yy_action[] = { - /* 0 */ 968, 571, 210, 324, 70, 18, 216, 959, 187, 572, - /* 10 */ 798, 326, 185, 48, 49, 145, 52, 53, 219, 1054, - /* 20 */ 222, 42, 213, 51, 271, 56, 54, 58, 55, 933, - /* 30 */ 650, 187, 947, 47, 46, 187, 932, 45, 44, 43, - /* 40 */ 48, 49, 1053, 52, 53, 218, 1054, 222, 42, 571, - /* 50 */ 51, 271, 56, 54, 58, 55, 959, 572, 300, 299, - /* 60 */ 47, 46, 965, 145, 45, 44, 43, 49, 31, 52, - /* 70 */ 53, 249, 138, 222, 42, 83, 51, 271, 56, 54, - /* 80 */ 58, 55, 287, 1003, 88, 266, 47, 46, 72, 231, - /* 90 */ 45, 44, 43, 524, 525, 526, 527, 528, 529, 530, - /* 100 */ 531, 532, 533, 534, 535, 536, 325, 234, 287, 211, - /* 110 */ 71, 571, 943, 48, 49, 31, 52, 53, 935, 572, - /* 120 */ 222, 42, 571, 51, 271, 56, 54, 58, 55, 268, - /* 130 */ 572, 81, 739, 47, 46, 256, 255, 45, 44, 43, - /* 140 */ 48, 50, 945, 52, 53, 145, 310, 222, 42, 77, - /* 150 */ 51, 271, 56, 54, 58, 55, 212, 37, 232, 944, - /* 160 */ 47, 46, 289, 191, 45, 44, 43, 24, 285, 319, - /* 170 */ 318, 284, 283, 282, 317, 281, 316, 315, 314, 280, - /* 180 */ 313, 312, 907, 31, 895, 896, 897, 898, 899, 900, - /* 190 */ 901, 902, 903, 904, 905, 906, 908, 909, 52, 53, - /* 200 */ 846, 1050, 222, 42, 171, 51, 271, 56, 54, 58, - /* 210 */ 55, 941, 19, 1002, 25, 47, 46, 1049, 959, 45, - /* 220 */ 44, 43, 221, 754, 225, 31, 743, 944, 746, 196, - /* 230 */ 749, 221, 754, 214, 13, 743, 197, 746, 87, 749, - /* 240 */ 84, 122, 121, 195, 45, 44, 43, 109, 56, 54, - /* 250 */ 58, 55, 310, 228, 206, 207, 47, 46, 270, 74, - /* 260 */ 45, 44, 43, 206, 207, 75, 226, 252, 24, 944, - /* 270 */ 319, 318, 77, 252, 745, 317, 748, 316, 315, 314, - /* 280 */ 37, 313, 312, 915, 1048, 674, 913, 914, 671, 204, - /* 290 */ 672, 916, 673, 918, 919, 917, 85, 920, 921, 107, - /* 300 */ 100, 112, 248, 686, 69, 31, 111, 117, 120, 110, - /* 310 */ 8, 203, 5, 34, 161, 114, 236, 237, 689, 160, - /* 320 */ 95, 90, 94, 31, 233, 57, 272, 930, 931, 30, - /* 330 */ 934, 297, 755, 31, 57, 179, 177, 175, 751, 31, - /* 340 */ 145, 755, 174, 125, 124, 123, 290, 751, 220, 944, - /* 350 */ 241, 47, 46, 205, 750, 45, 44, 43, 855, 245, - /* 360 */ 244, 189, 171, 750, 291, 227, 744, 944, 747, 229, - /* 370 */ 323, 322, 130, 320, 298, 847, 99, 944, 98, 171, - /* 380 */ 302, 1, 159, 944, 3, 172, 752, 136, 134, 133, - /* 390 */ 741, 947, 6, 235, 675, 947, 693, 294, 293, 947, - /* 400 */ 720, 721, 250, 705, 65, 711, 32, 140, 82, 61, - /* 410 */ 62, 712, 775, 756, 660, 21, 20, 20, 32, 274, - /* 420 */ 1065, 662, 758, 32, 66, 61, 742, 276, 678, 661, - /* 430 */ 679, 86, 63, 61, 29, 946, 68, 277, 649, 190, - /* 440 */ 15, 106, 14, 105, 676, 192, 677, 186, 17, 193, - /* 450 */ 16, 119, 118, 194, 200, 201, 199, 184, 198, 1013, - /* 460 */ 188, 1012, 223, 1009, 40, 246, 1008, 224, 301, 137, - /* 470 */ 967, 978, 995, 975, 994, 976, 960, 253, 753, 135, - /* 480 */ 980, 139, 143, 155, 156, 942, 251, 940, 704, 257, - /* 490 */ 157, 311, 911, 154, 146, 158, 957, 149, 147, 269, - /* 500 */ 215, 59, 259, 858, 279, 264, 38, 67, 182, 148, - /* 510 */ 35, 288, 854, 64, 1070, 265, 267, 96, 1069, 1067, - /* 520 */ 162, 263, 292, 261, 1064, 102, 295, 1063, 1060, 163, - /* 530 */ 876, 36, 33, 39, 183, 843, 113, 841, 115, 116, - /* 540 */ 839, 838, 238, 173, 836, 835, 834, 833, 832, 831, - /* 550 */ 176, 178, 258, 828, 826, 824, 822, 180, 819, 181, - /* 560 */ 41, 73, 78, 108, 260, 996, 303, 304, 305, 306, - /* 570 */ 
307, 308, 309, 208, 321, 796, 230, 278, 239, 240, - /* 580 */ 795, 242, 243, 91, 92, 209, 794, 202, 781, 780, - /* 590 */ 247, 252, 9, 273, 681, 837, 76, 26, 165, 877, - /* 600 */ 166, 126, 167, 164, 169, 168, 170, 127, 830, 2, - /* 610 */ 128, 129, 829, 821, 820, 254, 79, 706, 4, 150, - /* 620 */ 151, 152, 153, 141, 923, 709, 80, 142, 217, 262, - /* 630 */ 27, 713, 144, 10, 11, 757, 28, 7, 12, 22, - /* 640 */ 759, 23, 89, 275, 613, 609, 87, 607, 606, 605, - /* 650 */ 602, 575, 286, 97, 93, 32, 784, 60, 652, 651, - /* 660 */ 648, 597, 595, 101, 103, 587, 593, 589, 591, 585, - /* 670 */ 104, 583, 616, 615, 614, 612, 611, 296, 610, 608, - /* 680 */ 604, 603, 61, 573, 540, 538, 131, 800, 799, 799, - /* 690 */ 799, 799, 799, 132, + /* 0 */ 974, 576, 211, 328, 70, 18, 217, 965, 188, 577, + /* 10 */ 804, 330, 186, 48, 49, 146, 52, 53, 220, 1060, + /* 20 */ 223, 42, 214, 51, 272, 56, 54, 58, 55, 939, + /* 30 */ 655, 188, 953, 47, 46, 188, 938, 45, 44, 43, + /* 40 */ 48, 49, 1059, 52, 53, 219, 1060, 223, 42, 576, + /* 50 */ 51, 272, 56, 54, 58, 55, 965, 577, 304, 303, + /* 60 */ 47, 46, 971, 146, 45, 44, 43, 49, 31, 52, + /* 70 */ 53, 250, 139, 223, 42, 83, 51, 272, 56, 54, + /* 80 */ 58, 55, 288, 1009, 88, 267, 47, 46, 72, 314, + /* 90 */ 45, 44, 43, 529, 530, 531, 532, 533, 534, 535, + /* 100 */ 536, 537, 538, 539, 540, 541, 329, 235, 288, 212, + /* 110 */ 71, 576, 949, 48, 49, 31, 52, 53, 941, 577, + /* 120 */ 223, 42, 576, 51, 272, 56, 54, 58, 55, 269, + /* 130 */ 577, 81, 744, 47, 46, 257, 256, 45, 44, 43, + /* 140 */ 48, 50, 951, 52, 53, 146, 192, 223, 42, 77, + /* 150 */ 51, 272, 56, 54, 58, 55, 213, 37, 947, 950, + /* 160 */ 47, 46, 1, 160, 45, 44, 43, 24, 286, 323, + /* 170 */ 322, 285, 284, 283, 321, 282, 320, 319, 318, 281, + /* 180 */ 317, 316, 913, 31, 901, 902, 903, 904, 905, 906, + /* 190 */ 907, 908, 909, 910, 911, 912, 914, 915, 52, 53, + /* 200 */ 229, 29, 223, 42, 278, 51, 272, 56, 54, 58, + /* 210 */ 55, 694, 19, 1008, 25, 47, 46, 746, 965, 45, + /* 220 */ 44, 43, 222, 759, 226, 31, 748, 950, 751, 197, + /* 230 */ 754, 222, 759, 215, 13, 748, 198, 751, 87, 754, + /* 240 */ 84, 123, 122, 196, 45, 44, 43, 110, 56, 54, + /* 250 */ 58, 55, 314, 747, 208, 209, 47, 46, 271, 74, + /* 260 */ 45, 44, 43, 208, 209, 75, 227, 253, 24, 950, + /* 270 */ 323, 322, 77, 253, 750, 321, 753, 320, 319, 318, + /* 280 */ 37, 317, 316, 921, 1056, 679, 919, 920, 676, 698, + /* 290 */ 677, 922, 678, 924, 925, 923, 85, 926, 927, 108, + /* 300 */ 101, 113, 249, 691, 69, 31, 112, 118, 121, 111, + /* 310 */ 8, 205, 5, 34, 162, 115, 237, 238, 273, 161, + /* 320 */ 95, 90, 94, 31, 234, 57, 232, 936, 937, 30, + /* 330 */ 940, 301, 760, 293, 57, 180, 178, 176, 756, 31, + /* 340 */ 31, 760, 175, 126, 125, 124, 294, 756, 146, 950, + /* 350 */ 242, 47, 46, 1055, 755, 45, 44, 43, 1054, 246, + /* 360 */ 245, 228, 230, 755, 295, 324, 749, 950, 752, 852, + /* 370 */ 327, 326, 131, 172, 137, 135, 134, 3, 173, 1071, + /* 380 */ 302, 306, 221, 950, 950, 861, 757, 953, 953, 172, + /* 390 */ 62, 953, 853, 236, 680, 233, 172, 298, 297, 290, + /* 400 */ 725, 726, 251, 710, 716, 717, 32, 141, 61, 21, + /* 410 */ 65, 780, 63, 761, 763, 20, 82, 20, 665, 275, + /* 420 */ 667, 277, 32, 32, 61, 86, 6, 100, 666, 99, + /* 430 */ 66, 15, 61, 14, 107, 68, 106, 654, 206, 683, + /* 440 */ 17, 684, 16, 681, 207, 682, 120, 119, 952, 190, + /* 450 */ 191, 193, 187, 194, 195, 201, 202, 200, 185, 1019, + /* 460 */ 199, 189, 1018, 224, 40, 1015, 1014, 225, 305, 247, + /* 470 */ 138, 973, 156, 984, 1001, 981, 982, 966, 758, 254, + /* 480 */ 
1000, 986, 140, 144, 136, 948, 157, 258, 148, 216, + /* 490 */ 709, 917, 963, 147, 149, 946, 150, 151, 158, 266, + /* 500 */ 159, 864, 280, 260, 265, 67, 64, 59, 38, 270, + /* 510 */ 183, 35, 289, 264, 268, 860, 1077, 96, 291, 1076, + /* 520 */ 1073, 163, 262, 296, 1070, 103, 299, 1069, 1066, 164, + /* 530 */ 882, 36, 33, 39, 184, 849, 114, 847, 116, 117, + /* 540 */ 845, 844, 239, 174, 842, 841, 840, 839, 838, 837, + /* 550 */ 177, 179, 41, 834, 832, 830, 828, 181, 825, 182, + /* 560 */ 259, 252, 315, 73, 78, 109, 261, 1002, 307, 308, + /* 570 */ 309, 310, 311, 312, 210, 313, 231, 325, 279, 802, + /* 580 */ 241, 240, 801, 204, 203, 243, 91, 92, 244, 800, + /* 590 */ 843, 786, 785, 248, 127, 274, 253, 686, 836, 167, + /* 600 */ 128, 166, 883, 165, 168, 169, 171, 129, 170, 835, + /* 610 */ 2, 130, 9, 827, 826, 26, 76, 4, 255, 79, + /* 620 */ 711, 152, 153, 154, 155, 929, 142, 218, 714, 143, + /* 630 */ 80, 263, 764, 718, 145, 10, 11, 762, 27, 7, + /* 640 */ 28, 12, 22, 276, 23, 89, 618, 87, 614, 612, + /* 650 */ 611, 610, 607, 580, 287, 93, 97, 796, 32, 789, + /* 660 */ 657, 656, 653, 98, 60, 102, 602, 600, 592, 598, + /* 670 */ 594, 292, 596, 590, 104, 588, 621, 620, 619, 617, + /* 680 */ 105, 300, 616, 615, 613, 609, 608, 61, 578, 545, + /* 690 */ 132, 543, 806, 805, 805, 805, 133, }; static const YYCODETYPE yy_lookahead[] = { /* 0 */ 191, 1, 190, 191, 197, 252, 210, 234, 252, 9, @@ -296,68 +296,68 @@ static const YYCODETYPE yy_lookahead[] = { /* 50 */ 23, 24, 25, 26, 27, 28, 234, 9, 33, 34, /* 60 */ 33, 34, 253, 191, 37, 38, 39, 14, 191, 16, /* 70 */ 17, 249, 191, 20, 21, 237, 23, 24, 25, 26, - /* 80 */ 27, 28, 79, 259, 197, 261, 33, 34, 250, 68, + /* 80 */ 27, 28, 79, 259, 197, 261, 33, 34, 250, 81, /* 90 */ 37, 38, 39, 45, 46, 47, 48, 49, 50, 51, /* 100 */ 52, 53, 54, 55, 56, 57, 58, 191, 79, 61, /* 110 */ 110, 1, 235, 13, 14, 191, 16, 17, 231, 9, /* 120 */ 20, 21, 1, 23, 24, 25, 26, 27, 28, 257, /* 130 */ 9, 259, 105, 33, 34, 254, 255, 37, 38, 39, - /* 140 */ 13, 14, 226, 16, 17, 191, 81, 20, 21, 104, - /* 150 */ 23, 24, 25, 26, 27, 28, 232, 112, 137, 235, - /* 160 */ 33, 34, 141, 252, 37, 38, 39, 88, 89, 90, + /* 140 */ 13, 14, 226, 16, 17, 191, 252, 20, 21, 104, + /* 150 */ 23, 24, 25, 26, 27, 28, 232, 112, 191, 235, + /* 160 */ 33, 34, 198, 199, 37, 38, 39, 88, 89, 90, /* 170 */ 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, /* 180 */ 101, 102, 209, 191, 211, 212, 213, 214, 215, 216, /* 190 */ 217, 218, 219, 220, 221, 222, 223, 224, 16, 17, - /* 200 */ 196, 252, 20, 21, 200, 23, 24, 25, 26, 27, - /* 210 */ 28, 191, 44, 259, 104, 33, 34, 252, 234, 37, + /* 200 */ 233, 104, 20, 21, 107, 23, 24, 25, 26, 27, + /* 210 */ 28, 37, 44, 259, 104, 33, 34, 1, 234, 37, /* 220 */ 38, 39, 1, 2, 232, 191, 5, 235, 7, 61, /* 230 */ 9, 1, 2, 249, 104, 5, 68, 7, 108, 9, /* 240 */ 110, 73, 74, 75, 37, 38, 39, 76, 25, 26, - /* 250 */ 27, 28, 81, 233, 33, 34, 33, 34, 37, 105, + /* 250 */ 27, 28, 81, 37, 33, 34, 33, 34, 37, 105, /* 260 */ 37, 38, 39, 33, 34, 105, 232, 113, 88, 235, /* 270 */ 90, 91, 104, 113, 5, 95, 7, 97, 98, 99, - /* 280 */ 112, 101, 102, 209, 252, 2, 212, 213, 5, 252, + /* 280 */ 112, 101, 102, 209, 252, 2, 212, 213, 5, 115, /* 290 */ 7, 217, 9, 219, 220, 221, 197, 223, 224, 62, /* 300 */ 63, 64, 134, 109, 136, 191, 69, 70, 71, 72, - /* 310 */ 116, 143, 62, 63, 64, 78, 33, 34, 37, 69, - /* 320 */ 70, 71, 72, 191, 68, 104, 15, 228, 229, 230, - /* 330 */ 231, 75, 111, 191, 104, 62, 63, 64, 117, 191, - /* 340 */ 191, 111, 69, 70, 71, 72, 232, 117, 60, 235, - /* 350 */ 135, 33, 34, 252, 133, 
37, 38, 39, 196, 144, - /* 360 */ 145, 252, 200, 133, 232, 210, 5, 235, 7, 210, - /* 370 */ 65, 66, 67, 210, 232, 196, 138, 235, 140, 200, - /* 380 */ 232, 198, 199, 235, 194, 195, 117, 62, 63, 64, - /* 390 */ 1, 236, 104, 137, 111, 236, 115, 141, 142, 236, - /* 400 */ 124, 125, 105, 105, 109, 105, 109, 109, 259, 109, - /* 410 */ 109, 105, 105, 105, 105, 109, 109, 109, 109, 105, - /* 420 */ 236, 105, 111, 109, 129, 109, 37, 105, 5, 105, - /* 430 */ 7, 109, 131, 109, 104, 236, 104, 107, 106, 252, - /* 440 */ 138, 138, 140, 140, 5, 252, 7, 252, 138, 252, - /* 450 */ 140, 76, 77, 252, 252, 252, 252, 252, 252, 227, - /* 460 */ 252, 227, 227, 227, 251, 191, 227, 227, 227, 191, - /* 470 */ 191, 191, 260, 191, 260, 191, 234, 234, 117, 60, - /* 480 */ 191, 191, 191, 238, 191, 234, 192, 191, 117, 256, - /* 490 */ 191, 103, 225, 239, 247, 191, 248, 244, 246, 122, - /* 500 */ 256, 127, 256, 191, 191, 256, 191, 128, 191, 245, - /* 510 */ 191, 191, 191, 130, 191, 121, 126, 191, 191, 191, - /* 520 */ 191, 120, 191, 119, 191, 191, 191, 191, 191, 191, + /* 310 */ 116, 143, 62, 63, 64, 78, 33, 34, 15, 69, + /* 320 */ 70, 71, 72, 191, 68, 104, 68, 228, 229, 230, + /* 330 */ 231, 75, 111, 75, 104, 62, 63, 64, 117, 191, + /* 340 */ 191, 111, 69, 70, 71, 72, 232, 117, 191, 235, + /* 350 */ 135, 33, 34, 252, 133, 37, 38, 39, 252, 144, + /* 360 */ 145, 210, 210, 133, 232, 210, 5, 235, 7, 196, + /* 370 */ 65, 66, 67, 200, 62, 63, 64, 194, 195, 236, + /* 380 */ 232, 232, 60, 235, 235, 196, 117, 236, 236, 200, + /* 390 */ 109, 236, 196, 137, 111, 137, 200, 141, 142, 141, + /* 400 */ 124, 125, 105, 105, 105, 105, 109, 109, 109, 109, + /* 410 */ 109, 105, 131, 105, 111, 109, 259, 109, 105, 105, + /* 420 */ 105, 105, 109, 109, 109, 109, 104, 138, 105, 140, + /* 430 */ 129, 138, 109, 140, 138, 104, 140, 106, 252, 5, + /* 440 */ 138, 7, 140, 5, 252, 7, 76, 77, 236, 252, + /* 450 */ 252, 252, 252, 252, 252, 252, 252, 252, 252, 227, + /* 460 */ 252, 252, 227, 227, 251, 227, 227, 227, 227, 191, + /* 470 */ 191, 191, 238, 191, 260, 191, 191, 234, 117, 234, + /* 480 */ 260, 191, 191, 191, 60, 234, 191, 256, 246, 256, + /* 490 */ 117, 225, 248, 247, 245, 191, 244, 243, 191, 121, + /* 500 */ 191, 191, 191, 256, 256, 128, 130, 127, 191, 122, + /* 510 */ 191, 191, 191, 120, 126, 191, 191, 191, 191, 191, + /* 520 */ 191, 191, 119, 191, 191, 191, 191, 191, 191, 191, /* 530 */ 191, 191, 191, 191, 191, 191, 191, 191, 191, 191, /* 540 */ 191, 191, 191, 191, 191, 191, 191, 191, 191, 191, - /* 550 */ 191, 191, 118, 191, 191, 191, 191, 191, 191, 191, - /* 560 */ 132, 192, 192, 87, 192, 192, 86, 50, 83, 85, - /* 570 */ 54, 84, 82, 192, 79, 5, 192, 192, 146, 5, - /* 580 */ 5, 146, 5, 197, 197, 192, 5, 192, 90, 89, - /* 590 */ 135, 113, 104, 107, 105, 192, 114, 104, 206, 208, - /* 600 */ 202, 193, 205, 207, 204, 203, 201, 193, 192, 198, - /* 610 */ 193, 193, 192, 192, 192, 109, 109, 105, 194, 243, - /* 620 */ 242, 241, 240, 104, 225, 105, 104, 109, 1, 104, - /* 630 */ 109, 105, 104, 123, 123, 105, 109, 104, 104, 104, - /* 640 */ 111, 104, 76, 107, 9, 5, 108, 5, 5, 5, - /* 650 */ 5, 80, 15, 140, 76, 109, 5, 16, 5, 5, - /* 660 */ 105, 5, 5, 140, 140, 5, 5, 5, 5, 5, - /* 670 */ 139, 5, 5, 5, 5, 5, 5, 138, 5, 5, - /* 680 */ 5, 5, 109, 80, 60, 59, 21, 0, 264, 264, - /* 690 */ 264, 264, 264, 21, 264, 264, 264, 264, 264, 264, + /* 550 */ 191, 191, 132, 191, 191, 191, 191, 191, 191, 191, + /* 560 */ 118, 192, 103, 192, 192, 87, 192, 192, 86, 50, + /* 570 */ 83, 85, 54, 84, 192, 82, 192, 79, 192, 5, + /* 580 */ 5, 146, 5, 192, 192, 146, 197, 197, 
5, 5, + /* 590 */ 192, 90, 89, 135, 193, 107, 113, 105, 192, 202, + /* 600 */ 193, 206, 208, 207, 205, 203, 201, 193, 204, 192, + /* 610 */ 198, 193, 104, 192, 192, 104, 114, 194, 109, 109, + /* 620 */ 105, 242, 241, 240, 239, 225, 104, 1, 105, 109, + /* 630 */ 104, 104, 111, 105, 104, 123, 123, 105, 109, 104, + /* 640 */ 109, 104, 104, 107, 104, 76, 9, 108, 5, 5, + /* 650 */ 5, 5, 5, 80, 15, 76, 140, 5, 109, 5, + /* 660 */ 5, 5, 105, 139, 16, 140, 5, 5, 5, 5, + /* 670 */ 5, 138, 5, 5, 140, 5, 5, 5, 5, 5, + /* 680 */ 139, 138, 5, 5, 5, 5, 5, 109, 80, 60, + /* 690 */ 21, 59, 0, 264, 264, 264, 21, 264, 264, 264, /* 700 */ 264, 264, 264, 264, 264, 264, 264, 264, 264, 264, /* 710 */ 264, 264, 264, 264, 264, 264, 264, 264, 264, 264, /* 720 */ 264, 264, 264, 264, 264, 264, 264, 264, 264, 264, @@ -376,104 +376,106 @@ static const YYCODETYPE yy_lookahead[] = { /* 850 */ 264, 264, 264, 264, 264, 264, 264, 264, 264, 264, /* 860 */ 264, 264, 264, 264, 264, 264, 264, 264, 264, 264, /* 870 */ 264, 264, 264, 264, 264, 264, 264, 264, 264, 264, - /* 880 */ 264, 264, + /* 880 */ 264, 264, 264, 264, 264, }; -#define YY_SHIFT_COUNT (326) +#define YY_SHIFT_COUNT (330) #define YY_SHIFT_MIN (0) -#define YY_SHIFT_MAX (687) +#define YY_SHIFT_MAX (692) static const unsigned short int yy_shift_ofst[] = { /* 0 */ 168, 79, 79, 180, 180, 3, 221, 230, 110, 121, /* 10 */ 121, 121, 121, 121, 121, 121, 121, 121, 0, 48, /* 20 */ 230, 283, 283, 283, 283, 45, 45, 121, 121, 121, - /* 30 */ 29, 121, 121, 171, 3, 65, 65, 694, 694, 694, + /* 30 */ 29, 121, 121, 171, 3, 8, 8, 697, 697, 697, /* 40 */ 230, 230, 230, 230, 230, 230, 230, 230, 230, 230, /* 50 */ 230, 230, 230, 230, 230, 230, 230, 230, 230, 230, /* 60 */ 283, 283, 25, 25, 25, 25, 25, 25, 25, 121, - /* 70 */ 121, 121, 281, 121, 121, 121, 45, 45, 121, 121, + /* 70 */ 121, 121, 174, 121, 121, 121, 45, 45, 121, 121, /* 80 */ 121, 276, 276, 194, 45, 121, 121, 121, 121, 121, /* 90 */ 121, 121, 121, 121, 121, 121, 121, 121, 121, 121, /* 100 */ 121, 121, 121, 121, 121, 121, 121, 121, 121, 121, /* 110 */ 121, 121, 121, 121, 121, 121, 121, 121, 121, 121, /* 120 */ 121, 121, 121, 121, 121, 121, 121, 121, 121, 121, - /* 130 */ 121, 121, 121, 121, 121, 121, 121, 419, 419, 419, - /* 140 */ 371, 371, 371, 419, 371, 419, 379, 383, 374, 377, - /* 150 */ 390, 394, 401, 404, 434, 428, 419, 419, 419, 388, - /* 160 */ 3, 3, 419, 419, 476, 480, 517, 485, 484, 516, - /* 170 */ 487, 490, 388, 419, 495, 495, 419, 495, 419, 495, - /* 180 */ 419, 419, 694, 694, 27, 100, 127, 100, 100, 53, - /* 190 */ 182, 223, 223, 223, 223, 237, 250, 273, 318, 318, - /* 200 */ 318, 318, 256, 215, 207, 207, 269, 361, 130, 21, - /* 210 */ 305, 325, 297, 154, 160, 298, 300, 306, 307, 308, - /* 220 */ 389, 288, 311, 301, 295, 309, 314, 316, 322, 324, - /* 230 */ 330, 238, 302, 303, 332, 310, 423, 439, 375, 570, - /* 240 */ 432, 574, 575, 435, 577, 581, 498, 500, 455, 478, - /* 250 */ 486, 488, 482, 489, 493, 506, 507, 512, 519, 520, - /* 260 */ 518, 522, 627, 525, 526, 528, 521, 510, 527, 511, - /* 270 */ 530, 533, 529, 534, 486, 535, 536, 537, 538, 566, - /* 280 */ 635, 640, 642, 643, 644, 645, 571, 637, 578, 513, - /* 290 */ 546, 546, 641, 523, 524, 651, 531, 539, 546, 653, - /* 300 */ 654, 555, 546, 656, 657, 660, 661, 662, 663, 664, - /* 310 */ 666, 667, 668, 669, 670, 671, 673, 674, 675, 676, - /* 320 */ 573, 603, 665, 672, 624, 626, 687, + /* 130 */ 121, 121, 121, 121, 121, 121, 121, 121, 424, 424, + /* 140 */ 424, 373, 373, 373, 424, 373, 424, 377, 376, 380, + /* 150 */ 387, 388, 378, 393, 403, 442, 420, 
424, 424, 424, + /* 160 */ 459, 3, 3, 424, 424, 478, 482, 519, 487, 486, + /* 170 */ 518, 489, 493, 459, 424, 498, 498, 424, 498, 424, + /* 180 */ 498, 424, 424, 697, 697, 27, 100, 127, 100, 100, + /* 190 */ 53, 182, 223, 223, 223, 223, 237, 250, 273, 318, + /* 200 */ 318, 318, 318, 256, 258, 215, 207, 207, 269, 361, + /* 210 */ 130, 305, 312, 297, 154, 160, 298, 299, 300, 306, + /* 220 */ 308, 216, 322, 303, 281, 301, 313, 314, 315, 316, + /* 230 */ 323, 97, 289, 293, 296, 331, 302, 434, 438, 370, + /* 240 */ 574, 435, 575, 577, 439, 583, 584, 501, 503, 458, + /* 250 */ 483, 488, 508, 502, 492, 511, 509, 510, 515, 522, + /* 260 */ 523, 520, 526, 626, 527, 528, 530, 529, 512, 531, + /* 270 */ 513, 532, 535, 521, 537, 488, 538, 536, 540, 539, + /* 280 */ 569, 637, 643, 644, 645, 646, 647, 573, 639, 579, + /* 290 */ 516, 652, 524, 533, 549, 549, 648, 525, 534, 654, + /* 300 */ 541, 543, 549, 655, 656, 557, 549, 661, 662, 663, + /* 310 */ 664, 665, 667, 668, 670, 671, 672, 673, 674, 677, + /* 320 */ 678, 679, 680, 681, 578, 608, 669, 675, 629, 632, + /* 330 */ 692, }; -#define YY_REDUCE_COUNT (183) +#define YY_REDUCE_COUNT (184) #define YY_REDUCE_MIN (-247) -#define YY_REDUCE_MAX (424) +#define YY_REDUCE_MAX (423) static const short yy_reduce_ofst[] = { /* 0 */ -178, -27, -27, 74, 74, 99, -244, -217, -119, -76, - /* 10 */ -176, -128, -8, 34, 114, 132, 142, 148, -191, -188, - /* 20 */ -221, -204, 155, 159, 163, -227, -16, -46, 149, 20, - /* 30 */ -113, -84, -123, 4, -193, 162, 179, -162, 183, 190, - /* 40 */ -247, -240, -89, -51, -35, 32, 37, 101, 109, 187, - /* 50 */ 193, 195, 197, 201, 202, 203, 204, 205, 206, 208, - /* 60 */ 184, 199, 232, 234, 235, 236, 239, 240, 241, 274, - /* 70 */ 278, 279, 213, 280, 282, 284, 242, 243, 289, 290, - /* 80 */ 291, 212, 214, 245, 251, 293, 296, 299, 304, 312, - /* 90 */ 313, 315, 317, 319, 320, 321, 323, 326, 327, 328, - /* 100 */ 329, 331, 333, 334, 335, 336, 337, 338, 339, 340, - /* 110 */ 341, 342, 343, 344, 345, 346, 347, 348, 349, 350, - /* 120 */ 351, 352, 353, 354, 355, 356, 357, 358, 359, 360, - /* 130 */ 362, 363, 364, 365, 366, 367, 368, 294, 369, 370, - /* 140 */ 233, 244, 246, 372, 249, 373, 248, 247, 252, 264, - /* 150 */ 253, 376, 378, 380, 382, 254, 381, 384, 385, 267, - /* 160 */ 386, 387, 393, 395, 391, 396, 392, 398, 397, 402, - /* 170 */ 400, 405, 399, 403, 408, 414, 416, 417, 420, 418, - /* 180 */ 421, 422, 411, 424, + /* 10 */ -176, -128, -8, 34, 114, 132, 148, 149, -191, -188, + /* 20 */ -221, -204, 151, 152, 155, -227, -16, -46, 157, -33, + /* 30 */ -113, -84, -123, 173, -193, 189, 196, -162, -36, 183, + /* 40 */ -247, -240, -106, 32, 101, 106, 186, 192, 197, 198, + /* 50 */ 199, 200, 201, 202, 203, 204, 205, 206, 208, 209, + /* 60 */ 143, 212, 232, 235, 236, 238, 239, 240, 241, 278, + /* 70 */ 279, 280, 213, 282, 284, 285, 243, 245, 290, 291, + /* 80 */ 292, 214, 220, 234, 251, 295, 304, 307, 309, 310, + /* 90 */ 311, 317, 319, 320, 321, 324, 325, 326, 327, 328, + /* 100 */ 329, 330, 332, 333, 334, 335, 336, 337, 338, 339, + /* 110 */ 340, 341, 342, 343, 344, 345, 346, 347, 348, 349, + /* 120 */ 350, 351, 352, 353, 354, 355, 356, 357, 358, 359, + /* 130 */ 360, 362, 363, 364, 365, 366, 367, 368, 369, 371, + /* 140 */ 372, 231, 233, 247, 374, 248, 375, 244, 246, 242, + /* 150 */ 249, 252, 254, 379, 381, 383, 385, 382, 384, 386, + /* 160 */ 266, 389, 390, 391, 392, 394, 396, 395, 397, 399, + /* 170 */ 402, 404, 405, 400, 398, 401, 407, 406, 414, 417, + /* 180 */ 418, 421, 422, 412, 423, }; static const YYACTIONTYPE yy_default[] = { - 
/* 0 */ 797, 910, 856, 922, 844, 853, 1056, 1056, 797, 797, - /* 10 */ 797, 797, 797, 797, 797, 797, 797, 797, 969, 816, - /* 20 */ 1056, 797, 797, 797, 797, 797, 797, 797, 797, 797, - /* 30 */ 853, 797, 797, 859, 853, 859, 859, 964, 894, 912, - /* 40 */ 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, - /* 50 */ 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, - /* 60 */ 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, - /* 70 */ 797, 797, 971, 977, 974, 797, 797, 797, 979, 797, - /* 80 */ 797, 999, 999, 962, 797, 797, 797, 797, 797, 797, - /* 90 */ 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, - /* 100 */ 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, - /* 110 */ 797, 797, 797, 842, 797, 840, 797, 797, 797, 797, - /* 120 */ 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, - /* 130 */ 827, 797, 797, 797, 797, 797, 797, 818, 818, 818, - /* 140 */ 797, 797, 797, 818, 797, 818, 1006, 1010, 1004, 992, - /* 150 */ 1000, 991, 987, 985, 984, 1014, 818, 818, 818, 857, - /* 160 */ 853, 853, 818, 818, 875, 873, 871, 863, 869, 865, - /* 170 */ 867, 861, 845, 818, 851, 851, 818, 851, 818, 851, - /* 180 */ 818, 818, 894, 912, 797, 1015, 797, 1055, 1005, 1045, - /* 190 */ 1044, 1051, 1043, 1042, 1041, 797, 797, 797, 1037, 1038, - /* 200 */ 1040, 1039, 797, 797, 1047, 1046, 797, 797, 797, 797, - /* 210 */ 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, - /* 220 */ 797, 1017, 797, 1011, 1007, 797, 797, 797, 797, 797, - /* 230 */ 797, 797, 797, 797, 924, 797, 797, 797, 797, 797, - /* 240 */ 797, 797, 797, 797, 797, 797, 797, 797, 797, 961, - /* 250 */ 797, 797, 797, 797, 797, 973, 972, 797, 797, 797, - /* 260 */ 797, 797, 797, 797, 797, 797, 1001, 797, 993, 797, - /* 270 */ 797, 797, 797, 797, 936, 797, 797, 797, 797, 797, - /* 280 */ 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, - /* 290 */ 1068, 1066, 797, 797, 797, 797, 797, 797, 1062, 797, - /* 300 */ 797, 797, 1059, 797, 797, 797, 797, 797, 797, 797, - /* 310 */ 797, 797, 797, 797, 797, 797, 797, 797, 797, 797, - /* 320 */ 878, 797, 825, 823, 797, 814, 797, + /* 0 */ 803, 916, 862, 928, 850, 859, 1062, 1062, 803, 803, + /* 10 */ 803, 803, 803, 803, 803, 803, 803, 803, 975, 822, + /* 20 */ 1062, 803, 803, 803, 803, 803, 803, 803, 803, 803, + /* 30 */ 859, 803, 803, 865, 859, 865, 865, 970, 900, 918, + /* 40 */ 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, + /* 50 */ 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, + /* 60 */ 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, + /* 70 */ 803, 803, 977, 983, 980, 803, 803, 803, 985, 803, + /* 80 */ 803, 1005, 1005, 968, 803, 803, 803, 803, 803, 803, + /* 90 */ 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, + /* 100 */ 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, + /* 110 */ 803, 803, 803, 803, 848, 803, 846, 803, 803, 803, + /* 120 */ 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, + /* 130 */ 803, 833, 803, 803, 803, 803, 803, 803, 824, 824, + /* 140 */ 824, 803, 803, 803, 824, 803, 824, 1012, 1016, 1010, + /* 150 */ 998, 1006, 997, 993, 991, 990, 1020, 824, 824, 824, + /* 160 */ 863, 859, 859, 824, 824, 881, 879, 877, 869, 875, + /* 170 */ 871, 873, 867, 851, 824, 857, 857, 824, 857, 824, + /* 180 */ 857, 824, 824, 900, 918, 803, 1021, 803, 1061, 1011, + /* 190 */ 1051, 1050, 1057, 1049, 1048, 1047, 803, 803, 803, 1043, + /* 200 */ 1044, 1046, 1045, 803, 803, 803, 1053, 1052, 803, 803, + /* 210 */ 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, + /* 220 */ 803, 803, 1023, 803, 1017, 1013, 803, 803, 803, 803, + /* 230 */ 803, 803, 803, 803, 803, 930, 803, 803, 803, 803, 
+ /* 240 */ 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, + /* 250 */ 967, 803, 803, 803, 803, 803, 979, 978, 803, 803, + /* 260 */ 803, 803, 803, 803, 803, 803, 803, 1007, 803, 999, + /* 270 */ 803, 803, 803, 803, 803, 942, 803, 803, 803, 803, + /* 280 */ 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, + /* 290 */ 803, 803, 803, 803, 1075, 1072, 803, 803, 803, 803, + /* 300 */ 803, 803, 1068, 803, 803, 803, 1065, 803, 803, 803, + /* 310 */ 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, + /* 320 */ 803, 803, 803, 803, 884, 803, 831, 829, 803, 820, + /* 330 */ 803, }; /********** End of lemon-generated parsing tables *****************************/ @@ -1307,12 +1309,13 @@ static const char *const yyRuleName[] = { /* 265 */ "cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem", /* 266 */ "cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist", /* 267 */ "cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids", - /* 268 */ "cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist", - /* 269 */ "cmd ::= ALTER STABLE ids cpxName DROP TAG ids", - /* 270 */ "cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids", - /* 271 */ "cmd ::= KILL CONNECTION INTEGER", - /* 272 */ "cmd ::= KILL STREAM INTEGER COLON INTEGER", - /* 273 */ "cmd ::= KILL QUERY INTEGER COLON INTEGER", + /* 268 */ "cmd ::= ALTER STABLE ids cpxName ALTER COLUMN LENGTH ids INTEGER", + /* 269 */ "cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist", + /* 270 */ "cmd ::= ALTER STABLE ids cpxName DROP TAG ids", + /* 271 */ "cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids", + /* 272 */ "cmd ::= KILL CONNECTION INTEGER", + /* 273 */ "cmd ::= KILL STREAM INTEGER COLON INTEGER", + /* 274 */ "cmd ::= KILL QUERY INTEGER COLON INTEGER", }; #endif /* NDEBUG */ @@ -2047,12 +2050,13 @@ static const YYCODETYPE yyRuleInfoLhs[] = { 189, /* (265) cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem */ 189, /* (266) cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist */ 189, /* (267) cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids */ - 189, /* (268) cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist */ - 189, /* (269) cmd ::= ALTER STABLE ids cpxName DROP TAG ids */ - 189, /* (270) cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids */ - 189, /* (271) cmd ::= KILL CONNECTION INTEGER */ - 189, /* (272) cmd ::= KILL STREAM INTEGER COLON INTEGER */ - 189, /* (273) cmd ::= KILL QUERY INTEGER COLON INTEGER */ + 189, /* (268) cmd ::= ALTER STABLE ids cpxName ALTER COLUMN LENGTH ids INTEGER */ + 189, /* (269) cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist */ + 189, /* (270) cmd ::= ALTER STABLE ids cpxName DROP TAG ids */ + 189, /* (271) cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids */ + 189, /* (272) cmd ::= KILL CONNECTION INTEGER */ + 189, /* (273) cmd ::= KILL STREAM INTEGER COLON INTEGER */ + 189, /* (274) cmd ::= KILL QUERY INTEGER COLON INTEGER */ }; /* For rule J, yyRuleInfoNRhs[J] contains the negative of the number @@ -2326,12 +2330,13 @@ static const signed char yyRuleInfoNRhs[] = { -9, /* (265) cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem */ -7, /* (266) cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist */ -7, /* (267) cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids */ - -7, /* (268) cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist */ - -7, /* (269) cmd ::= ALTER STABLE ids cpxName DROP TAG ids */ - -8, /* (270) cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids */ - -3, /* (271) cmd ::= KILL CONNECTION INTEGER */ - -5, /* (272) cmd ::= KILL STREAM INTEGER COLON INTEGER */ - -5, /* (273) cmd ::= KILL 
QUERY INTEGER COLON INTEGER */ + -9, /* (268) cmd ::= ALTER STABLE ids cpxName ALTER COLUMN LENGTH ids INTEGER */ + -7, /* (269) cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist */ + -7, /* (270) cmd ::= ALTER STABLE ids cpxName DROP TAG ids */ + -8, /* (271) cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids */ + -3, /* (272) cmd ::= KILL CONNECTION INTEGER */ + -5, /* (273) cmd ::= KILL STREAM INTEGER COLON INTEGER */ + -5, /* (274) cmd ::= KILL QUERY INTEGER COLON INTEGER */ }; static void yy_accept(yyParser*); /* Forward Declaration */ @@ -3410,14 +3415,27 @@ static YYACTIONTYPE yy_reduce( setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 268: /* cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist */ + case 268: /* cmd ::= ALTER STABLE ids cpxName ALTER COLUMN LENGTH ids INTEGER */ +{ + yymsp[-6].minor.yy0.n += yymsp[-5].minor.yy0.n; + + toTSDBType(yymsp[-1].minor.yy0.type); + SArray* K = tVariantListAppendToken(NULL, &yymsp[-1].minor.yy0, -1); + toTSDBType(yymsp[0].minor.yy0.type); + K = tVariantListAppendToken(K, &yymsp[0].minor.yy0, -1); + + SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-6].minor.yy0, K, NULL, TSDB_ALTER_TABLE_CHANGE_COLUMN, TSDB_SUPER_TABLE); + setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); +} + break; + case 269: /* cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy285, NULL, TSDB_ALTER_TABLE_ADD_TAG_COLUMN, TSDB_SUPER_TABLE); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 269: /* cmd ::= ALTER STABLE ids cpxName DROP TAG ids */ + case 270: /* cmd ::= ALTER STABLE ids cpxName DROP TAG ids */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; @@ -3428,7 +3446,7 @@ static YYACTIONTYPE yy_reduce( setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 270: /* cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids */ + case 271: /* cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids */ { yymsp[-5].minor.yy0.n += yymsp[-4].minor.yy0.n; @@ -3442,13 +3460,13 @@ static YYACTIONTYPE yy_reduce( setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 271: /* cmd ::= KILL CONNECTION INTEGER */ + case 272: /* cmd ::= KILL CONNECTION INTEGER */ {setKillSql(pInfo, TSDB_SQL_KILL_CONNECTION, &yymsp[0].minor.yy0);} break; - case 272: /* cmd ::= KILL STREAM INTEGER COLON INTEGER */ + case 273: /* cmd ::= KILL STREAM INTEGER COLON INTEGER */ {yymsp[-2].minor.yy0.n += (yymsp[-1].minor.yy0.n + yymsp[0].minor.yy0.n); setKillSql(pInfo, TSDB_SQL_KILL_STREAM, &yymsp[-2].minor.yy0);} break; - case 273: /* cmd ::= KILL QUERY INTEGER COLON INTEGER */ + case 274: /* cmd ::= KILL QUERY INTEGER COLON INTEGER */ {yymsp[-2].minor.yy0.n += (yymsp[-1].minor.yy0.n + yymsp[0].minor.yy0.n); setKillSql(pInfo, TSDB_SQL_KILL_QUERY, &yymsp[-2].minor.yy0);} break; default: diff --git a/tests/script/general/parser/alter_column.sim b/tests/script/general/parser/alter_column.sim index 12c117c096..7f30498d06 100644 --- a/tests/script/general/parser/alter_column.sim +++ b/tests/script/general/parser/alter_column.sim @@ -38,7 +38,7 @@ endi sql create stable stb (ts timestamp, c1 int, c2 binary(10), c3 nchar(10)) tags(id int) sql create table tb1 using stb tags(1) sql insert into tb1 values (now, 1, "1", "1") -sql alter table stb alter column length c2 20; +sql alter stable stb alter column length c2 20; if $rows != 0 then return -1 endi @@ -47,7 +47,9 @@ 
endi # try dropping columns that are defined in metric sql_error alter table tb alter column length c1 10; +sql_error alter stable tb alter column length c2 10; sql_error alter table tb1 alter column length c2 10; +sql_error alter stable tb1 alter column length c2 10; system sh/exec.sh -n dnode1 -s stop -x SIGINT From 7f3fab499389316cd6b37d471ec42147ad758e7a Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Mon, 31 May 2021 11:39:32 +0800 Subject: [PATCH 42/82] Feature/sangshuduo/td 4068 taosdemo stmt for master (#6300) * [TD-4068]: taosdemo support stmt. for easy merge purpose. disabled in master. * fix clang compile error. * fix memory leak, add more macros. change sqlcount to int * fix rest segfault. Co-authored-by: Shuduo Sang --- src/kit/taosdemo/taosdemo.c | 32 +++++++++++++++++--------------- 1 file changed, 17 insertions(+), 15 deletions(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index f0b8cf7ca9..993bb40ee2 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -2910,8 +2910,8 @@ static void* createTable(void *sarg) int buff_len; buff_len = BUFFER_SIZE / 8; - char *buffer = calloc(buff_len, 1); - if (buffer == NULL) { + pThreadInfo->buffer = calloc(buff_len, 1); + if (pThreadInfo->buffer == NULL) { errorPrint("%s() LN%d, Memory allocated failed!\n", __func__, __LINE__); exit(-1); } @@ -2926,7 +2926,7 @@ static void* createTable(void *sarg) for (uint64_t i = pThreadInfo->start_table_from; i <= pThreadInfo->end_table_to; i++) { if (0 == g_Dbs.use_metric) { - snprintf(buffer, buff_len, + snprintf(pThreadInfo->buffer, buff_len, "create table if not exists %s.%s%"PRIu64" %s;", pThreadInfo->db_name, g_args.tb_prefix, i, @@ -2935,13 +2935,13 @@ static void* createTable(void *sarg) if (superTblInfo == NULL) { errorPrint("%s() LN%d, use metric, but super table info is NULL\n", __func__, __LINE__); - free(buffer); + free(pThreadInfo->buffer); exit(-1); } else { if (0 == len) { batchNum = 0; - memset(buffer, 0, buff_len); - len += snprintf(buffer + len, + memset(pThreadInfo->buffer, 0, buff_len); + len += snprintf(pThreadInfo->buffer + len, buff_len - len, "create table "); } char* tagsValBuf = NULL; @@ -2953,10 +2953,10 @@ static void* createTable(void *sarg) i % superTblInfo->tagSampleCount); } if (NULL == tagsValBuf) { - free(buffer); + free(pThreadInfo->buffer); return NULL; } - len += snprintf(buffer + len, + len += snprintf(pThreadInfo->buffer + len, buff_len - len, "if not exists %s.%s%"PRIu64" using %s.%s tags %s ", pThreadInfo->db_name, superTblInfo->childTblPrefix, @@ -2973,9 +2973,10 @@ static void* createTable(void *sarg) } len = 0; - if (0 != queryDbExec(pThreadInfo->taos, buffer, NO_INSERT_TYPE, false)){ - errorPrint( "queryDbExec() failed. buffer:\n%s\n", buffer); - free(buffer); + if (0 != queryDbExec(pThreadInfo->taos, pThreadInfo->buffer, + NO_INSERT_TYPE, false)){ + errorPrint( "queryDbExec() failed. buffer:\n%s\n", pThreadInfo->buffer); + free(pThreadInfo->buffer); return NULL; } @@ -2988,12 +2989,13 @@ static void* createTable(void *sarg) } if (0 != len) { - if (0 != queryDbExec(pThreadInfo->taos, buffer, NO_INSERT_TYPE, false)) { - errorPrint( "queryDbExec() failed. buffer:\n%s\n", buffer); + if (0 != queryDbExec(pThreadInfo->taos, pThreadInfo->buffer, + NO_INSERT_TYPE, false)) { + errorPrint( "queryDbExec() failed. 
buffer:\n%s\n", pThreadInfo->buffer); } } - free(buffer); + free(pThreadInfo->buffer); return NULL; } @@ -4932,7 +4934,7 @@ static int32_t execInsert(threadInfo *pThreadInfo, uint32_t k) case REST_IFACE: if (0 != postProceSql(g_Dbs.host, &g_Dbs.serv_addr, g_Dbs.port, - pThreadInfo->buffer, NULL /* not set result file */)) { + pThreadInfo->buffer, pThreadInfo)) { affectedRows = -1; printf("========restful return fail, threadID[%d]\n", pThreadInfo->threadID); From 5276ccc334775167313e641313353193ac9b2b31 Mon Sep 17 00:00:00 2001 From: lichuang Date: Mon, 31 May 2021 11:56:19 +0800 Subject: [PATCH 43/82] [TD-1568]fix tdMergeDataCols bug --- src/common/src/tdataformat.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/common/src/tdataformat.c b/src/common/src/tdataformat.c index db73905119..7ae34d532c 100644 --- a/src/common/src/tdataformat.c +++ b/src/common/src/tdataformat.c @@ -462,8 +462,8 @@ int tdMergeDataCols(SDataCols *target, SDataCols *source, int rowsToMerge, int * } } target->numOfRows++; - (*pOffset)++; } + (*pOffset) += rowsToMerge; } else { pTarget = tdDupDataCols(target, true); if (pTarget == NULL) goto _err; From cd76a29533d5575990de44a11c8dfe0da48501de Mon Sep 17 00:00:00 2001 From: Steven Li Date: Mon, 31 May 2021 06:22:20 +0000 Subject: [PATCH 44/82] Adjust table locking in crash_gen to expose same-connection consistency issues, supporting TD-4444 --- src/connector/python/taos/__init__.py | 4 + tests/pytest/crash_gen/crash_gen_main.py | 139 ++++++++++++++++------ tests/pytest/crash_gen/service_manager.py | 10 +- tests/pytest/crash_gen/shared/misc.py | 3 +- 4 files changed, 115 insertions(+), 41 deletions(-) diff --git a/src/connector/python/taos/__init__.py b/src/connector/python/taos/__init__.py index 9732635738..52c6db311e 100644 --- a/src/connector/python/taos/__init__.py +++ b/src/connector/python/taos/__init__.py @@ -2,6 +2,10 @@ from .connection import TDengineConnection from .cursor import TDengineCursor +# For some reason, the following is needed for VS Code (through PyLance) to +# recognize that "error" is a valid module of the "taos" package. 
+from .error import ProgrammingError + # Globals threadsafety = 0 paramstyle = 'pyformat' diff --git a/tests/pytest/crash_gen/crash_gen_main.py b/tests/pytest/crash_gen/crash_gen_main.py index 644aa79916..b743eee2ef 100755 --- a/tests/pytest/crash_gen/crash_gen_main.py +++ b/tests/pytest/crash_gen/crash_gen_main.py @@ -37,6 +37,7 @@ import requests import gc import taos + from .shared.types import TdColumns, TdTags # from crash_gen import ServiceManager, TdeInstance, TdeSubProcess @@ -160,6 +161,7 @@ class WorkerThread: Logging.debug("[TRD] Thread Coordinator not running any more, worker thread now stopping...") break + # Before we fetch the task and run it, let's ensure we properly "use" the database (not needed any more) try: if (Config.getConfig().per_thread_db_connection): # most likely TRUE @@ -1362,9 +1364,12 @@ class Task(): Progress.emit(Progress.ACCEPTABLE_ERROR) self._err = err else: # not an acceptable error - errMsg = "[=] Unexpected Taos library exception ({}): errno=0x{:X}, msg: {}, SQL: {}".format( + shortTid = threading.get_ident() % 10000 + errMsg = "[=] Unexpected Taos library exception ({}): errno=0x{:X}, thread={}, msg: {}, SQL: {}".format( self.__class__.__name__, - errno2, err, wt.getDbConn().getLastSql()) + errno2, + shortTid, + err, wt.getDbConn().getLastSql()) self.logDebug(errMsg) if Config.getConfig().debug: # raise # so that we see full stack @@ -1411,21 +1416,31 @@ class Task(): def lockTable(self, ftName): # full table name # print(" <<" + ftName + '_', end="", flush=True) - with Task._lock: - if not ftName in Task._tableLocks: + with Task._lock: # SHORT lock! so we only protect lock creation + if not ftName in Task._tableLocks: # Create new lock and add to list, if needed Task._tableLocks[ftName] = threading.Lock() - Task._tableLocks[ftName].acquire() + # No lock protection, anybody can do this any time + lock = Task._tableLocks[ftName] + # Logging.info("Acquiring lock: {}, {}".format(ftName, lock)) + lock.acquire() + # Logging.info("Acquiring lock successful: {}".format(lock)) def unlockTable(self, ftName): # print('_' + ftName + ">> ", end="", flush=True) - with Task._lock: + with Task._lock: if not ftName in self._tableLocks: raise RuntimeError("Corrupt state, no such lock") lock = Task._tableLocks[ftName] if not lock.locked(): raise RuntimeError("Corrupte state, already unlocked") - lock.release() + + # Important note, we want to protect unlocking under the task level + # locking, because we don't want the lock to be deleted (maybe in the futur) + # while we unlock it + # Logging.info("Releasing lock: {}".format(lock)) + lock.release() + # Logging.info("Releasing lock successful: {}".format(lock)) class ExecutionStats: @@ -1696,6 +1711,11 @@ class TdSuperTable: return dbc.query("SELECT * FROM {}.{}".format(self._dbName, self._stName)) > 0 def ensureRegTable(self, task: Optional[Task], dbc: DbConn, regTableName: str): + ''' + Make sure a regular table exists for this super table, creating it if necessary. + If there is an associated "Task" that wants to do this, "lock" this table so that + others don't access it while we create it. + ''' dbName = self._dbName sql = "select tbname from {}.{} where tbname in ('{}')".format(dbName, self._stName, regTableName) if dbc.query(sql) >= 1 : # reg table exists already @@ -1703,18 +1723,24 @@ class TdSuperTable: # acquire a lock first, so as to be able to *verify*. More details in TD-1471 fullTableName = dbName + '.' 
+ regTableName - if task is not None: # TODO: what happens if we don't lock the table - task.lockTable(fullTableName) + if task is not None: # Somethime thie operation is requested on behalf of a "task" + # Logging.info("Locking table for creation: {}".format(fullTableName)) + task.lockTable(fullTableName) # in which case we'll lock this table to ensure serialized access + # Logging.info("Table locked for creation".format(fullTableName)) Progress.emit(Progress.CREATE_TABLE_ATTEMPT) # ATTEMPT to create a new table # print("(" + fullTableName[-3:] + ")", end="", flush=True) try: sql = "CREATE TABLE {} USING {}.{} tags ({})".format( fullTableName, dbName, self._stName, self._getTagStrForSql(dbc) ) + # Logging.info("Creating regular with SQL: {}".format(sql)) dbc.execute(sql) + # Logging.info("Regular table created: {}".format(sql)) finally: if task is not None: + # Logging.info("Unlocking table after creation: {}".format(fullTableName)) task.unlockTable(fullTableName) # no matter what + # Logging.info("Table unlocked after creation: {}".format(fullTableName)) def _getTagStrForSql(self, dbc) : tags = self._getTags(dbc) @@ -2011,9 +2037,30 @@ class TaskAddData(StateTransitionTask): def canBeginFrom(cls, state: AnyState): return state.canAddData() + def _lockTableIfNeeded(self, fullTableName, extraMsg = ''): + if Config.getConfig().verify_data: + # Logging.info("Locking table: {}".format(fullTableName)) + self.lockTable(fullTableName) + # Logging.info("Table locked {}: {}".format(extraMsg, fullTableName)) + # print("_w" + str(nextInt % 100), end="", flush=True) # Trace what was written + else: + # Logging.info("Skipping locking table") + pass + + def _unlockTableIfNeeded(self, fullTableName): + if Config.getConfig().verify_data: + # Logging.info("Unlocking table: {}".format(fullTableName)) + self.unlockTable(fullTableName) + # Logging.info("Table unlocked: {}".format(fullTableName)) + else: + pass + # Logging.info("Skipping unlocking table") + def _addDataInBatch(self, db, dbc, regTableName, te: TaskExecutor): numRecords = self.LARGE_NUMBER_OF_RECORDS if Config.getConfig().larger_data else self.SMALL_NUMBER_OF_RECORDS + fullTableName = db.getName() + '.' 
+ regTableName + self._lockTableIfNeeded(fullTableName, 'batch') sql = "INSERT INTO {} VALUES ".format(fullTableName) for j in range(numRecords): # number of records per table @@ -2021,51 +2068,60 @@ class TaskAddData(StateTransitionTask): nextTick = db.getNextTick() nextColor = db.getNextColor() sql += "('{}', {}, '{}');".format(nextTick, nextInt, nextColor) - dbc.execute(sql) + + # Logging.info("Adding data in batch: {}".format(sql)) + try: + dbc.execute(sql) + finally: + # Logging.info("Data added in batch: {}".format(sql)) + self._unlockTableIfNeeded(fullTableName) + + def _addData(self, db: Database, dbc, regTableName, te: TaskExecutor): # implied: NOT in batches numRecords = self.LARGE_NUMBER_OF_RECORDS if Config.getConfig().larger_data else self.SMALL_NUMBER_OF_RECORDS for j in range(numRecords): # number of records per table - nextInt = db.getNextInt() + intToWrite = db.getNextInt() nextTick = db.getNextTick() nextColor = db.getNextColor() if Config.getConfig().record_ops: self.prepToRecordOps() if self.fAddLogReady is None: raise CrashGenError("Unexpected empty fAddLogReady") - self.fAddLogReady.write("Ready to write {} to {}\n".format(nextInt, regTableName)) + self.fAddLogReady.write("Ready to write {} to {}\n".format(intToWrite, regTableName)) self.fAddLogReady.flush() os.fsync(self.fAddLogReady.fileno()) # TODO: too ugly trying to lock the table reliably, refactor... fullTableName = db.getName() + '.' + regTableName - if Config.getConfig().verify_data: - self.lockTable(fullTableName) - # print("_w" + str(nextInt % 100), end="", flush=True) # Trace what was written - + self._lockTableIfNeeded(fullTableName) # so that we are verify read-back. TODO: deal with exceptions before unlock + try: sql = "INSERT INTO {} VALUES ('{}', {}, '{}');".format( # removed: tags ('{}', {}) fullTableName, # ds.getFixedSuperTableName(), # ds.getNextBinary(), ds.getNextFloat(), - nextTick, nextInt, nextColor) + nextTick, intToWrite, nextColor) + # Logging.info("Adding data: {}".format(sql)) dbc.execute(sql) + # Logging.info("Data added: {}".format(sql)) + intWrote = intToWrite # Quick hack, attach an update statement here. TODO: create an "update" task if (not Config.getConfig().use_shadow_db) and Dice.throw(5) == 0: # 1 in N chance, plus not using shaddow DB - nextInt = db.getNextInt() + intToUpdate = db.getNextInt() # Updated, but should not succeed nextColor = db.getNextColor() sql = "INSERt INTO {} VALUES ('{}', {}, '{}');".format( # "INSERt" means "update" here fullTableName, - nextTick, nextInt, nextColor) + nextTick, intToUpdate, nextColor) # sql = "UPDATE {} set speed={}, color='{}' WHERE ts='{}'".format( # fullTableName, db.getNextInt(), db.getNextColor(), nextTick) dbc.execute(sql) + intWrote = intToUpdate # We updated, seems TDengine non-cluster accepts this. except: # Any exception at all - if Config.getConfig().verify_data: - self.unlockTable(fullTableName) + self._unlockTableIfNeeded(fullTableName) raise # Now read it back and verify, we might encounter an error if table is dropped @@ -2073,33 +2129,41 @@ class TaskAddData(StateTransitionTask): try: readBack = dbc.queryScalar("SELECT speed from {}.{} WHERE ts='{}'". 
format(db.getName(), regTableName, nextTick)) - if readBack != nextInt : + if readBack != intWrote : raise taos.error.ProgrammingError( "Failed to read back same data, wrote: {}, read: {}" - .format(nextInt, readBack), 0x999) + .format(intWrote, readBack), 0x999) except taos.error.ProgrammingError as err: errno = Helper.convertErrno(err.errno) - if errno in [CrashGenError.INVALID_EMPTY_RESULT, CrashGenError.INVALID_MULTIPLE_RESULT] : # not a single result + if errno == CrashGenError.INVALID_EMPTY_RESULT: # empty result raise taos.error.ProgrammingError( - "Failed to read back same data for tick: {}, wrote: {}, read: {}" - .format(nextTick, nextInt, "Empty Result" if errno == CrashGenError.INVALID_EMPTY_RESULT else "Multiple Result"), + "Failed to read back same data for tick: {}, wrote: {}, read: EMPTY" + .format(nextTick, intWrote), + errno) + elif errno == CrashGenError.INVALID_MULTIPLE_RESULT : # multiple results + raise taos.error.ProgrammingError( + "Failed to read back same data for tick: {}, wrote: {}, read: MULTIPLE RESULTS" + .format(nextTick, intWrote), errno) elif errno in [0x218, 0x362]: # table doesn't exist # do nothing - dummy = 0 + pass else: # Re-throw otherwise raise finally: - self.unlockTable(fullTableName) # Unlock the table no matter what + self._unlockTableIfNeeded(fullTableName) # Quite ugly, refactor lock/unlock + # Done with read-back verification, unlock the table now + else: + self._unlockTableIfNeeded(fullTableName) # Successfully wrote the data into the DB, let's record it somehow - te.recordDataMark(nextInt) + te.recordDataMark(intWrote) if Config.getConfig().record_ops: if self.fAddLogDone is None: raise CrashGenError("Unexpected empty fAddLogDone") - self.fAddLogDone.write("Wrote {} to {}\n".format(nextInt, regTableName)) + self.fAddLogDone.write("Wrote {} to {}\n".format(intWrote, regTableName)) self.fAddLogDone.flush() os.fsync(self.fAddLogDone.fileno()) @@ -2137,15 +2201,16 @@ class TaskAddData(StateTransitionTask): class ThreadStacks: # stack info for all threads def __init__(self): self._allStacks = {} - allFrames = sys._current_frames() - for th in threading.enumerate(): + allFrames = sys._current_frames() # All current stack frames + for th in threading.enumerate(): # For each thread if th.ident is None: continue - stack = traceback.extract_stack(allFrames[th.ident]) - self._allStacks[th.native_id] = stack + stack = traceback.extract_stack(allFrames[th.ident]) # Get stack for a thread + shortTid = th.ident % 10000 + self._allStacks[shortTid] = stack # Was using th.native_id def print(self, filteredEndName = None, filterInternal = False): - for thNid, stack in self._allStacks.items(): # for each thread, stack frames top to bottom + for tIdent, stack in self._allStacks.items(): # for each thread, stack frames top to bottom lastFrame = stack[-1] if filteredEndName: # we need to filter out stacks that match this name if lastFrame.name == filteredEndName : # end did not match @@ -2157,7 +2222,7 @@ class ThreadStacks: # stack info for all threads '__init__']: # the thread that extracted the stack continue # ignore # Now print - print("\n<----- Thread Info for LWP/ID: {} (most recent call last) <-----".format(thNid)) + print("\n<----- Thread Info for LWP/ID: {} (most recent call last) <-----".format(tIdent)) stackFrame = 0 for frame in stack: # was using: reversed(stack) # print(frame) @@ -2376,7 +2441,7 @@ class MainExec: action='store', default=0, type=int, - help='Maximum number of DBs to keep, set to disable dropping DB. 
(default: 0)') + help='Number of DBs to use, set to disable dropping DB. (default: 0)') parser.add_argument( '-c', '--connector-type', diff --git a/tests/pytest/crash_gen/service_manager.py b/tests/pytest/crash_gen/service_manager.py index 1cd65c1dde..c6685ec469 100644 --- a/tests/pytest/crash_gen/service_manager.py +++ b/tests/pytest/crash_gen/service_manager.py @@ -179,7 +179,7 @@ quorum 2 def getServiceCmdLine(self): # to start the instance if Config.getConfig().track_memory_leaks: Logging.info("Invoking VALGRIND on service...") - return ['exec /usr/bin/valgrind', '--leak-check=yes', self.getExecFile(), '-c', self.getCfgDir()] + return ['exec valgrind', '--leak-check=yes', self.getExecFile(), '-c', self.getCfgDir()] else: # TODO: move "exec -c" into Popen(), we can both "use shell" and NOT fork so ask to lose kill control return ["exec " + self.getExecFile(), '-c', self.getCfgDir()] # used in subproce.Popen() @@ -310,7 +310,7 @@ class TdeSubProcess: # print("Starting TDengine with env: ", myEnv.items()) print("Starting TDengine: {}".format(cmdLine)) - return Popen( + ret = Popen( ' '.join(cmdLine), # ' '.join(cmdLine) if useShell else cmdLine, shell=True, # Always use shell, since we need to pass ENV vars stdout=PIPE, @@ -318,6 +318,10 @@ class TdeSubProcess: close_fds=ON_POSIX, env=myEnv ) # had text=True, which interferred with reading EOF + time.sleep(0.01) # very brief wait, then let's check if sub process started successfully. + if ret.poll(): + raise CrashGenError("Sub process failed to start with command line: {}".format(cmdLine)) + return ret STOP_SIGNAL = signal.SIGINT # signal.SIGKILL/SIGINT # What signal to use (in kill) to stop a taosd process? SIG_KILL_RETCODE = 137 # ref: https://stackoverflow.com/questions/43268156/process-finished-with-exit-code-137-in-pycharm @@ -614,7 +618,7 @@ class ServiceManager: # Find if there's already a taosd service, and then kill it for proc in psutil.process_iter(): - if proc.name() == 'taosd': + if proc.name() == 'taosd' or proc.name() == 'memcheck-amd64-': # Regular or under Valgrind Logging.info("Killing an existing TAOSD process in 2 seconds... press CTRL-C to interrupt") time.sleep(2.0) proc.kill() diff --git a/tests/pytest/crash_gen/shared/misc.py b/tests/pytest/crash_gen/shared/misc.py index 90ad802ff1..78923bcc29 100644 --- a/tests/pytest/crash_gen/shared/misc.py +++ b/tests/pytest/crash_gen/shared/misc.py @@ -35,7 +35,8 @@ class LoggingFilter(logging.Filter): class MyLoggingAdapter(logging.LoggerAdapter): def process(self, msg, kwargs): - return "[{:04d}] {}".format(threading.get_ident() % 10000, msg), kwargs + shortTid = threading.get_ident() % 10000 + return "[{:04d}] {}".format(shortTid, msg), kwargs # return '[%s] %s' % (self.extra['connid'], msg), kwargs From 597e11a4492d094f0b96186ad0180396de29644b Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Mon, 31 May 2021 15:05:27 +0800 Subject: [PATCH 45/82] Hotfix/sangshuduo/td 4353 taosdemo subscribe resub (#6302) * [TD-4353]: taosdemo resub if resubAfterConsume != -1 * fix but resubafter use endafter section. 
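* A minimal query-meta sketch of where this option lives. Only "resubAfterConsume"
  and "endAfterConsume" come from this patch; the surrounding key names are
  assumptions drawn from the parsing code, not a shipped example:

    {
      "filetype": "query",
      "specified_table_query": {
        "sqls": [{"sql": "select count(*) from test.meters", "result": ""}],
        "endAfterConsume": 10,
        "resubAfterConsume": 5
      },
      "super_table_query": {
        "stblname": "meters",
        "sqls": [{"sql": "select count(*) from xxxx", "result": ""}],
        "resubAfterConsume": -1
      }
    }

  A value >= 0 makes the subscriber drop and re-create the subscription after that
  many consumes; the default -1 keeps the old behavior and never resubscribes.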
Co-authored-by: Shuduo Sang --- src/kit/taosdemo/taosdemo.c | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 993bb40ee2..dcfed8bc2a 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -4401,8 +4401,9 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { cJSON* resubAfterConsume = cJSON_GetObjectItem(specifiedQuery, "resubAfterConsume"); - if (resubAfterConsume - && resubAfterConsume->type == cJSON_Number) { + if ((resubAfterConsume) + && (resubAfterConsume->type == cJSON_Number) + && (resubAfterConsume->valueint >= 0)) { g_queryInfo.specifiedQueryInfo.resubAfterConsume[j] = resubAfterConsume->valueint; } else if (!resubAfterConsume) { @@ -4563,14 +4564,15 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { } cJSON* superResubAfterConsume = - cJSON_GetObjectItem(superQuery, "endAfterConsume"); - if (superResubAfterConsume - && superResubAfterConsume->type == cJSON_Number) { - g_queryInfo.superQueryInfo.endAfterConsume = + cJSON_GetObjectItem(superQuery, "resubAfterConsume"); + if ((superResubAfterConsume) + && (superResubAfterConsume->type == cJSON_Number) + && (superResubAfterConsume->valueint >= 0)) { + g_queryInfo.superQueryInfo.resubAfterConsume = superResubAfterConsume->valueint; } else if (!superResubAfterConsume) { // default value is -1, which mean do not resub - g_queryInfo.superQueryInfo.endAfterConsume = -1; + g_queryInfo.superQueryInfo.resubAfterConsume = -1; } // supert table sqls From ed9ff8917c03135e32a69f5a5df988024a216c41 Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Mon, 31 May 2021 07:33:08 +0000 Subject: [PATCH 46/82] [TD-4447]import the same csv twice --- tests/pytest/import_merge/importCSV.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/pytest/import_merge/importCSV.py b/tests/pytest/import_merge/importCSV.py index b4441949a1..24ebffd485 100644 --- a/tests/pytest/import_merge/importCSV.py +++ b/tests/pytest/import_merge/importCSV.py @@ -82,6 +82,8 @@ class TDTestCase: tdSql.execute("import into tbx file \'%s\'"%(self.csvfile)) tdSql.query('select * from tbx') tdSql.checkRows(self.rows) + #TD-4447 import the same csv twice + tdSql.execute("import into tbx file \'%s\'"%(self.csvfile)) def stop(self): self.destroyCSVFile() From 424cd0123b52eaf6b0e54947c812b08947cae108 Mon Sep 17 00:00:00 2001 From: dapan1121 <89396746@qq.com> Date: Mon, 31 May 2021 15:42:50 +0800 Subject: [PATCH 47/82] fix windows compile error --- src/client/src/tscSQLParser.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index b0ffab1298..978fbaf521 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -5353,9 +5353,9 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { } pItem = taosArrayGet(pAlterSQL->pAddColumns, 1); - int64_t nlen = 0; + int16_t nlen = 0; - if (tVariantDump(&pItem->pVar, (char *)&nlen, TSDB_DATA_TYPE_BIGINT, false) < 0 || nlen <= 0) { + if (tVariantDump(&pItem->pVar, (char *)&nlen, TSDB_DATA_TYPE_SMALLINT, false) < 0 || nlen <= 0) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg22); } From 37bde37c8532998aeb4752589266dd495a75c56e Mon Sep 17 00:00:00 2001 From: dapan1121 <89396746@qq.com> Date: Mon, 31 May 2021 15:48:53 +0800 Subject: [PATCH 48/82] fix join time precision issue --- src/client/src/tscSubquery.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/client/src/tscSubquery.c 
b/src/client/src/tscSubquery.c index 3cb2b60ce2..08ade3acc5 100644 --- a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -1489,6 +1489,8 @@ static void joinRetrieveFinalResCallback(void* param, TAOS_RES* tres, int numOfR SSqlRes* pRes1 = &pParentSql->pSubs[i]->res; + pParentSql->res.precision = pRes1->precision; + if (pRes1->row > 0 && pRes1->numOfRows > 0) { tscDebug("0x%"PRIx64" sub:%p index:%d numOfRows:%d total:%"PRId64 " (not retrieve)", pParentSql->self, pParentSql->pSubs[i], i, pRes1->numOfRows, pRes1->numOfTotal); From d5965c461a1bc0519fee465c438b7b3e078299ce Mon Sep 17 00:00:00 2001 From: tickduan <417921451@qq.com> Date: Mon, 31 May 2021 15:51:43 +0800 Subject: [PATCH 49/82] must ltime >0 to replace --- src/client/src/tscStream.c | 4 ++-- tests/pytest/util/dnodes.py | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/src/client/src/tscStream.c b/src/client/src/tscStream.c index 2226c3d95d..3998f99afe 100644 --- a/src/client/src/tscStream.c +++ b/src/client/src/tscStream.c @@ -576,8 +576,8 @@ static void tscCreateStream(void *param, TAOS_RES *res, int code) { // set stime with ltime if ltime > stime const char* dstTable = pStream->dstTable? pStream->dstTable: ""; tscDebug(" CQ table=%s ltime is %"PRId64, dstTable, pStream->ltime); - if(pStream->ltime > pStream->stime) { - tscWarn(" CQ set stream %s stime=%"PRId64" replace with ltime=%"PRId64" ", dstTable, pStream->stime, pStream->ltime); + if(pStream->ltime > 0 && pStream->ltime > pStream->stime) { + tscWarn(" CQ set stream %s stime=%"PRId64" replace with ltime=%"PRId64" if ltime>0 ", dstTable, pStream->stime, pStream->ltime); pStream->stime = pStream->ltime; } diff --git a/tests/pytest/util/dnodes.py b/tests/pytest/util/dnodes.py index 6eaf4e18af..0f71ffd0a3 100644 --- a/tests/pytest/util/dnodes.py +++ b/tests/pytest/util/dnodes.py @@ -432,7 +432,7 @@ class TDDnodes: self.simDeployed = False def init(self, path): - psCmd = "ps -ef|grep -w taosd| grep -v grep | awk '{print $2}'" + psCmd = "ps -ef|grep -w taosd| grep -v grep| grep -v defunct | awk '{print $2}'" processID = subprocess.check_output(psCmd, shell=True).decode("utf-8") while(processID): killCmd = "kill -TERM %s > /dev/null 2>&1" % processID @@ -545,14 +545,14 @@ class TDDnodes: for i in range(len(self.dnodes)): self.dnodes[i].stop() - psCmd = "ps -ef | grep -w taosd | grep 'root' | grep -v grep | awk '{print $2}'" + psCmd = "ps -ef | grep -w taosd | grep 'root' | grep -v grep| grep -v defunct | awk '{print $2}'" processID = subprocess.check_output(psCmd, shell=True).decode("utf-8") if processID: cmd = "sudo systemctl stop taosd" os.system(cmd) # if os.system(cmd) != 0 : # tdLog.exit(cmd) - psCmd = "ps -ef|grep -w taosd| grep -v grep | awk '{print $2}'" + psCmd = "ps -ef|grep -w taosd| grep -v grep| grep -v defunct | awk '{print $2}'" processID = subprocess.check_output(psCmd, shell=True).decode("utf-8") while(processID): killCmd = "kill -TERM %s > /dev/null 2>&1" % processID From e4a6d251d91892d8328460b1d09f501fa920d33e Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Mon, 31 May 2021 18:00:14 +0800 Subject: [PATCH 50/82] [TD-4294] --- tests/pytest/functions/function_session.py | 85 ++++++++++++++ .../pytest/functions/function_stateWindow.py | 109 ++++++++++++++++++ 2 files changed, 194 insertions(+) create mode 100644 tests/pytest/functions/function_session.py create mode 100644 tests/pytest/functions/function_stateWindow.py diff --git a/tests/pytest/functions/function_session.py b/tests/pytest/functions/function_session.py new 
file mode 100644 index 0000000000..9165440737 --- /dev/null +++ b/tests/pytest/functions/function_session.py @@ -0,0 +1,85 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import taos +from util.log import * +from util.cases import * +from util.sql import * +#import numpy as np + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + + self.rowNum = 10 + self.ts = 1537146000000 + + def run(self): + tdSql.prepare() + + tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double, + col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned) tags(loc nchar(20))''') + tdSql.execute("create table test1 using test tags('beijing')") + for i in range(self.rowNum): + tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)" + % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1)) + + # operation not allowed on super table + tdSql.error("select count(*) from test session(ts, 1s)") + # operation not allowde on col pro + tdSql.error("select * from test1 session(ts, 1s)") + # operation not allowed on col except primary ts + tdSql.error("select * from test1 session(col1, 1s)") + + tdSql.query("select count(*) from test1 session(ts, 1s)") + + tdSql.checkRows(1) + tdSql.checkData(0, 1, 10) + # append more data + + for i in range(self.rowNum): + tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)" + % (self.ts + 2000, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1)) + + tdSql.query("select count(*) from test1 session(ts, 1s)") + tdSql.checkRows(2) + tdSql.checkData(0, 1, 10) + tdSql.checkData(1, 1, 1) + + tdSql.query("select count(*) from test1 session(ts, 1m)") + tdSql.checkRows(1) + tdSql.checkData(0, 1, 11) + + tdSql.query("select first(col1) from test1 session(ts, 1s)") + tdSql.checkRows(2) + tdSql.checkData(0, 1, 1) + tdSql.checkData(1, 1, 1) + + tdSql.query("select first(col1), last(col2) from test1 session(ts, 1s)") + tdSql.checkRows(2) + tdSql.checkData(0, 1, 1) + tdSql.checkData(0, 2, 10) + tdSql.checkData(1, 1, 1) + tdSql.checkData(1, 1, 1) + + # add more function + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/functions/function_stateWindow.py b/tests/pytest/functions/function_stateWindow.py new file mode 100644 index 0000000000..8f05b32164 --- /dev/null +++ b/tests/pytest/functions/function_stateWindow.py @@ -0,0 +1,109 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import taos +from util.log import * +from util.cases import * +from util.sql import * +#import numpy as np + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + + self.rowNum = 10 + self.ts = 1537146000000 + + def run(self): + tdSql.prepare() + + tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double, + col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned) tags(loc nchar(20))''') + tdSql.execute("create table test1 using test tags('beijing')") + col0 = 0 + for i in range(self.rowNum): + tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)" + % (self.ts + i, col0, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1)) + + # operation not allowed on super table + tdSql.error("select count(*) from test session(ts, 1s)") + # operation not allowde on col pro + tdSql.error("select * from test1 session(ts, 1s)") + # operation not allowed on col except primary ts + tdSql.error("select * from test1 session(col1, 1s)") + + tdSql.query("select count(*) from test1 state_window(col1)") + + tdSql.checkRows(1) + tdSql.checkData(0, 0, self.rowNum) + # append more data + + col0 = col0 + 1 + for i in range(self.rowNum): + tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)" + % (self.ts + i + 10000, col0, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1)) + + tdSql.query("select count(*) from test1 state_window(col1)") + + tdSql.checkRows(2) + tdSql.checkData(0, 0, self.rowNum) + tdSql.checkData(1, 0, self.rowNum) + + + tdSql.query("select first(col1) from test1 state_window(col1)") + tdSql.checkRows(2) + col0 = col0 - 1 + tdSql.checkData(0, 0, col0) + col0 = col0 + 1 + tdSql.checkData(1, 0, col0) + + tdSql.query("select first(col2) from test1 state_window(col1)") + tdSql.checkRows(2) + tdSql.checkData(0, 0, 1) + tdSql.checkData(1, 0, 1) + + tdSql.query("select count(col1), first(col2) from test1 state_window(col1)") + tdSql.checkRows(2) + tdSql.checkData(0, 0, 10) + tdSql.checkData(0, 1, 1) + + tdSql.checkData(1, 0, 10) + tdSql.checkData(1, 1, 1) + + + #tdSql.query("select count(*) from test1 session(ts, 1m)") + #tdSql.checkRows(1) + #tdSql.checkData(0, 1, 11) + + #tdSql.query("select first(col1) from test1 session(ts, 1s)") + #tdSql.checkRows(2) + #tdSql.checkData(0, 1, 1) + #tdSql.checkData(1, 1, 1) + + #tdSql.query("select first(col1), last(col2) from test1 session(ts, 1s)") + #tdSql.checkRows(2) + #tdSql.checkData(0, 1, 1) + #tdSql.checkData(0, 2, 10) + #tdSql.checkData(1, 1, 1) + #tdSql.checkData(1, 1, 1) + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) From f6e0d1926f51961a5c0d2d1f875cfb7c32d8c074 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Mon, 31 May 2021 18:45:35 +0800 Subject: [PATCH 51/82] [TD-4294] --- tests/pytest/fulltest.sh | 2 ++ 1 file 
changed, 2 insertions(+) diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh index c5aba24867..f069897ea9 100755 --- a/tests/pytest/fulltest.sh +++ b/tests/pytest/fulltest.sh @@ -314,6 +314,8 @@ python3 ./test.py -f query/last_row_cache.py python3 ./test.py -f account/account_create.py python3 ./test.py -f alter/alter_table.py python3 ./test.py -f query/queryGroupbySort.py +python3 ./test.py -f functions/function_session.py +python3 ./test.py -f functions/function_stateWindow.py python3 ./test.py -f insert/unsignedInt.py python3 ./test.py -f insert/unsignedBigint.py From edd05ea486d618af8bbb7b8baaae115253ec7b09 Mon Sep 17 00:00:00 2001 From: tickduan <417921451@qq.com> Date: Mon, 31 May 2021 19:49:47 +0800 Subject: [PATCH 52/82] INT64_MIN default to ltime --- src/client/src/tscStream.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/client/src/tscStream.c b/src/client/src/tscStream.c index 3998f99afe..0401d1f3b2 100644 --- a/src/client/src/tscStream.c +++ b/src/client/src/tscStream.c @@ -576,7 +576,7 @@ static void tscCreateStream(void *param, TAOS_RES *res, int code) { // set stime with ltime if ltime > stime const char* dstTable = pStream->dstTable? pStream->dstTable: ""; tscDebug(" CQ table=%s ltime is %"PRId64, dstTable, pStream->ltime); - if(pStream->ltime > 0 && pStream->ltime > pStream->stime) { + if(pStream->ltime != INT64_MIN && pStream->ltime > pStream->stime) { tscWarn(" CQ set stream %s stime=%"PRId64" replace with ltime=%"PRId64" if ltime>0 ", dstTable, pStream->stime, pStream->ltime); pStream->stime = pStream->ltime; } @@ -678,6 +678,7 @@ TAOS_STREAM *taos_open_stream_withname(TAOS *taos, const char* dstTable, const c return NULL; } + pStream->ltime = INT64_MIN; pStream->stime = stime; pStream->fp = fp; pStream->callback = callback; From 8543ac4e65ec0899837a6fd2c4944f6b1e888442 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Mon, 31 May 2021 20:48:09 +0800 Subject: [PATCH 53/82] [TD-4294] --- tests/pytest/functions/function_session.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/pytest/functions/function_session.py b/tests/pytest/functions/function_session.py index 9165440737..21b6d088ff 100644 --- a/tests/pytest/functions/function_session.py +++ b/tests/pytest/functions/function_session.py @@ -68,6 +68,7 @@ class TDTestCase: tdSql.checkData(0, 1, 1) tdSql.checkData(1, 1, 1) + tdSql.query("select first(col1), last(col2) from test1 session(ts, 1s)") tdSql.checkRows(2) tdSql.checkData(0, 1, 1) From 9c8507cd7c17d0419b45f268ed757170f23a4755 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Mon, 31 May 2021 23:29:59 +0800 Subject: [PATCH 54/82] [TD-4506] return valid msg when no db specified --- src/client/src/tscSQLParser.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index f34434e66c..d255abe602 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -7186,8 +7186,9 @@ static int32_t getTableNameFromSqlNode(SSqlNode* pSqlNode, SArray* tableNameList } SName name = {0}; - if (tscSetTableFullName(&name, t, pSql) != TSDB_CODE_SUCCESS) { - return invalidOperationMsg(msgBuf, msg1); + int32_t code = tscSetTableFullName(&name, t, pSql); + if (code != TSDB_CODE_SUCCESS) { + return code; } taosArrayPush(tableNameList, &name); From ce7bfcf02452a206c7236ceb9daab399585f5298 Mon Sep 17 00:00:00 2001 From: dapan1121 <89396746@qq.com> Date: Tue, 1 Jun 2021 08:28:45 +0800 Subject: [PATCH 55/82] fix case issue --- 
tests/pytest/query/queryInsertValue.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/pytest/query/queryInsertValue.py b/tests/pytest/query/queryInsertValue.py index 856801b4ee..a6b2a88008 100644 --- a/tests/pytest/query/queryInsertValue.py +++ b/tests/pytest/query/queryInsertValue.py @@ -45,7 +45,7 @@ class TDTestCase: tdSql.query("select * from st") tdSql.checkRows(1) - tdSql.execute("alter table st add column length int") + tdSql.execute("alter table st add column len int") tdSql.execute("insert into t1 values(now, 1, 2)") tdSql.query("select last(*) from st") tdSql.checkData(0, 2, 2); From 764087a06497edd5df72e1857eb03ccc7bf91af2 Mon Sep 17 00:00:00 2001 From: dapan1121 <89396746@qq.com> Date: Tue, 1 Jun 2021 11:39:28 +0800 Subject: [PATCH 56/82] support bind multiple tables --- src/client/src/tscPrepare.c | 1 - tests/script/api/stmtBatchTest.c | 917 ++++++++++++++++++++++++++++++- 2 files changed, 909 insertions(+), 9 deletions(-) diff --git a/src/client/src/tscPrepare.c b/src/client/src/tscPrepare.c index 312a570f2c..e395f09bae 100644 --- a/src/client/src/tscPrepare.c +++ b/src/client/src/tscPrepare.c @@ -1561,7 +1561,6 @@ int taos_stmt_set_tbname_tags(TAOS_STMT* stmt, const char* name, TAOS_BIND* tags } pStmt->mtb.nameSet = true; - pStmt->mtb.tagSet = true; tscDebug("0x%"PRIx64" SQL: %s", pSql->self, pSql->sqlstr); diff --git a/tests/script/api/stmtBatchTest.c b/tests/script/api/stmtBatchTest.c index 8bd296db61..80848febd1 100644 --- a/tests/script/api/stmtBatchTest.c +++ b/tests/script/api/stmtBatchTest.c @@ -1450,6 +1450,47 @@ static void prepareV_long(TAOS *taos, int schemaCase, int tableNum, int lenO } +static void prepareVcolumn_autoCreateTbl(TAOS *taos, int schemaCase, int tableNum, int lenOfBinaryDef, char* dbName) { + TAOS_RES *result; + int code; + char sqlstr[1024] = {0}; + sprintf(sqlstr, "drop database if exists %s;", dbName); + result = taos_query(taos, sqlstr); + taos_free_result(result); + + sprintf(sqlstr, "create database %s;", dbName); + result = taos_query(taos, sqlstr); + code = taos_errno(result); + if (code != 0) { + printf("failed to create database, reason:%s\n", taos_errstr(result)); + taos_free_result(result); + return; + } + taos_free_result(result); + + sprintf(sqlstr, "use %s;", dbName); + result = taos_query(taos, sqlstr); + taos_free_result(result); + + // create table + char buf[1024] = {0}; + //if (bigsize) { + sprintf(buf, "create stable stb1 (ts timestamp, b bool, v1 tinyint, v2 smallint, v4 int, v8 bigint, f4 float, f8 double, br binary(%d), nr nchar(%d), ts2 timestamp)" + " tags(id1 int, id2 bool, id3 tinyint, id4 smallint, id5 bigint, id6 float, id7 double, id8 binary(%d), id9 nchar(%d))", lenOfBinaryDef, lenOfBinaryDef, lenOfBinaryDef, lenOfBinaryDef) ; + //} else { + // sprintf(buf, "create stable stb1 (ts timestamp, b int) tags(id1 int, id2 bool, id3 tinyint, id4 smallint, id5 bigint, id6 float, id7 double, id8 binary(40), id9 nchar(40))") ; + //} + + result = taos_query(taos, buf); + code = taos_errno(result); + if (code != 0) { + printf("failed to create table, reason:%s\n", taos_errstr(result)); + taos_free_result(result); + return; + } + taos_free_result(result); +} + static void prepareVcolumn(TAOS *taos, int schemaCase, int tableNum, int lenOfBinaryDef, char* dbName) { TAOS_RES *result; @@ -3159,12 +3200,872 @@ static void SpecifyColumnBatchCase(TAOS *taos) { } + +/*=======================*/ +/* +test scene: insert into tb1 (ts,f1) values (?,?) 
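+
+Call-sequence sketch shared by the cases below (error handling omitted; the
+function names and the SQL template are the ones used in the code that follows,
+everything else is illustrative):
+
+  taos_stmt_prepare(stmt, "insert into ? using stb1 tags (?,?,?,?,?,?,?,?,?) (ts,b,v4,f4,br) values(?,?,?,?,?)", 0);
+  for each table m<zz>:
+    taos_stmt_set_tbname_tags(stmt, "m<zz>", tags);              // table name + 9 tag values
+    for each bound column col:
+      taos_stmt_bind_single_param_batch(stmt, params + id, col); // one TAOS_MULTI_BIND per column
+    taos_stmt_add_batch(stmt);
+  taos_stmt_execute(stmt);                                       // flush one batch per bingNum loop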
+*/ +static int stmt_specifyCol_bind_case_001_autoCreateTbl(TAOS_STMT *stmt, int tableNum, int rowsOfPerColum, int bingNum, int lenOfBinaryDef, int lenOfBinaryAct, int columnNum) { + sampleValue* v = (sampleValue *)calloc(1, sizeof(sampleValue)); + + int totalRowsPerTbl = rowsOfPerColum * bingNum; + + v->ts = (int64_t *)malloc(sizeof(int64_t) * (size_t)(totalRowsPerTbl * tableNum)); + v->br = (char *)malloc(sizeof(int64_t) * (size_t)(totalRowsPerTbl * lenOfBinaryDef)); + v->nr = (char *)malloc(sizeof(int64_t) * (size_t)(totalRowsPerTbl * lenOfBinaryDef)); + + int *lb = (int *)malloc(MAX_ROWS_OF_PER_COLUMN * sizeof(int)); + + TAOS_BIND *tags = calloc(1, sizeof(TAOS_BIND) * 9 * 1); + TAOS_MULTI_BIND *params = calloc(1, sizeof(TAOS_MULTI_BIND) * (size_t)(bingNum * columnNum * (tableNum+1) * rowsOfPerColum)); + char* is_null = malloc(sizeof(char) * MAX_ROWS_OF_PER_COLUMN); + char* no_null = malloc(sizeof(char) * MAX_ROWS_OF_PER_COLUMN); + int one_not_null = 0; + + int64_t tts = 1591060628000; + + for (int i = 0; i < rowsOfPerColum; ++i) { + lb[i] = lenOfBinaryAct; + no_null[i] = 0; + is_null[i] = (i % 10 == 2) ? 1 : 0; + v->b[i] = (int8_t)(i % 2); + v->v1[i] = (int8_t)((i+1) % 2); + v->v2[i] = (int16_t)i; + v->v4[i] = (int32_t)(i+1); + v->v8[i] = (int64_t)(i+2); + v->f4[i] = (float)(i+3); + v->f8[i] = (double)(i+4); + char tbuf[MAX_BINARY_DEF_LEN]; + memset(tbuf, 0, MAX_BINARY_DEF_LEN); + sprintf(tbuf, "binary-%d-0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789", i%10); + memcpy(v->br + i*lenOfBinaryDef, tbuf, (size_t)lenOfBinaryAct); + memset(tbuf, 0, MAX_BINARY_DEF_LEN); + sprintf(tbuf, "nchar-%d-0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789", i%10); + memcpy(v->nr + i*lenOfBinaryDef, tbuf, (size_t)lenOfBinaryAct); + v->ts2[i] = tts + i; + } + + int i = 0; + for (int j = 0; j < bingNum * tableNum; j++) { + params[i+0].buffer_type = TSDB_DATA_TYPE_TIMESTAMP; + params[i+0].buffer_length = sizeof(int64_t); + params[i+0].buffer = &v->ts[j*rowsOfPerColum]; + params[i+0].length = NULL; + params[i+0].is_null = no_null; + params[i+0].num = rowsOfPerColum; + + params[i+1].buffer_type = TSDB_DATA_TYPE_BOOL; + params[i+1].buffer_length = sizeof(int8_t); + params[i+1].buffer = v->b; + params[i+1].length = NULL; + params[i+1].is_null = is_null; + params[i+1].num = rowsOfPerColum; + + params[i+2].buffer_type = TSDB_DATA_TYPE_INT; + params[i+2].buffer_length = sizeof(int32_t); + params[i+2].buffer = v->v4; + params[i+2].length = NULL; + params[i+2].is_null = is_null; + params[i+2].num = rowsOfPerColum; + + params[i+3].buffer_type = TSDB_DATA_TYPE_FLOAT; + params[i+3].buffer_length = sizeof(float); + params[i+3].buffer = v->f4; + params[i+3].length = NULL; + params[i+3].is_null = is_null; + params[i+3].num = rowsOfPerColum; + + params[i+4].buffer_type = TSDB_DATA_TYPE_BINARY; + params[i+4].buffer_length = (uintptr_t)lenOfBinaryDef; + params[i+4].buffer = v->br; + params[i+4].length = lb; + params[i+4].is_null = is_null; + params[i+4].num = rowsOfPerColum; + + i+=columnNum; + } + + //int64_t tts = 1591060628000; + for (int i = 0; i < totalRowsPerTbl * tableNum; ++i) { + v->ts[i] = tts + i; + } + + for (int i = 0; i < 1; ++i) { + tags[i+0].buffer_type = TSDB_DATA_TYPE_INT; + tags[i+0].buffer = v->v4; + tags[i+0].is_null = &one_not_null; + tags[i+0].length = NULL; + + 
tags[i+1].buffer_type = TSDB_DATA_TYPE_BOOL; + tags[i+1].buffer = v->b; + tags[i+1].is_null = &one_not_null; + tags[i+1].length = NULL; + + tags[i+2].buffer_type = TSDB_DATA_TYPE_TINYINT; + tags[i+2].buffer = v->v1; + tags[i+2].is_null = &one_not_null; + tags[i+2].length = NULL; + + tags[i+3].buffer_type = TSDB_DATA_TYPE_SMALLINT; + tags[i+3].buffer = v->v2; + tags[i+3].is_null = &one_not_null; + tags[i+3].length = NULL; + + tags[i+4].buffer_type = TSDB_DATA_TYPE_BIGINT; + tags[i+4].buffer = v->v8; + tags[i+4].is_null = &one_not_null; + tags[i+4].length = NULL; + + tags[i+5].buffer_type = TSDB_DATA_TYPE_FLOAT; + tags[i+5].buffer = v->f4; + tags[i+5].is_null = &one_not_null; + tags[i+5].length = NULL; + + tags[i+6].buffer_type = TSDB_DATA_TYPE_DOUBLE; + tags[i+6].buffer = v->f8; + tags[i+6].is_null = &one_not_null; + tags[i+6].length = NULL; + + tags[i+7].buffer_type = TSDB_DATA_TYPE_BINARY; + tags[i+7].buffer = v->br; + tags[i+7].is_null = &one_not_null; + tags[i+7].length = (uintptr_t *)lb; + + tags[i+8].buffer_type = TSDB_DATA_TYPE_NCHAR; + tags[i+8].buffer = v->nr; + tags[i+8].is_null = &one_not_null; + tags[i+8].length = (uintptr_t *)lb; + } + + + unsigned long long starttime = getCurrentTime(); + +// create table m%d (ts timestamp, b bool, v1 tinyint, v2 smallint, v4 int, v8 bigint, f4 float, f8 double, br binary(%d), nr nchar(%d), ts2 timestamp) + //char *sql = "insert into ? (ts,b,v4,f4,br) using stb1 tags (?,?,?,?,?,?,?,?,?) values(?,?,?,?,?)"; + char *sql = "insert into ? using stb1 tags (?,?,?,?,?,?,?,?,?) (ts,b,v4,f4,br) values(?,?,?,?,?)"; + + int code = taos_stmt_prepare(stmt, sql, 0); + if (code != 0){ + printf("failed to execute taos_stmt_prepare. code:0x%x[%s]\n", code, tstrerror(code)); + return -1; + } + + int id = 0; + for (int l = 0; l < bingNum; l++) { + for (int zz = 0; zz < tableNum; zz++) { + char buf[32]; + sprintf(buf, "m%d", zz); + code = taos_stmt_set_tbname_tags(stmt, buf, tags); + if (code != 0){ + printf("failed to execute taos_stmt_set_tbname. code:0x%x[%s]\n", code, tstrerror(code)); + return -1; + } + + for (int col=0; col < columnNum; ++col) { + code = taos_stmt_bind_single_param_batch(stmt, params + id, col); + if (code != 0){ + printf("failed to execute taos_stmt_bind_single_param_batch. code:0x%x[%s]\n", code, tstrerror(code)); + return -1; + } + id++; + } + + code = taos_stmt_add_batch(stmt); + if (code != 0) { + printf("failed to execute taos_stmt_add_batch. code:0x%x[%s]\n", code, tstrerror(code)); + return -1; + } + } + + code = taos_stmt_execute(stmt); + if (code != 0) { + printf("failed to execute taos_stmt_execute. 
code:0x%x[%s]\n", code, tstrerror(code)); + return -1; + } + } + + unsigned long long endtime = getCurrentTime(); + unsigned long long totalRows = (uint32_t)(totalRowsPerTbl * tableNum); + printf("insert total %d records, used %u seconds, avg:%u useconds per record\n", totalRows, (endtime-starttime)/1000000UL, (endtime-starttime)/totalRows); + + free(v->ts); + free(v->br); + free(v->nr); + free(v); + free(lb); + free(params); + free(tags); + free(is_null); + free(no_null); + + return 0; +} + +static int stmt_specifyCol_bind_case_002_autoCreateTbl(TAOS_STMT *stmt, int tableNum, int rowsOfPerColum, int bingNum, int lenOfBinaryDef, int lenOfBinaryAct, int columnNum) { + sampleValue* v = (sampleValue *)calloc(1, sizeof(sampleValue)); + + int totalRowsPerTbl = rowsOfPerColum * bingNum; + + v->ts = (int64_t *)malloc(sizeof(int64_t) * (size_t)(totalRowsPerTbl * tableNum)); + v->br = (char *)malloc(sizeof(int64_t) * (size_t)(totalRowsPerTbl * lenOfBinaryDef)); + v->nr = (char *)malloc(sizeof(int64_t) * (size_t)(totalRowsPerTbl * lenOfBinaryDef)); + + int *lb = (int *)malloc(MAX_ROWS_OF_PER_COLUMN * sizeof(int)); + + TAOS_BIND *tags = calloc(1, sizeof(TAOS_BIND) * 9 * 1); + TAOS_MULTI_BIND *params = calloc(1, sizeof(TAOS_MULTI_BIND) * (size_t)(bingNum * columnNum * (tableNum+1) * rowsOfPerColum)); + char* is_null = malloc(sizeof(char) * MAX_ROWS_OF_PER_COLUMN); + char* no_null = malloc(sizeof(char) * MAX_ROWS_OF_PER_COLUMN); + int one_not_null = 0; + + int64_t tts = 1591060628000; + + for (int i = 0; i < rowsOfPerColum; ++i) { + lb[i] = lenOfBinaryAct; + no_null[i] = 0; + is_null[i] = (i % 10 == 2) ? 1 : 0; + v->b[i] = (int8_t)(i % 2); + v->v1[i] = (int8_t)((i+1) % 2); + v->v2[i] = (int16_t)i; + v->v4[i] = (int32_t)(i+1); + v->v8[i] = (int64_t)(i+2); + v->f4[i] = (float)(i+3); + v->f8[i] = (double)(i+4); + char tbuf[MAX_BINARY_DEF_LEN]; + memset(tbuf, 0, MAX_BINARY_DEF_LEN); + sprintf(tbuf, "binary-%d-0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789", i%10); + memcpy(v->br + i*lenOfBinaryDef, tbuf, (size_t)lenOfBinaryAct); + memset(tbuf, 0, MAX_BINARY_DEF_LEN); + sprintf(tbuf, "nchar-%d-0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789", i%10); + memcpy(v->nr + i*lenOfBinaryDef, tbuf, (size_t)lenOfBinaryAct); + v->ts2[i] = tts + i; + } + + int i = 0; + for (int j = 0; j < bingNum * tableNum; j++) { + params[i+0].buffer_type = TSDB_DATA_TYPE_TIMESTAMP; + params[i+0].buffer_length = sizeof(int64_t); + params[i+0].buffer = &v->ts[j*rowsOfPerColum]; + params[i+0].length = NULL; + params[i+0].is_null = no_null; + params[i+0].num = rowsOfPerColum; + + params[i+1].buffer_type = TSDB_DATA_TYPE_BOOL; + params[i+1].buffer_length = sizeof(int8_t); + params[i+1].buffer = v->b; + params[i+1].length = NULL; + params[i+1].is_null = is_null; + params[i+1].num = rowsOfPerColum; + + params[i+2].buffer_type = TSDB_DATA_TYPE_INT; + params[i+2].buffer_length = sizeof(int32_t); + params[i+2].buffer = v->v4; + params[i+2].length = NULL; + params[i+2].is_null = is_null; + params[i+2].num = rowsOfPerColum; + + params[i+3].buffer_type = TSDB_DATA_TYPE_FLOAT; + params[i+3].buffer_length = sizeof(float); + params[i+3].buffer = v->f4; + params[i+3].length = NULL; + params[i+3].is_null = is_null; + params[i+3].num = rowsOfPerColum; + + params[i+4].buffer_type = TSDB_DATA_TYPE_BINARY; + 
params[i+4].buffer_length = (uintptr_t)lenOfBinaryDef; + params[i+4].buffer = v->br; + params[i+4].length = lb; + params[i+4].is_null = is_null; + params[i+4].num = rowsOfPerColum; + + i+=columnNum; + } + + //int64_t tts = 1591060628000; + for (int i = 0; i < totalRowsPerTbl * tableNum; ++i) { + v->ts[i] = tts + i; + } + + for (int i = 0; i < 1; ++i) { + tags[i+0].buffer_type = TSDB_DATA_TYPE_INT; + tags[i+0].buffer = v->v4; + tags[i+0].is_null = &one_not_null; + tags[i+0].length = NULL; + + tags[i+1].buffer_type = TSDB_DATA_TYPE_BOOL; + tags[i+1].buffer = v->b; + tags[i+1].is_null = &one_not_null; + tags[i+1].length = NULL; + + tags[i+2].buffer_type = TSDB_DATA_TYPE_TINYINT; + tags[i+2].buffer = v->v1; + tags[i+2].is_null = &one_not_null; + tags[i+2].length = NULL; + + tags[i+3].buffer_type = TSDB_DATA_TYPE_SMALLINT; + tags[i+3].buffer = v->v2; + tags[i+3].is_null = &one_not_null; + tags[i+3].length = NULL; + + tags[i+4].buffer_type = TSDB_DATA_TYPE_BIGINT; + tags[i+4].buffer = v->v8; + tags[i+4].is_null = &one_not_null; + tags[i+4].length = NULL; + + tags[i+5].buffer_type = TSDB_DATA_TYPE_FLOAT; + tags[i+5].buffer = v->f4; + tags[i+5].is_null = &one_not_null; + tags[i+5].length = NULL; + + tags[i+6].buffer_type = TSDB_DATA_TYPE_DOUBLE; + tags[i+6].buffer = v->f8; + tags[i+6].is_null = &one_not_null; + tags[i+6].length = NULL; + + tags[i+7].buffer_type = TSDB_DATA_TYPE_BINARY; + tags[i+7].buffer = v->br; + tags[i+7].is_null = &one_not_null; + tags[i+7].length = (uintptr_t *)lb; + + tags[i+8].buffer_type = TSDB_DATA_TYPE_NCHAR; + tags[i+8].buffer = v->nr; + tags[i+8].is_null = &one_not_null; + tags[i+8].length = (uintptr_t *)lb; + } + + + unsigned long long starttime = getCurrentTime(); + +// create table m%d (ts timestamp, b bool, v1 tinyint, v2 smallint, v4 int, v8 bigint, f4 float, f8 double, br binary(%d), nr nchar(%d), ts2 timestamp) + //char *sql = "insert into ? (ts,b,v4,f4,br) using stb1 tags (?,?,?,?,?,?,?,?,?) values(?,?,?,?,?)"; + char *sql = "insert into ? using stb1 tags (?,?,?,?,?,?,?,?,?) (ts,b,v4,f4,br) values(?,?,?,?,?)"; + + int code = taos_stmt_prepare(stmt, sql, 0); + if (code != 0){ + printf("failed to execute taos_stmt_prepare. code:0x%x[%s]\n", code, tstrerror(code)); + return -1; + } + + int id = 0; + for (int l = 0; l < bingNum; l++) { + for (int zz = 0; zz < tableNum; zz++) { + char buf[32]; + sprintf(buf, "m%d", zz); + code = taos_stmt_set_tbname_tags(stmt, buf, tags); + if (code != 0){ + printf("failed to execute taos_stmt_set_tbname. code:0x%x[%s]\n", code, tstrerror(code)); + return -1; + } + + for (int col=0; col < columnNum; ++col) { + code = taos_stmt_bind_single_param_batch(stmt, params + id, col); + if (code != 0){ + printf("failed to execute taos_stmt_bind_single_param_batch. code:0x%x[%s]\n", code, tstrerror(code)); + return -1; + } + id++; + } + + code = taos_stmt_add_batch(stmt); + if (code != 0) { + printf("failed to execute taos_stmt_add_batch. code:0x%x[%s]\n", code, tstrerror(code)); + return -1; + } + } + + code = taos_stmt_execute(stmt); + if (code != 0) { + printf("failed to execute taos_stmt_execute. 
code:0x%x[%s]\n", code, tstrerror(code)); + return -1; + } + } + + unsigned long long endtime = getCurrentTime(); + unsigned long long totalRows = (uint32_t)(totalRowsPerTbl * tableNum); + printf("insert total %d records, used %u seconds, avg:%u useconds per record\n", totalRows, (endtime-starttime)/1000000UL, (endtime-starttime)/totalRows); + + free(v->ts); + free(v->br); + free(v->nr); + free(v); + free(lb); + free(params); + free(tags); + free(is_null); + free(no_null); + + return 0; +} + +static int stmt_specifyCol_bind_case_003_autoCreateTbl(TAOS_STMT *stmt, int tableNum, int rowsOfPerColum, int bingNum, int lenOfBinaryDef, int lenOfBinaryAct, int columnNum) { + sampleValue* v = (sampleValue *)calloc(1, sizeof(sampleValue)); + + int totalRowsPerTbl = rowsOfPerColum * bingNum; + + v->ts = (int64_t *)malloc(sizeof(int64_t) * (size_t)(totalRowsPerTbl * tableNum)); + v->br = (char *)malloc(sizeof(int64_t) * (size_t)(totalRowsPerTbl * lenOfBinaryDef)); + v->nr = (char *)malloc(sizeof(int64_t) * (size_t)(totalRowsPerTbl * lenOfBinaryDef)); + + int *lb = (int *)malloc(MAX_ROWS_OF_PER_COLUMN * sizeof(int)); + + TAOS_BIND *tags = calloc(1, sizeof(TAOS_BIND) * 9 * 1); + TAOS_MULTI_BIND *params = calloc(1, sizeof(TAOS_MULTI_BIND) * (size_t)(bingNum * columnNum * (tableNum+1) * rowsOfPerColum)); + char* is_null = malloc(sizeof(char) * MAX_ROWS_OF_PER_COLUMN); + char* no_null = malloc(sizeof(char) * MAX_ROWS_OF_PER_COLUMN); + int one_not_null = 0; + + int64_t tts = 1591060628000; + + for (int i = 0; i < rowsOfPerColum; ++i) { + lb[i] = lenOfBinaryAct; + no_null[i] = 0; + is_null[i] = (i % 10 == 2) ? 1 : 0; + v->b[i] = (int8_t)(i % 2); + v->v1[i] = (int8_t)((i+1) % 2); + v->v2[i] = (int16_t)i; + v->v4[i] = (int32_t)(i+1); + v->v8[i] = (int64_t)(i+2); + v->f4[i] = (float)(i+3); + v->f8[i] = (double)(i+4); + char tbuf[MAX_BINARY_DEF_LEN]; + memset(tbuf, 0, MAX_BINARY_DEF_LEN); + sprintf(tbuf, "binary-%d-0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789", i%10); + memcpy(v->br + i*lenOfBinaryDef, tbuf, (size_t)lenOfBinaryAct); + memset(tbuf, 0, MAX_BINARY_DEF_LEN); + sprintf(tbuf, "nchar-%d-0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789", i%10); + memcpy(v->nr + i*lenOfBinaryDef, tbuf, (size_t)lenOfBinaryAct); + v->ts2[i] = tts + i; + } + + int i = 0; + for (int j = 0; j < bingNum * tableNum; j++) { + params[i+0].buffer_type = TSDB_DATA_TYPE_TIMESTAMP; + params[i+0].buffer_length = sizeof(int64_t); + params[i+0].buffer = &v->ts[j*rowsOfPerColum]; + params[i+0].length = NULL; + params[i+0].is_null = no_null; + params[i+0].num = rowsOfPerColum; + + params[i+1].buffer_type = TSDB_DATA_TYPE_BOOL; + params[i+1].buffer_length = sizeof(int8_t); + params[i+1].buffer = v->b; + params[i+1].length = NULL; + params[i+1].is_null = is_null; + params[i+1].num = rowsOfPerColum; + + params[i+2].buffer_type = TSDB_DATA_TYPE_INT; + params[i+2].buffer_length = sizeof(int32_t); + params[i+2].buffer = v->v4; + params[i+2].length = NULL; + params[i+2].is_null = is_null; + params[i+2].num = rowsOfPerColum; + + params[i+3].buffer_type = TSDB_DATA_TYPE_FLOAT; + params[i+3].buffer_length = sizeof(float); + params[i+3].buffer = v->f4; + params[i+3].length = NULL; + params[i+3].is_null = is_null; + params[i+3].num = rowsOfPerColum; + + params[i+4].buffer_type = TSDB_DATA_TYPE_BINARY; + 
params[i+4].buffer_length = (uintptr_t)lenOfBinaryDef; + params[i+4].buffer = v->br; + params[i+4].length = lb; + params[i+4].is_null = is_null; + params[i+4].num = rowsOfPerColum; + + i+=columnNum; + } + + //int64_t tts = 1591060628000; + for (int i = 0; i < totalRowsPerTbl * tableNum; ++i) { + v->ts[i] = tts + i; + } + + for (int i = 0; i < 1; ++i) { + tags[i+0].buffer_type = TSDB_DATA_TYPE_INT; + tags[i+0].buffer = v->v4; + tags[i+0].is_null = &one_not_null; + tags[i+0].length = NULL; + + tags[i+1].buffer_type = TSDB_DATA_TYPE_BOOL; + tags[i+1].buffer = v->b; + tags[i+1].is_null = &one_not_null; + tags[i+1].length = NULL; + + tags[i+2].buffer_type = TSDB_DATA_TYPE_TINYINT; + tags[i+2].buffer = v->v1; + tags[i+2].is_null = &one_not_null; + tags[i+2].length = NULL; + + tags[i+3].buffer_type = TSDB_DATA_TYPE_SMALLINT; + tags[i+3].buffer = v->v2; + tags[i+3].is_null = &one_not_null; + tags[i+3].length = NULL; + + tags[i+4].buffer_type = TSDB_DATA_TYPE_BIGINT; + tags[i+4].buffer = v->v8; + tags[i+4].is_null = &one_not_null; + tags[i+4].length = NULL; + + tags[i+5].buffer_type = TSDB_DATA_TYPE_FLOAT; + tags[i+5].buffer = v->f4; + tags[i+5].is_null = &one_not_null; + tags[i+5].length = NULL; + + tags[i+6].buffer_type = TSDB_DATA_TYPE_DOUBLE; + tags[i+6].buffer = v->f8; + tags[i+6].is_null = &one_not_null; + tags[i+6].length = NULL; + + tags[i+7].buffer_type = TSDB_DATA_TYPE_BINARY; + tags[i+7].buffer = v->br; + tags[i+7].is_null = &one_not_null; + tags[i+7].length = (uintptr_t *)lb; + + tags[i+8].buffer_type = TSDB_DATA_TYPE_NCHAR; + tags[i+8].buffer = v->nr; + tags[i+8].is_null = &one_not_null; + tags[i+8].length = (uintptr_t *)lb; + } + + + unsigned long long starttime = getCurrentTime(); + +// create table m%d (ts timestamp, b bool, v1 tinyint, v2 smallint, v4 int, v8 bigint, f4 float, f8 double, br binary(%d), nr nchar(%d), ts2 timestamp) + //char *sql = "insert into ? (ts,b,v4,f4,br) using stb1 tags (?,?,?,?,?,?,?,?,?) values(?,?,?,?,?)"; + char *sql = "insert into ? using stb1 tags (33,?,99,?,?,?,?,?,?) (ts,b,v4,f4,br) values(?,?,?,?,?)"; + + int code = taos_stmt_prepare(stmt, sql, 0); + if (code != 0){ + printf("failed to execute taos_stmt_prepare. code:0x%x[%s]\n", code, tstrerror(code)); + return -1; + } + + int id = 0; + for (int l = 0; l < bingNum; l++) { + for (int zz = 0; zz < tableNum; zz++) { + char buf[32]; + sprintf(buf, "m%d", zz); + code = taos_stmt_set_tbname_tags(stmt, buf, tags); + if (code != 0){ + printf("failed to execute taos_stmt_set_tbname. code:0x%x[%s]\n", code, tstrerror(code)); + return -1; + } + + for (int col=0; col < columnNum; ++col) { + code = taos_stmt_bind_single_param_batch(stmt, params + id, col); + if (code != 0){ + printf("failed to execute taos_stmt_bind_single_param_batch. code:0x%x[%s]\n", code, tstrerror(code)); + return -1; + } + id++; + } + + code = taos_stmt_add_batch(stmt); + if (code != 0) { + printf("failed to execute taos_stmt_add_batch. code:0x%x[%s]\n", code, tstrerror(code)); + return -1; + } + } + + code = taos_stmt_execute(stmt); + if (code != 0) { + printf("failed to execute taos_stmt_execute. 
code:0x%x[%s]\n", code, tstrerror(code)); + return -1; + } + } + + unsigned long long endtime = getCurrentTime(); + unsigned long long totalRows = (uint32_t)(totalRowsPerTbl * tableNum); + printf("insert total %d records, used %u seconds, avg:%u useconds per record\n", totalRows, (endtime-starttime)/1000000UL, (endtime-starttime)/totalRows); + + free(v->ts); + free(v->br); + free(v->nr); + free(v); + free(lb); + free(params); + free(tags); + free(is_null); + free(no_null); + + return 0; +} + +static void SpecifyColumnBatchCase_autoCreateTbl(TAOS *taos) { + TAOS_STMT *stmt = NULL; + + int tableNum; + int lenOfBinaryDef; + int rowsOfPerColum; + int bingNum; + int lenOfBinaryAct; + int columnNum; + + int totalRowsPerTbl; + +//=======================================================================// +//=============================== single table ==========================// +//========== case 1: ======================// +#if 0 +{ + stmt = taos_stmt_init(taos); + + tableNum = 1; + rowsOfPerColum = 1; + bingNum = 1; + lenOfBinaryDef = 40; + lenOfBinaryAct = 8; + columnNum = 5; + + prepareVcolumn_autoCreateTbl(taos, 1, tableNum, lenOfBinaryDef, "db1"); + stmt_specifyCol_bind_case_001_autoCreateTbl(stmt, tableNum, rowsOfPerColum, bingNum, lenOfBinaryDef, lenOfBinaryAct, columnNum); + + totalRowsPerTbl = rowsOfPerColum * bingNum; + checkResult(taos, "m0", 0, totalRowsPerTbl); + taos_stmt_close(stmt); + printf("case 1 check result end\n\n"); +} +#endif + + //========== case 2: ======================// +#if 0 +{ + stmt = taos_stmt_init(taos); + + tableNum = 1; + rowsOfPerColum = 5; + bingNum = 1; + lenOfBinaryDef = 1000; + lenOfBinaryAct = 15; + columnNum = 5; + + prepareVcolumn_autoCreateTbl(taos, 1, tableNum, lenOfBinaryDef, "db2"); + stmt_specifyCol_bind_case_001_autoCreateTbl(stmt, tableNum, rowsOfPerColum, bingNum, lenOfBinaryDef, lenOfBinaryAct, columnNum); + + totalRowsPerTbl = rowsOfPerColum * bingNum; + checkResult(taos, "m0", 0, totalRowsPerTbl); + //checkResult(taos, "m1", 0, totalRowsPerTbl); + //checkResult(taos, "m2", 0, totalRowsPerTbl); + //checkResult(taos, "m3", 0, totalRowsPerTbl); + //checkResult(taos, "m4", 0, totalRowsPerTbl); + //checkResult(taos, "m5", 0, totalRowsPerTbl); + //checkResult(taos, "m6", 0, totalRowsPerTbl); + //checkResult(taos, "m7", 0, totalRowsPerTbl); + //checkResult(taos, "m8", 0, totalRowsPerTbl); + //checkResult(taos, "m9", 0, totalRowsPerTbl); + taos_stmt_close(stmt); + printf("case 2 check result end\n\n"); +} +#endif + + //========== case 2-1: ======================// +#if 0 + { + stmt = taos_stmt_init(taos); + + tableNum = 1; + rowsOfPerColum = 32767; + bingNum = 1; + lenOfBinaryDef = 1000; + lenOfBinaryAct = 15; + columnNum = 5; + + prepareVcolumn_autoCreateTbl(taos, 1, tableNum, lenOfBinaryDef, "db2_1"); + stmt_specifyCol_bind_case_001_autoCreateTbl(stmt, tableNum, rowsOfPerColum, bingNum, lenOfBinaryDef, lenOfBinaryAct, columnNum); + + totalRowsPerTbl = rowsOfPerColum * bingNum; + checkResult(taos, "m0", 0, totalRowsPerTbl); + //checkResult(taos, "m1", 0, totalRowsPerTbl); + //checkResult(taos, "m2", 0, totalRowsPerTbl); + //checkResult(taos, "m3", 0, totalRowsPerTbl); + //checkResult(taos, "m4", 0, totalRowsPerTbl); + //checkResult(taos, "m5", 0, totalRowsPerTbl); + //checkResult(taos, "m6", 0, totalRowsPerTbl); + //checkResult(taos, "m7", 0, totalRowsPerTbl); + //checkResult(taos, "m8", 0, totalRowsPerTbl); + //checkResult(taos, "m9", 0, totalRowsPerTbl); + taos_stmt_close(stmt); + printf("case 2-1 check result end\n\n"); + } +#endif + 
//========== case 2-2: ======================// +#if 0 + { + printf("====case 2-2 error test start\n"); + stmt = taos_stmt_init(taos); + + tableNum = 1; + rowsOfPerColum = 32768; + bingNum = 1; + lenOfBinaryDef = 1000; + lenOfBinaryAct = 15; + columnNum = 5; + + prepareVcolumn_autoCreateTbl(taos, 1, tableNum, lenOfBinaryDef, "db2_2"); + stmt_specifyCol_bind_case_001_autoCreateTbl(stmt, tableNum, rowsOfPerColum, bingNum, lenOfBinaryDef, lenOfBinaryAct, columnNum); + + totalRowsPerTbl = rowsOfPerColum * bingNum; + checkResult(taos, "m0", 0, totalRowsPerTbl); + //checkResult(taos, "m1", 0, totalRowsPerTbl); + //checkResult(taos, "m2", 0, totalRowsPerTbl); + //checkResult(taos, "m3", 0, totalRowsPerTbl); + //checkResult(taos, "m4", 0, totalRowsPerTbl); + //checkResult(taos, "m5", 0, totalRowsPerTbl); + //checkResult(taos, "m6", 0, totalRowsPerTbl); + //checkResult(taos, "m7", 0, totalRowsPerTbl); + //checkResult(taos, "m8", 0, totalRowsPerTbl); + //checkResult(taos, "m9", 0, totalRowsPerTbl); + taos_stmt_close(stmt); + printf("====case 2-2 check result end\n\n"); + } +#endif + + + //========== case 3: ======================// +#if 0 + { + stmt = taos_stmt_init(taos); + + tableNum = 1; + rowsOfPerColum = 1; + bingNum = 5; + lenOfBinaryDef = 1000; + lenOfBinaryAct = 20; + columnNum = 5; + + prepareVcolumn_autoCreateTbl(taos, 1, tableNum, lenOfBinaryDef, "db3"); + stmt_specifyCol_bind_case_001_autoCreateTbl(stmt, tableNum, rowsOfPerColum, bingNum, lenOfBinaryDef, lenOfBinaryAct, columnNum); + + totalRowsPerTbl = rowsOfPerColum * bingNum; + checkResult(taos, "m0", 0, totalRowsPerTbl); + //checkResult(taos, "m1", 0, totalRowsPerTbl); + //checkResult(taos, "m2", 0, totalRowsPerTbl); + //checkResult(taos, "m3", 0, totalRowsPerTbl); + //checkResult(taos, "m4", 0, totalRowsPerTbl); + //checkResult(taos, "m5", 0, totalRowsPerTbl); + //checkResult(taos, "m6", 0, totalRowsPerTbl); + //checkResult(taos, "m7", 0, totalRowsPerTbl); + //checkResult(taos, "m8", 0, totalRowsPerTbl); + //checkResult(taos, "m9", 0, totalRowsPerTbl); + taos_stmt_close(stmt); + printf("case 3 check result end\n\n"); + } +#endif + + //========== case 4: ======================// +#if 0 + { + stmt = taos_stmt_init(taos); + + tableNum = 1; + rowsOfPerColum = 5; + bingNum = 5; + lenOfBinaryDef = 1000; + lenOfBinaryAct = 33; + columnNum = 5; + + prepareVcolumn_autoCreateTbl(taos, 1, tableNum, lenOfBinaryDef, "db4"); + stmt_specifyCol_bind_case_001_autoCreateTbl(stmt, tableNum, rowsOfPerColum, bingNum, lenOfBinaryDef, lenOfBinaryAct, columnNum); + + totalRowsPerTbl = rowsOfPerColum * bingNum; + checkResult(taos, "m0", 0, totalRowsPerTbl); + //checkResult(taos, "m1", 0, totalRowsPerTbl); + //checkResult(taos, "m2", 0, totalRowsPerTbl); + //checkResult(taos, "m3", 0, totalRowsPerTbl); + //checkResult(taos, "m4", 0, totalRowsPerTbl); + //checkResult(taos, "m5", 0, totalRowsPerTbl); + //checkResult(taos, "m6", 0, totalRowsPerTbl); + //checkResult(taos, "m7", 0, totalRowsPerTbl); + //checkResult(taos, "m8", 0, totalRowsPerTbl); + //checkResult(taos, "m9", 0, totalRowsPerTbl); + taos_stmt_close(stmt); + printf("case 4 check result end\n\n"); + } +#endif + + //=======================================================================// + //=============================== multi-rows to single table ==========================// + //========== case 5: ======================// +#if 0 + { + stmt = taos_stmt_init(taos); + + tableNum = 1; + rowsOfPerColum = 23740; + bingNum = 1; + lenOfBinaryDef = 40; + lenOfBinaryAct = 8; + columnNum = 5; + + 
prepareVcolumn_autoCreateTbl(taos, 1, tableNum, lenOfBinaryDef, "db5"); + stmt_specifyCol_bind_case_001_autoCreateTbl(stmt, tableNum, rowsOfPerColum, bingNum, lenOfBinaryDef, lenOfBinaryAct, columnNum); + + totalRowsPerTbl = rowsOfPerColum * bingNum; + checkResult(taos, "m0", 0, totalRowsPerTbl); + taos_stmt_close(stmt); + printf("case 5 check result end\n\n"); + } +#endif + +// ============== error test: 1.multi table, 2.specify some tags + //========== case 6: ======================// +#if 1 + { + stmt = taos_stmt_init(taos); + + tableNum = 2; + rowsOfPerColum = 5; + bingNum = 1; + lenOfBinaryDef = 40; + lenOfBinaryAct = 8; + columnNum = 5; + + prepareVcolumn_autoCreateTbl(taos, 1, tableNum, lenOfBinaryDef, "db6"); + stmt_specifyCol_bind_case_002_autoCreateTbl(stmt, tableNum, rowsOfPerColum, bingNum, lenOfBinaryDef, lenOfBinaryAct, columnNum); + + totalRowsPerTbl = rowsOfPerColum * bingNum; + checkResult(taos, "m0", 0, totalRowsPerTbl); + checkResult(taos, "m1", 0, totalRowsPerTbl); + taos_stmt_close(stmt); + printf("case 6 check result end\n\n"); + } +#endif + + //========== case 7: ======================// +#if 0 + { + stmt = taos_stmt_init(taos); + + tableNum = 1; + rowsOfPerColum = 23740; + bingNum = 1; + lenOfBinaryDef = 40; + lenOfBinaryAct = 8; + columnNum = 1; + + prepareVcolumn_autoCreateTbl(taos, 1, tableNum, lenOfBinaryDef, "db6"); + stmt_specifyCol_bind_case_003_autoCreateTbl(stmt, tableNum, rowsOfPerColum, bingNum, lenOfBinaryDef, lenOfBinaryAct, columnNum); + + totalRowsPerTbl = rowsOfPerColum * bingNum; + checkResult(taos, "m0", 0, totalRowsPerTbl); + taos_stmt_close(stmt); + printf("case 7 check result end\n\n"); + } +#endif + + return ; + +} + + int main(int argc, char *argv[]) { TAOS *taos; char host[32] = "127.0.0.1"; char* serverIp = NULL; - int threadNum = 1; + //int threadNum = 1; // connect to server if (argc == 1) { @@ -3173,10 +4074,10 @@ int main(int argc, char *argv[]) serverIp = argv[1]; } else if (argc == 3) { serverIp = argv[1]; - threadNum = atoi(argv[2]); + //threadNum = atoi(argv[2]); } else if (argc == 4) { serverIp = argv[1]; - threadNum = atoi(argv[2]); + //threadNum = atoi(argv[2]); g_runTimes = atoi(argv[3]); } @@ -3219,12 +4120,12 @@ int main(int argc, char *argv[]) if (taos == NULL) { printf("failed to connect to TDengine, reason:%s\n", taos_errstr(taos)); return -1; - } + } - runCase(taos); - runCase_long(taos); - SpecifyColumnBatchCase(taos); - + //runCase(taos); + //runCase_long(taos); + //SpecifyColumnBatchCase(taos); + SpecifyColumnBatchCase_autoCreateTbl(taos); return 0; } From 3d2b864d7c3877976c9f3098040dcc40c7e5de51 Mon Sep 17 00:00:00 2001 From: dapan1121 <89396746@qq.com> Date: Tue, 1 Jun 2021 14:45:50 +0800 Subject: [PATCH 57/82] fix sqlstr released issue --- src/client/src/tscPrepare.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/src/client/src/tscPrepare.c b/src/client/src/tscPrepare.c index e395f09bae..8966f3234a 100644 --- a/src/client/src/tscPrepare.c +++ b/src/client/src/tscPrepare.c @@ -48,6 +48,7 @@ typedef struct SMultiTbStmt { bool nameSet; bool tagSet; uint64_t currentUid; + char *sqlstr; uint32_t tbNum; SStrToken tbname; SStrToken stbname; @@ -1291,6 +1292,7 @@ int stmtParseInsertTbTags(SSqlObj* pSql, STscStmt* pStmt) { } pStmt->mtb.values = sToken; + } return TSDB_CODE_SUCCESS; @@ -1373,7 +1375,12 @@ int stmtGenInsertStatement(SSqlObj* pSql, STscStmt* pStmt, const char* name, TAO break; } - free(pSql->sqlstr); + if (pStmt->mtb.sqlstr == NULL) { + pStmt->mtb.sqlstr = pSql->sqlstr; + } else { + 
tfree(pSql->sqlstr); + } + pSql->sqlstr = str; return TSDB_CODE_SUCCESS; @@ -1633,6 +1640,7 @@ int taos_stmt_close(TAOS_STMT* stmt) { taosHashCleanup(pStmt->pSql->cmd.insertParam.pTableBlockHashList); pStmt->pSql->cmd.insertParam.pTableBlockHashList = NULL; taosArrayDestroy(pStmt->mtb.tags); + tfree(pStmt->mtb.sqlstr); } } From 469f709521dc1a1a5e0fc1aef9c522a52709ae9e Mon Sep 17 00:00:00 2001 From: bryanchang0603 Date: Tue, 1 Jun 2021 16:58:27 +0800 Subject: [PATCH 58/82] [TD-4474] adding test case for testing lastRow hot alter --- tests/pytest/alter/alter_cacheLastRow.py | 106 ++++++++++++++++++ tests/pytest/fulltest.sh | 2 +- .../tools/taosdemoAllTest/insert_5M_rows.json | 60 ++++++++++ 3 files changed, 167 insertions(+), 1 deletion(-) create mode 100644 tests/pytest/alter/alter_cacheLastRow.py create mode 100644 tests/pytest/tools/taosdemoAllTest/insert_5M_rows.json diff --git a/tests/pytest/alter/alter_cacheLastRow.py b/tests/pytest/alter/alter_cacheLastRow.py new file mode 100644 index 0000000000..cd08fe4f37 --- /dev/null +++ b/tests/pytest/alter/alter_cacheLastRow.py @@ -0,0 +1,106 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import tdDnodes +from datetime import datetime + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root) - len("/build/bin")] + break + return buildPath + + def run(self): + tdSql.prepare() + tdSql.query('show databases') + tdSql.checkData(0,15,0) + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosd not found!") + else: + tdLog.info("taosd found in %s" % buildPath) + binPath = buildPath + "/build/bin/" + + os.system("%staosdemo -f tools/taosdemoAllTest/insert_100M_rows.json -y " % binPath) + # tdSql.query('select * from stb') + # tdSql.checkRows(1000*10000) + tdDnodes.stop(1) + tdDnodes.start(1) + tdSql.execute('use db') + # tdSql.query('select * from stb') + # tdSql.checkRows(1000*10000) + tableName = [] + for i in range(500): + tableName.append(f"stb_{i}") + + tdSql.execute('use db') + lastRow_Off_start = datetime.now() + + slow = 0 + for i in range(5): + for i in range(100): + for i in range(500): + tdSql.execute(f'SELECT LAST_ROW(*) FROM {tableName[i]}') + lastRow_Off_end = datetime.now() + + tdLog.debug(f'time used:{lastRow_Off_end-lastRow_Off_start}') + + tdSql.execute('alter database db cachelast 1') + tdSql.query('show databases') + tdSql.checkData(0,15,1) + + # tdDnodes.stop(1) + # tdDnodes.start(1) + + tdSql.execute('use db') + lastRow_On_start = datetime.now() + for i in 
range(100): + for i in range(500): + tdSql.execute(f'SELECT LAST_ROW(*) FROM {tableName[i]}') + lastRow_On_end = datetime.now() + + tdLog.debug(f'time used:{lastRow_On_end-lastRow_On_start}') + + if (lastRow_Off_end-lastRow_Off_start > lastRow_On_end-lastRow_On_start): + pass + else: + slow += 1 + tdLog.debug(slow) + if slow > 1: + tdLog.exit('lastRow hot alter failed') + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh index c93fbc5eb3..69405f7106 100755 --- a/tests/pytest/fulltest.sh +++ b/tests/pytest/fulltest.sh @@ -335,5 +335,5 @@ python3 ./test.py -f tag_lite/alter_tag.py python3 test.py -f tools/taosdemoAllTest/taosdemoTestInsertWithJson.py python3 test.py -f tools/taosdemoAllTest/taosdemoTestQueryWithJson.py python3 test.py -f insert/insert_before_use_db.py - +python3 test.py -f alter/alter_cacheLastRow.py #======================p4-end=============== diff --git a/tests/pytest/tools/taosdemoAllTest/insert_5M_rows.json b/tests/pytest/tools/taosdemoAllTest/insert_5M_rows.json new file mode 100644 index 0000000000..4637009ca3 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/insert_5M_rows.json @@ -0,0 +1,60 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 100, + "num_of_records_per_req": 100, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 16, + "blocks": 8, + "precision": "ms", + "keep": 3650, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb", + "child_table_exists":"no", + "childtable_count": 500, + "childtable_prefix": "stb_", + "auto_create_table": "no", + "batch_create_tbl_num": 20, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 10000, + "childtable_limit": 10, + "childtable_offset":100, + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 10, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}], + "tags": [{"type": "TINYINT", "count":2}] + }] + }] +} \ No newline at end of file From d7ff9c999aae5b370d55bdc00f8d64ccd0a6a8d2 Mon Sep 17 00:00:00 2001 From: bryanchang0603 Date: Tue, 1 Jun 2021 17:11:39 +0800 Subject: [PATCH 59/82] [TD-4474] moldifying the test case --- tests/pytest/alter/alter_cacheLastRow.py | 39 +++++++++++++----------- 1 file changed, 21 insertions(+), 18 deletions(-) diff --git a/tests/pytest/alter/alter_cacheLastRow.py b/tests/pytest/alter/alter_cacheLastRow.py index cd08fe4f37..36a2864d0f 100644 --- a/tests/pytest/alter/alter_cacheLastRow.py +++ b/tests/pytest/alter/alter_cacheLastRow.py @@ -50,52 +50,55 @@ class TDTestCase: tdLog.info("taosd found in %s" % buildPath) binPath = buildPath + "/build/bin/" - os.system("%staosdemo -f tools/taosdemoAllTest/insert_100M_rows.json -y " % binPath) - # tdSql.query('select * from stb') - # tdSql.checkRows(1000*10000) + #write 5M rows into db, then restart to force the data move into disk. 
+ #create 500 tables + os.system("%staosdemo -f tools/taosdemoAllTest/insert_5M_rows.json -y " % binPath) tdDnodes.stop(1) tdDnodes.start(1) tdSql.execute('use db') - # tdSql.query('select * from stb') - # tdSql.checkRows(1000*10000) + + #prepare to query 500 tables last_row() tableName = [] for i in range(500): tableName.append(f"stb_{i}") - tdSql.execute('use db') lastRow_Off_start = datetime.now() - slow = 0 - for i in range(5): - for i in range(100): - for i in range(500): - tdSql.execute(f'SELECT LAST_ROW(*) FROM {tableName[i]}') + slow = 0 #count time where lastRow on is slower + for i in range(5): + #switch lastRow to off and check + tdSql.execute('alter database db cachelast 0') + tdSql.query('show databases') + tdSql.checkData(0,15,0) + + #run last_row(*) query 500 times + for i in range(500): + tdSql.execute(f'SELECT LAST_ROW(*) FROM {tableName[i]}') lastRow_Off_end = datetime.now() tdLog.debug(f'time used:{lastRow_Off_end-lastRow_Off_start}') + #switch lastRow to on and check tdSql.execute('alter database db cachelast 1') tdSql.query('show databases') tdSql.checkData(0,15,1) - - # tdDnodes.stop(1) - # tdDnodes.start(1) + #run last_row(*) query 500 times tdSql.execute('use db') lastRow_On_start = datetime.now() - for i in range(100): - for i in range(500): - tdSql.execute(f'SELECT LAST_ROW(*) FROM {tableName[i]}') + for i in range(500): + tdSql.execute(f'SELECT LAST_ROW(*) FROM {tableName[i]}') lastRow_On_end = datetime.now() tdLog.debug(f'time used:{lastRow_On_end-lastRow_On_start}') + #check which one used more time if (lastRow_Off_end-lastRow_Off_start > lastRow_On_end-lastRow_On_start): pass else: slow += 1 tdLog.debug(slow) - if slow > 1: + if slow > 1: #tolerance for the first time tdLog.exit('lastRow hot alter failed') def stop(self): tdSql.close() From 9eb020f900f18befe5d6c3b181944c13a11e0d36 Mon Sep 17 00:00:00 2001 From: bryanchang0603 Date: Tue, 1 Jun 2021 18:02:32 +0800 Subject: [PATCH 60/82] [TD-4475] adding draft test case --- tests/pytest/manualTest/manual_alter_block.py | 70 +++++++++++++++++++ .../insert_5Mrows_small_cache.json | 60 ++++++++++++++++ 2 files changed, 130 insertions(+) create mode 100644 tests/pytest/manualTest/manual_alter_block.py create mode 100644 tests/pytest/tools/taosdemoAllTest/insert_5Mrows_small_cache.json diff --git a/tests/pytest/manualTest/manual_alter_block.py b/tests/pytest/manualTest/manual_alter_block.py new file mode 100644 index 0000000000..a06e1bba6f --- /dev/null +++ b/tests/pytest/manualTest/manual_alter_block.py @@ -0,0 +1,70 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +from util.log import * +from util.cases import * +from util.sql import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root)-len("/build/bin")] + break + return buildPath + + def run(self): + tdSql.prepare() + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosd not found!") + else: + tdLog.info("taosd found in %s" % buildPath) + binPath = buildPath+ "/build/bin/" + + tdSql.execute('alter database db blocks 8') + tdSql.query('show databases') + tdSql.checkData(0,9,3) + + os.system("%staosdemo -f tools/taosdemoAllTest/insert_5Mrows_small_cache.json" % binPath) + + input("please check memory usage for taosd. After checking, press enter") + + tdSql.execute('alter database db blocks 8') + + os.system("%staosdemo -f tools/taosdemoAllTest/insert_5Mrows_small_cache.json" % binPath) + + input("please check memory usage for taosd. After checking, press enter") + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/tools/taosdemoAllTest/insert_5Mrows_small_cache.json b/tests/pytest/tools/taosdemoAllTest/insert_5Mrows_small_cache.json new file mode 100644 index 0000000000..d437735ebc --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/insert_5Mrows_small_cache.json @@ -0,0 +1,60 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 100, + "num_of_records_per_req": 100, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "no", + "replica": 1, + "days": 10, + "cache": 16, + "blocks": 3, + "precision": "ms", + "keep": 3650, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb", + "child_table_exists":"no", + "childtable_count": 500, + "childtable_prefix": "stb_", + "auto_create_table": "no", + "batch_create_tbl_num": 20, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 10000, + "childtable_limit": 10, + "childtable_offset":100, + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 10, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}], + "tags": [{"type": "TINYINT", "count":2}] + }] + }] +} \ No newline at end of file From 
b61cf07797dc428545c062b1b923e02f585c8c5c Mon Sep 17 00:00:00 2001 From: root Date: Tue, 1 Jun 2021 10:44:05 +0000 Subject: [PATCH 61/82] [TD-4440] --- tests/script/api/stmtBatchTest.c | 438 +++++++++++++++++++++++++++---- 1 file changed, 392 insertions(+), 46 deletions(-) diff --git a/tests/script/api/stmtBatchTest.c b/tests/script/api/stmtBatchTest.c index 80848febd1..24291adfa0 100644 --- a/tests/script/api/stmtBatchTest.c +++ b/tests/script/api/stmtBatchTest.c @@ -3486,6 +3486,204 @@ static int stmt_specifyCol_bind_case_002_autoCreateTbl(TAOS_STMT *stmt, int tabl v->ts[i] = tts + i; } + for (int i = 0; i < 1; ++i) { + //tags[i+0].buffer_type = TSDB_DATA_TYPE_INT; + //tags[i+0].buffer = v->v4; + //tags[i+0].is_null = &one_not_null; + //tags[i+0].length = NULL; + + tags[i+0].buffer_type = TSDB_DATA_TYPE_BOOL; + tags[i+0].buffer = v->b; + tags[i+0].is_null = &one_not_null; + tags[i+0].length = NULL; + + //tags[i+2].buffer_type = TSDB_DATA_TYPE_TINYINT; + //tags[i+2].buffer = v->v1; + //tags[i+2].is_null = &one_not_null; + //tags[i+2].length = NULL; + + tags[i+1].buffer_type = TSDB_DATA_TYPE_SMALLINT; + tags[i+1].buffer = v->v2; + tags[i+1].is_null = &one_not_null; + tags[i+1].length = NULL; + + tags[i+2].buffer_type = TSDB_DATA_TYPE_BIGINT; + tags[i+2].buffer = v->v8; + tags[i+2].is_null = &one_not_null; + tags[i+2].length = NULL; + + tags[i+3].buffer_type = TSDB_DATA_TYPE_FLOAT; + tags[i+3].buffer = v->f4; + tags[i+3].is_null = &one_not_null; + tags[i+3].length = NULL; + + tags[i+4].buffer_type = TSDB_DATA_TYPE_DOUBLE; + tags[i+4].buffer = v->f8; + tags[i+4].is_null = &one_not_null; + tags[i+4].length = NULL; + + tags[i+5].buffer_type = TSDB_DATA_TYPE_BINARY; + tags[i+5].buffer = v->br; + tags[i+5].is_null = &one_not_null; + tags[i+5].length = (uintptr_t *)lb; + + tags[i+6].buffer_type = TSDB_DATA_TYPE_NCHAR; + tags[i+6].buffer = v->nr; + tags[i+6].is_null = &one_not_null; + tags[i+6].length = (uintptr_t *)lb; + } + + + unsigned long long starttime = getCurrentTime(); + +// create table m%d (ts timestamp, b bool, v1 tinyint, v2 smallint, v4 int, v8 bigint, f4 float, f8 double, br binary(%d), nr nchar(%d), ts2 timestamp) + //char *sql = "insert into ? (ts,b,v4,f4,br) using stb1 tags (?,?,?,?,?,?,?,?,?) values(?,?,?,?,?)"; + char *sql = "insert into ? using stb1 tags (33,?,99,?,?,?,?,?,?) (ts,b,v4,f4,br) values(?,?,?,?,?)"; + + int code = taos_stmt_prepare(stmt, sql, 0); + if (code != 0){ + printf("failed to execute taos_stmt_prepare. code:0x%x[%s]\n", code, tstrerror(code)); + return -1; + } + + int id = 0; + for (int l = 0; l < bingNum; l++) { + for (int zz = 0; zz < tableNum; zz++) { + char buf[32]; + sprintf(buf, "m%d", zz); + code = taos_stmt_set_tbname_tags(stmt, buf, tags); + if (code != 0){ + printf("failed to execute taos_stmt_set_tbname. code:0x%x[%s]\n", code, tstrerror(code)); + return -1; + } + + for (int col=0; col < columnNum; ++col) { + code = taos_stmt_bind_single_param_batch(stmt, params + id, col); + if (code != 0){ + printf("failed to execute taos_stmt_bind_single_param_batch. code:0x%x[%s]\n", code, tstrerror(code)); + return -1; + } + id++; + } + + code = taos_stmt_add_batch(stmt); + if (code != 0) { + printf("failed to execute taos_stmt_add_batch. code:0x%x[%s]\n", code, tstrerror(code)); + return -1; + } + } + + code = taos_stmt_execute(stmt); + if (code != 0) { + printf("failed to execute taos_stmt_execute. 
code:0x%x[%s]\n", code, tstrerror(code)); + return -1; + } + } + + unsigned long long endtime = getCurrentTime(); + unsigned long long totalRows = (uint32_t)(totalRowsPerTbl * tableNum); + printf("insert total %d records, used %u seconds, avg:%u useconds per record\n", totalRows, (endtime-starttime)/1000000UL, (endtime-starttime)/totalRows); + + free(v->ts); + free(v->br); + free(v->nr); + free(v); + free(lb); + free(params); + free(tags); + free(is_null); + free(no_null); + + return 0; +} + +// some tags are null +static int stmt_specifyCol_bind_case_003_autoCreateTbl(TAOS_STMT *stmt, int tableNum, int rowsOfPerColum, int bingNum, int lenOfBinaryDef, int lenOfBinaryAct, int columnNum) { + sampleValue* v = (sampleValue *)calloc(1, sizeof(sampleValue)); + + int totalRowsPerTbl = rowsOfPerColum * bingNum; + + v->ts = (int64_t *)malloc(sizeof(int64_t) * (size_t)(totalRowsPerTbl * tableNum)); + v->br = (char *)malloc(sizeof(int64_t) * (size_t)(totalRowsPerTbl * lenOfBinaryDef)); + v->nr = (char *)malloc(sizeof(int64_t) * (size_t)(totalRowsPerTbl * lenOfBinaryDef)); + + int *lb = (int *)malloc(MAX_ROWS_OF_PER_COLUMN * sizeof(int)); + + TAOS_BIND *tags = calloc(1, sizeof(TAOS_BIND) * 9 * 1); + TAOS_MULTI_BIND *params = calloc(1, sizeof(TAOS_MULTI_BIND) * (size_t)(bingNum * columnNum * (tableNum+1) * rowsOfPerColum)); + char* is_null = malloc(sizeof(char) * MAX_ROWS_OF_PER_COLUMN); + char* no_null = malloc(sizeof(char) * MAX_ROWS_OF_PER_COLUMN); + int one_not_null = 0; + int one_is_null = 1; + + int64_t tts = 1591060628000; + + for (int i = 0; i < rowsOfPerColum; ++i) { + lb[i] = lenOfBinaryAct; + no_null[i] = 0; + is_null[i] = (i % 10 == 2) ? 1 : 0; + v->b[i] = (int8_t)(i % 2); + v->v1[i] = (int8_t)((i+1) % 2); + v->v2[i] = (int16_t)i; + v->v4[i] = (int32_t)(i+1); + v->v8[i] = (int64_t)(i+2); + v->f4[i] = (float)(i+3); + v->f8[i] = (double)(i+4); + char tbuf[MAX_BINARY_DEF_LEN]; + memset(tbuf, 0, MAX_BINARY_DEF_LEN); + sprintf(tbuf, "binary-%d-0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789", i%10); + memcpy(v->br + i*lenOfBinaryDef, tbuf, (size_t)lenOfBinaryAct); + memset(tbuf, 0, MAX_BINARY_DEF_LEN); + sprintf(tbuf, "nchar-%d-0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789", i%10); + memcpy(v->nr + i*lenOfBinaryDef, tbuf, (size_t)lenOfBinaryAct); + v->ts2[i] = tts + i; + } + + int i = 0; + for (int j = 0; j < bingNum * tableNum; j++) { + params[i+0].buffer_type = TSDB_DATA_TYPE_TIMESTAMP; + params[i+0].buffer_length = sizeof(int64_t); + params[i+0].buffer = &v->ts[j*rowsOfPerColum]; + params[i+0].length = NULL; + params[i+0].is_null = no_null; + params[i+0].num = rowsOfPerColum; + + params[i+1].buffer_type = TSDB_DATA_TYPE_BOOL; + params[i+1].buffer_length = sizeof(int8_t); + params[i+1].buffer = v->b; + params[i+1].length = NULL; + params[i+1].is_null = is_null; + params[i+1].num = rowsOfPerColum; + + params[i+2].buffer_type = TSDB_DATA_TYPE_INT; + params[i+2].buffer_length = sizeof(int32_t); + params[i+2].buffer = v->v4; + params[i+2].length = NULL; + params[i+2].is_null = is_null; + params[i+2].num = rowsOfPerColum; + + params[i+3].buffer_type = TSDB_DATA_TYPE_FLOAT; + params[i+3].buffer_length = sizeof(float); + params[i+3].buffer = v->f4; + params[i+3].length = NULL; + params[i+3].is_null = is_null; + params[i+3].num = rowsOfPerColum; + + params[i+4].buffer_type = 
TSDB_DATA_TYPE_BINARY; + params[i+4].buffer_length = (uintptr_t)lenOfBinaryDef; + params[i+4].buffer = v->br; + params[i+4].length = lb; + params[i+4].is_null = is_null; + params[i+4].num = rowsOfPerColum; + + i+=columnNum; + } + + //int64_t tts = 1591060628000; + for (int i = 0; i < totalRowsPerTbl * tableNum; ++i) { + v->ts[i] = tts + i; + } + for (int i = 0; i < 1; ++i) { tags[i+0].buffer_type = TSDB_DATA_TYPE_INT; tags[i+0].buffer = v->v4; @@ -3494,12 +3692,12 @@ static int stmt_specifyCol_bind_case_002_autoCreateTbl(TAOS_STMT *stmt, int tabl tags[i+1].buffer_type = TSDB_DATA_TYPE_BOOL; tags[i+1].buffer = v->b; - tags[i+1].is_null = &one_not_null; + tags[i+1].is_null = &one_is_null; tags[i+1].length = NULL; tags[i+2].buffer_type = TSDB_DATA_TYPE_TINYINT; tags[i+2].buffer = v->v1; - tags[i+2].is_null = &one_not_null; + tags[i+2].is_null = &one_is_null; tags[i+2].length = NULL; tags[i+3].buffer_type = TSDB_DATA_TYPE_SMALLINT; @@ -3514,7 +3712,7 @@ static int stmt_specifyCol_bind_case_002_autoCreateTbl(TAOS_STMT *stmt, int tabl tags[i+5].buffer_type = TSDB_DATA_TYPE_FLOAT; tags[i+5].buffer = v->f4; - tags[i+5].is_null = &one_not_null; + tags[i+5].is_null = &one_is_null; tags[i+5].length = NULL; tags[i+6].buffer_type = TSDB_DATA_TYPE_DOUBLE; @@ -3597,7 +3795,8 @@ static int stmt_specifyCol_bind_case_002_autoCreateTbl(TAOS_STMT *stmt, int tabl return 0; } -static int stmt_specifyCol_bind_case_003_autoCreateTbl(TAOS_STMT *stmt, int tableNum, int rowsOfPerColum, int bingNum, int lenOfBinaryDef, int lenOfBinaryAct, int columnNum) { +// specify tags field, and not support , then is error case +static int stmt_specifyCol_bind_case_004_autoCreateTbl(TAOS_STMT *stmt, int tableNum, int rowsOfPerColum, int bingNum, int lenOfBinaryDef, int lenOfBinaryAct, int columnNum) { sampleValue* v = (sampleValue *)calloc(1, sizeof(sampleValue)); int totalRowsPerTbl = rowsOfPerColum * bingNum; @@ -3683,50 +3882,50 @@ static int stmt_specifyCol_bind_case_003_autoCreateTbl(TAOS_STMT *stmt, int tabl } for (int i = 0; i < 1; ++i) { - tags[i+0].buffer_type = TSDB_DATA_TYPE_INT; - tags[i+0].buffer = v->v4; + //tags[i+0].buffer_type = TSDB_DATA_TYPE_INT; + //tags[i+0].buffer = v->v4; + //tags[i+0].is_null = &one_not_null; + //tags[i+0].length = NULL; + + tags[i+0].buffer_type = TSDB_DATA_TYPE_BOOL; + tags[i+0].buffer = v->b; tags[i+0].is_null = &one_not_null; tags[i+0].length = NULL; - tags[i+1].buffer_type = TSDB_DATA_TYPE_BOOL; - tags[i+1].buffer = v->b; + //tags[i+2].buffer_type = TSDB_DATA_TYPE_TINYINT; + //tags[i+2].buffer = v->v1; + //tags[i+2].is_null = &one_not_null; + //tags[i+2].length = NULL; + + tags[i+1].buffer_type = TSDB_DATA_TYPE_SMALLINT; + tags[i+1].buffer = v->v2; tags[i+1].is_null = &one_not_null; tags[i+1].length = NULL; - tags[i+2].buffer_type = TSDB_DATA_TYPE_TINYINT; - tags[i+2].buffer = v->v1; + tags[i+2].buffer_type = TSDB_DATA_TYPE_BIGINT; + tags[i+2].buffer = v->v8; tags[i+2].is_null = &one_not_null; tags[i+2].length = NULL; - tags[i+3].buffer_type = TSDB_DATA_TYPE_SMALLINT; - tags[i+3].buffer = v->v2; + tags[i+3].buffer_type = TSDB_DATA_TYPE_FLOAT; + tags[i+3].buffer = v->f4; tags[i+3].is_null = &one_not_null; tags[i+3].length = NULL; - tags[i+4].buffer_type = TSDB_DATA_TYPE_BIGINT; - tags[i+4].buffer = v->v8; + tags[i+4].buffer_type = TSDB_DATA_TYPE_DOUBLE; + tags[i+4].buffer = v->f8; tags[i+4].is_null = &one_not_null; tags[i+4].length = NULL; - tags[i+5].buffer_type = TSDB_DATA_TYPE_FLOAT; - tags[i+5].buffer = v->f4; + tags[i+5].buffer_type = TSDB_DATA_TYPE_BINARY; + tags[i+5].buffer = 
v->br; tags[i+5].is_null = &one_not_null; - tags[i+5].length = NULL; + tags[i+5].length = (uintptr_t *)lb; - tags[i+6].buffer_type = TSDB_DATA_TYPE_DOUBLE; - tags[i+6].buffer = v->f8; + tags[i+6].buffer_type = TSDB_DATA_TYPE_NCHAR; + tags[i+6].buffer = v->nr; tags[i+6].is_null = &one_not_null; - tags[i+6].length = NULL; - - tags[i+7].buffer_type = TSDB_DATA_TYPE_BINARY; - tags[i+7].buffer = v->br; - tags[i+7].is_null = &one_not_null; - tags[i+7].length = (uintptr_t *)lb; - - tags[i+8].buffer_type = TSDB_DATA_TYPE_NCHAR; - tags[i+8].buffer = v->nr; - tags[i+8].is_null = &one_not_null; - tags[i+8].length = (uintptr_t *)lb; + tags[i+6].length = (uintptr_t *)lb; } @@ -3734,7 +3933,7 @@ static int stmt_specifyCol_bind_case_003_autoCreateTbl(TAOS_STMT *stmt, int tabl // create table m%d (ts timestamp, b bool, v1 tinyint, v2 smallint, v4 int, v8 bigint, f4 float, f8 double, br binary(%d), nr nchar(%d), ts2 timestamp) //char *sql = "insert into ? (ts,b,v4,f4,br) using stb1 tags (?,?,?,?,?,?,?,?,?) values(?,?,?,?,?)"; - char *sql = "insert into ? using stb1 tags (33,?,99,?,?,?,?,?,?) (ts,b,v4,f4,br) values(?,?,?,?,?)"; + char *sql = "insert into ? using stb1 (id1, id2, id3, id4, id5, id6, id7, id8, id9) tags (33,?,99,?,?,?,?,?,?) (ts,b,v4,f4,br) values(?,?,?,?,?)"; int code = taos_stmt_prepare(stmt, sql, 0); if (code != 0){ @@ -3808,7 +4007,7 @@ static void SpecifyColumnBatchCase_autoCreateTbl(TAOS *taos) { //=======================================================================// //=============================== single table ==========================// //========== case 1: ======================// -#if 0 +#if 1 { stmt = taos_stmt_init(taos); @@ -3830,7 +4029,7 @@ static void SpecifyColumnBatchCase_autoCreateTbl(TAOS *taos) { #endif //========== case 2: ======================// -#if 0 +#if 1 { stmt = taos_stmt_init(taos); @@ -3861,7 +4060,7 @@ static void SpecifyColumnBatchCase_autoCreateTbl(TAOS *taos) { #endif //========== case 2-1: ======================// -#if 0 +#if 1 { stmt = taos_stmt_init(taos); @@ -3891,7 +4090,7 @@ static void SpecifyColumnBatchCase_autoCreateTbl(TAOS *taos) { } #endif //========== case 2-2: ======================// -#if 0 +#if 1 { printf("====case 2-2 error test start\n"); stmt = taos_stmt_init(taos); @@ -3924,7 +4123,7 @@ static void SpecifyColumnBatchCase_autoCreateTbl(TAOS *taos) { //========== case 3: ======================// -#if 0 +#if 1 { stmt = taos_stmt_init(taos); @@ -3955,7 +4154,7 @@ static void SpecifyColumnBatchCase_autoCreateTbl(TAOS *taos) { #endif //========== case 4: ======================// -#if 0 +#if 1 { stmt = taos_stmt_init(taos); @@ -3988,7 +4187,7 @@ static void SpecifyColumnBatchCase_autoCreateTbl(TAOS *taos) { //=======================================================================// //=============================== multi-rows to single table ==========================// //========== case 5: ======================// -#if 0 +#if 1 { stmt = taos_stmt_init(taos); @@ -4023,7 +4222,7 @@ static void SpecifyColumnBatchCase_autoCreateTbl(TAOS *taos) { columnNum = 5; prepareVcolumn_autoCreateTbl(taos, 1, tableNum, lenOfBinaryDef, "db6"); - stmt_specifyCol_bind_case_002_autoCreateTbl(stmt, tableNum, rowsOfPerColum, bingNum, lenOfBinaryDef, lenOfBinaryAct, columnNum); + stmt_specifyCol_bind_case_001_autoCreateTbl(stmt, tableNum, rowsOfPerColum, bingNum, lenOfBinaryDef, lenOfBinaryAct, columnNum); totalRowsPerTbl = rowsOfPerColum * bingNum; checkResult(taos, "m0", 0, totalRowsPerTbl); @@ -4033,28 +4232,175 @@ static void 
SpecifyColumnBatchCase_autoCreateTbl(TAOS *taos) { } #endif - //========== case 7: ======================// -#if 0 + //========== case 7: ======================// +#if 1 + { + stmt = taos_stmt_init(taos); + + tableNum = 200; + rowsOfPerColum = 60; + bingNum = 1; + lenOfBinaryDef = 40; + lenOfBinaryAct = 8; + columnNum = 5; + + prepareVcolumn_autoCreateTbl(taos, 1, tableNum, lenOfBinaryDef, "db7"); + stmt_specifyCol_bind_case_001_autoCreateTbl(stmt, tableNum, rowsOfPerColum, bingNum, lenOfBinaryDef, lenOfBinaryAct, columnNum); + + totalRowsPerTbl = rowsOfPerColum * bingNum; + checkResult(taos, "m0", 0, totalRowsPerTbl); + checkResult(taos, "m1", 0, totalRowsPerTbl); + checkResult(taos, "m99", 0, totalRowsPerTbl); + checkResult(taos, "m139", 0, totalRowsPerTbl); + checkResult(taos, "m199", 0, totalRowsPerTbl); + taos_stmt_close(stmt); + printf("case 7 check result end\n\n"); + } +#endif + + //========== case 8: ======================// +#if 1 { stmt = taos_stmt_init(taos); tableNum = 1; - rowsOfPerColum = 23740; + rowsOfPerColum = 5; bingNum = 1; lenOfBinaryDef = 40; lenOfBinaryAct = 8; - columnNum = 1; + columnNum = 5; - prepareVcolumn_autoCreateTbl(taos, 1, tableNum, lenOfBinaryDef, "db6"); - stmt_specifyCol_bind_case_003_autoCreateTbl(stmt, tableNum, rowsOfPerColum, bingNum, lenOfBinaryDef, lenOfBinaryAct, columnNum); + prepareVcolumn_autoCreateTbl(taos, 1, tableNum, lenOfBinaryDef, "db8"); + stmt_specifyCol_bind_case_002_autoCreateTbl(stmt, tableNum, rowsOfPerColum, bingNum, lenOfBinaryDef, lenOfBinaryAct, columnNum); totalRowsPerTbl = rowsOfPerColum * bingNum; checkResult(taos, "m0", 0, totalRowsPerTbl); taos_stmt_close(stmt); - printf("case 7 check result end\n\n"); + printf("case 8 check result end\n\n"); } #endif + //========== case 9: ======================// + +#if 1 + { + stmt = taos_stmt_init(taos); + + tableNum = 10; + rowsOfPerColum = 5; + bingNum = 1; + lenOfBinaryDef = 40; + lenOfBinaryAct = 8; + columnNum = 5; + + prepareVcolumn_autoCreateTbl(taos, 1, tableNum, lenOfBinaryDef, "db9"); + stmt_specifyCol_bind_case_002_autoCreateTbl(stmt, tableNum, rowsOfPerColum, bingNum, lenOfBinaryDef, lenOfBinaryAct, columnNum); + + totalRowsPerTbl = rowsOfPerColum * bingNum; + checkResult(taos, "m0", 0, totalRowsPerTbl); + checkResult(taos, "m3", 0, totalRowsPerTbl); + checkResult(taos, "m6", 0, totalRowsPerTbl); + checkResult(taos, "m9", 0, totalRowsPerTbl); + taos_stmt_close(stmt); + printf("case 9 check result end\n\n"); + } +#endif + + //========== case 10: ======================// +#if 1 + { + stmt = taos_stmt_init(taos); + + tableNum = 1; + rowsOfPerColum = 23740; + bingNum = 1; + lenOfBinaryDef = 40; + lenOfBinaryAct = 8; + columnNum = 5; + + prepareVcolumn_autoCreateTbl(taos, 1, tableNum, lenOfBinaryDef, "db10"); + stmt_specifyCol_bind_case_003_autoCreateTbl(stmt, tableNum, rowsOfPerColum, bingNum, lenOfBinaryDef, lenOfBinaryAct, columnNum); + + totalRowsPerTbl = rowsOfPerColum * bingNum; + checkResult(taos, "m0", 0, totalRowsPerTbl); + taos_stmt_close(stmt); + printf("case 10 check result end\n\n"); + } +#endif + + //========== case 11: ======================// +#if 1 + { + stmt = taos_stmt_init(taos); + + tableNum = 2; + rowsOfPerColum = 5; + bingNum = 1; + lenOfBinaryDef = 40; + lenOfBinaryAct = 8; + columnNum = 5; + + prepareVcolumn_autoCreateTbl(taos, 1, tableNum, lenOfBinaryDef, "db11"); + stmt_specifyCol_bind_case_003_autoCreateTbl(stmt, tableNum, rowsOfPerColum, bingNum, lenOfBinaryDef, lenOfBinaryAct, columnNum); + + totalRowsPerTbl = rowsOfPerColum * bingNum; + 
checkResult(taos, "m0", 0, totalRowsPerTbl); + checkResult(taos, "m1", 0, totalRowsPerTbl); + taos_stmt_close(stmt); + printf("case 11 check result end\n\n"); + } +#endif + + //========== case 12: ======================// +#if 1 + { + stmt = taos_stmt_init(taos); + + tableNum = 200; + rowsOfPerColum = 60; + bingNum = 1; + lenOfBinaryDef = 40; + lenOfBinaryAct = 8; + columnNum = 5; + + prepareVcolumn_autoCreateTbl(taos, 1, tableNum, lenOfBinaryDef, "db12"); + stmt_specifyCol_bind_case_003_autoCreateTbl(stmt, tableNum, rowsOfPerColum, bingNum, lenOfBinaryDef, lenOfBinaryAct, columnNum); + + totalRowsPerTbl = rowsOfPerColum * bingNum; + checkResult(taos, "m0", 0, totalRowsPerTbl); + checkResult(taos, "m1", 0, totalRowsPerTbl); + checkResult(taos, "m99", 0, totalRowsPerTbl); + checkResult(taos, "m139", 0, totalRowsPerTbl); + checkResult(taos, "m199", 0, totalRowsPerTbl); + taos_stmt_close(stmt); + printf("case 12 check result end\n\n"); + } +#endif + + + //========== case 13: ======================// +#if 1 + { + printf("====case 13 error test start\n"); + stmt = taos_stmt_init(taos); + + tableNum = 1; + rowsOfPerColum = 8; + bingNum = 1; + lenOfBinaryDef = 40; + lenOfBinaryAct = 8; + columnNum = 5; + + prepareVcolumn_autoCreateTbl(taos, 1, tableNum, lenOfBinaryDef, "db13"); + stmt_specifyCol_bind_case_004_autoCreateTbl(stmt, tableNum, rowsOfPerColum, bingNum, lenOfBinaryDef, lenOfBinaryAct, columnNum); + + totalRowsPerTbl = rowsOfPerColum * bingNum; + checkResult(taos, "m0", 0, totalRowsPerTbl); + taos_stmt_close(stmt); + printf("====case 13 check result end\n\n"); + } +#endif + return ; } From 08f33005b6d1b5d7048ce046e1475558627e832a Mon Sep 17 00:00:00 2001 From: bryanchang0603 Date: Tue, 1 Jun 2021 19:02:34 +0800 Subject: [PATCH 62/82] [TD-4475] update test case --- tests/pytest/manualTest/manual_alter_block.py | 8 ++- .../taosdemoAllTest/insert_5Mrows_hasTB.json | 60 +++++++++++++++++++ ...all_cache.json => insert_5Mrows_noTB.json} | 0 3 files changed, 65 insertions(+), 3 deletions(-) create mode 100644 tests/pytest/tools/taosdemoAllTest/insert_5Mrows_hasTB.json rename tests/pytest/tools/taosdemoAllTest/{insert_5Mrows_small_cache.json => insert_5Mrows_noTB.json} (100%) diff --git a/tests/pytest/manualTest/manual_alter_block.py b/tests/pytest/manualTest/manual_alter_block.py index a06e1bba6f..6d80006d7d 100644 --- a/tests/pytest/manualTest/manual_alter_block.py +++ b/tests/pytest/manualTest/manual_alter_block.py @@ -47,17 +47,19 @@ class TDTestCase: tdLog.info("taosd found in %s" % buildPath) binPath = buildPath+ "/build/bin/" - tdSql.execute('alter database db blocks 8') + tdSql.execute('alter database db blocks 3') tdSql.query('show databases') tdSql.checkData(0,9,3) - os.system("%staosdemo -f tools/taosdemoAllTest/insert_5Mrows_small_cache.json" % binPath) + os.system("%staosdemo -f tools/taosdemoAllTest/insert_5Mrows_noTB.json" % binPath) input("please check memory usage for taosd. After checking, press enter") tdSql.execute('alter database db blocks 8') + tdSql.query('show databases') + tdSql.checkData(0,9,8) - os.system("%staosdemo -f tools/taosdemoAllTest/insert_5Mrows_small_cache.json" % binPath) + os.system("%staosdemo -f tools/taosdemoAllTest/insert_5Mrows_hasTB.json" % binPath) input("please check memory usage for taosd. 
After checking, press enter") diff --git a/tests/pytest/tools/taosdemoAllTest/insert_5Mrows_hasTB.json b/tests/pytest/tools/taosdemoAllTest/insert_5Mrows_hasTB.json new file mode 100644 index 0000000000..9d22dbaabe --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/insert_5Mrows_hasTB.json @@ -0,0 +1,60 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 100, + "num_of_records_per_req": 100, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "no", + "replica": 1, + "days": 10, + "cache": 16, + "blocks": 3, + "precision": "ms", + "keep": 3650, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb", + "child_table_exists":"yes", + "childtable_count": 500, + "childtable_prefix": "stb_", + "auto_create_table": "no", + "batch_create_tbl_num": 20, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 10000, + "childtable_limit": 500, + "childtable_offset":0, + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 10, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}], + "tags": [{"type": "TINYINT", "count":2}] + }] + }] +} \ No newline at end of file diff --git a/tests/pytest/tools/taosdemoAllTest/insert_5Mrows_small_cache.json b/tests/pytest/tools/taosdemoAllTest/insert_5Mrows_noTB.json similarity index 100% rename from tests/pytest/tools/taosdemoAllTest/insert_5Mrows_small_cache.json rename to tests/pytest/tools/taosdemoAllTest/insert_5Mrows_noTB.json From f7d725e34afdc3c8ca98630c559676c44e6eef3c Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Tue, 1 Jun 2021 20:48:09 +0800 Subject: [PATCH 63/82] [TD-3078]: fix arbitrator create time --- src/common/inc/tglobal.h | 1 + src/common/src/tglobal.c | 1 + src/inc/taosdef.h | 2 ++ src/mnode/src/mnodeDnode.c | 2 +- src/sync/src/syncMain.c | 7 ++++++- 5 files changed, 11 insertions(+), 2 deletions(-) diff --git a/src/common/inc/tglobal.h b/src/common/inc/tglobal.h index 1e66ce3f0c..07d614d5ef 100644 --- a/src/common/inc/tglobal.h +++ b/src/common/inc/tglobal.h @@ -39,6 +39,7 @@ extern int8_t tsEnableTelemetryReporting; extern char tsEmail[]; extern char tsArbitrator[]; extern int8_t tsArbOnline; +extern int64_t tsArbOnlineTimestamp; extern int32_t tsDnodeId; // common diff --git a/src/common/src/tglobal.c b/src/common/src/tglobal.c index e2237bbee6..ed91695569 100644 --- a/src/common/src/tglobal.c +++ b/src/common/src/tglobal.c @@ -42,6 +42,7 @@ int32_t tsNumOfMnodes = 3; int8_t tsEnableVnodeBak = 1; int8_t tsEnableTelemetryReporting = 1; int8_t tsArbOnline = 0; +int64_t tsArbOnlineTimestamp = TSDB_ARB_DUMMY_TIME; char tsEmail[TSDB_FQDN_LEN] = {0}; int32_t tsDnodeId = 0; diff --git a/src/inc/taosdef.h b/src/inc/taosdef.h index 024bc198df..672d460f2c 100644 --- a/src/inc/taosdef.h +++ b/src/inc/taosdef.h @@ -373,6 +373,8 @@ do { \ #define TSDB_MAX_WAL_SIZE (1024*1024*3) +#define TSDB_ARB_DUMMY_TIME 4765104000000 // 2121-01-01 00:00:00.000, :P + typedef enum { TAOS_QTYPE_RPC = 0, TAOS_QTYPE_FWD = 1, diff --git a/src/mnode/src/mnodeDnode.c b/src/mnode/src/mnodeDnode.c index 
b513da29f4..1acb91beab 100644 --- a/src/mnode/src/mnodeDnode.c +++ b/src/mnode/src/mnodeDnode.c @@ -941,7 +941,7 @@ static int32_t mnodeRetrieveDnodes(SShowObj *pShow, char *data, int32_t rows, vo cols++; pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows; - *(int64_t *)pWrite = 0; + *(int64_t *)pWrite = tsArbOnlineTimestamp; cols++; pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows; diff --git a/src/sync/src/syncMain.c b/src/sync/src/syncMain.c index e44b76d9b0..8ce37cbf78 100644 --- a/src/sync/src/syncMain.c +++ b/src/sync/src/syncMain.c @@ -1150,7 +1150,12 @@ static void syncSetupPeerConnection(SSyncPeer *pPeer) { pPeer->peerFd = connFd; pPeer->role = TAOS_SYNC_ROLE_UNSYNCED; pPeer->pConn = syncAllocateTcpConn(tsTcpPool, pPeer->rid, connFd); - if (pPeer->isArb) tsArbOnline = 1; + if (pPeer->isArb) { + tsArbOnline = 1; + if (tsArbOnlineTimestamp == TSDB_ARB_DUMMY_TIME) { + tsArbOnlineTimestamp = taosGetTimestampMs(); + } + } } else { sDebug("%s, failed to setup peer connection to server since %s, try later", pPeer->id, strerror(errno)); taosCloseSocket(connFd); From 07c48dadd956439e2d29799f02579b5b54db93bb Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Tue, 1 Jun 2021 21:32:21 +0800 Subject: [PATCH 64/82] Feature/sangshuduo/td 3973 use jemalloc (#6334) * [TD-3973]: add jemalloc as submodule. * add macro definitions in cmake. * [TD-3973]: use jemalloc. build works as following instructions: cmake .. -DJEMALLOC_ENABLED=true make * fix jemalloc at tag 5.2.1 * link jemalloc works. * make install works. * support jemalloc in release.sh. * release script works. Co-authored-by: Shuduo Sang --- packaging/deb/makedeb.sh | 50 +++++- packaging/rpm/makerpm.sh | 6 +- packaging/rpm/tdengine.spec | 81 +++++++-- packaging/tools/install.sh | 309 ++++++++++++++++++-------------- packaging/tools/make_install.sh | 2 +- packaging/tools/makepkg.sh | 51 +++++- 6 files changed, 329 insertions(+), 170 deletions(-) diff --git a/packaging/deb/makedeb.sh b/packaging/deb/makedeb.sh index 28be037e6c..e6ddb6d742 100755 --- a/packaging/deb/makedeb.sh +++ b/packaging/deb/makedeb.sh @@ -24,14 +24,14 @@ echo "compile_dir: ${compile_dir}" echo "pkg_dir: ${pkg_dir}" if [ -d ${pkg_dir} ]; then - rm -rf ${pkg_dir} + rm -rf ${pkg_dir} fi mkdir -p ${pkg_dir} cd ${pkg_dir} libfile="libtaos.so.${tdengine_ver}" -# create install dir +# create install dir install_home_path="/usr/local/taos" mkdir -p ${pkg_dir}${install_home_path} mkdir -p ${pkg_dir}${install_home_path}/bin @@ -42,7 +42,7 @@ mkdir -p ${pkg_dir}${install_home_path}/examples mkdir -p ${pkg_dir}${install_home_path}/include mkdir -p ${pkg_dir}${install_home_path}/init.d mkdir -p ${pkg_dir}${install_home_path}/script - + cp ${compile_dir}/../packaging/cfg/taos.cfg ${pkg_dir}${install_home_path}/cfg cp ${compile_dir}/../packaging/deb/taosd ${pkg_dir}${install_home_path}/init.d cp ${compile_dir}/../packaging/tools/post.sh ${pkg_dir}${install_home_path}/script @@ -54,7 +54,7 @@ cp ${compile_dir}/build/bin/taosdemo ${pkg_dir}${install_home_pat cp ${compile_dir}/build/bin/taosdump ${pkg_dir}${install_home_path}/bin cp ${compile_dir}/build/bin/taosd ${pkg_dir}${install_home_path}/bin cp ${compile_dir}/build/bin/taos ${pkg_dir}${install_home_path}/bin -cp ${compile_dir}/build/lib/${libfile} ${pkg_dir}${install_home_path}/driver +cp ${compile_dir}/build/lib/${libfile} ${pkg_dir}${install_home_path}/driver cp ${compile_dir}/../src/inc/taos.h ${pkg_dir}${install_home_path}/include cp ${compile_dir}/../src/inc/taoserror.h 
${pkg_dir}${install_home_path}/include cp -r ${top_dir}/tests/examples/* ${pkg_dir}${install_home_path}/examples @@ -67,7 +67,41 @@ fi cp -r ${top_dir}/src/connector/python ${pkg_dir}${install_home_path}/connector cp -r ${top_dir}/src/connector/go ${pkg_dir}${install_home_path}/connector cp -r ${top_dir}/src/connector/nodejs ${pkg_dir}${install_home_path}/connector -cp ${compile_dir}/build/lib/taos-jdbcdriver*dist.* ${pkg_dir}${install_home_path}/connector ||: +cp ${compile_dir}/build/lib/taos-jdbcdriver*.* ${pkg_dir}${install_home_path}/connector ||: + +if [ -f ${compile_dir}/build/bin/jemalloc-config ]; then + install_user_local_path="/usr/local" + mkdir -p ${pkg_dir}${install_user_local_path}/{bin,lib,lib/pkgconfig,include/jemalloc,share/doc/jemalloc,share/man/man3} + cp ${compile_dir}/build/bin/jemalloc-config ${pkg_dir}${install_user_local_path}/bin/ + if [ -f ${compile_dir}/build/bin/jemalloc.sh ]; then + cp ${compile_dir}/build/bin/jemalloc.sh ${pkg_dir}${install_user_local_path}/bin/ + fi + if [ -f ${compile_dir}/build/bin/jeprof ]; then + cp ${compile_dir}/build/bin/jeprof ${pkg_dir}${install_user_local_path}/bin/ + fi + if [ -f ${compile_dir}/build/include/jemalloc/jemalloc.h ]; then + cp ${compile_dir}/build/include/jemalloc/jemalloc.h ${pkg_dir}${install_user_local_path}/include/jemalloc/ + fi + if [ -f ${compile_dir}/build/lib/libjemalloc.so.2 ]; then + cp ${compile_dir}/build/lib/libjemalloc.so.2 ${pkg_dir}${install_user_local_path}/lib/ + ln -sf libjemalloc.so.2 ${pkg_dir}${install_user_local_path}/lib/libjemalloc.so + fi + if [ -f ${compile_dir}/build/lib/libjemalloc.a ]; then + cp ${compile_dir}/build/lib/libjemalloc.a ${pkg_dir}${install_user_local_path}/lib/ + fi + if [ -f ${compile_dir}/build/lib/libjemalloc_pic.a ]; then + cp ${compile_dir}/build/lib/libjemalloc_pic.a ${pkg_dir}${install_user_local_path}/lib/ + fi + if [ -f ${compile_dir}/build/lib/pkgconfig/jemalloc.pc ]; then + cp ${compile_dir}/build/lib/pkgconfig/jemalloc.pc ${pkg_dir}${install_user_local_path}/lib/pkgconfig/ + fi + if [ -f ${compile_dir}/build/share/doc/jemalloc/jemalloc.html ]; then + cp ${compile_dir}/build/share/doc/jemalloc/jemalloc.html ${pkg_dir}${install_user_local_path}/share/doc/jemalloc/ + fi + if [ -f ${compile_dir}/build/share/man/man3/jemalloc.3 ]; then + cp ${compile_dir}/build/share/man/man3/jemalloc.3 ${pkg_dir}${install_user_local_path}/share/man/man3/ + fi +fi cp -r ${compile_dir}/../packaging/deb/DEBIAN ${pkg_dir}/ chmod 755 ${pkg_dir}/DEBIAN/* @@ -75,7 +109,7 @@ chmod 755 ${pkg_dir}/DEBIAN/* # modify version of control debver="Version: "$tdengine_ver sed -i "2c$debver" ${pkg_dir}/DEBIAN/control - + #get taos version, then set deb name @@ -90,7 +124,7 @@ fi if [ "$verType" == "beta" ]; then debname=${debname}-${verType}".deb" -elif [ "$verType" == "stable" ]; then +elif [ "$verType" == "stable" ]; then debname=${debname}".deb" else echo "unknow verType, nor stabel or beta" @@ -101,7 +135,7 @@ fi dpkg -b ${pkg_dir} $debname echo "make deb package success!" 
-cp ${pkg_dir}/*.deb ${output_dir} +cp ${pkg_dir}/*.deb ${output_dir} # clean tmep dir rm -rf ${pkg_dir} diff --git a/packaging/rpm/makerpm.sh b/packaging/rpm/makerpm.sh index 678e75c500..7c3272f8d0 100755 --- a/packaging/rpm/makerpm.sh +++ b/packaging/rpm/makerpm.sh @@ -1,6 +1,6 @@ #!/bin/bash # -# Generate rpm package for centos +# Generate rpm package for centos set -e # set -x @@ -60,7 +60,7 @@ ${csudo} rpmbuild --define="_version ${tdengine_ver}" --define="_topdir ${pkg_di # copy rpm package to output_dir, and modify package name, then clean temp dir #${csudo} cp -rf RPMS/* ${output_dir} -cp_rpm_package ${pkg_dir}/RPMS +cp_rpm_package ${pkg_dir}/RPMS if [ "$verMode" == "cluster" ]; then @@ -74,7 +74,7 @@ fi if [ "$verType" == "beta" ]; then rpmname=${rpmname}-${verType}".rpm" -elif [ "$verType" == "stable" ]; then +elif [ "$verType" == "stable" ]; then rpmname=${rpmname}".rpm" else echo "unknow verType, nor stabel or beta" diff --git a/packaging/rpm/tdengine.spec b/packaging/rpm/tdengine.spec index 9910e20bfe..8a870286ab 100644 --- a/packaging/rpm/tdengine.spec +++ b/packaging/rpm/tdengine.spec @@ -1,4 +1,5 @@ %define homepath /usr/local/taos +%define userlocalpath /usr/local %define cfg_install_dir /etc/taos %define __strip /bin/true @@ -12,22 +13,22 @@ URL: www.taosdata.com AutoReqProv: no #BuildRoot: %_topdir/BUILDROOT -BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root +BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root #Prefix: /usr/local/taos -#BuildRequires: -#Requires: +#BuildRequires: +#Requires: %description Big Data Platform Designed and Optimized for IoT -#"prep" Nothing needs to be done +#"prep" Nothing needs to be done #%prep #%setup -q -#%setup -T +#%setup -T -#"build" Nothing needs to be done +#"build" Nothing needs to be done #%build #%configure #make %{?_smp_mflags} @@ -75,9 +76,53 @@ fi cp -r %{_compiledir}/../src/connector/python %{buildroot}%{homepath}/connector cp -r %{_compiledir}/../src/connector/go %{buildroot}%{homepath}/connector cp -r %{_compiledir}/../src/connector/nodejs %{buildroot}%{homepath}/connector -cp %{_compiledir}/build/lib/taos-jdbcdriver*dist.* %{buildroot}%{homepath}/connector ||: +cp %{_compiledir}/build/lib/taos-jdbcdriver*.* %{buildroot}%{homepath}/connector ||: cp -r %{_compiledir}/../tests/examples/* %{buildroot}%{homepath}/examples + +if [ -f %{_compiledir}/build/bin/jemalloc-config ]; then + mkdir -p %{buildroot}%{userlocalpath}/bin + mkdir -p %{buildroot}%{userlocalpath}/lib + mkdir -p %{buildroot}%{userlocalpath}/lib/pkgconfig + mkdir -p %{buildroot}%{userlocalpath}/include + mkdir -p %{buildroot}%{userlocalpath}/include/jemalloc + mkdir -p %{buildroot}%{userlocalpath}/share + mkdir -p %{buildroot}%{userlocalpath}/share/doc + mkdir -p %{buildroot}%{userlocalpath}/share/doc/jemalloc + mkdir -p %{buildroot}%{userlocalpath}/share/man + mkdir -p %{buildroot}%{userlocalpath}/share/man/man3 + + cp %{_compiledir}/build/bin/jemalloc-config %{buildroot}%{userlocalpath}/bin/ + if [ -f %{_compiledir}/build/bin/jemalloc.sh ]; then + cp %{_compiledir}/build/bin/jemalloc.sh %{buildroot}%{userlocalpath}/bin/ + fi + if [ -f %{_compiledir}/build/bin/jeprof ]; then + cp %{_compiledir}/build/bin/jeprof %{buildroot}%{userlocalpath}/bin/ + fi + if [ -f %{_compiledir}/build/include/jemalloc/jemalloc.h ]; then + cp %{_compiledir}/build/include/jemalloc/jemalloc.h %{buildroot}%{userlocalpath}/include/jemalloc/ + fi + if [ -f %{_compiledir}/build/lib/libjemalloc.so.2 ]; then + cp %{_compiledir}/build/lib/libjemalloc.so.2 
%{buildroot}%{userlocalpath}/lib/ + ln -sf libjemalloc.so.2 %{buildroot}%{userlocalpath}/lib/libjemalloc.so + fi + if [ -f %{_compiledir}/build/lib/libjemalloc.a ]; then + cp %{_compiledir}/build/lib/libjemalloc.a %{buildroot}%{userlocalpath}/lib/ + fi + if [ -f %{_compiledir}/build/lib/libjemalloc_pic.a ]; then + cp %{_compiledir}/build/lib/libjemalloc_pic.a %{buildroot}%{userlocalpath}/lib/ + fi + if [ -f %{_compiledir}/build/lib/pkgconfig/jemalloc.pc ]; then + cp %{_compiledir}/build/lib/pkgconfig/jemalloc.pc %{buildroot}%{userlocalpath}/lib/pkgconfig/ + fi + if [ -f %{_compiledir}/build/share/doc/jemalloc/jemalloc.html ]; then + cp %{_compiledir}/build/share/doc/jemalloc/jemalloc.html %{buildroot}%{userlocalpath}/share/doc/jemalloc/ + fi + if [ -f %{_compiledir}/build/share/man/man3/jemalloc.3 ]; then + cp %{_compiledir}/build/share/man/man3/jemalloc.3 %{buildroot}%{userlocalpath}/share/man/man3/ + fi +fi + #Scripts executed before installation %pre csudo="" @@ -103,7 +148,7 @@ fi # if taos.cfg already softlink, remove it if [ -f %{cfg_install_dir}/taos.cfg ]; then ${csudo} rm -f %{homepath}/cfg/taos.cfg || : -fi +fi # there can not libtaos.so*, otherwise ln -s error ${csudo} rm -f %{homepath}/driver/libtaos* || : @@ -116,18 +161,18 @@ if command -v sudo > /dev/null; then fi cd %{homepath}/script ${csudo} ./post.sh - + # Scripts executed before uninstall %preun csudo="" if command -v sudo > /dev/null; then csudo="sudo" fi -# only remove package to call preun.sh, not but update(2) +# only remove package to call preun.sh, not but update(2) if [ $1 -eq 0 ];then #cd %{homepath}/script #${csudo} ./preun.sh - + if [ -f %{homepath}/script/preun.sh ]; then cd %{homepath}/script ${csudo} ./preun.sh @@ -135,7 +180,7 @@ if [ $1 -eq 0 ];then bin_link_dir="/usr/bin" lib_link_dir="/usr/lib" inc_link_dir="/usr/include" - + data_link_dir="/usr/local/taos/data" log_link_dir="/usr/local/taos/log" cfg_link_dir="/usr/local/taos/cfg" @@ -149,20 +194,20 @@ if [ $1 -eq 0 ];then ${csudo} rm -f ${inc_link_dir}/taos.h || : ${csudo} rm -f ${inc_link_dir}/taoserror.h || : ${csudo} rm -f ${lib_link_dir}/libtaos.* || : - + ${csudo} rm -f ${log_link_dir} || : ${csudo} rm -f ${data_link_dir} || : - + pid=$(ps -ef | grep "taosd" | grep -v "grep" | awk '{print $2}') if [ -n "$pid" ]; then ${csudo} kill -9 $pid || : - fi - fi + fi + fi fi - + # Scripts executed after uninstall %postun - + # clean build dir %clean csudo="" diff --git a/packaging/tools/install.sh b/packaging/tools/install.sh index 178a248cfe..325ac81053 100755 --- a/packaging/tools/install.sh +++ b/packaging/tools/install.sh @@ -59,11 +59,11 @@ initd_mod=0 service_mod=2 if pidof systemd &> /dev/null; then service_mod=0 -elif $(which service &> /dev/null); then +elif $(which service &> /dev/null); then service_mod=1 - service_config_dir="/etc/init.d" + service_config_dir="/etc/init.d" if $(which chkconfig &> /dev/null); then - initd_mod=1 + initd_mod=1 elif $(which insserv &> /dev/null); then initd_mod=2 elif $(which update-rc.d &> /dev/null); then @@ -71,7 +71,7 @@ elif $(which service &> /dev/null); then else service_mod=2 fi -else +else service_mod=2 fi @@ -103,7 +103,7 @@ elif echo $osinfo | grep -qwi "fedora" ; then os_type=2 else echo " osinfo: ${osinfo}" - echo " This is an officially unverified linux system," + echo " This is an officially unverified linux system," echo " if there are any problems with the installation and operation, " echo " please feel free to contact taosdata.com for support." 
os_type=1 @@ -138,7 +138,7 @@ do echo "Usage: `basename $0` -v [server | client] -e [yes | no]" exit 0 ;; - ?) #unknow option + ?) #unknow option echo "unkonw argument" exit 1 ;; @@ -157,9 +157,9 @@ function kill_process() { function install_main_path() { #create install main dir and all sub dir ${csudo} rm -rf ${install_main_dir} || : - ${csudo} mkdir -p ${install_main_dir} + ${csudo} mkdir -p ${install_main_dir} ${csudo} mkdir -p ${install_main_dir}/cfg - ${csudo} mkdir -p ${install_main_dir}/bin + ${csudo} mkdir -p ${install_main_dir}/bin ${csudo} mkdir -p ${install_main_dir}/connector ${csudo} mkdir -p ${install_main_dir}/driver ${csudo} mkdir -p ${install_main_dir}/examples @@ -168,10 +168,10 @@ function install_main_path() { if [ "$verMode" == "cluster" ]; then ${csudo} mkdir -p ${nginx_dir} fi - + if [[ -e ${script_dir}/email ]]; then - ${csudo} cp ${script_dir}/email ${install_main_dir}/ ||: - fi + ${csudo} cp ${script_dir}/email ${install_main_dir}/ ||: + fi } function install_bin() { @@ -207,29 +207,75 @@ function install_lib() { ${csudo} rm -f ${lib_link_dir}/libtaos.* || : ${csudo} rm -f ${lib64_link_dir}/libtaos.* || : #${csudo} rm -rf ${v15_java_app_dir} || : - ${csudo} cp -rf ${script_dir}/driver/* ${install_main_dir}/driver && ${csudo} chmod 777 ${install_main_dir}/driver/* - + ${csudo} cp -rf ${script_dir}/driver/* ${install_main_dir}/driver && ${csudo} chmod 777 ${install_main_dir}/driver/* + ${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.so.1 ${csudo} ln -s ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so - + if [[ -d ${lib64_link_dir} && ! -e ${lib64_link_dir}/libtaos.so ]]; then ${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib64_link_dir}/libtaos.so.1 || : ${csudo} ln -s ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || : fi - - #if [ "$verMode" == "cluster" ]; then + + #if [ "$verMode" == "cluster" ]; then # # Compatible with version 1.5 # ${csudo} mkdir -p ${v15_java_app_dir} # ${csudo} ln -s ${install_main_dir}/connector/taos-jdbcdriver-1.0.2-dist.jar ${v15_java_app_dir}/JDBCDriver-1.0.2-dist.jar # ${csudo} chmod 777 ${v15_java_app_dir} || : #fi - + ${csudo} ldconfig } +function install_jemalloc() { + jemalloc_dir=${script_dir}/jemalloc + + if [ -d ${jemalloc_dir} ]; then + ${csudo} /usr/bin/install -c -d /usr/local/bin + + if [ -f ${jemalloc_dir}/bin/jemalloc-config ]; then + ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jemalloc-config /usr/local/bin + fi + if [ -f ${jemalloc_dir}/bin/jemalloc.sh ]; then + ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jemalloc.sh /usr/local/bin + fi + if [ -f ${jemalloc_dir}/bin/jeprof ]; then + ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jeprof /usr/local/bin + fi + if [ -f ${jemalloc_dir}/include/jemalloc/jemalloc.h ]; then + ${csudo} /usr/bin/install -c -d /usr/local/include/jemalloc + ${csudo} /usr/bin/install -c -m 644 ${jemalloc_dir}/include/jemalloc/jemalloc.h /usr/local/include/jemalloc + fi + if [ -f ${jemalloc_dir}/lib/libjemalloc.so.2 ]; then + ${csudo} /usr/bin/install -c -d /usr/local/lib + ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.so.2 /usr/local/lib + ${csudo} ln -sf libjemalloc.so.2 /usr/local/lib/libjemalloc.so + ${csudo} /usr/bin/install -c -d /usr/local/lib + if [ -f ${jemalloc_dir}/lib/libjemalloc.a ]; then + ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.a /usr/local/lib + fi + if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then + ${csudo} /usr/bin/install -c -m 755 
${jemalloc_dir}/lib/libjemalloc_pic.a /usr/local/lib + fi + if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then + ${csudo} /usr/bin/install -c -d /usr/local/lib/pkgconfig + ${csudo} /usr/bin/install -c -m 644 ${jemalloc_dir}/lib/pkgconfig/jemalloc.pc /usr/local/lib/pkgconfig + fi + fi + if [ -f ${jemalloc_dir}/share/doc/jemalloc/jemalloc.html ]; then + ${csudo} /usr/bin/install -c -d /usr/local/share/doc/jemalloc + ${csudo} /usr/bin/install -c -m 644 ${jemalloc_dir}/share/doc/jemalloc/jemalloc.html /usr/local/share/doc/jemalloc + fi + if [ -f ${jemalloc_dir}/share/man/man3/jemalloc.3 ]; then + ${csudo} /usr/bin/install -c -d /usr/local/share/man/man3 + ${csudo} /usr/bin/install -c -m 644 ${jemalloc_dir}/share/man/man3/jemalloc.3 /usr/local/share/man/man3 + fi + fi +} + function install_header() { ${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taoserror.h || : - ${csudo} cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo} chmod 644 ${install_main_dir}/include/* + ${csudo} cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo} chmod 644 ${install_main_dir}/include/* ${csudo} ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h ${csudo} ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h } @@ -246,13 +292,13 @@ function add_newHostname_to_hosts() { if [[ "$s" == "$localIp" ]]; then return fi - done + done ${csudo} echo "127.0.0.1 $1" >> /etc/hosts ||: } function set_hostname() { echo -e -n "${GREEN}Please enter one hostname(must not be 'localhost')${NC}:" - read newHostname + read newHostname while true; do if [[ ! -z "$newHostname" && "$newHostname" != "localhost" ]]; then break @@ -266,25 +312,25 @@ function set_hostname() { if [[ $retval != 0 ]]; then echo echo "set hostname fail!" - return + return fi #echo -e -n "$(hostnamectl status --static)" #echo -e -n "$(hostnamectl status --transient)" #echo -e -n "$(hostnamectl status --pretty)" - + #ubuntu/centos /etc/hostname if [[ -e /etc/hostname ]]; then ${csudo} echo $newHostname > /etc/hostname ||: fi - + #debian: #HOSTNAME=yourname if [[ -e /etc/sysconfig/network ]]; then ${csudo} sed -i -r "s/#*\s*(HOSTNAME=\s*).*/\1$newHostname/" /etc/sysconfig/network ||: fi ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$newHostname/" ${cfg_install_dir}/taos.cfg - serverFqdn=$newHostname - + serverFqdn=$newHostname + if [[ -e /etc/hosts ]]; then add_newHostname_to_hosts $newHostname fi @@ -302,7 +348,7 @@ function is_correct_ipaddr() { return 0 fi done - + return 1 } @@ -316,13 +362,13 @@ function set_ipAsFqdn() { echo echo -e -n "${GREEN}Unable to get local ip, use 127.0.0.1${NC}" localFqdn="127.0.0.1" - # Write the local FQDN to configuration file - ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$localFqdn/" ${cfg_install_dir}/taos.cfg + # Write the local FQDN to configuration file + ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$localFqdn/" ${cfg_install_dir}/taos.cfg serverFqdn=$localFqdn echo return - fi - + fi + echo -e -n "${GREEN}Please choose an IP from local IP list${NC}:" echo echo -e -n "${GREEN}$iplist${NC}" @@ -331,15 +377,15 @@ function set_ipAsFqdn() { echo -e -n "${GREEN}Notes: if IP is used as the node name, data can NOT be migrated to other machine directly${NC}:" read localFqdn while true; do - if [ ! -z "$localFqdn" ]; then + if [ ! 
-z "$localFqdn" ]; then # Check if correct ip address is_correct_ipaddr $localFqdn retval=`echo $?` if [[ $retval != 0 ]]; then read -p "Please choose an IP from local IP list:" localFqdn else - # Write the local FQDN to configuration file - ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$localFqdn/" ${cfg_install_dir}/taos.cfg + # Write the local FQDN to configuration file + ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$localFqdn/" ${cfg_install_dir}/taos.cfg serverFqdn=$localFqdn break fi @@ -354,59 +400,59 @@ function local_fqdn_check() { echo echo -e -n "System hostname is: ${GREEN}$serverFqdn${NC}" echo - if [[ "$serverFqdn" == "" ]] || [[ "$serverFqdn" == "localhost" ]]; then + if [[ "$serverFqdn" == "" ]] || [[ "$serverFqdn" == "localhost" ]]; then echo -e -n "${GREEN}It is strongly recommended to configure a hostname for this machine ${NC}" echo - + while true do - read -r -p "Set hostname now? [Y/n] " input - if [ ! -n "$input" ]; then - set_hostname - break - else - case $input in - [yY][eE][sS]|[yY]) - set_hostname - break - ;; - - [nN][oO]|[nN]) - set_ipAsFqdn - break - ;; - - *) - echo "Invalid input..." - ;; - esac - fi + read -r -p "Set hostname now? [Y/n] " input + if [ ! -n "$input" ]; then + set_hostname + break + else + case $input in + [yY][eE][sS]|[yY]) + set_hostname + break + ;; + + [nN][oO]|[nN]) + set_ipAsFqdn + break + ;; + + *) + echo "Invalid input..." + ;; + esac + fi done fi } function install_config() { #${csudo} rm -f ${install_main_dir}/cfg/taos.cfg || : - + if [ ! -f ${cfg_install_dir}/taos.cfg ]; then ${csudo} mkdir -p ${cfg_install_dir} [ -f ${script_dir}/cfg/taos.cfg ] && ${csudo} cp ${script_dir}/cfg/taos.cfg ${cfg_install_dir} ${csudo} chmod 644 ${cfg_install_dir}/* - fi - + fi + ${csudo} cp -f ${script_dir}/cfg/taos.cfg ${install_main_dir}/cfg/taos.cfg.org ${csudo} ln -s ${cfg_install_dir}/taos.cfg ${install_main_dir}/cfg [ ! -z $1 ] && return 0 || : # only install client - + if ((${update_flag}==1)); then return 0 fi - + if [ "$interactiveFqdn" == "no" ]; then return 0 - fi - + fi + local_fqdn_check #FQDN_FORMAT="(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)" @@ -424,8 +470,8 @@ function install_config() { if [ ! -z "$firstEp" ]; then # check the format of the firstEp #if [[ $firstEp == $FQDN_PATTERN ]]; then - # Write the first FQDN to configuration file - ${csudo} sed -i -r "s/#*\s*(firstEp\s*).*/\1$firstEp/" ${cfg_install_dir}/taos.cfg + # Write the first FQDN to configuration file + ${csudo} sed -i -r "s/#*\s*(firstEp\s*).*/\1$firstEp/" ${cfg_install_dir}/taos.cfg break #else # read -p "Please enter the correct FQDN:port: " firstEp @@ -433,9 +479,9 @@ function install_config() { else break fi - done + done - # user email + # user email #EMAIL_PATTERN='^[A-Za-z0-9\u4e00-\u9fa5]+@[a-zA-Z0-9_-]+(\.[a-zA-Z0-9_-]+)+$' #EMAIL_PATTERN='^[\w-]+(\.[\w-]+)*@[\w-]+(\.[\w-]+)+$' #EMAIL_PATTERN="^[\w-]+(\.[\w-]+)*@[\w-]+(\.[\w-]+)+$" @@ -446,31 +492,31 @@ function install_config() { if [ ! 
-z "$emailAddr" ]; then # check the format of the emailAddr #if [[ "$emailAddr" =~ $EMAIL_PATTERN ]]; then - # Write the email address to temp file - email_file="${install_main_dir}/email" + # Write the email address to temp file + email_file="${install_main_dir}/email" ${csudo} bash -c "echo $emailAddr > ${email_file}" - break + break #else - # read -p "Please enter the correct email address: " emailAddr + # read -p "Please enter the correct email address: " emailAddr #fi else break fi - done + done } function install_log() { ${csudo} rm -rf ${log_dir} || : ${csudo} mkdir -p ${log_dir} && ${csudo} chmod 777 ${log_dir} - + ${csudo} ln -s ${log_dir} ${install_main_dir}/log } function install_data() { ${csudo} mkdir -p ${data_dir} - - ${csudo} ln -s ${data_dir} ${install_main_dir}/data + + ${csudo} ln -s ${data_dir} ${install_main_dir}/data } function install_connector() { @@ -485,26 +531,26 @@ function install_examples() { function clean_service_on_sysvinit() { #restart_config_str="taos:2345:respawn:${service_config_dir}/taosd start" - #${csudo} sed -i "\|${restart_config_str}|d" /etc/inittab || : - + #${csudo} sed -i "\|${restart_config_str}|d" /etc/inittab || : + if pidof taosd &> /dev/null; then ${csudo} service taosd stop || : fi - + if pidof tarbitrator &> /dev/null; then ${csudo} service tarbitratord stop || : fi if ((${initd_mod}==1)); then - if [ -e ${service_config_dir}/taosd ]; then + if [ -e ${service_config_dir}/taosd ]; then ${csudo} chkconfig --del taosd || : fi - if [ -e ${service_config_dir}/tarbitratord ]; then + if [ -e ${service_config_dir}/tarbitratord ]; then ${csudo} chkconfig --del tarbitratord || : fi elif ((${initd_mod}==2)); then - if [ -e ${service_config_dir}/taosd ]; then + if [ -e ${service_config_dir}/taosd ]; then ${csudo} insserv -r taosd || : fi if [ -e ${service_config_dir}/tarbitratord ]; then @@ -518,10 +564,10 @@ function clean_service_on_sysvinit() { ${csudo} update-rc.d -f tarbitratord remove || : fi fi - + ${csudo} rm -f ${service_config_dir}/taosd || : ${csudo} rm -f ${service_config_dir}/tarbitratord || : - + if $(which init &> /dev/null); then ${csudo} init q || : fi @@ -544,10 +590,10 @@ function install_service_on_sysvinit() { ${csudo} cp -f ${script_dir}/init.d/tarbitratord.rpm ${install_main_dir}/init.d/tarbitratord ${csudo} cp ${script_dir}/init.d/tarbitratord.rpm ${service_config_dir}/tarbitratord && ${csudo} chmod a+x ${service_config_dir}/tarbitratord fi - + #restart_config_str="taos:2345:respawn:${service_config_dir}/taosd start" #${csudo} grep -q -F "$restart_config_str" /etc/inittab || ${csudo} bash -c "echo '${restart_config_str}' >> /etc/inittab" - + if ((${initd_mod}==1)); then ${csudo} chkconfig --add taosd || : ${csudo} chkconfig --level 2345 taosd on || : @@ -572,7 +618,7 @@ function clean_service_on_systemd() { fi ${csudo} systemctl disable taosd &> /dev/null || echo &> /dev/null ${csudo} rm -f ${taosd_service_config} - + tarbitratord_service_config="${service_config_dir}/tarbitratord.service" if systemctl is-active --quiet tarbitratord; then echo "tarbitrator is running, stopping it..." 
@@ -580,7 +626,7 @@ function clean_service_on_systemd() { fi ${csudo} systemctl disable tarbitratord &> /dev/null || echo &> /dev/null ${csudo} rm -f ${tarbitratord_service_config} - + if [ "$verMode" == "cluster" ]; then nginx_service_config="${service_config_dir}/nginxd.service" if systemctl is-active --quiet nginxd; then @@ -588,8 +634,8 @@ function clean_service_on_systemd() { ${csudo} systemctl stop nginxd &> /dev/null || echo &> /dev/null fi ${csudo} systemctl disable nginxd &> /dev/null || echo &> /dev/null - ${csudo} rm -f ${nginx_service_config} - fi + ${csudo} rm -f ${nginx_service_config} + fi } # taos:2345:respawn:/etc/init.d/taosd start @@ -621,7 +667,7 @@ function install_service_on_systemd() { ${csudo} bash -c "echo '[Install]' >> ${taosd_service_config}" ${csudo} bash -c "echo 'WantedBy=multi-user.target' >> ${taosd_service_config}" ${csudo} systemctl enable taosd - + tarbitratord_service_config="${service_config_dir}/tarbitratord.service" ${csudo} bash -c "echo '[Unit]' >> ${tarbitratord_service_config}" ${csudo} bash -c "echo 'Description=TDengine arbitrator service' >> ${tarbitratord_service_config}" @@ -643,9 +689,9 @@ function install_service_on_systemd() { ${csudo} bash -c "echo >> ${tarbitratord_service_config}" ${csudo} bash -c "echo '[Install]' >> ${tarbitratord_service_config}" ${csudo} bash -c "echo 'WantedBy=multi-user.target' >> ${tarbitratord_service_config}" - #${csudo} systemctl enable tarbitratord - - if [ "$verMode" == "cluster" ]; then + #${csudo} systemctl enable tarbitratord + + if [ "$verMode" == "cluster" ]; then nginx_service_config="${service_config_dir}/nginxd.service" ${csudo} bash -c "echo '[Unit]' >> ${nginx_service_config}" ${csudo} bash -c "echo 'Description=Nginx For TDengine Service' >> ${nginx_service_config}" @@ -674,7 +720,7 @@ function install_service_on_systemd() { ${csudo} systemctl enable nginxd fi ${csudo} systemctl start nginxd - fi + fi } function install_service() { @@ -757,7 +803,7 @@ function update_TDengine() { fi sleep 1 fi - + if [ "$verMode" == "cluster" ]; then if pidof nginx &> /dev/null; then if ((${service_mod}==0)); then @@ -770,12 +816,13 @@ function update_TDengine() { sleep 1 fi fi - + install_main_path install_log install_header install_lib + install_jemalloc if [ "$pagMode" != "lite" ]; then install_connector fi @@ -783,10 +830,10 @@ function update_TDengine() { if [ -z $1 ]; then install_bin install_service - install_config - + install_config + openresty_work=false - if [ "$verMode" == "cluster" ]; then + if [ "$verMode" == "cluster" ]; then # Check if openresty is installed # Check if nginx is installed successfully if type curl &> /dev/null; then @@ -797,7 +844,7 @@ function update_TDengine() { echo -e "\033[44;31;5mNginx for TDengine does not work! 
Please try again!\033[0m" fi fi - fi + fi #echo #echo -e "\033[44;32;1mTDengine is updated successfully!${NC}" @@ -816,7 +863,7 @@ function update_TDengine() { else echo -e "${GREEN_DARK}To access TDengine ${NC}: use ${GREEN_UNDERLINE}taos -h $serverFqdn${NC} in shell${NC}" fi - + echo echo -e "\033[44;32;1mTDengine is updated successfully!${NC}" else @@ -839,14 +886,14 @@ function install_TDengine() { tar -zxf taos.tar.gz echo -e "${GREEN}Start to install TDengine...${NC}" - - install_main_path - + + install_main_path + if [ -z $1 ]; then install_data - fi - - install_log + fi + + install_log install_header install_lib if [ "$pagMode" != "lite" ]; then @@ -871,8 +918,8 @@ function install_TDengine() { fi fi fi - - install_config + + install_config # Ask if to start the service #echo @@ -885,36 +932,36 @@ function install_TDengine() { echo -e "${GREEN_DARK}To start TDengine ${NC}: ${csudo} service taosd start${NC}" else echo -e "${GREEN_DARK}To start TDengine ${NC}: taosd${NC}" - fi + fi #if [ ${openresty_work} = 'true' ]; then # echo -e "${GREEN_DARK}To access TDengine ${NC}: use ${GREEN_UNDERLINE}taos${NC} in shell OR from ${GREEN_UNDERLINE}http://127.0.0.1:${nginx_port}${NC}" #else # echo -e "${GREEN_DARK}To access TDengine ${NC}: use ${GREEN_UNDERLINE}taos${NC} in shell${NC}" #fi - + if [ ! -z "$firstEp" ]; then - tmpFqdn=${firstEp%%:*} - substr=":" - if [[ $firstEp =~ $substr ]];then - tmpPort=${firstEp#*:} - else - tmpPort="" - fi - if [[ "$tmpPort" != "" ]];then - echo -e "${GREEN_DARK}To access TDengine ${NC}: taos -h $tmpFqdn -P $tmpPort${GREEN_DARK} to login into cluster, then${NC}" - else - echo -e "${GREEN_DARK}To access TDengine ${NC}: taos -h $tmpFqdn${GREEN_DARK} to login into cluster, then${NC}" - fi - echo -e "${GREEN_DARK}execute ${NC}: create dnode 'newDnodeFQDN:port'; ${GREEN_DARK}to add this new node${NC}" - echo + tmpFqdn=${firstEp%%:*} + substr=":" + if [[ $firstEp =~ $substr ]];then + tmpPort=${firstEp#*:} + else + tmpPort="" + fi + if [[ "$tmpPort" != "" ]];then + echo -e "${GREEN_DARK}To access TDengine ${NC}: taos -h $tmpFqdn -P $tmpPort${GREEN_DARK} to login into cluster, then${NC}" + else + echo -e "${GREEN_DARK}To access TDengine ${NC}: taos -h $tmpFqdn${GREEN_DARK} to login into cluster, then${NC}" + fi + echo -e "${GREEN_DARK}execute ${NC}: create dnode 'newDnodeFQDN:port'; ${GREEN_DARK}to add this new node${NC}" + echo elif [ ! 
-z "$serverFqdn" ]; then - echo -e "${GREEN_DARK}To access TDengine ${NC}: taos -h $serverFqdn${GREEN_DARK} to login into TDengine server${NC}" - echo + echo -e "${GREEN_DARK}To access TDengine ${NC}: taos -h $serverFqdn${GREEN_DARK} to login into TDengine server${NC}" + echo fi - + echo -e "\033[44;32;1mTDengine is installed successfully!${NC}" - echo + echo else # Only install client install_bin install_config @@ -945,6 +992,6 @@ elif [ "$verType" == "client" ]; then else install_TDengine client fi -else - echo "please input correct verType" +else + echo "please input correct verType" fi diff --git a/packaging/tools/make_install.sh b/packaging/tools/make_install.sh index 240804ed95..0c755d9f72 100755 --- a/packaging/tools/make_install.sh +++ b/packaging/tools/make_install.sh @@ -204,7 +204,7 @@ function install_jemalloc() { if [ -f ${binary_dir}/build/lib/libjemalloc_pic.a ]; then /usr/bin/install -c -m 755 ${binary_dir}/build/lib/libjemalloc_pic.a /usr/local/lib fi - if [ -f ${binary_dir}/build/lib/libjemalloc_pic.a ]; then + if [ -f ${binary_dir}/build/lib/pkgconfig/jemalloc.pc ]; then /usr/bin/install -c -d /usr/local/lib/pkgconfig /usr/bin/install -c -m 644 ${binary_dir}/build/lib/pkgconfig/jemalloc.pc /usr/local/lib/pkgconfig fi diff --git a/packaging/tools/makepkg.sh b/packaging/tools/makepkg.sh index e4d2d71b01..624f72278a 100755 --- a/packaging/tools/makepkg.sh +++ b/packaging/tools/makepkg.sh @@ -30,12 +30,12 @@ else install_dir="${release_dir}/TDengine-server-${version}" fi -# Directories and files. +# Directories and files if [ "$pagMode" == "lite" ]; then - strip ${build_dir}/bin/taosd + strip ${build_dir}/bin/taosd strip ${build_dir}/bin/taos bin_files="${build_dir}/bin/taosd ${build_dir}/bin/taos ${script_dir}/remove.sh" -else +else bin_files="${build_dir}/bin/taosd ${build_dir}/bin/taos ${build_dir}/bin/taosdump ${build_dir}/bin/taosdemo ${build_dir}/bin/tarbitrator\ ${script_dir}/remove.sh ${script_dir}/set_core.sh ${script_dir}/startPre.sh ${script_dir}/taosd-dump-cfg.gdb" fi @@ -73,10 +73,43 @@ mkdir -p ${install_dir}/init.d && cp ${init_file_rpm} ${install_dir}/init.d/taos mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_deb} ${install_dir}/init.d/tarbitratord.deb || : mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_rpm} ${install_dir}/init.d/tarbitratord.rpm || : +if [ -f ${build_dir}/bin/jemalloc-config ]; then + mkdir -p ${install_dir}/jemalloc/{bin,lib,lib/pkgconfig,include/jemalloc,share/doc/jemalloc,share/man/man3} + cp ${build_dir}/bin/jemalloc-config ${install_dir}/jemalloc/bin + if [ -f ${build_dir}/bin/jemalloc.sh ]; then + cp ${build_dir}/bin/jemalloc.sh ${install_dir}/jemalloc/bin + fi + if [ -f ${build_dir}/bin/jeprof ]; then + cp ${build_dir}/bin/jeprof ${install_dir}/jemalloc/bin + fi + if [ -f ${build_dir}/include/jemalloc/jemalloc.h ]; then + cp ${build_dir}/include/jemalloc/jemalloc.h ${install_dir}/jemalloc/include/jemalloc + fi + if [ -f ${build_dir}/lib/libjemalloc.so.2 ]; then + cp ${build_dir}/lib/libjemalloc.so.2 ${install_dir}/jemalloc/lib + ln -sf libjemalloc.so.2 ${install_dir}/jemalloc/lib/libjemalloc.so + fi + if [ -f ${build_dir}/lib/libjemalloc.a ]; then + cp ${build_dir}/lib/libjemalloc.a ${install_dir}/jemalloc/lib + fi + if [ -f ${build_dir}/lib/libjemalloc_pic.a ]; then + cp ${build_dir}/lib/libjemalloc_pic.a ${install_dir}/jemalloc/lib + fi + if [ -f ${build_dir}/lib/pkgconfig/jemalloc.pc ]; then + cp ${build_dir}/lib/pkgconfig/jemalloc.pc ${install_dir}/jemalloc/lib/pkgconfig + fi + if [ -f 
${build_dir}/share/doc/jemalloc/jemalloc.html ]; then + cp ${build_dir}/share/doc/jemalloc/jemalloc.html ${install_dir}/jemalloc/share/doc/jemalloc + fi + if [ -f ${build_dir}/share/man/man3/jemalloc.3 ]; then + cp ${build_dir}/share/man/man3/jemalloc.3 ${install_dir}/jemalloc/share/man/man3 + fi +fi + if [ "$verMode" == "cluster" ]; then sed 's/verMode=edge/verMode=cluster/g' ${install_dir}/bin/remove.sh >> remove_temp.sh mv remove_temp.sh ${install_dir}/bin/remove.sh - + mkdir -p ${install_dir}/nginxd && cp -r ${nginx_dir}/* ${install_dir}/nginxd cp ${nginx_dir}/png/taos.png ${install_dir}/nginxd/admin/images/taos.png rm -rf ${install_dir}/nginxd/png @@ -132,7 +165,7 @@ if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then if [ -d ${examples_dir}/JDBC/taosdemo/target ]; then rm -rf ${examples_dir}/JDBC/taosdemo/target fi - + cp -r ${examples_dir}/JDBC ${install_dir}/examples cp -r ${examples_dir}/matlab ${install_dir}/examples cp -r ${examples_dir}/python ${install_dir}/examples @@ -142,7 +175,7 @@ if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then cp -r ${examples_dir}/C# ${install_dir}/examples fi # Copy driver -mkdir -p ${install_dir}/driver +mkdir -p ${install_dir}/driver cp ${lib_files} ${install_dir}/driver # Copy connector @@ -168,7 +201,7 @@ fi # exit 1 -cd ${release_dir} +cd ${release_dir} if [ "$verMode" == "cluster" ]; then pkg_name=${install_dir}-${osType}-${cpuType} @@ -185,8 +218,8 @@ fi if [ "$verType" == "beta" ]; then pkg_name=${pkg_name}-${verType} -elif [ "$verType" == "stable" ]; then - pkg_name=${pkg_name} +elif [ "$verType" == "stable" ]; then + pkg_name=${pkg_name} else echo "unknow verType, nor stabel or beta" exit 1 From 6439e9addca8be102bbf2353fc191e7dd4e650b5 Mon Sep 17 00:00:00 2001 From: bryanchang0603 Date: Wed, 2 Jun 2021 11:55:16 +0800 Subject: [PATCH 65/82] [TD-4475] modifying the test case --- tests/pytest/manualTest/manual_alter_block.py | 3 +++ .../pytest/tools/taosdemoAllTest/insert_5Mrows_hasTB.json | 8 ++++---- .../pytest/tools/taosdemoAllTest/insert_5Mrows_noTB.json | 8 ++++---- 3 files changed, 11 insertions(+), 8 deletions(-) diff --git a/tests/pytest/manualTest/manual_alter_block.py b/tests/pytest/manualTest/manual_alter_block.py index 6d80006d7d..0db74e160a 100644 --- a/tests/pytest/manualTest/manual_alter_block.py +++ b/tests/pytest/manualTest/manual_alter_block.py @@ -15,6 +15,7 @@ import sys from util.log import * from util.cases import * from util.sql import * +from util.dnodes import tdDnodes class TDTestCase: @@ -58,6 +59,8 @@ class TDTestCase: tdSql.execute('alter database db blocks 8') tdSql.query('show databases') tdSql.checkData(0,9,8) + tdDnodes.stop(1) + tdDnodes.start(1) os.system("%staosdemo -f tools/taosdemoAllTest/insert_5Mrows_hasTB.json" % binPath) diff --git a/tests/pytest/tools/taosdemoAllTest/insert_5Mrows_hasTB.json b/tests/pytest/tools/taosdemoAllTest/insert_5Mrows_hasTB.json index 9d22dbaabe..2731e0505c 100644 --- a/tests/pytest/tools/taosdemoAllTest/insert_5Mrows_hasTB.json +++ b/tests/pytest/tools/taosdemoAllTest/insert_5Mrows_hasTB.json @@ -11,7 +11,7 @@ "confirm_parameter_prompt": "no", "insert_interval": 0, "interlace_rows": 100, - "num_of_records_per_req": 100, + "num_of_records_per_req": 32766, "databases": [{ "dbinfo": { "name": "db", @@ -40,7 +40,7 @@ "batch_create_tbl_num": 20, "data_source": "rand", "insert_mode": "taosc", - "insert_rows": 10000, + "insert_rows": 100000, "childtable_limit": 500, "childtable_offset":0, "interlace_rows": 0, @@ -48,8 +48,8 @@ "max_sql_len": 
1024000, "disorder_ratio": 0, "disorder_range": 1000, - "timestamp_step": 10, - "start_timestamp": "2020-10-01 00:00:00.000", + "timestamp_step": 1, + "start_timestamp": "now", "sample_format": "csv", "sample_file": "./sample.csv", "tags_file": "", diff --git a/tests/pytest/tools/taosdemoAllTest/insert_5Mrows_noTB.json b/tests/pytest/tools/taosdemoAllTest/insert_5Mrows_noTB.json index d437735ebc..8583b9a897 100644 --- a/tests/pytest/tools/taosdemoAllTest/insert_5Mrows_noTB.json +++ b/tests/pytest/tools/taosdemoAllTest/insert_5Mrows_noTB.json @@ -11,7 +11,7 @@ "confirm_parameter_prompt": "no", "insert_interval": 0, "interlace_rows": 100, - "num_of_records_per_req": 100, + "num_of_records_per_req": 32766, "databases": [{ "dbinfo": { "name": "db", @@ -40,7 +40,7 @@ "batch_create_tbl_num": 20, "data_source": "rand", "insert_mode": "taosc", - "insert_rows": 10000, + "insert_rows": 100000, "childtable_limit": 10, "childtable_offset":100, "interlace_rows": 0, @@ -48,8 +48,8 @@ "max_sql_len": 1024000, "disorder_ratio": 0, "disorder_range": 1000, - "timestamp_step": 10, - "start_timestamp": "2020-10-01 00:00:00.000", + "timestamp_step": 1, + "start_timestamp": "2019-10-01 00:00:00.000", "sample_format": "csv", "sample_file": "./sample.csv", "tags_file": "", From 29ca859f1160f20ae1514d8fba1578d043ce4a7d Mon Sep 17 00:00:00 2001 From: bryanchang0603 Date: Wed, 2 Jun 2021 13:37:36 +0800 Subject: [PATCH 66/82] [TD-4475] add manual test case for hot alter block --- tests/pytest/manualTest/manual_alter_block.py | 23 ++++++++++++------- ...rows_noTB.json => manual_block1_comp.json} | 8 +++---- ...t_5Mrows_hasTB.json => manual_block2.json} | 2 +- 3 files changed, 20 insertions(+), 13 deletions(-) rename tests/pytest/tools/taosdemoAllTest/{insert_5Mrows_noTB.json => manual_block1_comp.json} (84%) rename tests/pytest/tools/taosdemoAllTest/{insert_5Mrows_hasTB.json => manual_block2.json} (98%) diff --git a/tests/pytest/manualTest/manual_alter_block.py b/tests/pytest/manualTest/manual_alter_block.py index 0db74e160a..ccd98b1421 100644 --- a/tests/pytest/manualTest/manual_alter_block.py +++ b/tests/pytest/manualTest/manual_alter_block.py @@ -17,7 +17,8 @@ from util.cases import * from util.sql import * from util.dnodes import tdDnodes - +##TODO: auto test version is currently unsupported, need to come up with +# an auto test version in the future class TDTestCase: def init(self, conn, logSql): tdLog.debug("start to execute %s" % __file__) @@ -48,27 +49,33 @@ class TDTestCase: tdLog.info("taosd found in %s" % buildPath) binPath = buildPath+ "/build/bin/" + #alter cache block to 3, then check alter tdSql.execute('alter database db blocks 3') tdSql.query('show databases') tdSql.checkData(0,9,3) - os.system("%staosdemo -f tools/taosdemoAllTest/insert_5Mrows_noTB.json" % binPath) - + #run taosdemo to occupy all cache, need to manually check memory consumption + os.system("%staosdemo -f tools/taosdemoAllTest/manual_block1_comp.json" % binPath) input("please check memory usage for taosd. After checking, press enter") + #alter cache block to 8, then check alter tdSql.execute('alter database db blocks 8') tdSql.query('show databases') tdSql.checkData(0,9,8) - tdDnodes.stop(1) - tdDnodes.start(1) - - os.system("%staosdemo -f tools/taosdemoAllTest/insert_5Mrows_hasTB.json" % binPath) + #run taosdemo to occupy all cache, need to manually check memory consumption + os.system("%staosdemo -f tools/taosdemoAllTest/manual_block2.json" % binPath) input("please check memory usage for taosd. 
After checking, press enter") + ##expected result the peak memory consumption should increase by around 80MB = 5 blocks of cache + + ##test results + #2021/06/02 before:2621700K after: 2703640K memory usage increased by 80MB = 5 block + # confirm with the change in block. Baosheng Chang + def stop(self): tdSql.close() - tdLog.success("%s successfully executed" % __file__) + tdLog.debug("%s alter block manual check finish" % __file__) tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/pytest/tools/taosdemoAllTest/insert_5Mrows_noTB.json b/tests/pytest/tools/taosdemoAllTest/manual_block1_comp.json similarity index 84% rename from tests/pytest/tools/taosdemoAllTest/insert_5Mrows_noTB.json rename to tests/pytest/tools/taosdemoAllTest/manual_block1_comp.json index 8583b9a897..3ced8272b2 100644 --- a/tests/pytest/tools/taosdemoAllTest/insert_5Mrows_noTB.json +++ b/tests/pytest/tools/taosdemoAllTest/manual_block1_comp.json @@ -22,7 +22,7 @@ "blocks": 3, "precision": "ms", "keep": 3650, - "minRows": 100, + "minRows": 1000, "maxRows": 4096, "comp":2, "walLevel":1, @@ -40,7 +40,7 @@ "batch_create_tbl_num": 20, "data_source": "rand", "insert_mode": "taosc", - "insert_rows": 100000, + "insert_rows": 10000, "childtable_limit": 10, "childtable_offset":100, "interlace_rows": 0, @@ -53,8 +53,8 @@ "sample_format": "csv", "sample_file": "./sample.csv", "tags_file": "", - "columns": [{"type": "INT"}], - "tags": [{"type": "TINYINT", "count":2}] + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":10}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] }] }] } \ No newline at end of file diff --git a/tests/pytest/tools/taosdemoAllTest/insert_5Mrows_hasTB.json b/tests/pytest/tools/taosdemoAllTest/manual_block2.json similarity index 98% rename from tests/pytest/tools/taosdemoAllTest/insert_5Mrows_hasTB.json rename to tests/pytest/tools/taosdemoAllTest/manual_block2.json index 2731e0505c..434159159b 100644 --- a/tests/pytest/tools/taosdemoAllTest/insert_5Mrows_hasTB.json +++ b/tests/pytest/tools/taosdemoAllTest/manual_block2.json @@ -19,7 +19,7 @@ "replica": 1, "days": 10, "cache": 16, - "blocks": 3, + "blocks": 8, "precision": "ms", "keep": 3650, "minRows": 100, From b00ba324f34879b112195fe3d3d741926625e962 Mon Sep 17 00:00:00 2001 From: dapan1121 <89396746@qq.com> Date: Wed, 2 Jun 2021 14:04:20 +0800 Subject: [PATCH 67/82] support modify tag column length --- src/client/src/tscSQLParser.c | 77 ++- src/inc/taosmsg.h | 1 + src/inc/ttokendef.h | 5 +- src/mnode/src/mnodeTable.c | 5 + src/query/inc/sql.y | 42 +- src/query/src/qSqlParser.c | 2 +- src/query/src/sql.c | 569 ++++++++++--------- src/util/src/ttokenizer.c | 2 +- tests/script/general/parser/alter_column.sim | 81 ++- 9 files changed, 472 insertions(+), 312 deletions(-) diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index 978fbaf521..80768b64f2 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -5079,8 +5079,10 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { const char* msg18 = "primary timestamp column cannot be dropped"; const char* msg19 = "invalid new tag name"; const char* msg20 = "table is not super table"; - const char* msg21 = "only binary/nchar column length could be altered"; - const char* msg22 = "invalid column length"; + const char* msg21 = "only binary/nchar column length could be modified"; + const char* msg22 = "new column length 
should be bigger than old one"; + const char* msg23 = "only column length coulbe be modified"; + const char* msg24 = "invalid binary/nchar column length"; int32_t code = TSDB_CODE_SUCCESS; @@ -5111,8 +5113,8 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { } if (pAlterSQL->type == TSDB_ALTER_TABLE_ADD_TAG_COLUMN || pAlterSQL->type == TSDB_ALTER_TABLE_DROP_TAG_COLUMN || - pAlterSQL->type == TSDB_ALTER_TABLE_CHANGE_TAG_COLUMN) { - if (UTIL_TABLE_IS_NORMAL_TABLE(pTableMetaInfo)) { + pAlterSQL->type == TSDB_ALTER_TABLE_CHANGE_TAG_COLUMN || pAlterSQL->type == TSDB_ALTER_TABLE_MODIFY_TAG_COLUMN) { + if (!UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3); } } else if ((pAlterSQL->type == TSDB_ALTER_TABLE_UPDATE_TAG_VAL) && (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo))) { @@ -5334,14 +5336,18 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { TAOS_FIELD f = tscCreateField(TSDB_DATA_TYPE_INT, name1, tDataTypes[TSDB_DATA_TYPE_INT].bytes); tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f); } else if (pAlterSQL->type == TSDB_ALTER_TABLE_CHANGE_COLUMN) { - if (taosArrayGetSize(pAlterSQL->pAddColumns) != 2) { - return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), NULL); + if (taosArrayGetSize(pAlterSQL->pAddColumns) >= 2) { + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg16); } - tVariantListItem* pItem = taosArrayGet(pAlterSQL->pAddColumns, 0); + + TAOS_FIELD* pItem = taosArrayGet(pAlterSQL->pAddColumns, 0); + if (pItem->type != TSDB_DATA_TYPE_BINARY && pItem->type != TSDB_DATA_TYPE_NCHAR) { + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg21); + } SColumnIndex columnIndex = COLUMN_INDEX_INITIALIZER; - SStrToken name = {.type = TK_STRING, .z = pItem->pVar.pz, .n = pItem->pVar.nLen}; + SStrToken name = {.type = TK_STRING, .z = pItem->name, .n = strlen(pItem->name)}; if (getColumnIndexByName(pCmd, &name, pQueryInfo, &columnIndex) != TSDB_CODE_SUCCESS) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg17); } @@ -5352,14 +5358,61 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg21); } - pItem = taosArrayGet(pAlterSQL->pAddColumns, 1); - int16_t nlen = 0; + if (pItem->type != pColSchema->type) { + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg23); + } - if (tVariantDump(&pItem->pVar, (char *)&nlen, TSDB_DATA_TYPE_SMALLINT, false) < 0 || nlen <= 0) { + if ((pItem->type == TSDB_DATA_TYPE_BINARY && (pItem->bytes <= 0 || pItem->bytes > TSDB_MAX_BINARY_LEN)) || + (pItem->type == TSDB_DATA_TYPE_NCHAR && (pItem->bytes <= 0 || pItem->bytes > TSDB_MAX_NCHAR_LEN))) { + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg24); + } + + if (pItem->bytes <= pColSchema->bytes) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg22); } - TAOS_FIELD f = tscCreateField(pColSchema->type, name.z, nlen); + TAOS_FIELD f = tscCreateField(pColSchema->type, name.z, pItem->bytes); + tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f); + }else if (pAlterSQL->type == TSDB_ALTER_TABLE_MODIFY_TAG_COLUMN) { + if (taosArrayGetSize(pAlterSQL->pAddColumns) >= 2) { + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg16); + } + + TAOS_FIELD* pItem = taosArrayGet(pAlterSQL->pAddColumns, 0); + if (pItem->type != TSDB_DATA_TYPE_BINARY && pItem->type != TSDB_DATA_TYPE_NCHAR) { + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg21); + } + + SColumnIndex columnIndex = COLUMN_INDEX_INITIALIZER; + 
SStrToken name = {.type = TK_STRING, .z = pItem->name, .n = strlen(pItem->name)}; + if (getColumnIndexByName(pCmd, &name, pQueryInfo, &columnIndex) != TSDB_CODE_SUCCESS) { + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg17); + } + + SSchema* pColSchema = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, columnIndex.columnIndex); + + if (columnIndex.columnIndex < tscGetNumOfColumns(pTableMetaInfo->pTableMeta)) { + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg10); + } + + if (pColSchema->type != TSDB_DATA_TYPE_BINARY && pColSchema->type != TSDB_DATA_TYPE_NCHAR) { + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg21); + } + + if (pItem->type != pColSchema->type) { + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg23); + } + + if ((pItem->type == TSDB_DATA_TYPE_BINARY && (pItem->bytes <= 0 || pItem->bytes > TSDB_MAX_BINARY_LEN)) || + (pItem->type == TSDB_DATA_TYPE_NCHAR && (pItem->bytes <= 0 || pItem->bytes > TSDB_MAX_NCHAR_LEN))) { + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg24); + } + + if (pItem->bytes <= pColSchema->bytes) { + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg22); + } + + TAOS_FIELD f = tscCreateField(pColSchema->type, name.z, pItem->bytes); tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f); } diff --git a/src/inc/taosmsg.h b/src/inc/taosmsg.h index ff3cc6e956..413f72720e 100644 --- a/src/inc/taosmsg.h +++ b/src/inc/taosmsg.h @@ -161,6 +161,7 @@ enum _mgmt_table { #define TSDB_ALTER_TABLE_ADD_COLUMN 5 #define TSDB_ALTER_TABLE_DROP_COLUMN 6 #define TSDB_ALTER_TABLE_CHANGE_COLUMN 7 +#define TSDB_ALTER_TABLE_MODIFY_TAG_COLUMN 8 #define TSDB_FILL_NONE 0 #define TSDB_FILL_NULL 1 diff --git a/src/inc/ttokendef.h b/src/inc/ttokendef.h index e5f1472317..e0c9abed18 100644 --- a/src/inc/ttokendef.h +++ b/src/inc/ttokendef.h @@ -155,7 +155,7 @@ #define TK_SYNCDB 136 #define TK_ADD 137 #define TK_COLUMN 138 -#define TK_LENGTH 139 +#define TK_MODIFY 139 #define TK_TAG 140 #define TK_CHANGE 141 #define TK_SET 142 @@ -211,6 +211,9 @@ + + + #define TK_SPACE 300 #define TK_COMMENT 301 #define TK_ILLEGAL 302 diff --git a/src/mnode/src/mnodeTable.c b/src/mnode/src/mnodeTable.c index 6eca87514b..5b699c5e24 100644 --- a/src/mnode/src/mnodeTable.c +++ b/src/mnode/src/mnodeTable.c @@ -3218,6 +3218,11 @@ static int32_t mnodeProcessAlterTableMsg(SMnodeMsg *pMsg) { (void)mnodeChangeSuperTableColumn; mError("change table[%s] column[%s] length to [%d] is not processed", pAlter->tableFname, pAlter->schema[0].name, pAlter->schema[0].bytes); code = TSDB_CODE_SUCCESS; + } else if (pAlter->type == TSDB_ALTER_TABLE_MODIFY_TAG_COLUMN) { + //code = mnodeChangeSuperTableColumn(pMsg, pAlter->schema[0].name, pAlter->schema[1].name); + (void)mnodeChangeSuperTableColumn; + mError("change table[%s] tag[%s] length to [%d] is not processed", pAlter->tableFname, pAlter->schema[0].name, pAlter->schema[0].bytes); + code = TSDB_CODE_SUCCESS; } else { } } else { diff --git a/src/query/inc/sql.y b/src/query/inc/sql.y index 3a6e1c0cc0..1b173a272f 100644 --- a/src/query/inc/sql.y +++ b/src/query/inc/sql.y @@ -754,15 +754,9 @@ cmd ::= ALTER TABLE ids(X) cpxName(F) DROP COLUMN ids(A). { setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } -cmd ::= ALTER TABLE ids(X) cpxName(F) ALTER COLUMN LENGTH ids(A) INTEGER(Z). { +cmd ::= ALTER TABLE ids(X) cpxName(F) MODIFY COLUMN columnlist(A). 
{ X.n += F.n; - - toTSDBType(A.type); - SArray* K = tVariantListAppendToken(NULL, &A, -1); - toTSDBType(Z.type); - K = tVariantListAppendToken(K, &Z, -1); - - SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&X, K, NULL, TSDB_ALTER_TABLE_CHANGE_COLUMN, -1); + SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&X, A, NULL, TSDB_ALTER_TABLE_CHANGE_COLUMN, -1); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } @@ -806,6 +800,11 @@ cmd ::= ALTER TABLE ids(X) cpxName(F) SET TAG ids(Y) EQ tagitem(Z). { setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } +cmd ::= ALTER TABLE ids(X) cpxName(F) MODIFY TAG columnlist(A). { + X.n += F.n; + SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&X, A, NULL, TSDB_ALTER_TABLE_MODIFY_TAG_COLUMN, -1); + setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); +} ///////////////////////////////////ALTER STABLE statement////////////////////////////////// cmd ::= ALTER STABLE ids(X) cpxName(F) ADD COLUMN columnlist(A). { @@ -824,15 +823,9 @@ cmd ::= ALTER STABLE ids(X) cpxName(F) DROP COLUMN ids(A). { setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } -cmd ::= ALTER STABLE ids(X) cpxName(F) ALTER COLUMN LENGTH ids(A) INTEGER(Z). { +cmd ::= ALTER STABLE ids(X) cpxName(F) MODIFY COLUMN columnlist(A). { X.n += F.n; - - toTSDBType(A.type); - SArray* K = tVariantListAppendToken(NULL, &A, -1); - toTSDBType(Z.type); - K = tVariantListAppendToken(K, &Z, -1); - - SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&X, K, NULL, TSDB_ALTER_TABLE_CHANGE_COLUMN, TSDB_SUPER_TABLE); + SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&X, A, NULL, TSDB_ALTER_TABLE_CHANGE_COLUMN, TSDB_SUPER_TABLE); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } @@ -865,6 +858,23 @@ cmd ::= ALTER STABLE ids(X) cpxName(F) CHANGE TAG ids(Y) ids(Z). { setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } +cmd ::= ALTER STABLE ids(X) cpxName(F) SET TAG ids(Y) EQ tagitem(Z). { + X.n += F.n; + + toTSDBType(Y.type); + SArray* A = tVariantListAppendToken(NULL, &Y, -1); + A = tVariantListAppend(A, &Z, -1); + + SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&X, NULL, A, TSDB_ALTER_TABLE_UPDATE_TAG_VAL, TSDB_SUPER_TABLE); + setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); +} + +cmd ::= ALTER STABLE ids(X) cpxName(F) MODIFY TAG columnlist(A). { + X.n += F.n; + SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&X, A, NULL, TSDB_ALTER_TABLE_MODIFY_TAG_COLUMN, TSDB_SUPER_TABLE); + setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); +} + ////////////////////////////////////////kill statement/////////////////////////////////////// cmd ::= KILL CONNECTION INTEGER(Y). {setKillSql(pInfo, TSDB_SQL_KILL_CONNECTION, &Y);} cmd ::= KILL STREAM INTEGER(X) COLON(Z) INTEGER(Y). 
{X.n += (Z.n + Y.n); setKillSql(pInfo, TSDB_SQL_KILL_STREAM, &X);} diff --git a/src/query/src/qSqlParser.c b/src/query/src/qSqlParser.c index b912c9ffdc..d41b474953 100644 --- a/src/query/src/qSqlParser.c +++ b/src/query/src/qSqlParser.c @@ -887,7 +887,7 @@ SAlterTableInfo *tSetAlterTableInfo(SStrToken *pTableName, SArray *pCols, SArray pAlterTable->type = type; pAlterTable->tableType = tableType; - if (type == TSDB_ALTER_TABLE_ADD_COLUMN || type == TSDB_ALTER_TABLE_ADD_TAG_COLUMN || type == TSDB_ALTER_TABLE_CHANGE_COLUMN) { + if (type == TSDB_ALTER_TABLE_ADD_COLUMN || type == TSDB_ALTER_TABLE_ADD_TAG_COLUMN || type == TSDB_ALTER_TABLE_CHANGE_COLUMN || type == TSDB_ALTER_TABLE_MODIFY_TAG_COLUMN) { pAlterTable->pAddColumns = pCols; assert(pVals == NULL); } else { diff --git a/src/query/src/sql.c b/src/query/src/sql.c index 560e499228..588da79883 100644 --- a/src/query/src/sql.c +++ b/src/query/src/sql.c @@ -136,18 +136,18 @@ typedef union { #define ParseCTX_FETCH #define ParseCTX_STORE #define YYFALLBACK 1 -#define YYNSTATE 331 -#define YYNRULE 275 -#define YYNRULE_WITH_ACTION 275 +#define YYNSTATE 337 +#define YYNRULE 278 +#define YYNRULE_WITH_ACTION 278 #define YYNTOKEN 188 -#define YY_MAX_SHIFT 330 -#define YY_MIN_SHIFTREDUCE 528 -#define YY_MAX_SHIFTREDUCE 802 -#define YY_ERROR_ACTION 803 -#define YY_ACCEPT_ACTION 804 -#define YY_NO_ACTION 805 -#define YY_MIN_REDUCE 806 -#define YY_MAX_REDUCE 1080 +#define YY_MAX_SHIFT 336 +#define YY_MIN_SHIFTREDUCE 533 +#define YY_MAX_SHIFTREDUCE 810 +#define YY_ERROR_ACTION 811 +#define YY_ACCEPT_ACTION 812 +#define YY_NO_ACTION 813 +#define YY_MIN_REDUCE 814 +#define YY_MAX_REDUCE 1091 /************* End control #defines *******************************************/ #define YY_NLOOKAHEAD ((int)(sizeof(yy_lookahead)/sizeof(yy_lookahead[0]))) @@ -214,78 +214,79 @@ typedef union { ** yy_default[] Default action for each state. 
** *********** Begin parsing tables **********************************************/ -#define YY_ACTTAB_COUNT (697) +#define YY_ACTTAB_COUNT (707) static const YYACTIONTYPE yy_action[] = { - /* 0 */ 974, 576, 211, 328, 70, 18, 217, 965, 188, 577, - /* 10 */ 804, 330, 186, 48, 49, 146, 52, 53, 220, 1060, - /* 20 */ 223, 42, 214, 51, 272, 56, 54, 58, 55, 939, - /* 30 */ 655, 188, 953, 47, 46, 188, 938, 45, 44, 43, - /* 40 */ 48, 49, 1059, 52, 53, 219, 1060, 223, 42, 576, - /* 50 */ 51, 272, 56, 54, 58, 55, 965, 577, 304, 303, - /* 60 */ 47, 46, 971, 146, 45, 44, 43, 49, 31, 52, - /* 70 */ 53, 250, 139, 223, 42, 83, 51, 272, 56, 54, - /* 80 */ 58, 55, 288, 1009, 88, 267, 47, 46, 72, 314, - /* 90 */ 45, 44, 43, 529, 530, 531, 532, 533, 534, 535, - /* 100 */ 536, 537, 538, 539, 540, 541, 329, 235, 288, 212, - /* 110 */ 71, 576, 949, 48, 49, 31, 52, 53, 941, 577, - /* 120 */ 223, 42, 576, 51, 272, 56, 54, 58, 55, 269, - /* 130 */ 577, 81, 744, 47, 46, 257, 256, 45, 44, 43, - /* 140 */ 48, 50, 951, 52, 53, 146, 192, 223, 42, 77, - /* 150 */ 51, 272, 56, 54, 58, 55, 213, 37, 947, 950, - /* 160 */ 47, 46, 1, 160, 45, 44, 43, 24, 286, 323, - /* 170 */ 322, 285, 284, 283, 321, 282, 320, 319, 318, 281, - /* 180 */ 317, 316, 913, 31, 901, 902, 903, 904, 905, 906, - /* 190 */ 907, 908, 909, 910, 911, 912, 914, 915, 52, 53, - /* 200 */ 229, 29, 223, 42, 278, 51, 272, 56, 54, 58, - /* 210 */ 55, 694, 19, 1008, 25, 47, 46, 746, 965, 45, - /* 220 */ 44, 43, 222, 759, 226, 31, 748, 950, 751, 197, - /* 230 */ 754, 222, 759, 215, 13, 748, 198, 751, 87, 754, - /* 240 */ 84, 123, 122, 196, 45, 44, 43, 110, 56, 54, - /* 250 */ 58, 55, 314, 747, 208, 209, 47, 46, 271, 74, - /* 260 */ 45, 44, 43, 208, 209, 75, 227, 253, 24, 950, - /* 270 */ 323, 322, 77, 253, 750, 321, 753, 320, 319, 318, - /* 280 */ 37, 317, 316, 921, 1056, 679, 919, 920, 676, 698, - /* 290 */ 677, 922, 678, 924, 925, 923, 85, 926, 927, 108, - /* 300 */ 101, 113, 249, 691, 69, 31, 112, 118, 121, 111, - /* 310 */ 8, 205, 5, 34, 162, 115, 237, 238, 273, 161, - /* 320 */ 95, 90, 94, 31, 234, 57, 232, 936, 937, 30, - /* 330 */ 940, 301, 760, 293, 57, 180, 178, 176, 756, 31, - /* 340 */ 31, 760, 175, 126, 125, 124, 294, 756, 146, 950, - /* 350 */ 242, 47, 46, 1055, 755, 45, 44, 43, 1054, 246, - /* 360 */ 245, 228, 230, 755, 295, 324, 749, 950, 752, 852, - /* 370 */ 327, 326, 131, 172, 137, 135, 134, 3, 173, 1071, - /* 380 */ 302, 306, 221, 950, 950, 861, 757, 953, 953, 172, - /* 390 */ 62, 953, 853, 236, 680, 233, 172, 298, 297, 290, - /* 400 */ 725, 726, 251, 710, 716, 717, 32, 141, 61, 21, - /* 410 */ 65, 780, 63, 761, 763, 20, 82, 20, 665, 275, - /* 420 */ 667, 277, 32, 32, 61, 86, 6, 100, 666, 99, - /* 430 */ 66, 15, 61, 14, 107, 68, 106, 654, 206, 683, - /* 440 */ 17, 684, 16, 681, 207, 682, 120, 119, 952, 190, - /* 450 */ 191, 193, 187, 194, 195, 201, 202, 200, 185, 1019, - /* 460 */ 199, 189, 1018, 224, 40, 1015, 1014, 225, 305, 247, - /* 470 */ 138, 973, 156, 984, 1001, 981, 982, 966, 758, 254, - /* 480 */ 1000, 986, 140, 144, 136, 948, 157, 258, 148, 216, - /* 490 */ 709, 917, 963, 147, 149, 946, 150, 151, 158, 266, - /* 500 */ 159, 864, 280, 260, 265, 67, 64, 59, 38, 270, - /* 510 */ 183, 35, 289, 264, 268, 860, 1077, 96, 291, 1076, - /* 520 */ 1073, 163, 262, 296, 1070, 103, 299, 1069, 1066, 164, - /* 530 */ 882, 36, 33, 39, 184, 849, 114, 847, 116, 117, - /* 540 */ 845, 844, 239, 174, 842, 841, 840, 839, 838, 837, - /* 550 */ 177, 179, 41, 834, 832, 830, 828, 181, 825, 182, - /* 560 */ 259, 252, 315, 73, 78, 109, 261, 1002, 307, 308, - /* 
570 */ 309, 310, 311, 312, 210, 313, 231, 325, 279, 802, - /* 580 */ 241, 240, 801, 204, 203, 243, 91, 92, 244, 800, - /* 590 */ 843, 786, 785, 248, 127, 274, 253, 686, 836, 167, - /* 600 */ 128, 166, 883, 165, 168, 169, 171, 129, 170, 835, - /* 610 */ 2, 130, 9, 827, 826, 26, 76, 4, 255, 79, - /* 620 */ 711, 152, 153, 154, 155, 929, 142, 218, 714, 143, - /* 630 */ 80, 263, 764, 718, 145, 10, 11, 762, 27, 7, - /* 640 */ 28, 12, 22, 276, 23, 89, 618, 87, 614, 612, - /* 650 */ 611, 610, 607, 580, 287, 93, 97, 796, 32, 789, - /* 660 */ 657, 656, 653, 98, 60, 102, 602, 600, 592, 598, - /* 670 */ 594, 292, 596, 590, 104, 588, 621, 620, 619, 617, - /* 680 */ 105, 300, 616, 615, 613, 609, 608, 61, 578, 545, - /* 690 */ 132, 543, 806, 805, 805, 805, 133, + /* 0 */ 982, 581, 215, 334, 75, 22, 221, 973, 192, 582, + /* 10 */ 812, 336, 190, 52, 53, 150, 56, 57, 224, 1068, + /* 20 */ 227, 46, 218, 55, 278, 60, 58, 62, 59, 947, + /* 30 */ 660, 192, 961, 51, 50, 192, 946, 49, 48, 47, + /* 40 */ 52, 53, 1067, 56, 57, 223, 1068, 227, 46, 581, + /* 50 */ 55, 278, 60, 58, 62, 59, 973, 582, 310, 309, + /* 60 */ 51, 50, 979, 150, 49, 48, 47, 53, 35, 56, + /* 70 */ 57, 256, 143, 227, 46, 88, 55, 278, 60, 58, + /* 80 */ 62, 59, 294, 1017, 93, 273, 51, 50, 77, 320, + /* 90 */ 49, 48, 47, 534, 535, 536, 537, 538, 539, 540, + /* 100 */ 541, 542, 543, 544, 545, 546, 335, 241, 294, 216, + /* 110 */ 76, 581, 957, 52, 53, 35, 56, 57, 949, 582, + /* 120 */ 227, 46, 581, 55, 278, 60, 58, 62, 59, 275, + /* 130 */ 582, 86, 749, 51, 50, 263, 262, 49, 48, 47, + /* 140 */ 52, 54, 959, 56, 57, 150, 196, 227, 46, 82, + /* 150 */ 55, 278, 60, 58, 62, 59, 217, 41, 955, 958, + /* 160 */ 51, 50, 1, 164, 49, 48, 47, 28, 292, 329, + /* 170 */ 328, 291, 290, 289, 327, 288, 326, 325, 324, 287, + /* 180 */ 323, 322, 921, 35, 909, 910, 911, 912, 913, 914, + /* 190 */ 915, 916, 917, 918, 919, 920, 922, 923, 56, 57, + /* 200 */ 233, 1064, 227, 46, 696, 55, 278, 60, 58, 62, + /* 210 */ 59, 8, 23, 1016, 29, 51, 50, 1063, 973, 49, + /* 220 */ 48, 47, 226, 764, 230, 35, 753, 958, 756, 201, + /* 230 */ 759, 226, 764, 219, 1062, 753, 202, 756, 755, 759, + /* 240 */ 758, 127, 126, 200, 49, 48, 47, 210, 60, 58, + /* 250 */ 62, 59, 3, 177, 212, 213, 51, 50, 277, 79, + /* 260 */ 49, 48, 47, 212, 213, 80, 231, 259, 28, 958, + /* 270 */ 329, 328, 82, 259, 754, 327, 757, 326, 325, 324, + /* 280 */ 41, 323, 322, 929, 114, 684, 927, 928, 681, 320, + /* 290 */ 682, 930, 683, 932, 933, 931, 90, 934, 935, 112, + /* 300 */ 106, 117, 255, 150, 74, 35, 116, 122, 125, 115, + /* 310 */ 237, 209, 5, 38, 166, 119, 243, 244, 211, 165, + /* 320 */ 100, 95, 99, 35, 240, 61, 225, 944, 945, 34, + /* 330 */ 948, 35, 765, 35, 61, 184, 182, 180, 761, 232, + /* 340 */ 234, 765, 179, 130, 129, 128, 299, 761, 35, 958, + /* 350 */ 762, 51, 50, 699, 760, 49, 48, 47, 35, 248, + /* 360 */ 35, 330, 35, 760, 300, 961, 961, 958, 252, 251, + /* 370 */ 6, 87, 301, 279, 302, 958, 13, 958, 194, 238, + /* 380 */ 92, 236, 89, 298, 297, 70, 763, 961, 751, 306, + /* 390 */ 124, 123, 958, 242, 685, 239, 860, 305, 304, 307, + /* 400 */ 176, 308, 958, 312, 958, 71, 958, 333, 332, 135, + /* 410 */ 141, 139, 138, 869, 257, 861, 67, 176, 36, 176, + /* 420 */ 730, 731, 715, 1087, 752, 721, 145, 195, 722, 66, + /* 430 */ 785, 703, 25, 766, 24, 670, 281, 24, 68, 36, + /* 440 */ 36, 672, 283, 671, 197, 66, 91, 66, 33, 191, + /* 450 */ 15, 284, 14, 105, 73, 104, 659, 198, 199, 17, + /* 460 */ 19, 16, 18, 205, 111, 21, 110, 20, 688, 768, + /* 470 */ 689, 686, 206, 687, 204, 1079, 960, 189, 203, 193, + 
/* 480 */ 1027, 1026, 228, 253, 1023, 1022, 229, 311, 142, 981, + /* 490 */ 992, 989, 44, 990, 994, 974, 260, 144, 1009, 148, + /* 500 */ 140, 160, 956, 1008, 161, 264, 925, 220, 266, 714, + /* 510 */ 954, 321, 971, 156, 151, 162, 152, 158, 276, 153, + /* 520 */ 163, 271, 63, 872, 286, 42, 187, 72, 69, 274, + /* 530 */ 39, 295, 868, 154, 296, 272, 1086, 102, 1085, 270, + /* 540 */ 1082, 268, 167, 303, 1078, 108, 1077, 1074, 168, 265, + /* 550 */ 890, 40, 37, 43, 188, 857, 118, 855, 120, 121, + /* 560 */ 853, 852, 245, 178, 850, 849, 848, 847, 846, 845, + /* 570 */ 181, 183, 842, 840, 838, 836, 185, 833, 45, 186, + /* 580 */ 113, 258, 78, 83, 313, 267, 1010, 314, 315, 316, + /* 590 */ 317, 318, 319, 214, 235, 331, 810, 285, 246, 247, + /* 600 */ 809, 207, 208, 96, 97, 249, 250, 808, 791, 790, + /* 610 */ 254, 81, 259, 851, 691, 280, 844, 171, 170, 891, + /* 620 */ 169, 172, 174, 173, 175, 131, 132, 133, 843, 4, + /* 630 */ 134, 835, 9, 834, 30, 261, 2, 716, 84, 146, + /* 640 */ 155, 157, 937, 159, 719, 85, 222, 147, 269, 31, + /* 650 */ 723, 149, 32, 767, 10, 7, 11, 769, 12, 26, + /* 660 */ 282, 27, 94, 623, 92, 619, 617, 616, 615, 612, + /* 670 */ 585, 293, 98, 64, 101, 36, 662, 661, 658, 607, + /* 680 */ 605, 65, 103, 597, 603, 599, 601, 595, 593, 107, + /* 690 */ 109, 626, 625, 624, 622, 621, 620, 618, 614, 613, + /* 700 */ 583, 66, 550, 548, 814, 136, 137, }; static const YYCODETYPE yy_lookahead[] = { /* 0 */ 191, 1, 190, 191, 197, 252, 210, 234, 252, 9, @@ -308,57 +309,57 @@ static const YYCODETYPE yy_lookahead[] = { /* 170 */ 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, /* 180 */ 101, 102, 209, 191, 211, 212, 213, 214, 215, 216, /* 190 */ 217, 218, 219, 220, 221, 222, 223, 224, 16, 17, - /* 200 */ 233, 104, 20, 21, 107, 23, 24, 25, 26, 27, - /* 210 */ 28, 37, 44, 259, 104, 33, 34, 1, 234, 37, + /* 200 */ 233, 252, 20, 21, 109, 23, 24, 25, 26, 27, + /* 210 */ 28, 116, 44, 259, 104, 33, 34, 252, 234, 37, /* 220 */ 38, 39, 1, 2, 232, 191, 5, 235, 7, 61, - /* 230 */ 9, 1, 2, 249, 104, 5, 68, 7, 108, 9, - /* 240 */ 110, 73, 74, 75, 37, 38, 39, 76, 25, 26, - /* 250 */ 27, 28, 81, 37, 33, 34, 33, 34, 37, 105, + /* 230 */ 9, 1, 2, 249, 252, 5, 68, 7, 5, 9, + /* 240 */ 7, 73, 74, 75, 37, 38, 39, 252, 25, 26, + /* 250 */ 27, 28, 194, 195, 33, 34, 33, 34, 37, 105, /* 260 */ 37, 38, 39, 33, 34, 105, 232, 113, 88, 235, /* 270 */ 90, 91, 104, 113, 5, 95, 7, 97, 98, 99, - /* 280 */ 112, 101, 102, 209, 252, 2, 212, 213, 5, 115, + /* 280 */ 112, 101, 102, 209, 76, 2, 212, 213, 5, 81, /* 290 */ 7, 217, 9, 219, 220, 221, 197, 223, 224, 62, - /* 300 */ 63, 64, 134, 109, 136, 191, 69, 70, 71, 72, - /* 310 */ 116, 143, 62, 63, 64, 78, 33, 34, 15, 69, - /* 320 */ 70, 71, 72, 191, 68, 104, 68, 228, 229, 230, - /* 330 */ 231, 75, 111, 75, 104, 62, 63, 64, 117, 191, - /* 340 */ 191, 111, 69, 70, 71, 72, 232, 117, 191, 235, - /* 350 */ 135, 33, 34, 252, 133, 37, 38, 39, 252, 144, - /* 360 */ 145, 210, 210, 133, 232, 210, 5, 235, 7, 196, - /* 370 */ 65, 66, 67, 200, 62, 63, 64, 194, 195, 236, - /* 380 */ 232, 232, 60, 235, 235, 196, 117, 236, 236, 200, - /* 390 */ 109, 236, 196, 137, 111, 137, 200, 141, 142, 141, - /* 400 */ 124, 125, 105, 105, 105, 105, 109, 109, 109, 109, - /* 410 */ 109, 105, 131, 105, 111, 109, 259, 109, 105, 105, - /* 420 */ 105, 105, 109, 109, 109, 109, 104, 138, 105, 140, - /* 430 */ 129, 138, 109, 140, 138, 104, 140, 106, 252, 5, - /* 440 */ 138, 7, 140, 5, 252, 7, 76, 77, 236, 252, - /* 450 */ 252, 252, 252, 252, 252, 252, 252, 252, 252, 227, - /* 460 */ 252, 252, 227, 227, 251, 227, 227, 
227, 227, 191, - /* 470 */ 191, 191, 238, 191, 260, 191, 191, 234, 117, 234, - /* 480 */ 260, 191, 191, 191, 60, 234, 191, 256, 246, 256, - /* 490 */ 117, 225, 248, 247, 245, 191, 244, 243, 191, 121, - /* 500 */ 191, 191, 191, 256, 256, 128, 130, 127, 191, 122, - /* 510 */ 191, 191, 191, 120, 126, 191, 191, 191, 191, 191, - /* 520 */ 191, 191, 119, 191, 191, 191, 191, 191, 191, 191, - /* 530 */ 191, 191, 191, 191, 191, 191, 191, 191, 191, 191, - /* 540 */ 191, 191, 191, 191, 191, 191, 191, 191, 191, 191, - /* 550 */ 191, 191, 132, 191, 191, 191, 191, 191, 191, 191, - /* 560 */ 118, 192, 103, 192, 192, 87, 192, 192, 86, 50, - /* 570 */ 83, 85, 54, 84, 192, 82, 192, 79, 192, 5, - /* 580 */ 5, 146, 5, 192, 192, 146, 197, 197, 5, 5, - /* 590 */ 192, 90, 89, 135, 193, 107, 113, 105, 192, 202, - /* 600 */ 193, 206, 208, 207, 205, 203, 201, 193, 204, 192, - /* 610 */ 198, 193, 104, 192, 192, 104, 114, 194, 109, 109, - /* 620 */ 105, 242, 241, 240, 239, 225, 104, 1, 105, 109, - /* 630 */ 104, 104, 111, 105, 104, 123, 123, 105, 109, 104, - /* 640 */ 109, 104, 104, 107, 104, 76, 9, 108, 5, 5, - /* 650 */ 5, 5, 5, 80, 15, 76, 140, 5, 109, 5, - /* 660 */ 5, 5, 105, 139, 16, 140, 5, 5, 5, 5, - /* 670 */ 5, 138, 5, 5, 140, 5, 5, 5, 5, 5, - /* 680 */ 139, 138, 5, 5, 5, 5, 5, 109, 80, 60, - /* 690 */ 21, 59, 0, 264, 264, 264, 21, 264, 264, 264, - /* 700 */ 264, 264, 264, 264, 264, 264, 264, 264, 264, 264, + /* 300 */ 63, 64, 134, 191, 136, 191, 69, 70, 71, 72, + /* 310 */ 68, 143, 62, 63, 64, 78, 33, 34, 252, 69, + /* 320 */ 70, 71, 72, 191, 68, 104, 60, 228, 229, 230, + /* 330 */ 231, 191, 111, 191, 104, 62, 63, 64, 117, 210, + /* 340 */ 210, 111, 69, 70, 71, 72, 232, 117, 191, 235, + /* 350 */ 117, 33, 34, 37, 133, 37, 38, 39, 191, 135, + /* 360 */ 191, 210, 191, 133, 232, 236, 236, 235, 144, 145, + /* 370 */ 104, 259, 232, 15, 232, 235, 104, 235, 252, 137, + /* 380 */ 108, 139, 110, 141, 142, 109, 117, 236, 1, 232, + /* 390 */ 76, 77, 235, 137, 111, 139, 196, 141, 142, 232, + /* 400 */ 200, 232, 235, 232, 235, 129, 235, 65, 66, 67, + /* 410 */ 62, 63, 64, 196, 105, 196, 109, 200, 109, 200, + /* 420 */ 124, 125, 105, 236, 37, 105, 109, 252, 105, 109, + /* 430 */ 105, 115, 109, 105, 109, 105, 105, 109, 131, 109, + /* 440 */ 109, 105, 105, 105, 252, 109, 109, 109, 104, 252, + /* 450 */ 138, 107, 140, 138, 104, 140, 106, 252, 252, 138, + /* 460 */ 138, 140, 140, 252, 138, 138, 140, 140, 5, 111, + /* 470 */ 7, 5, 252, 7, 252, 236, 236, 252, 252, 252, + /* 480 */ 227, 227, 227, 191, 227, 227, 227, 227, 191, 191, + /* 490 */ 191, 191, 251, 191, 191, 234, 234, 191, 260, 191, + /* 500 */ 60, 238, 234, 260, 191, 256, 225, 256, 256, 117, + /* 510 */ 191, 103, 248, 242, 247, 191, 246, 240, 122, 245, + /* 520 */ 191, 256, 127, 191, 191, 191, 191, 128, 130, 126, + /* 530 */ 191, 191, 191, 244, 191, 121, 191, 191, 191, 120, + /* 540 */ 191, 119, 191, 191, 191, 191, 191, 191, 191, 118, + /* 550 */ 191, 191, 191, 191, 191, 191, 191, 191, 191, 191, + /* 560 */ 191, 191, 191, 191, 191, 191, 191, 191, 191, 191, + /* 570 */ 191, 191, 191, 191, 191, 191, 191, 191, 132, 191, + /* 580 */ 87, 192, 192, 192, 86, 192, 192, 50, 83, 85, + /* 590 */ 54, 84, 82, 192, 192, 79, 5, 192, 146, 5, + /* 600 */ 5, 192, 192, 197, 197, 146, 5, 5, 90, 89, + /* 610 */ 135, 114, 113, 192, 105, 107, 192, 202, 206, 208, + /* 620 */ 207, 205, 204, 203, 201, 193, 193, 193, 192, 194, + /* 630 */ 193, 192, 104, 192, 104, 109, 198, 105, 109, 104, + /* 640 */ 243, 241, 225, 239, 105, 104, 1, 109, 104, 109, + /* 650 */ 105, 104, 109, 105, 123, 104, 123, 
111, 104, 104, + /* 660 */ 107, 104, 76, 9, 108, 5, 5, 5, 5, 5, + /* 670 */ 80, 15, 76, 16, 140, 109, 5, 5, 105, 5, + /* 680 */ 5, 16, 140, 5, 5, 5, 5, 5, 5, 140, + /* 690 */ 140, 5, 5, 5, 5, 5, 5, 5, 5, 5, + /* 700 */ 80, 109, 60, 59, 0, 21, 21, 264, 264, 264, /* 710 */ 264, 264, 264, 264, 264, 264, 264, 264, 264, 264, /* 720 */ 264, 264, 264, 264, 264, 264, 264, 264, 264, 264, /* 730 */ 264, 264, 264, 264, 264, 264, 264, 264, 264, 264, @@ -376,106 +377,107 @@ static const YYCODETYPE yy_lookahead[] = { /* 850 */ 264, 264, 264, 264, 264, 264, 264, 264, 264, 264, /* 860 */ 264, 264, 264, 264, 264, 264, 264, 264, 264, 264, /* 870 */ 264, 264, 264, 264, 264, 264, 264, 264, 264, 264, - /* 880 */ 264, 264, 264, 264, 264, + /* 880 */ 264, 264, 264, 264, 264, 264, 264, 264, 264, 264, + /* 890 */ 264, 264, 264, 264, 264, }; -#define YY_SHIFT_COUNT (330) +#define YY_SHIFT_COUNT (336) #define YY_SHIFT_MIN (0) -#define YY_SHIFT_MAX (692) +#define YY_SHIFT_MAX (704) static const unsigned short int yy_shift_ofst[] = { /* 0 */ 168, 79, 79, 180, 180, 3, 221, 230, 110, 121, - /* 10 */ 121, 121, 121, 121, 121, 121, 121, 121, 0, 48, - /* 20 */ 230, 283, 283, 283, 283, 45, 45, 121, 121, 121, - /* 30 */ 29, 121, 121, 171, 3, 8, 8, 697, 697, 697, - /* 40 */ 230, 230, 230, 230, 230, 230, 230, 230, 230, 230, + /* 10 */ 121, 121, 121, 121, 121, 121, 121, 121, 121, 121, + /* 20 */ 121, 121, 0, 48, 230, 283, 283, 283, 283, 45, + /* 30 */ 45, 121, 121, 121, 29, 121, 121, 208, 3, 8, + /* 40 */ 8, 707, 707, 707, 230, 230, 230, 230, 230, 230, /* 50 */ 230, 230, 230, 230, 230, 230, 230, 230, 230, 230, - /* 60 */ 283, 283, 25, 25, 25, 25, 25, 25, 25, 121, - /* 70 */ 121, 121, 174, 121, 121, 121, 45, 45, 121, 121, - /* 80 */ 121, 276, 276, 194, 45, 121, 121, 121, 121, 121, + /* 60 */ 230, 230, 230, 230, 283, 283, 283, 25, 25, 25, + /* 70 */ 25, 25, 25, 25, 121, 121, 121, 316, 121, 121, + /* 80 */ 121, 45, 45, 121, 121, 121, 296, 296, 95, 45, /* 90 */ 121, 121, 121, 121, 121, 121, 121, 121, 121, 121, /* 100 */ 121, 121, 121, 121, 121, 121, 121, 121, 121, 121, /* 110 */ 121, 121, 121, 121, 121, 121, 121, 121, 121, 121, /* 120 */ 121, 121, 121, 121, 121, 121, 121, 121, 121, 121, - /* 130 */ 121, 121, 121, 121, 121, 121, 121, 121, 424, 424, - /* 140 */ 424, 373, 373, 373, 424, 373, 424, 377, 376, 380, - /* 150 */ 387, 388, 378, 393, 403, 442, 420, 424, 424, 424, - /* 160 */ 459, 3, 3, 424, 424, 478, 482, 519, 487, 486, - /* 170 */ 518, 489, 493, 459, 424, 498, 498, 424, 498, 424, - /* 180 */ 498, 424, 424, 697, 697, 27, 100, 127, 100, 100, - /* 190 */ 53, 182, 223, 223, 223, 223, 237, 250, 273, 318, - /* 200 */ 318, 318, 318, 256, 258, 215, 207, 207, 269, 361, - /* 210 */ 130, 305, 312, 297, 154, 160, 298, 299, 300, 306, - /* 220 */ 308, 216, 322, 303, 281, 301, 313, 314, 315, 316, - /* 230 */ 323, 97, 289, 293, 296, 331, 302, 434, 438, 370, - /* 240 */ 574, 435, 575, 577, 439, 583, 584, 501, 503, 458, - /* 250 */ 483, 488, 508, 502, 492, 511, 509, 510, 515, 522, - /* 260 */ 523, 520, 526, 626, 527, 528, 530, 529, 512, 531, - /* 270 */ 513, 532, 535, 521, 537, 488, 538, 536, 540, 539, - /* 280 */ 569, 637, 643, 644, 645, 646, 647, 573, 639, 579, - /* 290 */ 516, 652, 524, 533, 549, 549, 648, 525, 534, 654, - /* 300 */ 541, 543, 549, 655, 656, 557, 549, 661, 662, 663, - /* 310 */ 664, 665, 667, 668, 670, 671, 672, 673, 674, 677, - /* 320 */ 678, 679, 680, 681, 578, 608, 669, 675, 629, 632, - /* 330 */ 692, + /* 130 */ 121, 121, 121, 121, 121, 121, 121, 121, 121, 121, + /* 140 */ 121, 121, 440, 440, 440, 392, 392, 392, 440, 
392, + /* 150 */ 440, 399, 398, 395, 396, 403, 414, 419, 422, 431, + /* 160 */ 446, 440, 440, 440, 408, 3, 3, 440, 440, 493, + /* 170 */ 498, 537, 505, 504, 536, 507, 510, 408, 440, 516, + /* 180 */ 516, 440, 516, 440, 516, 440, 440, 707, 707, 27, + /* 190 */ 100, 127, 100, 100, 53, 182, 223, 223, 223, 223, + /* 200 */ 237, 250, 273, 318, 318, 318, 318, 242, 256, 224, + /* 210 */ 207, 207, 233, 269, 272, 342, 348, 309, 154, 160, + /* 220 */ 317, 320, 323, 325, 328, 387, 266, 358, 307, 276, + /* 230 */ 330, 331, 336, 337, 338, 344, 312, 315, 321, 322, + /* 240 */ 326, 350, 327, 463, 466, 314, 591, 452, 594, 595, + /* 250 */ 459, 601, 602, 518, 520, 475, 499, 508, 528, 497, + /* 260 */ 509, 530, 526, 529, 532, 535, 539, 538, 541, 645, + /* 270 */ 544, 545, 547, 540, 531, 543, 533, 548, 551, 546, + /* 280 */ 554, 508, 555, 553, 557, 556, 586, 654, 660, 661, + /* 290 */ 662, 663, 664, 590, 656, 596, 657, 534, 542, 566, + /* 300 */ 566, 566, 566, 665, 549, 550, 566, 566, 566, 671, + /* 310 */ 672, 573, 566, 674, 675, 678, 679, 680, 681, 682, + /* 320 */ 683, 686, 687, 688, 689, 690, 691, 692, 693, 694, + /* 330 */ 592, 620, 684, 685, 642, 644, 704, }; -#define YY_REDUCE_COUNT (184) +#define YY_REDUCE_COUNT (188) #define YY_REDUCE_MIN (-247) -#define YY_REDUCE_MAX (423) +#define YY_REDUCE_MAX (441) static const short yy_reduce_ofst[] = { /* 0 */ -178, -27, -27, 74, 74, 99, -244, -217, -119, -76, - /* 10 */ -176, -128, -8, 34, 114, 132, 148, 149, -191, -188, - /* 20 */ -221, -204, 151, 152, 155, -227, -16, -46, 157, -33, - /* 30 */ -113, -84, -123, 173, -193, 189, 196, -162, -36, 183, - /* 40 */ -247, -240, -106, 32, 101, 106, 186, 192, 197, 198, - /* 50 */ 199, 200, 201, 202, 203, 204, 205, 206, 208, 209, - /* 60 */ 143, 212, 232, 235, 236, 238, 239, 240, 241, 278, - /* 70 */ 279, 280, 213, 282, 284, 285, 243, 245, 290, 291, - /* 80 */ 292, 214, 220, 234, 251, 295, 304, 307, 309, 310, - /* 90 */ 311, 317, 319, 320, 321, 324, 325, 326, 327, 328, - /* 100 */ 329, 330, 332, 333, 334, 335, 336, 337, 338, 339, - /* 110 */ 340, 341, 342, 343, 344, 345, 346, 347, 348, 349, - /* 120 */ 350, 351, 352, 353, 354, 355, 356, 357, 358, 359, - /* 130 */ 360, 362, 363, 364, 365, 366, 367, 368, 369, 371, - /* 140 */ 372, 231, 233, 247, 374, 248, 375, 244, 246, 242, - /* 150 */ 249, 252, 254, 379, 381, 383, 385, 382, 384, 386, - /* 160 */ 266, 389, 390, 391, 392, 394, 396, 395, 397, 399, - /* 170 */ 402, 404, 405, 400, 398, 401, 407, 406, 414, 417, - /* 180 */ 418, 421, 422, 412, 423, + /* 10 */ -176, -128, -8, 34, 114, 132, 140, 142, 157, 167, + /* 20 */ 169, 171, -191, -188, -221, -204, 129, 130, 151, -227, + /* 30 */ -16, -46, 112, -33, -113, -84, -123, 200, -193, 217, + /* 40 */ 219, -162, -36, 58, -247, -240, -106, -51, -35, -18, + /* 50 */ -5, 66, 126, 175, 192, 197, 205, 206, 211, 220, + /* 60 */ 222, 225, 226, 227, 187, 239, 240, 253, 254, 255, + /* 70 */ 257, 258, 259, 260, 292, 297, 298, 241, 299, 300, + /* 80 */ 302, 261, 262, 303, 306, 308, 238, 243, 263, 268, + /* 90 */ 313, 319, 324, 329, 332, 333, 334, 335, 339, 340, + /* 100 */ 341, 343, 345, 346, 347, 349, 351, 352, 353, 354, + /* 110 */ 355, 356, 357, 359, 360, 361, 362, 363, 364, 365, + /* 120 */ 366, 367, 368, 369, 370, 371, 372, 373, 374, 375, + /* 130 */ 376, 377, 378, 379, 380, 381, 382, 383, 384, 385, + /* 140 */ 386, 388, 389, 390, 391, 249, 251, 252, 393, 265, + /* 150 */ 394, 264, 267, 270, 274, 289, 397, 271, 400, 277, + /* 160 */ 404, 401, 402, 405, 281, 406, 407, 409, 410, 411, + /* 170 */ 413, 412, 415, 416, 420, 418, 423, 417, 421, 
432, + /* 180 */ 433, 424, 434, 436, 437, 439, 441, 438, 435, }; static const YYACTIONTYPE yy_default[] = { - /* 0 */ 803, 916, 862, 928, 850, 859, 1062, 1062, 803, 803, - /* 10 */ 803, 803, 803, 803, 803, 803, 803, 803, 975, 822, - /* 20 */ 1062, 803, 803, 803, 803, 803, 803, 803, 803, 803, - /* 30 */ 859, 803, 803, 865, 859, 865, 865, 970, 900, 918, - /* 40 */ 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, - /* 50 */ 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, - /* 60 */ 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, - /* 70 */ 803, 803, 977, 983, 980, 803, 803, 803, 985, 803, - /* 80 */ 803, 1005, 1005, 968, 803, 803, 803, 803, 803, 803, - /* 90 */ 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, - /* 100 */ 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, - /* 110 */ 803, 803, 803, 803, 848, 803, 846, 803, 803, 803, - /* 120 */ 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, - /* 130 */ 803, 833, 803, 803, 803, 803, 803, 803, 824, 824, - /* 140 */ 824, 803, 803, 803, 824, 803, 824, 1012, 1016, 1010, - /* 150 */ 998, 1006, 997, 993, 991, 990, 1020, 824, 824, 824, - /* 160 */ 863, 859, 859, 824, 824, 881, 879, 877, 869, 875, - /* 170 */ 871, 873, 867, 851, 824, 857, 857, 824, 857, 824, - /* 180 */ 857, 824, 824, 900, 918, 803, 1021, 803, 1061, 1011, - /* 190 */ 1051, 1050, 1057, 1049, 1048, 1047, 803, 803, 803, 1043, - /* 200 */ 1044, 1046, 1045, 803, 803, 803, 1053, 1052, 803, 803, - /* 210 */ 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, - /* 220 */ 803, 803, 1023, 803, 1017, 1013, 803, 803, 803, 803, - /* 230 */ 803, 803, 803, 803, 803, 930, 803, 803, 803, 803, - /* 240 */ 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, - /* 250 */ 967, 803, 803, 803, 803, 803, 979, 978, 803, 803, - /* 260 */ 803, 803, 803, 803, 803, 803, 803, 1007, 803, 999, - /* 270 */ 803, 803, 803, 803, 803, 942, 803, 803, 803, 803, - /* 280 */ 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, - /* 290 */ 803, 803, 803, 803, 1075, 1072, 803, 803, 803, 803, - /* 300 */ 803, 803, 1068, 803, 803, 803, 1065, 803, 803, 803, - /* 310 */ 803, 803, 803, 803, 803, 803, 803, 803, 803, 803, - /* 320 */ 803, 803, 803, 803, 884, 803, 831, 829, 803, 820, - /* 330 */ 803, + /* 0 */ 811, 924, 870, 936, 858, 867, 1070, 1070, 811, 811, + /* 10 */ 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, + /* 20 */ 811, 811, 983, 830, 1070, 811, 811, 811, 811, 811, + /* 30 */ 811, 811, 811, 811, 867, 811, 811, 873, 867, 873, + /* 40 */ 873, 978, 908, 926, 811, 811, 811, 811, 811, 811, + /* 50 */ 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, + /* 60 */ 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, + /* 70 */ 811, 811, 811, 811, 811, 811, 811, 985, 991, 988, + /* 80 */ 811, 811, 811, 993, 811, 811, 1013, 1013, 976, 811, + /* 90 */ 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, + /* 100 */ 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, + /* 110 */ 811, 811, 811, 811, 811, 811, 811, 811, 856, 811, + /* 120 */ 854, 811, 811, 811, 811, 811, 811, 811, 811, 811, + /* 130 */ 811, 811, 811, 811, 811, 841, 811, 811, 811, 811, + /* 140 */ 811, 811, 832, 832, 832, 811, 811, 811, 832, 811, + /* 150 */ 832, 1020, 1024, 1018, 1006, 1014, 1005, 1001, 999, 998, + /* 160 */ 1028, 832, 832, 832, 871, 867, 867, 832, 832, 889, + /* 170 */ 887, 885, 877, 883, 879, 881, 875, 859, 832, 865, + /* 180 */ 865, 832, 865, 832, 865, 832, 832, 908, 926, 811, + /* 190 */ 1029, 811, 1069, 1019, 1059, 1058, 1065, 1057, 1056, 1055, + /* 200 */ 811, 811, 811, 1051, 1052, 1054, 1053, 811, 811, 811, + /* 210 */ 1061, 1060, 811, 811, 811, 811, 
811, 811, 811, 811, + /* 220 */ 811, 811, 811, 811, 811, 811, 1031, 811, 1025, 1021, + /* 230 */ 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, + /* 240 */ 811, 938, 811, 811, 811, 811, 811, 811, 811, 811, + /* 250 */ 811, 811, 811, 811, 811, 811, 975, 811, 811, 811, + /* 260 */ 811, 811, 987, 986, 811, 811, 811, 811, 811, 811, + /* 270 */ 811, 811, 811, 1015, 811, 1007, 811, 811, 811, 811, + /* 280 */ 811, 950, 811, 811, 811, 811, 811, 811, 811, 811, + /* 290 */ 811, 811, 811, 811, 811, 811, 811, 811, 811, 1088, + /* 300 */ 1083, 1084, 1081, 811, 811, 811, 1080, 1075, 1076, 811, + /* 310 */ 811, 811, 1073, 811, 811, 811, 811, 811, 811, 811, + /* 320 */ 811, 811, 811, 811, 811, 811, 811, 811, 811, 811, + /* 330 */ 892, 811, 839, 837, 811, 828, 811, }; /********** End of lemon-generated parsing tables *****************************/ @@ -634,7 +636,7 @@ static const YYCODETYPE yyFallback[] = { 0, /* SYNCDB => nothing */ 0, /* ADD => nothing */ 0, /* COLUMN => nothing */ - 0, /* LENGTH => nothing */ + 0, /* MODIFY => nothing */ 0, /* TAG => nothing */ 0, /* CHANGE => nothing */ 0, /* SET => nothing */ @@ -909,7 +911,7 @@ static const char *const yyTokenName[] = { /* 136 */ "SYNCDB", /* 137 */ "ADD", /* 138 */ "COLUMN", - /* 139 */ "LENGTH", + /* 139 */ "MODIFY", /* 140 */ "TAG", /* 141 */ "CHANGE", /* 142 */ "SET", @@ -1302,20 +1304,23 @@ static const char *const yyRuleName[] = { /* 258 */ "cmd ::= SYNCDB ids REPLICA", /* 259 */ "cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist", /* 260 */ "cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids", - /* 261 */ "cmd ::= ALTER TABLE ids cpxName ALTER COLUMN LENGTH ids INTEGER", + /* 261 */ "cmd ::= ALTER TABLE ids cpxName MODIFY COLUMN columnlist", /* 262 */ "cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist", /* 263 */ "cmd ::= ALTER TABLE ids cpxName DROP TAG ids", /* 264 */ "cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids", /* 265 */ "cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem", - /* 266 */ "cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist", - /* 267 */ "cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids", - /* 268 */ "cmd ::= ALTER STABLE ids cpxName ALTER COLUMN LENGTH ids INTEGER", - /* 269 */ "cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist", - /* 270 */ "cmd ::= ALTER STABLE ids cpxName DROP TAG ids", - /* 271 */ "cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids", - /* 272 */ "cmd ::= KILL CONNECTION INTEGER", - /* 273 */ "cmd ::= KILL STREAM INTEGER COLON INTEGER", - /* 274 */ "cmd ::= KILL QUERY INTEGER COLON INTEGER", + /* 266 */ "cmd ::= ALTER TABLE ids cpxName MODIFY TAG columnlist", + /* 267 */ "cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist", + /* 268 */ "cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids", + /* 269 */ "cmd ::= ALTER STABLE ids cpxName MODIFY COLUMN columnlist", + /* 270 */ "cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist", + /* 271 */ "cmd ::= ALTER STABLE ids cpxName DROP TAG ids", + /* 272 */ "cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids", + /* 273 */ "cmd ::= ALTER STABLE ids cpxName SET TAG ids EQ tagitem", + /* 274 */ "cmd ::= ALTER STABLE ids cpxName MODIFY TAG columnlist", + /* 275 */ "cmd ::= KILL CONNECTION INTEGER", + /* 276 */ "cmd ::= KILL STREAM INTEGER COLON INTEGER", + /* 277 */ "cmd ::= KILL QUERY INTEGER COLON INTEGER", }; #endif /* NDEBUG */ @@ -2043,20 +2048,23 @@ static const YYCODETYPE yyRuleInfoLhs[] = { 189, /* (258) cmd ::= SYNCDB ids REPLICA */ 189, /* (259) cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist */ 189, /* (260) cmd ::= ALTER 
TABLE ids cpxName DROP COLUMN ids */ - 189, /* (261) cmd ::= ALTER TABLE ids cpxName ALTER COLUMN LENGTH ids INTEGER */ + 189, /* (261) cmd ::= ALTER TABLE ids cpxName MODIFY COLUMN columnlist */ 189, /* (262) cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist */ 189, /* (263) cmd ::= ALTER TABLE ids cpxName DROP TAG ids */ 189, /* (264) cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids */ 189, /* (265) cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem */ - 189, /* (266) cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist */ - 189, /* (267) cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids */ - 189, /* (268) cmd ::= ALTER STABLE ids cpxName ALTER COLUMN LENGTH ids INTEGER */ - 189, /* (269) cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist */ - 189, /* (270) cmd ::= ALTER STABLE ids cpxName DROP TAG ids */ - 189, /* (271) cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids */ - 189, /* (272) cmd ::= KILL CONNECTION INTEGER */ - 189, /* (273) cmd ::= KILL STREAM INTEGER COLON INTEGER */ - 189, /* (274) cmd ::= KILL QUERY INTEGER COLON INTEGER */ + 189, /* (266) cmd ::= ALTER TABLE ids cpxName MODIFY TAG columnlist */ + 189, /* (267) cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist */ + 189, /* (268) cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids */ + 189, /* (269) cmd ::= ALTER STABLE ids cpxName MODIFY COLUMN columnlist */ + 189, /* (270) cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist */ + 189, /* (271) cmd ::= ALTER STABLE ids cpxName DROP TAG ids */ + 189, /* (272) cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids */ + 189, /* (273) cmd ::= ALTER STABLE ids cpxName SET TAG ids EQ tagitem */ + 189, /* (274) cmd ::= ALTER STABLE ids cpxName MODIFY TAG columnlist */ + 189, /* (275) cmd ::= KILL CONNECTION INTEGER */ + 189, /* (276) cmd ::= KILL STREAM INTEGER COLON INTEGER */ + 189, /* (277) cmd ::= KILL QUERY INTEGER COLON INTEGER */ }; /* For rule J, yyRuleInfoNRhs[J] contains the negative of the number @@ -2323,20 +2331,23 @@ static const signed char yyRuleInfoNRhs[] = { -3, /* (258) cmd ::= SYNCDB ids REPLICA */ -7, /* (259) cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist */ -7, /* (260) cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids */ - -9, /* (261) cmd ::= ALTER TABLE ids cpxName ALTER COLUMN LENGTH ids INTEGER */ + -7, /* (261) cmd ::= ALTER TABLE ids cpxName MODIFY COLUMN columnlist */ -7, /* (262) cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist */ -7, /* (263) cmd ::= ALTER TABLE ids cpxName DROP TAG ids */ -8, /* (264) cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids */ -9, /* (265) cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem */ - -7, /* (266) cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist */ - -7, /* (267) cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids */ - -9, /* (268) cmd ::= ALTER STABLE ids cpxName ALTER COLUMN LENGTH ids INTEGER */ - -7, /* (269) cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist */ - -7, /* (270) cmd ::= ALTER STABLE ids cpxName DROP TAG ids */ - -8, /* (271) cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids */ - -3, /* (272) cmd ::= KILL CONNECTION INTEGER */ - -5, /* (273) cmd ::= KILL STREAM INTEGER COLON INTEGER */ - -5, /* (274) cmd ::= KILL QUERY INTEGER COLON INTEGER */ + -7, /* (266) cmd ::= ALTER TABLE ids cpxName MODIFY TAG columnlist */ + -7, /* (267) cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist */ + -7, /* (268) cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids */ + -7, /* (269) cmd ::= ALTER STABLE ids cpxName MODIFY COLUMN columnlist */ + -7, /* (270) cmd ::= ALTER STABLE 
ids cpxName ADD TAG columnlist */ + -7, /* (271) cmd ::= ALTER STABLE ids cpxName DROP TAG ids */ + -8, /* (272) cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids */ + -9, /* (273) cmd ::= ALTER STABLE ids cpxName SET TAG ids EQ tagitem */ + -7, /* (274) cmd ::= ALTER STABLE ids cpxName MODIFY TAG columnlist */ + -3, /* (275) cmd ::= KILL CONNECTION INTEGER */ + -5, /* (276) cmd ::= KILL STREAM INTEGER COLON INTEGER */ + -5, /* (277) cmd ::= KILL QUERY INTEGER COLON INTEGER */ }; static void yy_accept(yyParser*); /* Forward Declaration */ @@ -3340,16 +3351,10 @@ static YYACTIONTYPE yy_reduce( setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 261: /* cmd ::= ALTER TABLE ids cpxName ALTER COLUMN LENGTH ids INTEGER */ + case 261: /* cmd ::= ALTER TABLE ids cpxName MODIFY COLUMN columnlist */ { - yymsp[-6].minor.yy0.n += yymsp[-5].minor.yy0.n; - - toTSDBType(yymsp[-1].minor.yy0.type); - SArray* K = tVariantListAppendToken(NULL, &yymsp[-1].minor.yy0, -1); - toTSDBType(yymsp[0].minor.yy0.type); - K = tVariantListAppendToken(K, &yymsp[0].minor.yy0, -1); - - SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-6].minor.yy0, K, NULL, TSDB_ALTER_TABLE_CHANGE_COLUMN, -1); + yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; + SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy285, NULL, TSDB_ALTER_TABLE_CHANGE_COLUMN, -1); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; @@ -3397,14 +3402,21 @@ static YYACTIONTYPE yy_reduce( setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 266: /* cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist */ + case 266: /* cmd ::= ALTER TABLE ids cpxName MODIFY TAG columnlist */ +{ + yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; + SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy285, NULL, TSDB_ALTER_TABLE_MODIFY_TAG_COLUMN, -1); + setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); +} + break; + case 267: /* cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy285, NULL, TSDB_ALTER_TABLE_ADD_COLUMN, TSDB_SUPER_TABLE); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 267: /* cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids */ + case 268: /* cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; @@ -3415,27 +3427,21 @@ static YYACTIONTYPE yy_reduce( setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 268: /* cmd ::= ALTER STABLE ids cpxName ALTER COLUMN LENGTH ids INTEGER */ + case 269: /* cmd ::= ALTER STABLE ids cpxName MODIFY COLUMN columnlist */ { - yymsp[-6].minor.yy0.n += yymsp[-5].minor.yy0.n; - - toTSDBType(yymsp[-1].minor.yy0.type); - SArray* K = tVariantListAppendToken(NULL, &yymsp[-1].minor.yy0, -1); - toTSDBType(yymsp[0].minor.yy0.type); - K = tVariantListAppendToken(K, &yymsp[0].minor.yy0, -1); - - SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-6].minor.yy0, K, NULL, TSDB_ALTER_TABLE_CHANGE_COLUMN, TSDB_SUPER_TABLE); + yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; + SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy285, NULL, TSDB_ALTER_TABLE_CHANGE_COLUMN, TSDB_SUPER_TABLE); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 269: /* cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist 
*/ + case 270: /* cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy285, NULL, TSDB_ALTER_TABLE_ADD_TAG_COLUMN, TSDB_SUPER_TABLE); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 270: /* cmd ::= ALTER STABLE ids cpxName DROP TAG ids */ + case 271: /* cmd ::= ALTER STABLE ids cpxName DROP TAG ids */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; @@ -3446,7 +3452,7 @@ static YYACTIONTYPE yy_reduce( setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 271: /* cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids */ + case 272: /* cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids */ { yymsp[-5].minor.yy0.n += yymsp[-4].minor.yy0.n; @@ -3460,13 +3466,32 @@ static YYACTIONTYPE yy_reduce( setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 272: /* cmd ::= KILL CONNECTION INTEGER */ + case 273: /* cmd ::= ALTER STABLE ids cpxName SET TAG ids EQ tagitem */ +{ + yymsp[-6].minor.yy0.n += yymsp[-5].minor.yy0.n; + + toTSDBType(yymsp[-2].minor.yy0.type); + SArray* A = tVariantListAppendToken(NULL, &yymsp[-2].minor.yy0, -1); + A = tVariantListAppend(A, &yymsp[0].minor.yy362, -1); + + SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-6].minor.yy0, NULL, A, TSDB_ALTER_TABLE_UPDATE_TAG_VAL, TSDB_SUPER_TABLE); + setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); +} + break; + case 274: /* cmd ::= ALTER STABLE ids cpxName MODIFY TAG columnlist */ +{ + yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; + SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy285, NULL, TSDB_ALTER_TABLE_MODIFY_TAG_COLUMN, TSDB_SUPER_TABLE); + setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); +} + break; + case 275: /* cmd ::= KILL CONNECTION INTEGER */ {setKillSql(pInfo, TSDB_SQL_KILL_CONNECTION, &yymsp[0].minor.yy0);} break; - case 273: /* cmd ::= KILL STREAM INTEGER COLON INTEGER */ + case 276: /* cmd ::= KILL STREAM INTEGER COLON INTEGER */ {yymsp[-2].minor.yy0.n += (yymsp[-1].minor.yy0.n + yymsp[0].minor.yy0.n); setKillSql(pInfo, TSDB_SQL_KILL_STREAM, &yymsp[-2].minor.yy0);} break; - case 274: /* cmd ::= KILL QUERY INTEGER COLON INTEGER */ + case 277: /* cmd ::= KILL QUERY INTEGER COLON INTEGER */ {yymsp[-2].minor.yy0.n += (yymsp[-1].minor.yy0.n + yymsp[0].minor.yy0.n); setKillSql(pInfo, TSDB_SQL_KILL_QUERY, &yymsp[-2].minor.yy0);} break; default: diff --git a/src/util/src/ttokenizer.c b/src/util/src/ttokenizer.c index 24852abbf2..0e661e84c5 100644 --- a/src/util/src/ttokenizer.c +++ b/src/util/src/ttokenizer.c @@ -218,7 +218,7 @@ static SKeyword keywordTable[] = { {"PARTITIONS", TK_PARTITIONS}, {"TOPIC", TK_TOPIC}, {"TOPICS", TK_TOPICS}, - {"LENGTH", TK_LENGTH} + {"MODIFY", TK_MODIFY} }; static const char isIdChar[] = { diff --git a/tests/script/general/parser/alter_column.sim b/tests/script/general/parser/alter_column.sim index 7f30498d06..fe109352d1 100644 --- a/tests/script/general/parser/alter_column.sim +++ b/tests/script/general/parser/alter_column.sim @@ -25,20 +25,49 @@ sql use $db ##### alter table test, simeplest case sql create table tb (ts timestamp, c1 int, c2 binary(10), c3 nchar(10)) sql insert into tb values (now, 1, "1", "1") -sql alter table tb alter column length c2 20; +sql alter table tb modify column c2 binary(20); if $rows != 0 then return -1 endi -sql alter table tb alter column length c3 20; +sql alter table tb modify 
column c3 nchar(20); if $rows != 0 then return -1 endi -sql create stable stb (ts timestamp, c1 int, c2 binary(10), c3 nchar(10)) tags(id int) -sql create table tb1 using stb tags(1) +sql create stable stb (ts timestamp, c1 int, c2 binary(10), c3 nchar(10)) tags(id1 int, id2 binary(10), id3 nchar(10)) +sql create table tb1 using stb tags(1, "a", "b") sql insert into tb1 values (now, 1, "1", "1") -sql alter stable stb alter column length c2 20; +sql alter stable stb modify column c2 binary(20); +if $rows != 0 then + return -1 +endi +sql alter table stb modify column c2 binary(30); +if $rows != 0 then + return -1 +endi +sql alter stable stb modify column c3 nchar(20); +if $rows != 0 then + return -1 +endi +sql alter table stb modify column c3 nchar(30); +if $rows != 0 then + return -1 +endi + +sql alter table stb modify tag id2 binary(11); +if $rows != 0 then + return -1 +endi +sql alter stable stb modify tag id2 binary(11); +if $rows != 0 then + return -1 +endi +sql alter table stb modify tag id3 nchar(11); +if $rows != 0 then + return -1 +endi +sql alter stable stb modify tag id3 nchar(11); if $rows != 0 then return -1 endi @@ -46,10 +75,44 @@ endi ##### ILLEGAL OPERATIONS # try dropping columns that are defined in metric -sql_error alter table tb alter column length c1 10; -sql_error alter stable tb alter column length c2 10; -sql_error alter table tb1 alter column length c2 10; -sql_error alter stable tb1 alter column length c2 10; +sql_error alter table tb modify column c1 binary(10); +sql_error alter table tb modify column c1 double; +sql_error alter table tb modify column c2 int; +sql_error alter table tb modify column c2 binary(10); +sql_error alter table tb modify column c2 binary(9); +sql_error alter table tb modify column c2 binary(-9); +sql_error alter table tb modify column c2 binary(0); +sql_error alter table tb modify column c2 binary(17000); +sql_error alter table tb modify column c2 nchar(30); +sql_error alter table tb modify column c3 double; +sql_error alter table tb modify column c3 nchar(10); +sql_error alter table tb modify column c3 nchar(0); +sql_error alter table tb modify column c3 nchar(-1); +sql_error alter table tb modify column c3 binary(80); +sql_error alter table tb modify column c3 nchar(17000); +sql_error alter table tb modify column c3 nchar(100), c2 binary(30); +sql_error alter table tb modify column c1 nchar(100), c2 binary(30); +sql_error alter stable tb modify column c2 binary(30); +sql_error alter table tb modify tag c2 binary(30); +sql_error alter table stb modify tag id2 binary(10); +sql_error alter table stb modify tag id2 nchar(30); +sql_error alter stable stb modify tag id2 binary(10); +sql_error alter stable stb modify tag id2 nchar(30); +sql_error alter table stb modify tag id3 nchar(10); +sql_error alter table stb modify tag id3 binary(30); +sql_error alter stable stb modify tag id3 nchar(10); +sql_error alter stable stb modify tag id3 binary(30); +sql_error alter stable stb modify tag id1 binary(30); +sql_error alter stable stb modify tag c1 binary(30); + + +sql_error alter table tb1 modify column c2 binary(30); +sql_error alter table tb1 modify column c3 nchar(30); +sql_error alter table tb1 modify tag id2 binary(30); +sql_error alter table tb1 modify tag id3 nchar(30); +sql_error alter stable tb1 modify tag id2 binary(30); +sql_error alter stable tb1 modify tag id3 nchar(30); +sql_error alter stable tb1 modify column c2 binary(30); system sh/exec.sh -n dnode1 -s stop -x SIGINT From 701ca6419de59d27af7ed5dc22a2c2d99c5c8b91 Mon Sep 17 00:00:00 
2001 From: dapan1121 <89396746@qq.com> Date: Wed, 2 Jun 2021 14:22:59 +0800 Subject: [PATCH 68/82] fix bug --- src/client/src/tscUtil.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index 8042f032c8..b5dfc956dd 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -1119,6 +1119,8 @@ void handleDownstreamOperator(SSqlObj** pSqlObjList, int32_t numOfUpstream, SQue SOperatorInfo* pSourceOperator = createDummyInputOperator(pSqlObjList[0], pSchema, numOfCol1, pFilterInfo, numOfFilterCols); + pOutput->precision = pSqlObjList[0]->res.precision; + SSchema* schema = NULL; if (px->numOfTables > 1) { SOperatorInfo** p = calloc(px->numOfTables, POINTER_BYTES); @@ -4477,4 +4479,4 @@ SNewVgroupInfo createNewVgroupInfo(SVgroupMsg *pVgroupMsg) { } return info; -} \ No newline at end of file +} From 3b867c876de1da897a04e908b5fef33c20e12a48 Mon Sep 17 00:00:00 2001 From: bryanchang0603 Date: Wed, 2 Jun 2021 15:07:17 +0800 Subject: [PATCH 69/82] [TD-4476] add manual test case --- .../taosdemoAllTest/testAutoCreateTemp.py | 135 ++++++++++++++++++ 1 file changed, 135 insertions(+) create mode 100644 tests/pytest/tools/taosdemoAllTest/testAutoCreateTemp.py diff --git a/tests/pytest/tools/taosdemoAllTest/testAutoCreateTemp.py b/tests/pytest/tools/taosdemoAllTest/testAutoCreateTemp.py new file mode 100644 index 0000000000..2d98b1c463 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/testAutoCreateTemp.py @@ -0,0 +1,135 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +from util.log import * +from util.cases import * +from util.sql import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root)-len("/build/bin")] + break + return buildPath + + def run(self): + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosd not found!") + else: + tdLog.info("taosd found in %s" % buildPath) + binPath = buildPath+ "/build/bin/" + + # insert: auto_create + os.system("%staosdemo -f tools/taosdemoAllTest/insert-drop-exist-auto-YYY.json -y " % binPath) # drop = yes, exist = yes, auto_create = yes + tdSql.execute('use db') + tdSql.query('show tables') + tdSql.checkRows(20) + + os.system("%staosdemo -f tools/taosdemoAllTest/insert-drop-exist-auto-YYN.json -y " % binPath) # drop = yes, exist = yes, auto_create = no + tdSql.execute('use db') + tdSql.query('show tables') + tdSql.checkRows(20) + + os.system("%staosdemo -f tools/taosdemoAllTest/insert-drop-exist-auto-YNY.json -y " % binPath) # drop = yes, exist = no, auto_create = yes + tdSql.execute('use 
db') + tdSql.query('show tables') + tdSql.checkRows(20) + + os.system("%staosdemo -f tools/taosdemoAllTest/insert-drop-exist-auto-YNN.json -y " % binPath) # drop = yes, exist = no, auto_create = no + tdSql.execute('use db') + tdSql.query('show tables') + tdSql.checkRows(20) + + tdSql.execute('drop database db') + tdSql.execute('create database db') + tdSql.execute('use db') + os.system("%staosdemo -f tools/taosdemoAllTest/insert-drop-exist-auto-NYY.json -y " % binPath) # drop = no, exist = yes, auto_create = yes + tdSql.query('show tables') + tdSql.checkRows(0) + + tdSql.execute('drop database db') + tdSql.execute('create database db') + tdSql.execute('use db') + os.system("%staosdemo -f tools/taosdemoAllTest/insert-drop-exist-auto-NYN.json -y " % binPath) # drop = no, exist = yes, auto_create = no + tdSql.execute('use db') + tdSql.query('show tables') + tdSql.checkRows(0) + + tdSql.execute('drop database db') + tdSql.execute('create database db') + tdSql.execute('use db') + os.system("%staosdemo -f tools/taosdemoAllTest/insert-drop-exist-auto-NNY.json -y " % binPath) # drop = no, exist = no, auto_create = yes + tdSql.execute('use db') + tdSql.query('show tables') + tdSql.checkRows(20) + + tdSql.execute('drop database db') + tdSql.execute('create database db') + tdSql.execute('use db') + os.system("%staosdemo -f tools/taosdemoAllTest/insert-drop-exist-auto-NNN.json -y " % binPath) # drop = no, exist = no, auto_create = no + tdSql.execute('use db') + tdSql.query('show tables') + tdSql.checkRows(20) + + #the following four test cases are for the exception cases for param auto_create_table + + tdSql.execute('drop database db') + tdSql.execute('create database db') + tdSql.execute('use db') + os.system("%staosdemo -f tools/taosdemoAllTest/insert-drop-exist-auto-NN123.json -y " % binPath) # drop = no, exist = no, auto_create = 123 + tdSql.execute('use db') + tdSql.query('show tables') + tdSql.checkRows(20) + + tdSql.execute('drop database db') + tdSql.execute('create database db') + tdSql.execute('use db') + os.system("%staosdemo -f tools/taosdemoAllTest/insert-drop-exist-auto-NY123.json -y " % binPath) # drop = no, exist = yes, auto_create = 123 + tdSql.execute('use db') + tdSql.query('show tables') + tdSql.checkRows(0) + + os.system("%staosdemo -f tools/taosdemoAllTest/insert-drop-exist-auto-YN123.json -y " % binPath) # drop = yes, exist = no, auto_create = 123 + tdSql.execute('use db') + tdSql.query('show tables') + tdSql.checkRows(20) + + os.system("%staosdemo -f tools/taosdemoAllTest/insert-drop-exist-auto-YY123.json -y " % binPath) # drop = yes, exist = yes, auto_create = 123 + tdSql.execute('use db') + tdSql.query('show tables') + tdSql.checkRows(20) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) From 2bcd1f5a597f0d5a4acc1d340845fed48e729186 Mon Sep 17 00:00:00 2001 From: dapan1121 <89396746@qq.com> Date: Wed, 2 Jun 2021 15:25:32 +0800 Subject: [PATCH 70/82] fix windows compile issue --- src/client/src/tscSQLParser.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index e41a9cb56b..a634362cf2 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -5410,7 +5410,7 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { } SColumnIndex columnIndex = COLUMN_INDEX_INITIALIZER; - SStrToken name = {.type = TK_STRING, .z = pItem->name, .n = 
strlen(pItem->name)}; + SStrToken name = {.type = TK_STRING, .z = pItem->name, .n = (uint32_t)strlen(pItem->name)}; if (getColumnIndexByName(pCmd, &name, pQueryInfo, &columnIndex) != TSDB_CODE_SUCCESS) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg17); } @@ -5447,7 +5447,7 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { } SColumnIndex columnIndex = COLUMN_INDEX_INITIALIZER; - SStrToken name = {.type = TK_STRING, .z = pItem->name, .n = strlen(pItem->name)}; + SStrToken name = {.type = TK_STRING, .z = pItem->name, .n = (uint32_t)strlen(pItem->name)}; if (getColumnIndexByName(pCmd, &name, pQueryInfo, &columnIndex) != TSDB_CODE_SUCCESS) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg17); } From 97e9070a21f83d071d320c55f3fddaef0fe9df22 Mon Sep 17 00:00:00 2001 From: bryanchang0603 Date: Wed, 2 Jun 2021 16:34:41 +0800 Subject: [PATCH 71/82] [TD-4476] test case finished --- tests/pytest/manualTest/manual_alter_comp.py | 106 ++++++++++++++++++ .../taosdemoAllTest/manual_block1_comp.json | 8 +- 2 files changed, 110 insertions(+), 4 deletions(-) create mode 100644 tests/pytest/manualTest/manual_alter_comp.py diff --git a/tests/pytest/manualTest/manual_alter_comp.py b/tests/pytest/manualTest/manual_alter_comp.py new file mode 100644 index 0000000000..1b6d1d1f81 --- /dev/null +++ b/tests/pytest/manualTest/manual_alter_comp.py @@ -0,0 +1,106 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import tdDnodes + +##TODO: auto test version is currently unsupported, need to come up with +# an auto test version in the future +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def getRootPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + print(selfPath) + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + print(projPath) + else: + projPath = selfPath[:selfPath.find("tests")] + print("test" + projPath) + + for root, dirs, files in os.walk(projPath): + if ('data' in dirs and 'sim' in root): + rootPath = root + + return rootPath + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root)-len("/build/bin")] + break + return buildPath + + + + def run(self): + dnodePath = self.getRootPath() + os.system(f'rm -rf {dnodePath}/data/* {dnodePath}/log/*') + tdSql.prepare() + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosd not found!") + else: + tdLog.info("taosd found in %s" % buildPath) + binPath = buildPath+ "/build/bin/" + + #comp is at 14 + tdSql.query('show databases') + tdSql.checkData(0,14,2) 
+ os.system("%staosdemo -f tools/taosdemoAllTest/manual_block1_comp.json" % binPath) + tdDnodes.stop(1) + print("default location is at /home/bryan/Documents/Github/TDengine/sim/dnode1/data/vnode") + input("please check disk usage for taosd. After checking, press enter") + + os.system(f'sudo rm -rf {dnodePath}/data/* {dnodePath}/log/*') + #print(f'rm -rf {dnodePath}/data/* {dnodePath}/log/*') #for showing the command ran + input("please check if the pervious data is being deleted. Then, press enter") + tdDnodes.start(1) + tdSql.prepare() + tdSql.query('show databases') + tdSql.checkData(0,14,2) + tdSql.execute('alter database db comp 0') + tdSql.query('show databases') + tdSql.checkData(0,14,0) + tdDnodes.stop(1) + tdDnodes.start(1) + os.system("%staosdemo -f tools/taosdemoAllTest/manual_block1_comp.json" % binPath) + print("default location is at /home/bryan/Documents/Github/TDengine/sim/dnode1/data") + input("please check disk usage for taosd. After checking, press enter") + + ##test result + # 2021/06/02 comp=2: file size = 6.4M comp=0 file size=399M. Test past + # each row entered is identical Tester - Baosheng Chang + + def stop(self): + tdSql.close() + tdLog.debug("%s alter block manual check finish" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/tools/taosdemoAllTest/manual_block1_comp.json b/tests/pytest/tools/taosdemoAllTest/manual_block1_comp.json index 3ced8272b2..a6ac674dd7 100644 --- a/tests/pytest/tools/taosdemoAllTest/manual_block1_comp.json +++ b/tests/pytest/tools/taosdemoAllTest/manual_block1_comp.json @@ -38,7 +38,7 @@ "childtable_prefix": "stb_", "auto_create_table": "no", "batch_create_tbl_num": 20, - "data_source": "rand", + "data_source": "sample", "insert_mode": "taosc", "insert_rows": 10000, "childtable_limit": 10, @@ -51,9 +51,9 @@ "timestamp_step": 1, "start_timestamp": "2019-10-01 00:00:00.000", "sample_format": "csv", - "sample_file": "./sample.csv", - "tags_file": "", - "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":10}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "sample_file": "./tools/taosdemoAllTest/sample.csv", + "tags_file": "", + "columns": [{"type": "INT", "count":3}, {"type": "DOUBLE", "count":3}, {"type": "BINARY", "len": 16, "count":1}, {"type": "BINARY", "len": 32, "count":1}, {"type": "BOOL"}], "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] }] }] From 3a12c502c7763bd231f2ff1f23dade5b2e45f5ef Mon Sep 17 00:00:00 2001 From: bryanchang0603 Date: Wed, 2 Jun 2021 16:38:57 +0800 Subject: [PATCH 72/82] [TD-4475] remove temp file --- .../taosdemoAllTest/testAutoCreateTemp.py | 135 ------------------ 1 file changed, 135 deletions(-) delete mode 100644 tests/pytest/tools/taosdemoAllTest/testAutoCreateTemp.py diff --git a/tests/pytest/tools/taosdemoAllTest/testAutoCreateTemp.py b/tests/pytest/tools/taosdemoAllTest/testAutoCreateTemp.py deleted file mode 100644 index 2d98b1c463..0000000000 --- a/tests/pytest/tools/taosdemoAllTest/testAutoCreateTemp.py +++ /dev/null @@ -1,135 +0,0 @@ -################################################################### -# Copyright (c) 2016 by TAOS Technologies, Inc. -# All rights reserved. -# -# This file is proprietary and confidential to TAOS Technologies. 
-# No part of this file may be reproduced, stored, transmitted, -# disclosed or used in any form or by any means other than as -# expressly provided by the written permission from Jianhui Tao -# -################################################################### - -# -*- coding: utf-8 -*- - -import sys -from util.log import * -from util.cases import * -from util.sql import * - - -class TDTestCase: - def init(self, conn, logSql): - tdLog.debug("start to execute %s" % __file__) - tdSql.init(conn.cursor(), logSql) - - def getBuildPath(self): - selfPath = os.path.dirname(os.path.realpath(__file__)) - - if ("community" in selfPath): - projPath = selfPath[:selfPath.find("community")] - else: - projPath = selfPath[:selfPath.find("tests")] - - for root, dirs, files in os.walk(projPath): - if ("taosd" in files): - rootRealPath = os.path.dirname(os.path.realpath(root)) - if ("packaging" not in rootRealPath): - buildPath = root[:len(root)-len("/build/bin")] - break - return buildPath - - def run(self): - buildPath = self.getBuildPath() - if (buildPath == ""): - tdLog.exit("taosd not found!") - else: - tdLog.info("taosd found in %s" % buildPath) - binPath = buildPath+ "/build/bin/" - - # insert: auto_create - os.system("%staosdemo -f tools/taosdemoAllTest/insert-drop-exist-auto-YYY.json -y " % binPath) # drop = yes, exist = yes, auto_create = yes - tdSql.execute('use db') - tdSql.query('show tables') - tdSql.checkRows(20) - - os.system("%staosdemo -f tools/taosdemoAllTest/insert-drop-exist-auto-YYN.json -y " % binPath) # drop = yes, exist = yes, auto_create = no - tdSql.execute('use db') - tdSql.query('show tables') - tdSql.checkRows(20) - - os.system("%staosdemo -f tools/taosdemoAllTest/insert-drop-exist-auto-YNY.json -y " % binPath) # drop = yes, exist = no, auto_create = yes - tdSql.execute('use db') - tdSql.query('show tables') - tdSql.checkRows(20) - - os.system("%staosdemo -f tools/taosdemoAllTest/insert-drop-exist-auto-YNN.json -y " % binPath) # drop = yes, exist = no, auto_create = no - tdSql.execute('use db') - tdSql.query('show tables') - tdSql.checkRows(20) - - tdSql.execute('drop database db') - tdSql.execute('create database db') - tdSql.execute('use db') - os.system("%staosdemo -f tools/taosdemoAllTest/insert-drop-exist-auto-NYY.json -y " % binPath) # drop = no, exist = yes, auto_create = yes - tdSql.query('show tables') - tdSql.checkRows(0) - - tdSql.execute('drop database db') - tdSql.execute('create database db') - tdSql.execute('use db') - os.system("%staosdemo -f tools/taosdemoAllTest/insert-drop-exist-auto-NYN.json -y " % binPath) # drop = no, exist = yes, auto_create = no - tdSql.execute('use db') - tdSql.query('show tables') - tdSql.checkRows(0) - - tdSql.execute('drop database db') - tdSql.execute('create database db') - tdSql.execute('use db') - os.system("%staosdemo -f tools/taosdemoAllTest/insert-drop-exist-auto-NNY.json -y " % binPath) # drop = no, exist = no, auto_create = yes - tdSql.execute('use db') - tdSql.query('show tables') - tdSql.checkRows(20) - - tdSql.execute('drop database db') - tdSql.execute('create database db') - tdSql.execute('use db') - os.system("%staosdemo -f tools/taosdemoAllTest/insert-drop-exist-auto-NNN.json -y " % binPath) # drop = no, exist = no, auto_create = no - tdSql.execute('use db') - tdSql.query('show tables') - tdSql.checkRows(20) - - #the following four test cases are for the exception cases for param auto_create_table - - tdSql.execute('drop database db') - tdSql.execute('create database db') - tdSql.execute('use db') - 
os.system("%staosdemo -f tools/taosdemoAllTest/insert-drop-exist-auto-NN123.json -y " % binPath) # drop = no, exist = no, auto_create = 123 - tdSql.execute('use db') - tdSql.query('show tables') - tdSql.checkRows(20) - - tdSql.execute('drop database db') - tdSql.execute('create database db') - tdSql.execute('use db') - os.system("%staosdemo -f tools/taosdemoAllTest/insert-drop-exist-auto-NY123.json -y " % binPath) # drop = no, exist = yes, auto_create = 123 - tdSql.execute('use db') - tdSql.query('show tables') - tdSql.checkRows(0) - - os.system("%staosdemo -f tools/taosdemoAllTest/insert-drop-exist-auto-YN123.json -y " % binPath) # drop = yes, exist = no, auto_create = 123 - tdSql.execute('use db') - tdSql.query('show tables') - tdSql.checkRows(20) - - os.system("%staosdemo -f tools/taosdemoAllTest/insert-drop-exist-auto-YY123.json -y " % binPath) # drop = yes, exist = yes, auto_create = 123 - tdSql.execute('use db') - tdSql.query('show tables') - tdSql.checkRows(20) - - def stop(self): - tdSql.close() - tdLog.success("%s successfully executed" % __file__) - - -tdCases.addWindows(__file__, TDTestCase()) -tdCases.addLinux(__file__, TDTestCase()) From e39c9eed5de45a0bd01c2402596af5ce5882289e Mon Sep 17 00:00:00 2001 From: bryanchang0603 Date: Wed, 2 Jun 2021 17:03:55 +0800 Subject: [PATCH 73/82] [TD-4476] modified test case --- tests/pytest/manualTest/manual_alter_comp.py | 32 ++++++++++++++++---- 1 file changed, 26 insertions(+), 6 deletions(-) diff --git a/tests/pytest/manualTest/manual_alter_comp.py b/tests/pytest/manualTest/manual_alter_comp.py index 1b6d1d1f81..6c3e0fc296 100644 --- a/tests/pytest/manualTest/manual_alter_comp.py +++ b/tests/pytest/manualTest/manual_alter_comp.py @@ -70,31 +70,51 @@ class TDTestCase: binPath = buildPath+ "/build/bin/" #comp is at 14 + #check disk usage when comp=2 tdSql.query('show databases') + tdSql.execute('alter database db blocks 3') # minimize the data in cache tdSql.checkData(0,14,2) os.system("%staosdemo -f tools/taosdemoAllTest/manual_block1_comp.json" % binPath) - tdDnodes.stop(1) print("default location is at /home/bryan/Documents/Github/TDengine/sim/dnode1/data/vnode") + print('comp = 2') input("please check disk usage for taosd. After checking, press enter") - + + #removing all data file os.system(f'sudo rm -rf {dnodePath}/data/* {dnodePath}/log/*') #print(f'rm -rf {dnodePath}/data/* {dnodePath}/log/*') #for showing the command ran input("please check if the pervious data is being deleted. Then, press enter") - tdDnodes.start(1) + + #check disk usage when comp=0 tdSql.prepare() tdSql.query('show databases') tdSql.checkData(0,14,2) tdSql.execute('alter database db comp 0') tdSql.query('show databases') tdSql.checkData(0,14,0) - tdDnodes.stop(1) - tdDnodes.start(1) os.system("%staosdemo -f tools/taosdemoAllTest/manual_block1_comp.json" % binPath) print("default location is at /home/bryan/Documents/Github/TDengine/sim/dnode1/data") + print('comp = 0') + input("please check disk usage for taosd. After checking, press enter") + + #removing all data file + os.system(f'sudo rm -rf {dnodePath}/data/* {dnodePath}/log/*') + #print(f'rm -rf {dnodePath}/data/* {dnodePath}/log/*') #for showing the command ran + input("please check if the pervious data is being deleted. 
Then, press enter") + + #check disk usage when comp=1 + tdSql.prepare() + tdSql.query('show databases') + tdSql.checkData(0,14,2) + tdSql.execute('alter database db comp 1') + tdSql.query('show databases') + tdSql.checkData(0,14,1) + os.system("%staosdemo -f tools/taosdemoAllTest/manual_block1_comp.json" % binPath) + print("default location is at /home/bryan/Documents/Github/TDengine/sim/dnode1/data") + print('comp = 1') input("please check disk usage for taosd. After checking, press enter") ##test result - # 2021/06/02 comp=2: file size = 6.4M comp=0 file size=399M. Test past + # 2021/06/02 comp=2:13M comp=1:57M comp=0:399M. Test past # each row entered is identical Tester - Baosheng Chang def stop(self): From dff270c96e5f99863077b5a4a7f6f36f2448dc0f Mon Sep 17 00:00:00 2001 From: Ping Xiao Date: Wed, 2 Jun 2021 17:52:05 +0800 Subject: [PATCH 74/82] [TD-4435]: add test case --- .../jdbc/TSDBPreparedStatementTest.java | 127 +++++++++++++++++- 1 file changed, 123 insertions(+), 4 deletions(-) diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBPreparedStatementTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBPreparedStatementTest.java index 52858e7f88..277ca447f5 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBPreparedStatementTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBPreparedStatementTest.java @@ -243,15 +243,15 @@ public class TSDBPreparedStatementTest { s.setNString(1, s2, 4); random = 10 + r.nextInt(5); - ArrayList s5 = new ArrayList(); + ArrayList s3 = new ArrayList(); for(int i = 0; i < numOfRows; i++) { if(i % random == 0) { - s5.add(null); + s3.add(null); }else{ - s5.add("test" + i % 10); + s3.add("test" + i % 10); } } - s.setString(2, s5, 10); + s.setString(2, s3, 10); s.columnDataAddBatch(); s.columnDataExecuteBatch(); @@ -268,7 +268,126 @@ public class TSDBPreparedStatementTest { } } + @Test + public void bindDataWithSingleTagTest() throws SQLException { + Statement stmt = conn.createStatement(); + String types[] = new String[] {"tinyint", "smallint", "int", "bigint", "bool", "float", "double", "binary(10)", "nchar(10)"}; + + for (String type : types) { + stmt.execute("drop table if exists weather_test"); + stmt.execute("create table weather_test(ts timestamp, f1 nchar(10), f2 binary(10)) tags (t " + type + ")"); + + int numOfRows = 1; + + TSDBPreparedStatement s = (TSDBPreparedStatement) conn.prepareStatement("insert into ? using weather_test tags(?) 
values(?, ?, ?)"); + Random r = new Random(); + s.setTableName("w1"); + + switch(type) { + case "tinyint": + case "smallint": + case "int": + case "bigint": + s.setTagInt(0, 1); + break; + case "float": + s.setTagFloat(0, 1.23f); + break; + case "double": + s.setTagDouble(0, 3.14159265); + break; + case "bool": + s.setTagBoolean(0, true); + break; + case "binary(10)": + s.setTagString(0, "test"); + break; + case "nchar(10)": + s.setTagNString(0, "test"); + break; + default: + break; + } + + + ArrayList ts = new ArrayList(); + for(int i = 0; i < numOfRows; i++) { + ts.add(System.currentTimeMillis() + i); + } + s.setTimestamp(0, ts); + + int random = 10 + r.nextInt(5); + ArrayList s2 = new ArrayList(); + for(int i = 0; i < numOfRows; i++) { + s2.add("分支" + i % 4); + } + s.setNString(1, s2, 10); + + random = 10 + r.nextInt(5); + ArrayList s3 = new ArrayList(); + for(int i = 0; i < numOfRows; i++) { + s3.add("test" + i % 4); + } + s.setString(2, s3, 10); + + s.columnDataAddBatch(); + s.columnDataExecuteBatch(); + s.columnDataCloseBatch(); + + String sql = "select * from weather_test"; + PreparedStatement statement = conn.prepareStatement(sql); + ResultSet rs = statement.executeQuery(); + int rows = 0; + while(rs.next()) { + rows++; + } + Assert.assertEquals(numOfRows, rows); + } + } + + + @Test + public void bindDataWithMultipleTagsTest() throws SQLException { + Statement stmt = conn.createStatement(); + + stmt.execute("drop table if exists weather_test"); + stmt.execute("create table weather_test(ts timestamp, f1 nchar(10), f2 binary(10)) tags (t1 int, t2 binary(10))"); + + int numOfRows = 1; + + TSDBPreparedStatement s = (TSDBPreparedStatement) conn.prepareStatement("insert into ? using weather_test tags(?,?) (ts, f2) values(?, ?)"); + s.setTableName("w2"); + s.setTagInt(0, 1); + s.setTagString(1, "test"); + + + ArrayList ts = new ArrayList(); + for(int i = 0; i < numOfRows; i++) { + ts.add(System.currentTimeMillis() + i); + } + s.setTimestamp(0, ts); + + ArrayList s2 = new ArrayList(); + for(int i = 0; i < numOfRows; i++) { + s2.add("test" + i % 4); + } + s.setString(1, s2, 10); + + s.columnDataAddBatch(); + s.columnDataExecuteBatch(); + s.columnDataCloseBatch(); + + String sql = "select * from weather_test"; + PreparedStatement statement = conn.prepareStatement(sql); + ResultSet rs = statement.executeQuery(); + int rows = 0; + while(rs.next()) { + rows++; + } + Assert.assertEquals(numOfRows, rows); + + } @Test public void setBoolean() throws SQLException { From b7ea0532588690be03aa217e71082e75fa307e1d Mon Sep 17 00:00:00 2001 From: lichuang Date: Wed, 2 Jun 2021 18:33:21 +0800 Subject: [PATCH 75/82] [TD-1920]fix compact bug,load dnode-globalcfg mode --- src/dnode/src/dnodeMain.c | 1 + 1 file changed, 1 insertion(+) diff --git a/src/dnode/src/dnodeMain.c b/src/dnode/src/dnodeMain.c index 64da11f312..140f830c9f 100644 --- a/src/dnode/src/dnodeMain.c +++ b/src/dnode/src/dnodeMain.c @@ -88,6 +88,7 @@ static SStep tsDnodeSteps[] = { static SStep tsDnodeCompactSteps[] = { {"dnode-tfile", tfInit, tfCleanup}, + {"dnode-globalcfg", taosCheckGlobalCfg, NULL}, {"dnode-storage", dnodeInitStorage, dnodeCleanupStorage}, {"dnode-eps", dnodeInitEps, dnodeCleanupEps}, {"dnode-wal", walInit, walCleanUp}, From b141d31939151c9903f1680bb0d96606b4f26ac1 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Wed, 2 Jun 2021 23:11:44 +0800 Subject: [PATCH 76/82] [TD-4507]: disable jdbc build on windows for appveyor. 
(#6352) --- .appveyor.yml | 98 ++++++++++++++++++++++++------------------------- cmake/env.inc | 14 ++++--- cmake/input.inc | 6 +++ 3 files changed, 63 insertions(+), 55 deletions(-) diff --git a/.appveyor.yml b/.appveyor.yml index ee1dc91767..e7802b3d0d 100644 --- a/.appveyor.yml +++ b/.appveyor.yml @@ -1,49 +1,49 @@ -version: 1.0.{build} -image: - - Visual Studio 2015 - - macos -environment: - matrix: - - ARCH: amd64 - - ARCH: x86 -matrix: - exclude: - - image: macos - ARCH: x86 -for: - - - matrix: - only: - - image: Visual Studio 2015 - clone_folder: c:\dev\TDengine - clone_depth: 1 - - init: - - call "C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\vcvarsall.bat" %ARCH% - - before_build: - - cd c:\dev\TDengine - - md build - - build_script: - - cd build - - cmake -G "NMake Makefiles" .. - - nmake install - - - matrix: - only: - - image: macos - clone_depth: 1 - - build_script: - - mkdir debug - - cd debug - - cmake .. > /dev/null - - make > /dev/null -notifications: -- provider: Email - to: - - sangshuduo@gmail.com - on_build_success: true - on_build_failure: true - on_build_status_changed: true +version: 1.0.{build} +image: + - Visual Studio 2015 + - macos +environment: + matrix: + - ARCH: amd64 + - ARCH: x86 +matrix: + exclude: + - image: macos + ARCH: x86 +for: + - + matrix: + only: + - image: Visual Studio 2015 + clone_folder: c:\dev\TDengine + clone_depth: 1 + + init: + - call "C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\vcvarsall.bat" %ARCH% + + before_build: + - cd c:\dev\TDengine + - md build + + build_script: + - cd build + - cmake -G "NMake Makefiles" .. -DBUILD_JDBC=false + - nmake install + - + matrix: + only: + - image: macos + clone_depth: 1 + + build_script: + - mkdir debug + - cd debug + - cmake .. > /dev/null + - make > /dev/null +notifications: +- provider: Email + to: + - sangshuduo@gmail.com + on_build_success: true + on_build_failure: true + on_build_status_changed: true diff --git a/cmake/env.inc b/cmake/env.inc index 3989993953..6c1ce8fd89 100755 --- a/cmake/env.inc +++ b/cmake/env.inc @@ -14,11 +14,13 @@ MESSAGE(STATUS "Project binary files output path: " ${PROJECT_BINARY_DIR}) MESSAGE(STATUS "Project executable files output path: " ${EXECUTABLE_OUTPUT_PATH}) MESSAGE(STATUS "Project library files output path: " ${LIBRARY_OUTPUT_PATH}) -FIND_PROGRAM(TD_MVN_INSTALLED mvn) -IF (TD_MVN_INSTALLED) - MESSAGE(STATUS "MVN is installed and JDBC will be compiled") -ELSE () - MESSAGE(STATUS "MVN is not installed and JDBC is not compiled") +IF (TD_BUILD_JDBC) + FIND_PROGRAM(TD_MVN_INSTALLED mvn) + IF (TD_MVN_INSTALLED) + MESSAGE(STATUS "MVN is installed and JDBC will be compiled") + ELSE () + MESSAGE(STATUS "MVN is not installed and JDBC is not compiled") + ENDIF () ENDIF () # @@ -55,4 +57,4 @@ ELSE () SET(CMAKE_BUILD_TYPE "Debug") MESSAGE(STATUS "Build Debug Version as default") ENDIF() -ENDIF () \ No newline at end of file +ENDIF () diff --git a/cmake/input.inc b/cmake/input.inc index 543114ad09..9b72a35d94 100755 --- a/cmake/input.inc +++ b/cmake/input.inc @@ -77,3 +77,9 @@ IF (${JEMALLOC_ENABLED} MATCHES "true") SET(TD_JEMALLOC_ENABLED TRUE) MESSAGE(STATUS "build with jemalloc enabled") ENDIF () + +SET(TD_BUILD_JDBC TRUE) + +IF (${BUILD_JDBC} MATCHES "false") + SET(TD_BUILD_JDBC FALSE) +ENDIF () From d389df2d848c94011f0e626bd93860db35f4fe99 Mon Sep 17 00:00:00 2001 From: lichuang Date: Thu, 3 Jun 2021 09:41:03 +0800 Subject: [PATCH 77/82] [TD-1920]fix compact bug,load necessary modules --- src/dnode/src/dnodeMain.c | 4 ++++ 1 file changed, 4 
insertions(+) diff --git a/src/dnode/src/dnodeMain.c b/src/dnode/src/dnodeMain.c index 140f830c9f..a4ff9df203 100644 --- a/src/dnode/src/dnodeMain.c +++ b/src/dnode/src/dnodeMain.c @@ -90,11 +90,15 @@ static SStep tsDnodeCompactSteps[] = { {"dnode-tfile", tfInit, tfCleanup}, {"dnode-globalcfg", taosCheckGlobalCfg, NULL}, {"dnode-storage", dnodeInitStorage, dnodeCleanupStorage}, + {"dnode-cfg", dnodeInitCfg, dnodeCleanupCfg}, {"dnode-eps", dnodeInitEps, dnodeCleanupEps}, + {"dnode-minfos", dnodeInitMInfos, dnodeCleanupMInfos}, {"dnode-wal", walInit, walCleanUp}, + {"dnode-sync", syncInit, syncCleanUp}, {"dnode-mread", dnodeInitMRead, NULL}, {"dnode-mwrite", dnodeInitMWrite, NULL}, {"dnode-mpeer", dnodeInitMPeer, NULL}, + {"dnode-vnodes", dnodeInitVnodes, dnodeCleanupVnodes}, {"dnode-modules", dnodeInitModules, dnodeCleanupModules}, }; From e61ee9fb97915aba100b90a3e15cbe4b527669cb Mon Sep 17 00:00:00 2001 From: lichuang Date: Thu, 3 Jun 2021 11:42:36 +0800 Subject: [PATCH 78/82] [TD-4394]add modify column,tag width implementation --- src/mnode/src/mnodeTable.c | 101 ++++++++++++++++++++----------------- 1 file changed, 54 insertions(+), 47 deletions(-) diff --git a/src/mnode/src/mnodeTable.c b/src/mnode/src/mnodeTable.c index 5b699c5e24..be53d353c9 100644 --- a/src/mnode/src/mnodeTable.c +++ b/src/mnode/src/mnodeTable.c @@ -93,6 +93,9 @@ static int32_t mnodeProcessAlterTableMsg(SMnodeMsg *pMsg); static void mnodeProcessAlterTableRsp(SRpcMsg *rpcMsg); static int32_t mnodeFindSuperTableColumnIndex(SSTableObj *pStable, char *colName); +static int32_t mnodeChangeSuperTableColumn(SMnodeMsg *pMsg); +static int32_t mnodeChangeSuperTableTag(SMnodeMsg *pMsg); +static int32_t mnodeChangeNormalTableColumn(SMnodeMsg *pMsg); static void mnodeDestroyChildTable(SCTableObj *pTable) { tfree(pTable->info.tableId); @@ -1457,31 +1460,52 @@ static int32_t mnodeChangeSuperTableColumnCb(SMnodeMsg *pMsg, int32_t code) { return code; } -static int32_t mnodeChangeSuperTableColumn(SMnodeMsg *pMsg, char *oldName, char *newName) { +static int32_t mnodeChangeSuperTableColumn(SMnodeMsg *pMsg) { + SAlterTableMsg *pAlter = pMsg->rpcMsg.pCont; + char* name = pAlter->schema[0].name; SSTableObj *pStable = (SSTableObj *)pMsg->pTable; - int32_t col = mnodeFindSuperTableColumnIndex(pStable, oldName); + int32_t col = mnodeFindSuperTableColumnIndex(pStable, name); if (col < 0) { - mError("msg:%p, app:%p stable:%s, change column, oldName:%s, newName:%s", pMsg, pMsg->rpcMsg.ahandle, - pStable->info.tableId, oldName, newName); + mError("msg:%p, app:%p stable:%s, change column, name:%s", pMsg, pMsg->rpcMsg.ahandle, + pStable->info.tableId, name); return TSDB_CODE_MND_FIELD_NOT_EXIST; } - // int32_t rowSize = 0; - uint32_t len = (uint32_t)strlen(newName); - if (len >= TSDB_COL_NAME_LEN) { - return TSDB_CODE_MND_COL_NAME_TOO_LONG; - } - - if (mnodeFindSuperTableColumnIndex(pStable, newName) >= 0) { - return TSDB_CODE_MND_FIELD_ALREAY_EXIST; - } - // update SSchema *schema = (SSchema *) (pStable->schema + col); - tstrncpy(schema->name, newName, sizeof(schema->name)); + ASSERT(schema->type == TSDB_DATA_TYPE_BINARY || schema->type == TSDB_DATA_TYPE_NCHAR); + schema->bytes = pAlter->schema[0].bytes; + mInfo("msg:%p, app:%p stable %s, start to modify column %s len to %d", pMsg, pMsg->rpcMsg.ahandle, pStable->info.tableId, + name, schema->bytes); - mInfo("msg:%p, app:%p stable %s, start to modify column %s to %s", pMsg, pMsg->rpcMsg.ahandle, pStable->info.tableId, - oldName, newName); + SSdbRow row = { + .type = SDB_OPER_GLOBAL, + .pTable = 
tsSuperTableSdb, + .pObj = pStable, + .pMsg = pMsg, + .fpRsp = mnodeChangeSuperTableColumnCb + }; + + return sdbUpdateRow(&row); +} + +static int32_t mnodeChangeSuperTableTag(SMnodeMsg *pMsg) { + SAlterTableMsg *pAlter = pMsg->rpcMsg.pCont; + char* name = pAlter->schema[0].name; + SSTableObj *pStable = (SSTableObj *)pMsg->pTable; + int32_t col = mnodeFindSuperTableTagIndex(pStable, name); + if (col < 0) { + mError("msg:%p, app:%p stable:%s, change column, name:%s", pMsg, pMsg->rpcMsg.ahandle, + pStable->info.tableId, name); + return TSDB_CODE_MND_FIELD_NOT_EXIST; + } + + // update + SSchema *schema = (SSchema *) (pStable->schema + col); + ASSERT(schema->type == TSDB_DATA_TYPE_BINARY || schema->type == TSDB_DATA_TYPE_NCHAR); + schema->bytes = pAlter->schema[0].bytes; + mInfo("msg:%p, app:%p stable %s, start to modify tag len %s to %d", pMsg, pMsg->rpcMsg.ahandle, pStable->info.tableId, + name, schema->bytes); SSdbRow row = { .type = SDB_OPER_GLOBAL, @@ -2355,31 +2379,23 @@ static int32_t mnodeDropNormalTableColumn(SMnodeMsg *pMsg, char *colName) { return sdbUpdateRow(&row); } -static int32_t mnodeChangeNormalTableColumn(SMnodeMsg *pMsg, char *oldName, char *newName) { +static int32_t mnodeChangeNormalTableColumn(SMnodeMsg *pMsg) { + SAlterTableMsg *pAlter = pMsg->rpcMsg.pCont; + char* name = pAlter->schema[0].name; SCTableObj *pTable = (SCTableObj *)pMsg->pTable; - int32_t col = mnodeFindNormalTableColumnIndex(pTable, oldName); + int32_t col = mnodeFindNormalTableColumnIndex(pTable, name); if (col < 0) { - mError("msg:%p, app:%p ctable:%s, change column, oldName: %s, newName: %s", pMsg, pMsg->rpcMsg.ahandle, - pTable->info.tableId, oldName, newName); + mError("msg:%p, app:%p ctable:%s, change column, name: %s", pMsg, pMsg->rpcMsg.ahandle, + pTable->info.tableId, name); return TSDB_CODE_MND_FIELD_NOT_EXIST; } - // int32_t rowSize = 0; - uint32_t len = (uint32_t)strlen(newName); - if (len >= TSDB_COL_NAME_LEN) { - return TSDB_CODE_MND_COL_NAME_TOO_LONG; - } - - if (mnodeFindNormalTableColumnIndex(pTable, newName) >= 0) { - return TSDB_CODE_MND_FIELD_ALREAY_EXIST; - } - - // update SSchema *schema = (SSchema *) (pTable->schema + col); - tstrncpy(schema->name, newName, sizeof(schema->name)); + ASSERT(schema->type == TSDB_DATA_TYPE_BINARY || schema->type == TSDB_DATA_TYPE_NCHAR); + schema->bytes = pAlter->schema[0].bytes; - mInfo("msg:%p, app:%p ctable %s, start to modify column %s to %s", pMsg, pMsg->rpcMsg.ahandle, pTable->info.tableId, - oldName, newName); + mInfo("msg:%p, app:%p ctable %s, start to modify column %s len to %d", pMsg, pMsg->rpcMsg.ahandle, pTable->info.tableId, + name, schema->bytes); SSdbRow row = { .type = SDB_OPER_GLOBAL, @@ -3214,15 +3230,9 @@ static int32_t mnodeProcessAlterTableMsg(SMnodeMsg *pMsg) { } else if (pAlter->type == TSDB_ALTER_TABLE_DROP_COLUMN) { code = mnodeDropSuperTableColumn(pMsg, pAlter->schema[0].name); } else if (pAlter->type == TSDB_ALTER_TABLE_CHANGE_COLUMN) { - //code = mnodeChangeSuperTableColumn(pMsg, pAlter->schema[0].name, pAlter->schema[1].name); - (void)mnodeChangeSuperTableColumn; - mError("change table[%s] column[%s] length to [%d] is not processed", pAlter->tableFname, pAlter->schema[0].name, pAlter->schema[0].bytes); - code = TSDB_CODE_SUCCESS; + code = mnodeChangeSuperTableColumn(pMsg); } else if (pAlter->type == TSDB_ALTER_TABLE_MODIFY_TAG_COLUMN) { - //code = mnodeChangeSuperTableColumn(pMsg, pAlter->schema[0].name, pAlter->schema[1].name); - (void)mnodeChangeSuperTableColumn; - mError("change table[%s] tag[%s] length to [%d] is not 
processed", pAlter->tableFname, pAlter->schema[0].name, pAlter->schema[0].bytes); - code = TSDB_CODE_SUCCESS; + code = mnodeChangeSuperTableTag(pMsg); } else { } } else { @@ -3234,10 +3244,7 @@ static int32_t mnodeProcessAlterTableMsg(SMnodeMsg *pMsg) { } else if (pAlter->type == TSDB_ALTER_TABLE_DROP_COLUMN) { code = mnodeDropNormalTableColumn(pMsg, pAlter->schema[0].name); } else if (pAlter->type == TSDB_ALTER_TABLE_CHANGE_COLUMN) { - //code = mnodeChangeNormalTableColumn(pMsg, pAlter->schema[0].name, pAlter->schema[1].name); - (void)mnodeChangeNormalTableColumn; - mError("change table[%s] column[%s] length to [%d] is not processed", pAlter->tableFname, pAlter->schema[0].name, pAlter->schema[0].bytes); - code = TSDB_CODE_SUCCESS; + code = mnodeChangeNormalTableColumn(pMsg); } else { } } From 622b711ad190b2b628d47b7449d502c885f1ca98 Mon Sep 17 00:00:00 2001 From: Elias Soong Date: Thu, 3 Jun 2021 17:00:30 +0800 Subject: [PATCH 79/82] [TD-4533] : describe C/C++ & Java version of Prepare Statement api. --- .../cn/08.connector/01.java/docs.md | 78 +++++++++++++++++++ documentation20/cn/08.connector/docs.md | 55 +++++++++++-- 2 files changed, 127 insertions(+), 6 deletions(-) diff --git a/documentation20/cn/08.connector/01.java/docs.md b/documentation20/cn/08.connector/01.java/docs.md index 5eec33e2f1..4fc10b542b 100644 --- a/documentation20/cn/08.connector/01.java/docs.md +++ b/documentation20/cn/08.connector/01.java/docs.md @@ -266,7 +266,9 @@ while(resultSet.next()){ > 查询和操作关系型数据库一致,使用下标获取返回字段内容时从 1 开始,建议使用字段名称获取。 ### 处理异常 + 在报错后,通过SQLException可以获取到错误的信息和错误码: + ```java try (Statement statement = connection.createStatement()) { // executeQuery @@ -279,11 +281,87 @@ try (Statement statement = connection.createStatement()) { e.printStackTrace(); } ``` + JDBC连接器可能报错的错误码包括3种:JDBC driver本身的报错(错误码在0x2301到0x2350之间),JNI方法的报错(错误码在0x2351到0x2400之间),TDengine其他功能模块的报错。 具体的错误码请参考: * https://github.com/taosdata/TDengine/blob/develop/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java * https://github.com/taosdata/TDengine/blob/develop/src/inc/taoserror.h +### 通过参数绑定写入数据 + +从 2.1.2.0 版本开始,TDengine 的 **JDBC-JNI** 实现大幅改进了参数绑定方式对数据写入(INSERT)场景的支持。采用这种方式写入数据时,能避免 SQL 语法解析的资源消耗,从而在很多情况下显著提升写入性能。(注意:**JDBC-RESTful** 实现并不提供参数绑定这种使用方式。) + +```java +Statement stmt = conn.createStatement(); +Random r = new Random(); + +// INSERT 语句中,VALUES 部分允许指定具体的数据列;如果采取自动建表,则 TAGS 部分需要设定全部 TAGS 列的参数值: +TSDBPreparedStatement s = (TSDBPreparedStatement) conn.prepareStatement("insert into ? using weather_test tags (?, ?) 
(ts, c1, c2) values(?, ?, ?)"); + +// 设定数据表名: +s.setTableName("w1"); +// 设定 TAGS 取值: +s.setTagInt(0, r.nextInt(10)); +s.setTagString(1, "Beijing"); + +int numOfRows = 10; + +// VALUES 部分以逐列的方式进行设置: +ArrayList ts = new ArrayList<>(); +for (int i = 0; i < numOfRows; i++){ + ts.add(System.currentTimeMillis() + i); +} +s.setTimestamp(0, ts); + +ArrayList s1 = new ArrayList<>(); +for (int i = 0; i < numOfRows; i++){ + s1.add(r.nextInt(100)); +} +s.setInt(1, s1); + +ArrayList s2 = new ArrayList<>(); +for (int i = 0; i < numOfRows; i++){ + s2.add("test" + r.nextInt(100)); +} +s.setString(2, s2, 10); + +// AddBatch 之后,可以再设定新的表名、TAGS、VALUES 取值,这样就能实现一次执行向多个数据表写入: +s.columnDataAddBatch(); +// 执行语句: +s.columnDataExecuteBatch(); +// 执行完毕,释放资源: +s.columnDataCloseBatch(); +``` + +用于设定 TAGS 取值的方法总共有: +```java +public void setTagNull(int index, int type) +public void setTagBoolean(int index, boolean value) +public void setTagInt(int index, int value) +public void setTagByte(int index, byte value) +public void setTagShort(int index, short value) +public void setTagLong(int index, long value) +public void setTagTimestamp(int index, long value) +public void setTagFloat(int index, float value) +public void setTagDouble(int index, double value) +public void setTagString(int index, String value) +public void setTagNString(int index, String value) +``` + +用于设定 VALUES 数据列的取值的方法总共有: +```java +public void setInt(int columnIndex, ArrayList list) throws SQLException +public void setFloat(int columnIndex, ArrayList list) throws SQLException +public void setTimestamp(int columnIndex, ArrayList list) throws SQLException +public void setLong(int columnIndex, ArrayList list) throws SQLException +public void setDouble(int columnIndex, ArrayList list) throws SQLException +public void setBoolean(int columnIndex, ArrayList list) throws SQLException +public void setByte(int columnIndex, ArrayList list) throws SQLException +public void setShort(int columnIndex, ArrayList list) throws SQLException +public void setString(int columnIndex, ArrayList list, int size) throws SQLException +public void setNString(int columnIndex, ArrayList list, int size) throws SQLException +``` + ### 订阅 #### 创建 diff --git a/documentation20/cn/08.connector/docs.md b/documentation20/cn/08.connector/docs.md index 9484917993..f3e85cc3dd 100644 --- a/documentation20/cn/08.connector/docs.md +++ b/documentation20/cn/08.connector/docs.md @@ -291,9 +291,25 @@ typedef struct taosField { TDengine的异步API均采用非阻塞调用模式。应用程序可以用多线程同时打开多张表,并可以同时对每张打开的表进行查询或者插入操作。需要指出的是,**客户端应用必须确保对同一张表的操作完全串行化**,即对同一个表的插入或查询操作未完成时(未返回时),不能够执行第二个插入或查询操作。 -### 参数绑定API + +### 参数绑定 API -除了直接调用 `taos_query` 进行查询,TDengine也提供了支持参数绑定的Prepare API,与 MySQL 一样,这些API目前也仅支持用问号`?`来代表待绑定的参数,具体如下: +除了直接调用 `taos_query` 进行查询,TDengine 也提供了支持参数绑定的 Prepare API,与 MySQL 一样,这些 API 目前也仅支持用问号 `?` 来代表待绑定的参数。 + +从 2.1.1.0 和 2.1.2.0 版本开始,TDengine 大幅改进了参数绑定接口对数据写入(INSERT)场景的支持。这样在通过参数绑定接口写入数据时,就避免了 SQL 语法解析的资源消耗,从而在绝大多数情况下显著提升写入性能。此时的典型操作步骤如下: +1. 调用 `taos_stmt_init` 创建参数绑定对象; +2. 调用 `taos_stmt_prepare` 解析 INSERT 语句; +3. 如果 INSERT 语句中预留了表名但没有预留 TAGS,那么调用 `taos_stmt_set_tbname` 来设置表名; +4. 如果 INSERT 语句中既预留了表名又预留了 TAGS(例如 INSERT 语句采取的是自动建表的方式),那么调用 `taos_stmt_set_tbname_tags` 来设置表名和 TAGS 的值; +5. 调用 `taos_stmt_bind_param_batch` 以多列的方式设置 VALUES 的值; +6. 调用 `taos_stmt_add_batch` 把当前绑定的参数加入批处理; +7. 可以重复第 3~6 步,为批处理加入更多的数据行; +8. 调用 `taos_stmt_execute` 执行已经准备好的批处理指令; +9. 
执行完毕,调用 `taos_stmt_close` 释放所有资源。 + +除 C/C++ 语言外,TDengine 的 Java 语言 JNI Connector 也提供参数绑定接口支持,具体请另外参见:[参数绑定接口的 Java 用法](https://www.taosdata.com/cn/documentation/connector/java#stmt-java)。 + +接口相关的具体函数如下(也可以参考 [apitest.c](https://github.com/taosdata/TDengine/blob/develop/tests/examples/c/apitest.c) 文件中使用对应函数的方式): - `TAOS_STMT* taos_stmt_init(TAOS *taos)` @@ -301,11 +317,12 @@ TDengine的异步API均采用非阻塞调用模式。应用程序可以用多线 - `int taos_stmt_prepare(TAOS_STMT *stmt, const char *sql, unsigned long length)` - 解析一条sql语句,将解析结果和参数信息绑定到stmt上,如果参数length大于0,将使用此参数作为sql语句的长度,如等于0,将自动判断sql语句的长度。 + 解析一条 SQL 语句,将解析结果和参数信息绑定到 stmt 上,如果参数 length 大于 0,将使用此参数作为 SQL 语句的长度,如等于 0,将自动判断 SQL 语句的长度。 - `int taos_stmt_bind_param(TAOS_STMT *stmt, TAOS_BIND *bind)` - 进行参数绑定,bind指向一个数组,需保证此数组的元素数量和顺序与sql语句中的参数完全一致。TAOS_BIND 的使用方法与 MySQL中的 MYSQL_BIND 一致,具体定义如下: + 不如 `taos_stmt_bind_param_batch` 效率高,但可以支持非 INSERT 类型的 SQL 语句。 + 进行参数绑定,bind 指向一个数组(代表所要绑定的一行数据),需保证此数组中的元素数量和顺序与 SQL 语句中的参数完全一致。TAOS_BIND 的使用方法与 MySQL 中的 MYSQL_BIND 一致,具体定义如下: ```c typedef struct TAOS_BIND { @@ -319,9 +336,35 @@ typedef struct TAOS_BIND { } TAOS_BIND; ``` +- `int taos_stmt_set_tbname(TAOS_STMT* stmt, const char* name)` + + (2.1.1.0 版本新增) + 当 SQL 语句中的表名使用了 `?` 占位时,可以使用此函数绑定一个具体的表名。 + +- `int taos_stmt_set_tbname_tags(TAOS_STMT* stmt, const char* name, TAOS_BIND* tags)` + + (2.1.2.0 版本新增) + 当 SQL 语句中的表名和 TAGS 都使用了 `?` 占位时,可以使用此函数绑定具体的表名和具体的 TAGS 取值。最典型的使用场景是使用了自动建表功能的 INSERT 语句(目前版本不支持指定具体的 TAGS 列)。tags 参数中的列数量需要与 SQL 语句中要求的 TAGS 数量完全一致。 + +- `int taos_stmt_bind_param_batch(TAOS_STMT* stmt, TAOS_MULTI_BIND* bind)` + + (2.1.1.0 版本新增) + 以多列的方式传递待绑定的数据,需要保证这里传递的数据列的顺序与 SQL 语句中的 VALUES 参数完全一致。如果这里传递的数据列数少于 SQL 语句的要求,可以再次调用 `taos_stmt_bind_param_batch` 函数来补充数据列,直到列数与 SQL 语句的要求一致为止。TAOS_MULTI_BIND 的具体定义如下: + +```c +typedef struct TAOS_MULTI_BIND { + int buffer_type; + void * buffer; + uintptr_t buffer_length; + int32_t * length; + char * is_null; + int num; // 列的个数,即 buffer 中的参数个数 +} TAOS_MULTI_BIND; +``` + - `int taos_stmt_add_batch(TAOS_STMT *stmt)` - 将当前绑定的参数加入批处理中,调用此函数后,可以再次调用`taos_stmt_bind_param`绑定新的参数。需要注意,此函数仅支持 insert/import 语句,如果是select等其他SQL语句,将返回错误。 + 将当前绑定的参数加入批处理中,调用此函数后,可以再次调用 `taos_stmt_bind_param` 或 `taos_stmt_bind_param_batch` 绑定新的参数。需要注意,此函数仅支持 INSERT/IMPORT 语句,如果是 SELECT 等其他 SQL 语句,将返回错误。 - `int taos_stmt_execute(TAOS_STMT *stmt)` @@ -329,7 +372,7 @@ typedef struct TAOS_BIND { - `TAOS_RES* taos_stmt_use_result(TAOS_STMT *stmt)` - 获取语句的结果集。结果集的使用方式与非参数化调用时一致,使用完成后,应对此结果集调用 `taos_free_result`以释放资源。 + 获取语句的结果集。结果集的使用方式与非参数化调用时一致,使用完成后,应对此结果集调用 `taos_free_result` 以释放资源。 - `int taos_stmt_close(TAOS_STMT *stmt)` From f7190b2fdc416f045e43b784099f93552ad55489 Mon Sep 17 00:00:00 2001 From: lichuang Date: Thu, 3 Jun 2021 17:48:25 +0800 Subject: [PATCH 80/82] [TD-1920]fix compact bug:sync sdb to disk when compact end --- src/dnode/src/dnodeMain.c | 6 ++++++ src/mnode/src/mnodeSdb.c | 7 ++++--- 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/src/dnode/src/dnodeMain.c b/src/dnode/src/dnodeMain.c index a4ff9df203..cf633502c1 100644 --- a/src/dnode/src/dnodeMain.c +++ b/src/dnode/src/dnodeMain.c @@ -95,11 +95,17 @@ static SStep tsDnodeCompactSteps[] = { {"dnode-minfos", dnodeInitMInfos, dnodeCleanupMInfos}, {"dnode-wal", walInit, walCleanUp}, {"dnode-sync", syncInit, syncCleanUp}, + {"dnode-vread", dnodeInitVRead, dnodeCleanupVRead}, + {"dnode-vwrite", dnodeInitVWrite, dnodeCleanupVWrite}, + {"dnode-vmgmt", dnodeInitVMgmt, dnodeCleanupVMgmt}, {"dnode-mread", dnodeInitMRead, NULL}, {"dnode-mwrite", dnodeInitMWrite, NULL}, {"dnode-mpeer", 
dnodeInitMPeer, NULL}, {"dnode-vnodes", dnodeInitVnodes, dnodeCleanupVnodes}, {"dnode-modules", dnodeInitModules, dnodeCleanupModules}, + {"dnode-mread", NULL, dnodeCleanupMRead}, + {"dnode-mwrite", NULL, dnodeCleanupMWrite}, + {"dnode-mpeer", NULL, dnodeCleanupMPeer}, }; static int dnodeCreateDir(const char *dir) { diff --git a/src/mnode/src/mnodeSdb.c b/src/mnode/src/mnodeSdb.c index e9acd5b9bc..a8bcdfa59e 100644 --- a/src/mnode/src/mnodeSdb.c +++ b/src/mnode/src/mnodeSdb.c @@ -1176,9 +1176,10 @@ int32_t mnodeCompactWal() { return -1; } - // close wal - walFsync(tsSdbMgmt.wal, true); - walClose(tsSdbMgmt.wal); + // close sdb and sync to disk + //walFsync(tsSdbMgmt.wal, true); + //walClose(tsSdbMgmt.wal); + sdbCleanUp(); // rename old wal to wal_bak if (taosRename(tsMnodeDir, tsMnodeBakDir) != 0) { From 33c0f3b58af4ec2a4ee18967c24cc72ba3bc53ab Mon Sep 17 00:00:00 2001 From: Elias Soong Date: Thu, 3 Jun 2021 17:56:04 +0800 Subject: [PATCH 81/82] [TD-4533] : fix description about func "taos_stmt_bind_param_batch". --- documentation20/cn/08.connector/docs.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/documentation20/cn/08.connector/docs.md b/documentation20/cn/08.connector/docs.md index f3e85cc3dd..60f8df95f8 100644 --- a/documentation20/cn/08.connector/docs.md +++ b/documentation20/cn/08.connector/docs.md @@ -349,7 +349,7 @@ typedef struct TAOS_BIND { - `int taos_stmt_bind_param_batch(TAOS_STMT* stmt, TAOS_MULTI_BIND* bind)` (2.1.1.0 版本新增) - 以多列的方式传递待绑定的数据,需要保证这里传递的数据列的顺序与 SQL 语句中的 VALUES 参数完全一致。如果这里传递的数据列数少于 SQL 语句的要求,可以再次调用 `taos_stmt_bind_param_batch` 函数来补充数据列,直到列数与 SQL 语句的要求一致为止。TAOS_MULTI_BIND 的具体定义如下: + 以多列的方式传递待绑定的数据,需要保证这里传递的数据列的顺序、列的数量与 SQL 语句中的 VALUES 参数完全一致。TAOS_MULTI_BIND 的具体定义如下: ```c typedef struct TAOS_MULTI_BIND { From b1a0b834a19e0d6c09f2b9b457bccc5046549e2c Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Thu, 3 Jun 2021 10:03:19 +0000 Subject: [PATCH 82/82] [TD-4043]fix bug in session window --- src/query/src/qExecutor.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index 25e7e446bd..b7512ac1f0 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -1347,7 +1347,7 @@ static void doSessionWindowAggImpl(SOperatorInfo* pOperator, SSWindowOperatorInf pInfo->start = j; } else if (tsList[j] - pInfo->prevTs <= gap) { pInfo->curWindow.ekey = tsList[j]; - //pInfo->prevTs = tsList[j]; + pInfo->prevTs = tsList[j]; pInfo->numOfRows += 1; if (j == 0 && pInfo->start != 0) { pInfo->numOfRows = 1;
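      // With the pInfo->prevTs assignment re-enabled above, the session gap is measured from the
      // most recently accepted timestamp, so each incoming row is compared against the previous
      // row of the session window rather than against the window's first row.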