From f8e5a3a7e0e32c2cbc6a3ecb26d84204013bb487 Mon Sep 17 00:00:00 2001 From: alexduan <417921451@qq.com> Date: Mon, 16 Aug 2021 10:53:41 +0800 Subject: [PATCH 01/60] add waitMoment --- src/query/src/queryMain.c | 44 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 44 insertions(+) diff --git a/src/query/src/queryMain.c b/src/query/src/queryMain.c index 1a9c057ef0..bb3f262c98 100644 --- a/src/query/src/queryMain.c +++ b/src/query/src/queryMain.c @@ -215,6 +215,49 @@ int32_t qCreateQueryInfo(void* tsdb, int32_t vgId, SQueryTableMsg* pQueryMsg, qi return code; } +// wait moment +int waitMoment(SQInfo* pQInfo){ + if(pQInfo->sql) { + int ms = 0; + char* pos = strstr(pQInfo->sql, " t_"); + if(pos){ + pos += 3; + ms = atoi(pos); + while(*pos >= '0' && *pos <= '9'){ + pos ++; + } + char unit_char = *pos; + if(unit_char == 'h'){ + ms *= 3600*1000; + } else if(unit_char == 'm'){ + ms *= 60*1000; + } else if(unit_char == 's'){ + ms *= 1000; + } + } + + printf("wait sleep %dms ... sql=%s\n", ms, pQInfo->sql); + + if(ms < 1000) { + taosMsleep(ms); + } else { + int used_ms = 0; + while(used_ms < ms) { + taosMsleep(1000); + used_ms += 1000; + if(isQueryKilled(pQInfo)){ + printf(" check query is canceled, sleep break... 
\n"); + break; + } + } + } + + taosMsleep(ms); + } + return 1; +} + + bool qTableQuery(qinfo_t qinfo, uint64_t *qId) { SQInfo *pQInfo = (SQInfo *)qinfo; assert(pQInfo && pQInfo->signature == pQInfo); @@ -259,6 +302,7 @@ bool qTableQuery(qinfo_t qinfo, uint64_t *qId) { int64_t st = taosGetTimestampUs(); pRuntimeEnv->outputBuf = pRuntimeEnv->proot->exec(pRuntimeEnv->proot, &newgroup); pQInfo->summary.elapsedTime += (taosGetTimestampUs() - st); + waitMoment(pQInfo); publishOperatorProfEvent(pRuntimeEnv->proot, QUERY_PROF_AFTER_OPERATOR_EXEC); pRuntimeEnv->resultInfo.total += GET_NUM_OF_RESULTS(pRuntimeEnv); From 3d0d67710cc74c6ac63ff56b8a6a76c7f8f7bd75 Mon Sep 17 00:00:00 2001 From: AlexDuan <417921451@qq.com> Date: Wed, 18 Aug 2021 20:01:18 +0800 Subject: [PATCH 02/60] longquery push --- src/inc/query.h | 12 ++++ src/inc/taoserror.h | 1 + src/inc/tsdb.h | 1 + src/inc/vnode.h | 3 + src/query/src/queryMain.c | 87 +++++++++++++++++++++++++ src/tsdb/inc/tsdbBuffer.h | 4 ++ src/tsdb/inc/tsdbHealth.h | 25 ++++++++ src/tsdb/src/tsdbBuffer.c | 18 ++++-- src/tsdb/src/tsdbCommit.c | 2 + src/tsdb/src/tsdbHealth.c | 132 ++++++++++++++++++++++++++++++++++++++ src/vnode/src/vnodeMain.c | 6 ++ 11 files changed, 285 insertions(+), 6 deletions(-) create mode 100644 src/tsdb/inc/tsdbHealth.h create mode 100644 src/tsdb/src/tsdbHealth.c diff --git a/src/inc/query.h b/src/inc/query.h index fb9cbff858..28bd14e66f 100644 --- a/src/inc/query.h +++ b/src/inc/query.h @@ -76,6 +76,9 @@ void* qGetResultRetrieveMsg(qinfo_t qinfo); */ int32_t qKillQuery(qinfo_t qinfo); +//kill by qid +int32_t qKillQueryByQId(void* pMgmt, int64_t qId, int32_t waitMs, int32_t waitCount); + int32_t qQueryCompleted(qinfo_t qinfo); /** @@ -94,6 +97,15 @@ void** qReleaseQInfo(void* pMgmt, void* pQInfo, bool freeHandle); bool checkQIdEqual(void *qHandle, uint64_t qId); int64_t genQueryId(void); +// util + +typedef struct { + int64_t qId; + int32_t timeMs; +} SLongQuery; +// return SArray* include SLongQuery* +void* 
qObtainLongQuery(void* qMgmt, int32_t longMs); + #ifdef __cplusplus } #endif diff --git a/src/inc/taoserror.h b/src/inc/taoserror.h index 368658377c..b04970e85d 100644 --- a/src/inc/taoserror.h +++ b/src/inc/taoserror.h @@ -35,6 +35,7 @@ int32_t* taosGetErrno(); #define terrno (*taosGetErrno()) #define TSDB_CODE_SUCCESS 0 +#define TSDB_CODE_FAILED -1 // unknown or needn't tell detail error // rpc #define TSDB_CODE_RPC_ACTION_IN_PROGRESS TAOS_DEF_ERROR_CODE(0, 0x0001) //"Action in progress") diff --git a/src/inc/tsdb.h b/src/inc/tsdb.h index 7abe3e99c7..5e5ecc2438 100644 --- a/src/inc/tsdb.h +++ b/src/inc/tsdb.h @@ -192,6 +192,7 @@ typedef struct { SList * bufBlockList; int64_t pointsAdd; // TODO int64_t storageAdd; // TODO + int64_t commitedMs; // commited ms time , zero is no commit. } SMemTable; typedef struct { diff --git a/src/inc/vnode.h b/src/inc/vnode.h index b3291645c0..2cc56af9de 100644 --- a/src/inc/vnode.h +++ b/src/inc/vnode.h @@ -88,6 +88,9 @@ int32_t vnodeWriteToRQueue(void *pVnode, void *pCont, int32_t contLen, int8_t qt void vnodeFreeFromRQueue(void *pVnode, SVReadMsg *pRead); int32_t vnodeProcessRead(void *pVnode, SVReadMsg *pRead); +// util +void* vnodeGetqMgmt(void* pVnode); + #ifdef __cplusplus } #endif diff --git a/src/query/src/queryMain.c b/src/query/src/queryMain.c index bb3f262c98..27680a7151 100644 --- a/src/query/src/queryMain.c +++ b/src/query/src/queryMain.c @@ -219,6 +219,9 @@ int32_t qCreateQueryInfo(void* tsdb, int32_t vgId, SQueryTableMsg* pQueryMsg, qi int waitMoment(SQInfo* pQInfo){ if(pQInfo->sql) { int ms = 0; + char* pcnt = strstr(pQInfo->sql, " count(*)"); + if(pcnt) return 0; + char* pos = strstr(pQInfo->sql, " t_"); if(pos){ pos += 3; @@ -604,3 +607,87 @@ void** qReleaseQInfo(void* pMgmt, void* pQInfo, bool freeHandle) { taosCacheRelease(pQueryMgmt->qinfoPool, pQInfo, freeHandle); return 0; } + +//kill by qid +int32_t qKillQueryByQId(void* pMgmt, int64_t qId, int32_t waitMs, int32_t waitCount) { + int32_t error = 
TSDB_CODE_SUCCESS; + void** handle = qAcquireQInfo(pMgmt, qId); + if(handle == NULL) return terrno; + + SQInfo* pQInfo = (SQInfo*)(*handle); + if (pQInfo == NULL || !isValidQInfo(pQInfo)) { + return TSDB_CODE_QRY_INVALID_QHANDLE; + } + + qDebug("QInfo:0x%"PRIx64" query killed by qid.", pQInfo->qId); + setQueryKilled(pQInfo); + + // wait query stop + int32_t loop = 0; + while (pQInfo->owner != 0) { + taosMsleep(waitMs); + if(loop++ > waitCount){ + error = TSDB_CODE_FAILED; + break; + } + } + + return error; +} + +int compareLongQuery(const void* p1, const void* p2) { + // sort desc + SLongQuery* plq1 = (SLongQuery*)p1; + SLongQuery* plq2 = (SLongQuery*)p2; + if(plq1->timeMs == plq2->timeMs) { + return 0; + } else if(plq1->timeMs > plq2->timeMs) { + return -1; + } else { + return 1; + } +} + +// util +void* qObtainLongQuery(void* param, int32_t longMs){ + SQueryMgmt* qMgmt = (SQueryMgmt*)param; + if(qMgmt == NULL || qMgmt->qinfoPool == NULL) return NULL; + SArray* qids = taosArrayInit(4, sizeof(int64_t*)); + + SHashObj* pHashTable = qMgmt->qinfoPool->pHashTable; + if(pHashTable == NULL || pHashTable->hashList == NULL) return NULL; + + SQInfo * qInfo = (SQInfo*)taosHashIterate(pHashTable, NULL); + while(qInfo){ + // judge long query + SMemTable* imem = qInfo->runtimeEnv.pQueryAttr->memRef.snapshot.imem; + if(imem == NULL || imem->commitedMs == 0) continue; + int64_t now = taosGetTimestampMs(); + if(imem->commitedMs > now) continue; // weird, so skip + + int32_t passMs = now - imem->commitedMs; + if(passMs < longMs) { + continue; + } + + // push + SLongQuery* plq = (SLongQuery*)malloc(sizeof(SLongQuery)); + plq->timeMs = passMs; + plq->qId = qInfo->qId; + taosArrayPush(qids, plq); + + // next + qInfo = (SQInfo*)taosHashIterate(pHashTable, qInfo); + } + + size_t cnt = taosArrayGetSize(qids); + if(cnt == 0) { + taosArrayDestroyEx(qids, free); + return NULL; + } + if(cnt > 1) { + taosArraySort(qids, compareLongQuery); + } + + return qids; +} \ No newline at end of file 
diff --git a/src/tsdb/inc/tsdbBuffer.h b/src/tsdb/inc/tsdbBuffer.h index ec6b057aef..17919c284e 100644 --- a/src/tsdb/inc/tsdbBuffer.h +++ b/src/tsdb/inc/tsdbBuffer.h @@ -43,4 +43,8 @@ SListNode* tsdbAllocBufBlockFromPool(STsdbRepo* pRepo); int tsdbExpandPool(STsdbRepo* pRepo, int32_t oldTotalBlocks); void tsdbRecycleBufferBlock(STsdbBufPool* pPool, SListNode *pNode); +// health cite +STsdbBufBlock *tsdbNewBufBlock(int bufBlockSize); +void tsdbFreeBufBlock(STsdbBufBlock *pBufBlock); + #endif /* _TD_TSDB_BUFFER_H_ */ diff --git a/src/tsdb/inc/tsdbHealth.h b/src/tsdb/inc/tsdbHealth.h new file mode 100644 index 0000000000..9c48f552bb --- /dev/null +++ b/src/tsdb/inc/tsdbHealth.h @@ -0,0 +1,25 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#ifndef _TD_TSDB_HEALTH_H_ +#define _TD_TSDB_HEALTH_H_ + +bool tsdbUrgeQueryFree(STsdbRepo* pRepo); +int32_t tsdbInsertNewBlock(STsdbRepo* pRepo); + +bool enoughIdleMemory(); +bool allowNewBlock(STsdbRepo* pRepo); + +#endif /* _TD_TSDB_BUFFER_H_ */ diff --git a/src/tsdb/src/tsdbBuffer.c b/src/tsdb/src/tsdbBuffer.c index e675bf6f9d..a8d800208c 100644 --- a/src/tsdb/src/tsdbBuffer.c +++ b/src/tsdb/src/tsdbBuffer.c @@ -14,12 +14,10 @@ */ #include "tsdbint.h" +#include "tsdbHealth.h" #define POOL_IS_EMPTY(b) (listNEles((b)->bufBlockList) == 0) -static STsdbBufBlock *tsdbNewBufBlock(int bufBlockSize); -static void tsdbFreeBufBlock(STsdbBufBlock *pBufBlock); - // ---------------- INTERNAL FUNCTIONS ---------------- STsdbBufPool *tsdbNewBufPool() { STsdbBufPool *pBufPool = (STsdbBufPool *)calloc(1, sizeof(*pBufPool)); @@ -120,6 +118,14 @@ SListNode *tsdbAllocBufBlockFromPool(STsdbRepo *pRepo) { STsdbBufPool *pBufPool = pRepo->pPool; while (POOL_IS_EMPTY(pBufPool)) { + // supply new Block + if(tsdbInsertNewBlock(pRepo) > 0) { + break; + } else { + // no newBlock, kill query free + tsdbUrgeQueryFree(pRepo); + } + pRepo->repoLocked = false; pthread_cond_wait(&(pBufPool->poolNotEmpty), &(pRepo->mutex)); pRepo->repoLocked = true; @@ -139,7 +145,7 @@ SListNode *tsdbAllocBufBlockFromPool(STsdbRepo *pRepo) { } // ---------------- LOCAL FUNCTIONS ---------------- -static STsdbBufBlock *tsdbNewBufBlock(int bufBlockSize) { +STsdbBufBlock *tsdbNewBufBlock(int bufBlockSize) { STsdbBufBlock *pBufBlock = (STsdbBufBlock *)malloc(sizeof(*pBufBlock) + bufBlockSize); if (pBufBlock == NULL) { terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; @@ -157,7 +163,7 @@ _err: return NULL; } -static void tsdbFreeBufBlock(STsdbBufBlock *pBufBlock) { tfree(pBufBlock); } + void tsdbFreeBufBlock(STsdbBufBlock *pBufBlock) { tfree(pBufBlock); } int tsdbExpandPool(STsdbRepo* pRepo, int32_t oldTotalBlocks) { if (oldTotalBlocks == pRepo->config.totalBlocks) { @@ -199,4 +205,4 @@ void 
tsdbRecycleBufferBlock(STsdbBufPool* pPool, SListNode *pNode) { tsdbFreeBufBlock(pBufBlock); free(pNode); pPool->nBufBlocks--; -} +} \ No newline at end of file diff --git a/src/tsdb/src/tsdbCommit.c b/src/tsdb/src/tsdbCommit.c index 8f5f885d69..6fae5c6555 100644 --- a/src/tsdb/src/tsdbCommit.c +++ b/src/tsdb/src/tsdbCommit.c @@ -547,6 +547,8 @@ static void tsdbEndCommit(STsdbRepo *pRepo, int eno) { (void)tsdbLockRepo(pRepo); pRepo->imem = NULL; (void)tsdbUnlockRepo(pRepo); + //save commited time + pIMem->commitedMs = taosGetTimestampMs(); tsdbUnRefMemTable(pRepo, pIMem); tsem_post(&(pRepo->readyToCommit)); } diff --git a/src/tsdb/src/tsdbHealth.c b/src/tsdb/src/tsdbHealth.c new file mode 100644 index 0000000000..7cd6672e93 --- /dev/null +++ b/src/tsdb/src/tsdbHealth.c @@ -0,0 +1,132 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#include "os.h" +#include "taosmsg.h" +#include "tarray.h" +#include "query.h" +#include "tglobal.h" +#include "tsdbint.h" +#include "tsdbBuffer.h" +#include "tsdbLog.h" +#include "tsdbHealth.h" +#include "tsdbint.h" +#include "ttimer.h" +#include "vnode.h" + + +// return malloc new block count +int32_t tsdbInsertNewBlock(STsdbRepo * pRepo) { + STsdbBufPool *pPool = pRepo->pPool; + int32_t cnt = 0; + + if(enoughIdleMemory() && allowNewBlock(pRepo)) { + STsdbBufBlock *pBufBlock = tsdbNewBufBlock(pPool->bufBlockSize); + if (pBufBlock) { + if (tsdbLockRepo(pRepo) >= 0) { + if (tdListAppend(pPool->bufBlockList, (void *)(&pBufBlock)) < 0) { + // append error + tsdbFreeBufBlock(pBufBlock); + } else { + pPool->nRecycleBlocks ++; + cnt ++ ; + } + tsdbUnlockRepo(pRepo); + } + } + } + return cnt; +} + +// switch anther thread to run +void cbKillQueryFree(void* param1, void* param2) { + STsdbRepo* pRepo = (STsdbRepo*)param1; + int32_t longMs = 2000; // TODO config to taos.cfg + + // vnode + void* vnodeObj = pRepo->appH.appH; + if(vnodeObj == NULL) return ; + + // qMgmt + void* qMgmt = vnodeGetqMgmt(vnodeObj); + if(qMgmt == NULL) return ; + + // qid top list + SArray *qids = (SArray*)qObtainLongQuery(qMgmt, longMs); + if(qids == NULL) return ; + + // kill Query + size_t cnt = taosArrayGetSize(qids); + int64_t qId = 0; + for(size_t i=0; i < cnt; i++) { + qId = *(int64_t*)taosArrayGetP(qids, i); + qKillQueryByQId(qMgmt, qId, 100, 50); // wait 50*100 ms + // notify wait + pthread_cond_signal(&pRepo->pPool->poolNotEmpty); + // check break condition + if(enoughIdleMemory() && allowNewBlock(pRepo)) { + break; + } + } + + // free qids + taosArrayDestroyEx(qids, free); +} + +// return true do free , false do nothing +bool tsdbUrgeQueryFree(STsdbRepo * pRepo) { + // 1 start timer + tmr_h hTimer = taosTmrStart(cbKillQueryFree, 1, pRepo, NULL); + return hTimer != NULL; +} + +bool enoughIdleMemory(){ + // TODO config to taos.cfg + int32_t lowestRate = 20; // below 20% idle memory, 
return not enough memory + float memoryUsedMB = 0; + float memoryAvailMB; + + if (true != taosGetSysMemory(&memoryUsedMB)) { + tsdbWarn("tsdbHealth get memory error, return false."); + return false; + } + + if(memoryUsedMB > tsTotalMemoryMB || tsTotalMemoryMB == 0) { + tsdbWarn("tsdbHealth used memory(%d MB) large total memory(%d MB), return false.", (int)memoryUsedMB, (int)tsTotalMemoryMB); + return false; + } + + memoryAvailMB = (float)tsTotalMemoryMB - memoryUsedMB; + int32_t rate = (int32_t)(memoryAvailMB/tsTotalMemoryMB * 100); + if(rate < lowestRate){ + tsdbWarn("tsdbHealth real rate :%d less than lowest rate:%d, so return false.", rate, lowestRate); + return false; + } + + return true; +} + +bool allowNewBlock(STsdbRepo* pRepo){ + //TODO config to taos.cfg + int32_t nElasticBlocks = 10; + STsdbBufPool* pPool = pRepo->pPool; + int32_t nOverBlocks = pPool->nBufBlocks - pRepo->config.totalBlocks; + if(nOverBlocks > nElasticBlocks) { + tsdbWarn("tsdbHealth allowNewBlock forbid. nOverBlocks(%d) > nElasticBlocks(%d)", nOverBlocks, nElasticBlocks); + return false; + } + + return true; +} diff --git a/src/vnode/src/vnodeMain.c b/src/vnode/src/vnodeMain.c index f826c1aecd..7802d2a081 100644 --- a/src/vnode/src/vnodeMain.c +++ b/src/vnode/src/vnodeMain.c @@ -562,3 +562,9 @@ static int32_t vnodeProcessTsdbStatus(void *arg, int32_t status, int32_t eno) { return 0; } + +// get qmgmt +void* vnodeGetqMgmt(void* pVnode){ + if(pVnode == NULL) return NULL; + return ((SVnodeObj*)pVnode)->qMgmt; +} From f0ce38e77cadcb2a1226cb02755366a4b8cfd0c6 Mon Sep 17 00:00:00 2001 From: AlexDuan <417921451@qq.com> Date: Thu, 19 Aug 2021 11:28:37 +0800 Subject: [PATCH 03/60] restore vnode.h and remove vnodeGetqMgmt() to tsdbHealth.c --- src/inc/vnode.h | 3 --- src/tsdb/src/tsdbHealth.c | 8 ++++++-- src/vnode/src/vnodeMain.c | 6 ------ src/vnode/src/vnodeSync.c | 1 + 4 files changed, 7 insertions(+), 11 deletions(-) diff --git a/src/inc/vnode.h b/src/inc/vnode.h index 2cc56af9de..b3291645c0 
100644 --- a/src/inc/vnode.h +++ b/src/inc/vnode.h @@ -88,9 +88,6 @@ int32_t vnodeWriteToRQueue(void *pVnode, void *pCont, int32_t contLen, int8_t qt void vnodeFreeFromRQueue(void *pVnode, SVReadMsg *pRead); int32_t vnodeProcessRead(void *pVnode, SVReadMsg *pRead); -// util -void* vnodeGetqMgmt(void* pVnode); - #ifdef __cplusplus } #endif diff --git a/src/tsdb/src/tsdbHealth.c b/src/tsdb/src/tsdbHealth.c index 7cd6672e93..1c1c45c9ae 100644 --- a/src/tsdb/src/tsdbHealth.c +++ b/src/tsdb/src/tsdbHealth.c @@ -22,10 +22,14 @@ #include "tsdbBuffer.h" #include "tsdbLog.h" #include "tsdbHealth.h" -#include "tsdbint.h" #include "ttimer.h" -#include "vnode.h" +#include "../../vnode/inc/vnodeInt.h" +// get qmgmt +void* vnodeGetqMgmt(void* pVnode){ + if(pVnode == NULL) return NULL; + return ((SVnodeObj*)pVnode)->qMgmt; +} // return malloc new block count int32_t tsdbInsertNewBlock(STsdbRepo * pRepo) { diff --git a/src/vnode/src/vnodeMain.c b/src/vnode/src/vnodeMain.c index 7802d2a081..f826c1aecd 100644 --- a/src/vnode/src/vnodeMain.c +++ b/src/vnode/src/vnodeMain.c @@ -562,9 +562,3 @@ static int32_t vnodeProcessTsdbStatus(void *arg, int32_t status, int32_t eno) { return 0; } - -// get qmgmt -void* vnodeGetqMgmt(void* pVnode){ - if(pVnode == NULL) return NULL; - return ((SVnodeObj*)pVnode)->qMgmt; -} diff --git a/src/vnode/src/vnodeSync.c b/src/vnode/src/vnodeSync.c index 2bdfd2ead3..7a886cf758 100644 --- a/src/vnode/src/vnodeSync.c +++ b/src/vnode/src/vnodeSync.c @@ -178,3 +178,4 @@ void vnodeConfirmForward(void *vparam, uint64_t version, int32_t code, bool forc SVnodeObj *pVnode = vparam; syncConfirmForward(pVnode->sync, version, code, force); } + From 79a18b2422e1ce19fc6d6cf5112ebc0f6351f26b Mon Sep 17 00:00:00 2001 From: AlexDuan <417921451@qq.com> Date: Thu, 19 Aug 2021 17:23:53 +0800 Subject: [PATCH 04/60] tsdb call vnode method change to callback notify --- src/inc/query.h | 11 ++-------- src/inc/tsdb.h | 5 +++++ src/query/src/queryMain.c | 38 
+++++++++++++++++++++++++++++++--- src/tsdb/inc/tsdbHealth.h | 3 --- src/tsdb/src/tsdbHealth.c | 43 +++++---------------------------------- src/vnode/src/vnodeMain.c | 5 +++++ 6 files changed, 52 insertions(+), 53 deletions(-) diff --git a/src/inc/query.h b/src/inc/query.h index 28bd14e66f..0872e3dbaa 100644 --- a/src/inc/query.h +++ b/src/inc/query.h @@ -79,6 +79,8 @@ int32_t qKillQuery(qinfo_t qinfo); //kill by qid int32_t qKillQueryByQId(void* pMgmt, int64_t qId, int32_t waitMs, int32_t waitCount); +bool qSolveCommitNoBlock(void* pRepo, void* pMgmt); + int32_t qQueryCompleted(qinfo_t qinfo); /** @@ -97,15 +99,6 @@ void** qReleaseQInfo(void* pMgmt, void* pQInfo, bool freeHandle); bool checkQIdEqual(void *qHandle, uint64_t qId); int64_t genQueryId(void); -// util - -typedef struct { - int64_t qId; - int32_t timeMs; -} SLongQuery; -// return SArray* include SLongQuery* -void* qObtainLongQuery(void* qMgmt, int32_t longMs); - #ifdef __cplusplus } #endif diff --git a/src/inc/tsdb.h b/src/inc/tsdb.h index 5e5ecc2438..52c99a3fe5 100644 --- a/src/inc/tsdb.h +++ b/src/inc/tsdb.h @@ -39,6 +39,7 @@ extern "C" { #define TSDB_STATUS_COMMIT_START 1 #define TSDB_STATUS_COMMIT_OVER 2 +#define TSDB_STATUS_COMMIT_NOBLOCK 3 //commit no block, need to be solved // TSDB STATE DEFINITION #define TSDB_STATE_OK 0x0 @@ -414,6 +415,10 @@ int tsdbSyncRecv(void *pRepo, SOCKET socketFd); // For TSDB Compact int tsdbCompact(STsdbRepo *pRepo); +// For TSDB Health Monitor +bool tsdbAllowNewBlock(STsdbRepo* pRepo); +bool tsdbIdleMemEnough(); + #ifdef __cplusplus } #endif diff --git a/src/query/src/queryMain.c b/src/query/src/queryMain.c index b2e6ed7a7b..d9b01b031d 100644 --- a/src/query/src/queryMain.c +++ b/src/query/src/queryMain.c @@ -628,8 +628,7 @@ int32_t qKillQueryByQId(void* pMgmt, int64_t qId, int32_t waitMs, int32_t waitCo if (pQInfo == NULL || !isValidQInfo(pQInfo)) { return TSDB_CODE_QRY_INVALID_QHANDLE; } - - qDebug("QInfo:0x%"PRIx64" query killed by qid.", pQInfo->qId); + 
qWarn("QId:0x%"PRIx64" query killed becase no memory commit.", pQInfo->qId); setQueryKilled(pQInfo); // wait query stop @@ -645,6 +644,13 @@ int32_t qKillQueryByQId(void* pMgmt, int64_t qId, int32_t waitMs, int32_t waitCo return error; } +// local struct +typedef struct { + int64_t qId; + int32_t timeMs; +} SLongQuery; + +// compare int compareLongQuery(const void* p1, const void* p2) { // sort desc SLongQuery* plq1 = (SLongQuery*)p1; @@ -658,7 +664,7 @@ int compareLongQuery(const void* p1, const void* p2) { } } -// util +// longquery void* qObtainLongQuery(void* param, int32_t longMs){ SQueryMgmt* qMgmt = (SQueryMgmt*)param; if(qMgmt == NULL || qMgmt->qinfoPool == NULL) return NULL; @@ -700,4 +706,30 @@ void* qObtainLongQuery(void* param, int32_t longMs){ } return qids; +} + +//solve tsdb no block to commit +bool qSolveCommitNoBlock(void* pRepo, void* pMgmt) { + SQueryMgmt *pQueryMgmt = pMgmt; + int32_t longMs = 2000; // TODO config to taos.cfg + + // qid top list + SArray *qids = (SArray*)qObtainLongQuery(pQueryMgmt, longMs); + if(qids == NULL) return false; + + // kill Query + size_t cnt = taosArrayGetSize(qids); + SLongQuery* plq; + for(size_t i=0; i < cnt; i++) { + plq = (SLongQuery* )taosArrayGetP(qids, i); + qKillQueryByQId(pMgmt, plq->qId, 100, 50); // wait 50*100 ms + + // check break condition + if(tsdbIdleMemEnough() && tsdbAllowNewBlock(pRepo)) { + break; + } + } + // free qids + taosArrayDestroyEx(qids, free); + return true; } \ No newline at end of file diff --git a/src/tsdb/inc/tsdbHealth.h b/src/tsdb/inc/tsdbHealth.h index 9c48f552bb..e70c26f939 100644 --- a/src/tsdb/inc/tsdbHealth.h +++ b/src/tsdb/inc/tsdbHealth.h @@ -19,7 +19,4 @@ bool tsdbUrgeQueryFree(STsdbRepo* pRepo); int32_t tsdbInsertNewBlock(STsdbRepo* pRepo); -bool enoughIdleMemory(); -bool allowNewBlock(STsdbRepo* pRepo); - #endif /* _TD_TSDB_BUFFER_H_ */ diff --git a/src/tsdb/src/tsdbHealth.c b/src/tsdb/src/tsdbHealth.c index 1c1c45c9ae..e5e95e405f 100644 --- a/src/tsdb/src/tsdbHealth.c 
+++ b/src/tsdb/src/tsdbHealth.c @@ -24,19 +24,12 @@ #include "tsdbHealth.h" #include "ttimer.h" -#include "../../vnode/inc/vnodeInt.h" -// get qmgmt -void* vnodeGetqMgmt(void* pVnode){ - if(pVnode == NULL) return NULL; - return ((SVnodeObj*)pVnode)->qMgmt; -} - // return malloc new block count int32_t tsdbInsertNewBlock(STsdbRepo * pRepo) { STsdbBufPool *pPool = pRepo->pPool; int32_t cnt = 0; - if(enoughIdleMemory() && allowNewBlock(pRepo)) { + if(tsdbIdleMemEnough() && tsdbAllowNewBlock(pRepo)) { STsdbBufBlock *pBufBlock = tsdbNewBufBlock(pPool->bufBlockSize); if (pBufBlock) { if (tsdbLockRepo(pRepo) >= 0) { @@ -57,36 +50,10 @@ int32_t tsdbInsertNewBlock(STsdbRepo * pRepo) { // switch anther thread to run void cbKillQueryFree(void* param1, void* param2) { STsdbRepo* pRepo = (STsdbRepo*)param1; - int32_t longMs = 2000; // TODO config to taos.cfg - // vnode - void* vnodeObj = pRepo->appH.appH; - if(vnodeObj == NULL) return ; - - // qMgmt - void* qMgmt = vnodeGetqMgmt(vnodeObj); - if(qMgmt == NULL) return ; - - // qid top list - SArray *qids = (SArray*)qObtainLongQuery(qMgmt, longMs); - if(qids == NULL) return ; - - // kill Query - size_t cnt = taosArrayGetSize(qids); - int64_t qId = 0; - for(size_t i=0; i < cnt; i++) { - qId = *(int64_t*)taosArrayGetP(qids, i); - qKillQueryByQId(qMgmt, qId, 100, 50); // wait 50*100 ms - // notify wait - pthread_cond_signal(&pRepo->pPool->poolNotEmpty); - // check break condition - if(enoughIdleMemory() && allowNewBlock(pRepo)) { - break; - } + if(pRepo->appH.notifyStatus) { + pRepo->appH.notifyStatus(pRepo->appH.appH, TSDB_STATUS_COMMIT_NOBLOCK, TSDB_CODE_SUCCESS); } - - // free qids - taosArrayDestroyEx(qids, free); } // return true do free , false do nothing @@ -96,7 +63,7 @@ bool tsdbUrgeQueryFree(STsdbRepo * pRepo) { return hTimer != NULL; } -bool enoughIdleMemory(){ +bool tsdbIdleMemEnough() { // TODO config to taos.cfg int32_t lowestRate = 20; // below 20% idle memory, return not enough memory float memoryUsedMB = 0; @@ -122,7 
+89,7 @@ bool enoughIdleMemory(){ return true; } -bool allowNewBlock(STsdbRepo* pRepo){ +bool tsdbAllowNewBlock(STsdbRepo* pRepo) { //TODO config to taos.cfg int32_t nElasticBlocks = 10; STsdbBufPool* pPool = pRepo->pPool; diff --git a/src/vnode/src/vnodeMain.c b/src/vnode/src/vnodeMain.c index f826c1aecd..c823880ae2 100644 --- a/src/vnode/src/vnodeMain.c +++ b/src/vnode/src/vnodeMain.c @@ -560,5 +560,10 @@ static int32_t vnodeProcessTsdbStatus(void *arg, int32_t status, int32_t eno) { return vnodeSaveVersion(pVnode); } + // timer thread callback + if(status == TSDB_STATUS_COMMIT_NOBLOCK) { + qSolveCommitNoBlock(pVnode->tsdb, pVnode->qMgmt); + } + return 0; } From 0608e9086bc9f3b15101c77823bfbf6055d4209f Mon Sep 17 00:00:00 2001 From: AlexDuan <417921451@qq.com> Date: Thu, 19 Aug 2021 21:02:43 +0800 Subject: [PATCH 05/60] nElasticBlocks replace nBufBlocks --- src/tsdb/inc/tsdbBuffer.h | 3 ++- src/tsdb/src/tsdbBuffer.c | 8 ++++++-- src/tsdb/src/tsdbHealth.c | 13 ++++--------- src/tsdb/src/tsdbMemTable.c | 13 +++++++++---- 4 files changed, 21 insertions(+), 16 deletions(-) diff --git a/src/tsdb/inc/tsdbBuffer.h b/src/tsdb/inc/tsdbBuffer.h index 17919c284e..4b650d3993 100644 --- a/src/tsdb/inc/tsdbBuffer.h +++ b/src/tsdb/inc/tsdbBuffer.h @@ -29,6 +29,7 @@ typedef struct { int tBufBlocks; int nBufBlocks; int nRecycleBlocks; + int nElasticBlocks; int64_t index; SList* bufBlockList; } STsdbBufPool; @@ -41,7 +42,7 @@ int tsdbOpenBufPool(STsdbRepo* pRepo); void tsdbCloseBufPool(STsdbRepo* pRepo); SListNode* tsdbAllocBufBlockFromPool(STsdbRepo* pRepo); int tsdbExpandPool(STsdbRepo* pRepo, int32_t oldTotalBlocks); -void tsdbRecycleBufferBlock(STsdbBufPool* pPool, SListNode *pNode); +void tsdbRecycleBufferBlock(STsdbBufPool* pPool, SListNode *pNode, bool bELastic); // health cite STsdbBufBlock *tsdbNewBufBlock(int bufBlockSize); diff --git a/src/tsdb/src/tsdbBuffer.c b/src/tsdb/src/tsdbBuffer.c index a8d800208c..06a98323ab 100644 --- a/src/tsdb/src/tsdbBuffer.c +++ 
b/src/tsdb/src/tsdbBuffer.c @@ -67,6 +67,7 @@ int tsdbOpenBufPool(STsdbRepo *pRepo) { pPool->bufBlockSize = pCfg->cacheBlockSize * 1024 * 1024; // MB pPool->tBufBlocks = pCfg->totalBlocks; pPool->nBufBlocks = 0; + pPool->nElasticBlocks = 0; pPool->index = 0; pPool->nRecycleBlocks = 0; @@ -199,10 +200,13 @@ err: return err; } -void tsdbRecycleBufferBlock(STsdbBufPool* pPool, SListNode *pNode) { +void tsdbRecycleBufferBlock(STsdbBufPool* pPool, SListNode *pNode, bool bELastic) { STsdbBufBlock *pBufBlock = NULL; tdListNodeGetData(pPool->bufBlockList, pNode, (void *)(&pBufBlock)); tsdbFreeBufBlock(pBufBlock); free(pNode); - pPool->nBufBlocks--; + if(bELastic) + pPool->nElasticBlocks--; + else + pPool->nBufBlocks--; } \ No newline at end of file diff --git a/src/tsdb/src/tsdbHealth.c b/src/tsdb/src/tsdbHealth.c index e5e95e405f..32f6f4fdb8 100644 --- a/src/tsdb/src/tsdbHealth.c +++ b/src/tsdb/src/tsdbHealth.c @@ -32,16 +32,13 @@ int32_t tsdbInsertNewBlock(STsdbRepo * pRepo) { if(tsdbIdleMemEnough() && tsdbAllowNewBlock(pRepo)) { STsdbBufBlock *pBufBlock = tsdbNewBufBlock(pPool->bufBlockSize); if (pBufBlock) { - if (tsdbLockRepo(pRepo) >= 0) { if (tdListAppend(pPool->bufBlockList, (void *)(&pBufBlock)) < 0) { // append error tsdbFreeBufBlock(pBufBlock); } else { - pPool->nRecycleBlocks ++; + pPool->nElasticBlocks ++; cnt ++ ; } - tsdbUnlockRepo(pRepo); - } } } return cnt; @@ -91,13 +88,11 @@ bool tsdbIdleMemEnough() { bool tsdbAllowNewBlock(STsdbRepo* pRepo) { //TODO config to taos.cfg - int32_t nElasticBlocks = 10; + int32_t nMaxElastic = 3; STsdbBufPool* pPool = pRepo->pPool; - int32_t nOverBlocks = pPool->nBufBlocks - pRepo->config.totalBlocks; - if(nOverBlocks > nElasticBlocks) { - tsdbWarn("tsdbHealth allowNewBlock forbid. nOverBlocks(%d) > nElasticBlocks(%d)", nOverBlocks, nElasticBlocks); + if(pPool->nElasticBlocks >= nMaxElastic) { + tsdbWarn("tsdbAllowNewBlock return fasle. 
nElasticBlock(%d) >= MaxElasticBlocks(%d)", pPool->nElasticBlocks, nMaxElastic); return false; } - return true; } diff --git a/src/tsdb/src/tsdbMemTable.c b/src/tsdb/src/tsdbMemTable.c index e766d97a97..02c9946704 100644 --- a/src/tsdb/src/tsdbMemTable.c +++ b/src/tsdb/src/tsdbMemTable.c @@ -99,17 +99,22 @@ int tsdbUnRefMemTable(STsdbRepo *pRepo, SMemTable *pMemTable) { STsdbBufPool *pBufPool = pRepo->pPool; SListNode *pNode = NULL; - bool recycleBlocks = pBufPool->nRecycleBlocks > 0; + bool addNew = false; if (tsdbLockRepo(pRepo) < 0) return -1; while ((pNode = tdListPopHead(pMemTable->bufBlockList)) != NULL) { if (pBufPool->nRecycleBlocks > 0) { - tsdbRecycleBufferBlock(pBufPool, pNode); + tsdbRecycleBufferBlock(pBufPool, pNode, false); pBufPool->nRecycleBlocks -= 1; } else { - tdListAppendNode(pBufPool->bufBlockList, pNode); + if(pBufPool->nElasticBlocks > 0 && listNEles(pBufPool->bufBlockList) > 0) { + tsdbRecycleBufferBlock(pBufPool, pNode, true); + } else { + tdListAppendNode(pBufPool->bufBlockList, pNode); + addNew = true; + } } } - if (!recycleBlocks) { + if (addNew) { int code = pthread_cond_signal(&pBufPool->poolNotEmpty); if (code != 0) { if (tsdbUnlockRepo(pRepo) < 0) return -1; From 3d79ac346fb3362ae693e30e4c7d41d0b957091c Mon Sep 17 00:00:00 2001 From: AlexDuan <417921451@qq.com> Date: Mon, 23 Aug 2021 13:51:10 +0800 Subject: [PATCH 06/60] qRequire ref add --- src/client/src/tscSystem.c | 2 +- src/inc/tsdb.h | 5 +- src/mnode/src/mnodeProfile.c | 4 +- src/mnode/src/mnodeShow.c | 4 +- src/plugins/http/src/httpContext.c | 4 +- src/plugins/http/src/httpSession.c | 2 +- src/query/src/qExecutor.c | 2 +- src/query/src/queryMain.c | 131 +++++++++++++++++------------ src/tsdb/inc/tsdbHealth.h | 3 + src/tsdb/inc/tsdbint.h | 1 + src/tsdb/src/tsdbBuffer.c | 12 ++- src/tsdb/src/tsdbHealth.c | 28 ++++-- src/tsdb/src/tsdbMain.c | 6 ++ src/util/inc/tcache.h | 4 +- src/util/src/tcache.c | 21 ++--- 15 files changed, 147 insertions(+), 82 deletions(-) diff --git 
a/src/client/src/tscSystem.c b/src/client/src/tscSystem.c index c04765b065..dc4a32cc13 100644 --- a/src/client/src/tscSystem.c +++ b/src/client/src/tscSystem.c @@ -59,7 +59,7 @@ void tscCheckDiskUsage(void *UNUSED_PARAM(para), void *UNUSED_PARAM(param)) { taosTmrReset(tscCheckDiskUsage, 20 * 1000, NULL, tscTmr, &tscCheckDiskUsageTmr); } -void tscFreeRpcObj(void *param) { +void tscFreeRpcObj(void *param, void* param1) { assert(param); SRpcObj *pRpcObj = (SRpcObj *)(param); tscDebug("free rpcObj:%p and free pDnodeConn: %p", pRpcObj, pRpcObj->pDnodeConn); diff --git a/src/inc/tsdb.h b/src/inc/tsdb.h index 52c99a3fe5..b2219a53a7 100644 --- a/src/inc/tsdb.h +++ b/src/inc/tsdb.h @@ -416,8 +416,9 @@ int tsdbSyncRecv(void *pRepo, SOCKET socketFd); int tsdbCompact(STsdbRepo *pRepo); // For TSDB Health Monitor -bool tsdbAllowNewBlock(STsdbRepo* pRepo); -bool tsdbIdleMemEnough(); + +// no problem return true +bool tsdbNoProblem(STsdbRepo* pRepo); #ifdef __cplusplus } diff --git a/src/mnode/src/mnodeProfile.c b/src/mnode/src/mnodeProfile.c index 15438fc234..5c3063128c 100644 --- a/src/mnode/src/mnodeProfile.c +++ b/src/mnode/src/mnodeProfile.c @@ -46,7 +46,7 @@ static int32_t mnodeRetrieveConns(SShowObj *pShow, char *data, int32_t rows, voi static void mnodeCancelGetNextConn(void *pIter); static int32_t mnodeGetStreamMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pConn); static int32_t mnodeRetrieveStreams(SShowObj *pShow, char *data, int32_t rows, void *pConn); -static void mnodeFreeConn(void *data); +static void mnodeFreeConn(void *data, void* param1); static int32_t mnodeProcessKillQueryMsg(SMnodeMsg *pMsg); static int32_t mnodeProcessKillStreamMsg(SMnodeMsg *pMsg); static int32_t mnodeProcessKillConnectionMsg(SMnodeMsg *pMsg); @@ -135,7 +135,7 @@ SConnObj *mnodeAccquireConn(int32_t connId, char *user, uint32_t ip, uint16_t po return pConn; } -static void mnodeFreeConn(void *data) { +static void mnodeFreeConn(void *data, void* param1) { SConnObj *pConn = data; 
tfree(pConn->pQueries); tfree(pConn->pStreams); diff --git a/src/mnode/src/mnodeShow.c b/src/mnode/src/mnodeShow.c index bbfdb52e05..4e3c4797ac 100644 --- a/src/mnode/src/mnodeShow.c +++ b/src/mnode/src/mnodeShow.c @@ -46,7 +46,7 @@ static int32_t mnodeProcessHeartBeatMsg(SMnodeMsg *mnodeMsg); static int32_t mnodeProcessConnectMsg(SMnodeMsg *mnodeMsg); static int32_t mnodeProcessUseMsg(SMnodeMsg *mnodeMsg); -static void mnodeFreeShowObj(void *data); +static void mnodeFreeShowObj(void *data, void* param1); static bool mnodeAccquireShowObj(SShowObj *pShow); static bool mnodeCheckShowFinished(SShowObj *pShow); static void *mnodePutShowObj(SShowObj *pShow); @@ -420,7 +420,7 @@ static void* mnodePutShowObj(SShowObj *pShow) { return NULL; } -static void mnodeFreeShowObj(void *data) { +static void mnodeFreeShowObj(void *data, void* param1) { SShowObj *pShow = *(SShowObj **)data; if (tsMnodeShowFreeIterFp[pShow->type] != NULL) { if (pShow->pVgIter != NULL) { diff --git a/src/plugins/http/src/httpContext.c b/src/plugins/http/src/httpContext.c index 51adef11b9..7631c6d668 100644 --- a/src/plugins/http/src/httpContext.c +++ b/src/plugins/http/src/httpContext.c @@ -29,7 +29,7 @@ #include "httpContext.h" #include "httpParser.h" -static void httpDestroyContext(void *data); +static void httpDestroyContext(void *data, void* param1); static void httpRemoveContextFromEpoll(HttpContext *pContext) { HttpThread *pThread = pContext->pThread; @@ -44,7 +44,7 @@ static void httpRemoveContextFromEpoll(HttpContext *pContext) { } } -static void httpDestroyContext(void *data) { +static void httpDestroyContext(void *data, void* param1) { HttpContext *pContext = *(HttpContext **)data; if (pContext->fd > 0) taosCloseSocket(pContext->fd); diff --git a/src/plugins/http/src/httpSession.c b/src/plugins/http/src/httpSession.c index 2e1ee7df2f..fd2415a5ab 100644 --- a/src/plugins/http/src/httpSession.c +++ b/src/plugins/http/src/httpSession.c @@ -95,7 +95,7 @@ void httpReleaseSession(HttpContext 
*pContext) { pContext->session = NULL; } -static void httpDestroySession(void *data) { +static void httpDestroySession(void *data, void* param1) { HttpSession *session = data; httpDebug("session:%p:%p, is destroyed, sessionRef:%d", session, session->taos, session->refCount); diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index 303612fc8e..e22552c265 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -2387,7 +2387,7 @@ bool isQueryKilled(SQInfo *pQInfo) { (!needBuildResAfterQueryComplete(pQInfo))) { assert(pQInfo->startExecTs != 0); - qDebug("QInfo:%" PRIu64 " retrieve not arrive beyond %d sec, abort current query execution, start:%" PRId64 + qDebug("QInfo:%" PRIu64 " retrieve not arrive beyond %d ms, abort current query execution, start:%" PRId64 ", current:%d", pQInfo->qId, 1, pQInfo->startExecTs, taosGetTimestampSec()); return true; } diff --git a/src/query/src/queryMain.c b/src/query/src/queryMain.c index d9b01b031d..a409d955ad 100644 --- a/src/query/src/queryMain.c +++ b/src/query/src/queryMain.c @@ -35,12 +35,12 @@ typedef struct SQueryMgmt { bool closed; } SQueryMgmt; -static void queryMgmtKillQueryFn(void* handle) { +static void queryMgmtKillQueryFn(void* handle, void* param1) { void** fp = (void**)handle; qKillQuery(*fp); } -static void freeqinfoFn(void *qhandle) { +static void freeqinfoFn(void *qhandle, void* param1) { void** handle = qhandle; if (handle == NULL || *handle == NULL) { return; @@ -254,8 +254,6 @@ int waitMoment(SQInfo* pQInfo){ } } } - - taosMsleep(ms); } return 1; } @@ -274,7 +272,7 @@ bool qTableQuery(qinfo_t qinfo, uint64_t *qId) { } *qId = pQInfo->qId; - pQInfo->startExecTs = taosGetTimestampSec(); + pQInfo->startExecTs = taosGetTimestampMs(); if (isQueryKilled(pQInfo)) { qDebug("QInfo:0x%"PRIx64" it is already killed, abort", pQInfo->qId); @@ -522,7 +520,7 @@ void qQueryMgmtNotifyClosed(void* pQMgmt) { pQueryMgmt->closed = true; pthread_mutex_unlock(&pQueryMgmt->lock); - 
taosCacheRefresh(pQueryMgmt->qinfoPool, queryMgmtKillQueryFn); + taosCacheRefresh(pQueryMgmt->qinfoPool, queryMgmtKillQueryFn, NULL); } void qQueryMgmtReOpen(void *pQMgmt) { @@ -641,95 +639,124 @@ int32_t qKillQueryByQId(void* pMgmt, int64_t qId, int32_t waitMs, int32_t waitCo } } + qReleaseQInfo(pMgmt, (void **)&handle, true); return error; } // local struct typedef struct { int64_t qId; - int32_t timeMs; + int64_t startExecTs; + int64_t commitedMs; } SLongQuery; -// compare -int compareLongQuery(const void* p1, const void* p2) { +// callbark for sort compare +static int compareLongQuery(const void* p1, const void* p2) { // sort desc SLongQuery* plq1 = (SLongQuery*)p1; SLongQuery* plq2 = (SLongQuery*)p2; - if(plq1->timeMs == plq2->timeMs) { + if(plq1->startExecTs == plq2->startExecTs) { return 0; - } else if(plq1->timeMs > plq2->timeMs) { + } else if(plq1->startExecTs > plq2->startExecTs) { return -1; } else { return 1; } } -// longquery -void* qObtainLongQuery(void* param, int32_t longMs){ - SQueryMgmt* qMgmt = (SQueryMgmt*)param; - if(qMgmt == NULL || qMgmt->qinfoPool == NULL) return NULL; - SArray* qids = taosArrayInit(4, sizeof(int64_t*)); +// callback for taosCacheRefresh +static void cbFoundItem(void* handle, void* param1) { + SQInfo * qInfo = *(SQInfo**) handle; + if(qInfo == NULL) return ; + SArray* qids = (SArray*) param1; + if(qids == NULL) return ; - SHashObj* pHashTable = qMgmt->qinfoPool->pHashTable; - if(pHashTable == NULL || pHashTable->hashList == NULL) return NULL; + bool usedMem = true; + bool usedIMem = true; + SMemTable* mem = qInfo->query.memRef.snapshot.omem; + SMemTable* imem = qInfo->query.memRef.snapshot.imem; + if(mem == NULL || T_REF_VAL_GET(mem) == 0) + usedMem = false; + if(imem == NULL || T_REF_VAL_GET(mem) == 0) + usedIMem = false ; - SQInfo * qInfo = (SQInfo*)taosHashIterate(pHashTable, NULL); - while(qInfo){ - // judge long query - SMemTable* imem = qInfo->runtimeEnv.pQueryAttr->memRef.snapshot.imem; - if(imem == NULL || 
imem->commitedMs == 0) continue; - int64_t now = taosGetTimestampMs(); - if(imem->commitedMs > now) continue; // weird, so skip + if(!usedMem && !usedIMem) + return ; + + // push to qids + SLongQuery* plq = (SLongQuery*)malloc(sizeof(SLongQuery)); + plq->qId = qInfo->qId; + plq->startExecTs = qInfo->startExecTs; - int32_t passMs = now - imem->commitedMs; - if(passMs < longMs) { - continue; - } - - // push - SLongQuery* plq = (SLongQuery*)malloc(sizeof(SLongQuery)); - plq->timeMs = passMs; - plq->qId = qInfo->qId; - taosArrayPush(qids, plq); - - // next - qInfo = (SQInfo*)taosHashIterate(pHashTable, qInfo); + // commitedMs + if(imem) { + plq->commitedMs = imem->commitedMs; + } else { + plq->commitedMs = 0; } + + taosArrayPush(qids, &plq); +} +// longquery +void* qObtainLongQuery(void* param){ + SQueryMgmt* qMgmt = (SQueryMgmt*)param; + if(qMgmt == NULL || qMgmt->qinfoPool == NULL) + return NULL; + SArray* qids = taosArrayInit(4, sizeof(int64_t*)); + if(qids == NULL) return NULL; + // Get each item + taosCacheRefresh(qMgmt->qinfoPool, cbFoundItem, qids); + size_t cnt = taosArrayGetSize(qids); if(cnt == 0) { - taosArrayDestroyEx(qids, free); + taosArrayDestroy(qids); return NULL; } - if(cnt > 1) { + if(cnt > 1) taosArraySort(qids, compareLongQuery); - } return qids; } //solve tsdb no block to commit -bool qSolveCommitNoBlock(void* pRepo, void* pMgmt) { +bool qFixedNoBlock(void* pRepo, void* pMgmt, int32_t longQueryMs) { SQueryMgmt *pQueryMgmt = pMgmt; - int32_t longMs = 2000; // TODO config to taos.cfg + bool fixed = false; // qid top list - SArray *qids = (SArray*)qObtainLongQuery(pQueryMgmt, longMs); + SArray *qids = (SArray*)qObtainLongQuery(pQueryMgmt); if(qids == NULL) return false; // kill Query + int64_t now = taosGetTimestampMs(); size_t cnt = taosArrayGetSize(qids); + size_t i; SLongQuery* plq; - for(size_t i=0; i < cnt; i++) { + for(i=0; i < cnt; i++) { plq = (SLongQuery* )taosArrayGetP(qids, i); - qKillQueryByQId(pMgmt, plq->qId, 100, 50); // wait 50*100 ms 
- - // check break condition - if(tsdbIdleMemEnough() && tsdbAllowNewBlock(pRepo)) { - break; + if(plq->startExecTs > now) continue; + if(now - plq->startExecTs >= longQueryMs) { + qKillQueryByQId(pMgmt, plq->qId, 100, 30); // wait 50*100 ms + if(tsdbNoProblem(pRepo)) { + fixed = true; + break; + } } } + // free qids - taosArrayDestroyEx(qids, free); - return true; + for(i=0; i < cnt; i++) { + free(taosArrayGetP(qids, i)); + } + taosArrayDestroy(qids); + return fixed; +} + +//solve tsdb no block to commit +bool qSolveCommitNoBlock(void* pRepo, void* pMgmt) { + if(qFixedNoBlock(pRepo, pMgmt, 20*1000)) { + return true; + } + return qFixedNoBlock(pRepo, pMgmt, 5*1000); } \ No newline at end of file diff --git a/src/tsdb/inc/tsdbHealth.h b/src/tsdb/inc/tsdbHealth.h index e70c26f939..324f4312e0 100644 --- a/src/tsdb/inc/tsdbHealth.h +++ b/src/tsdb/inc/tsdbHealth.h @@ -19,4 +19,7 @@ bool tsdbUrgeQueryFree(STsdbRepo* pRepo); int32_t tsdbInsertNewBlock(STsdbRepo* pRepo); +bool tsdbIdleMemEnough(); +bool tsdbAllowNewBlock(STsdbRepo* pRepo); + #endif /* _TD_TSDB_BUFFER_H_ */ diff --git a/src/tsdb/inc/tsdbint.h b/src/tsdb/inc/tsdbint.h index 532907ae01..3bbc4bd111 100644 --- a/src/tsdb/inc/tsdbint.h +++ b/src/tsdb/inc/tsdbint.h @@ -97,6 +97,7 @@ struct STsdbRepo { SMergeBuf mergeBuf; //used when update=2 int8_t compactState; // compact state: inCompact/noCompact/waitingCompact? 
+ void* tmrCtrl; }; #define REPO_ID(r) (r)->config.tsdbId diff --git a/src/tsdb/src/tsdbBuffer.c b/src/tsdb/src/tsdbBuffer.c index 06a98323ab..af75fc45fd 100644 --- a/src/tsdb/src/tsdbBuffer.c +++ b/src/tsdb/src/tsdbBuffer.c @@ -64,6 +64,10 @@ int tsdbOpenBufPool(STsdbRepo *pRepo) { ASSERT(pPool != NULL); + // debug test + pCfg->cacheBlockSize = 1; + pCfg->totalBlocks = 4; + pPool->bufBlockSize = pCfg->cacheBlockSize * 1024 * 1024; // MB pPool->tBufBlocks = pCfg->totalBlocks; pPool->nBufBlocks = 0; @@ -119,16 +123,22 @@ SListNode *tsdbAllocBufBlockFromPool(STsdbRepo *pRepo) { STsdbBufPool *pBufPool = pRepo->pPool; while (POOL_IS_EMPTY(pBufPool)) { + tsdbWarn("vgId:%d Pool empty,nBufBlocks=%d nElastic=%d nRecycle=%d", REPO_ID(pRepo), pBufPool->nBufBlocks, pBufPool->nElasticBlocks, pBufPool->nRecycleBlocks); // supply new Block if(tsdbInsertNewBlock(pRepo) > 0) { + tsdbWarn("vgId:%d Insert new block to solve.", REPO_ID(pRepo)); break; } else { // no newBlock, kill query free - tsdbUrgeQueryFree(pRepo); + if(!tsdbUrgeQueryFree(pRepo)) { + tsdbWarn("vgId:%d Urge query free thread start failed.", REPO_ID(pRepo)); + } } pRepo->repoLocked = false; + tsdbDebug("vgId:%d wait for new block...", REPO_ID(pRepo)); pthread_cond_wait(&(pBufPool->poolNotEmpty), &(pRepo->mutex)); + tsdbDebug("vgId:%d waited new block ok.", REPO_ID(pRepo)); pRepo->repoLocked = true; } diff --git a/src/tsdb/src/tsdbHealth.c b/src/tsdb/src/tsdbHealth.c index 32f6f4fdb8..b590df28fe 100644 --- a/src/tsdb/src/tsdbHealth.c +++ b/src/tsdb/src/tsdbHealth.c @@ -18,12 +18,14 @@ #include "tarray.h" #include "query.h" #include "tglobal.h" +#include "tlist.h" #include "tsdbint.h" #include "tsdbBuffer.h" #include "tsdbLog.h" #include "tsdbHealth.h" #include "ttimer.h" + // return malloc new block count int32_t tsdbInsertNewBlock(STsdbRepo * pRepo) { STsdbBufPool *pPool = pRepo->pPool; @@ -56,24 +58,28 @@ void cbKillQueryFree(void* param1, void* param2) { // return true do free , false do nothing bool 
tsdbUrgeQueryFree(STsdbRepo * pRepo) { // 1 start timer - tmr_h hTimer = taosTmrStart(cbKillQueryFree, 1, pRepo, NULL); + if(pRepo->tmrCtrl == NULL){ + pRepo->tmrCtrl = taosTmrInit(0, 0, 0, "REPO"); + } + + tmr_h hTimer = taosTmrStart(cbKillQueryFree, 1, pRepo, pRepo->tmrCtrl); return hTimer != NULL; } bool tsdbIdleMemEnough() { // TODO config to taos.cfg - int32_t lowestRate = 20; // below 20% idle memory, return not enough memory + int32_t lowestRate = 10; // below 10% idle memory, return not enough memory float memoryUsedMB = 0; float memoryAvailMB; - if (true != taosGetSysMemory(&memoryUsedMB)) { + if (!taosGetSysMemory(&memoryUsedMB)) { tsdbWarn("tsdbHealth get memory error, return false."); - return false; + return true; } if(memoryUsedMB > tsTotalMemoryMB || tsTotalMemoryMB == 0) { tsdbWarn("tsdbHealth used memory(%d MB) large total memory(%d MB), return false.", (int)memoryUsedMB, (int)tsTotalMemoryMB); - return false; + return true; } memoryAvailMB = (float)tsTotalMemoryMB - memoryUsedMB; @@ -88,7 +94,7 @@ bool tsdbIdleMemEnough() { bool tsdbAllowNewBlock(STsdbRepo* pRepo) { //TODO config to taos.cfg - int32_t nMaxElastic = 3; + int32_t nMaxElastic = 0; STsdbBufPool* pPool = pRepo->pPool; if(pPool->nElasticBlocks >= nMaxElastic) { tsdbWarn("tsdbAllowNewBlock return fasle. 
nElasticBlock(%d) >= MaxElasticBlocks(%d)", pPool->nElasticBlocks, nMaxElastic); @@ -96,3 +102,13 @@ bool tsdbAllowNewBlock(STsdbRepo* pRepo) { } return true; } + +bool tsdbNoProblem(STsdbRepo* pRepo) { + if(!tsdbIdleMemEnough()) + return false; + + if(listNEles(pRepo->pPool->bufBlockList)) + return false; + + return true; +} \ No newline at end of file diff --git a/src/tsdb/src/tsdbMain.c b/src/tsdb/src/tsdbMain.c index b2e6fe8916..099e369de9 100644 --- a/src/tsdb/src/tsdbMain.c +++ b/src/tsdb/src/tsdbMain.c @@ -16,6 +16,7 @@ // no test file errors here #include "taosdef.h" #include "tsdbint.h" +#include "ttimer.h" #define IS_VALID_PRECISION(precision) \ (((precision) >= TSDB_TIME_PRECISION_MILLI) && ((precision) <= TSDB_TIME_PRECISION_NANO)) @@ -126,6 +127,10 @@ int tsdbCloseRepo(STsdbRepo *repo, int toCommit) { terrno = TSDB_CODE_SUCCESS; tsdbStopStream(pRepo); + if(pRepo->tmrCtrl){ + taosTmrCleanUp(pRepo->tmrCtrl); + pRepo->tmrCtrl = NULL; + } if (toCommit) { tsdbSyncCommit(repo); @@ -547,6 +552,7 @@ static STsdbRepo *tsdbNewRepo(STsdbCfg *pCfg, STsdbAppH *pAppH) { pRepo->appH = *pAppH; } pRepo->repoLocked = false; + pRepo->tmrCtrl = NULL; int code = pthread_mutex_init(&(pRepo->mutex), NULL); if (code != 0) { diff --git a/src/util/inc/tcache.h b/src/util/inc/tcache.h index e41b544d00..0e0d1759a3 100644 --- a/src/util/inc/tcache.h +++ b/src/util/inc/tcache.h @@ -32,7 +32,7 @@ extern "C" { #define TSDB_CACHE_PTR_TYPE int64_t #endif -typedef void (*__cache_free_fn_t)(void*); +typedef void (*__cache_free_fn_t)(void*, void*); typedef struct SCacheStatis { int64_t missCount; @@ -176,7 +176,7 @@ void taosCacheCleanup(SCacheObj *pCacheObj); * @param fp * @return */ -void taosCacheRefresh(SCacheObj *pCacheObj, __cache_free_fn_t fp); +void taosCacheRefresh(SCacheObj *pCacheObj, __cache_free_fn_t fp, void* param1); /** * stop background refresh worker thread diff --git a/src/util/src/tcache.c b/src/util/src/tcache.c index 69b3741e13..526b3df171 100644 --- 
a/src/util/src/tcache.c +++ b/src/util/src/tcache.c @@ -140,7 +140,7 @@ static FORCE_INLINE void taosCacheReleaseNode(SCacheObj *pCacheObj, SCacheDataNo pCacheObj->name, pNode->key, pNode->data, pNode->size, size - 1, pCacheObj->totalSize); if (pCacheObj->freeFp) { - pCacheObj->freeFp(pNode->data); + pCacheObj->freeFp(pNode->data, NULL); } free(pNode); @@ -174,7 +174,7 @@ static FORCE_INLINE STrashElem* doRemoveElemInTrashcan(SCacheObj* pCacheObj, STr static FORCE_INLINE void doDestroyTrashcanElem(SCacheObj* pCacheObj, STrashElem *pElem) { if (pCacheObj->freeFp) { - pCacheObj->freeFp(pElem->pData->data); + pCacheObj->freeFp(pElem->pData->data, NULL); } free(pElem->pData); @@ -249,7 +249,7 @@ void *taosCachePut(SCacheObj *pCacheObj, const void *key, size_t keyLen, const v if (ret == 0) { if (T_REF_VAL_GET(p) == 0) { if (pCacheObj->freeFp) { - pCacheObj->freeFp(p->data); + pCacheObj->freeFp(p->data, NULL); } atomic_sub_fetch_64(&pCacheObj->totalSize, p->size); @@ -458,7 +458,7 @@ void taosCacheRelease(SCacheObj *pCacheObj, void **data, bool _remove) { pCacheObj->name, pNode->key, pNode->data, pNode->size, size, pCacheObj->totalSize); if (pCacheObj->freeFp) { - pCacheObj->freeFp(pNode->data); + pCacheObj->freeFp(pNode->data, NULL); } free(pNode); @@ -504,6 +504,7 @@ typedef struct SHashTravSupp { SCacheObj* pCacheObj; int64_t time; __cache_free_fn_t fp; + void* param1; } SHashTravSupp; static bool travHashTableEmptyFn(void* param, void* data) { @@ -662,17 +663,17 @@ bool travHashTableFn(void* param, void* data) { } if (ps->fp) { - (ps->fp)(pNode->data); + (ps->fp)(pNode->data, ps->param1); } // do not remove element in hash table return true; } -static void doCacheRefresh(SCacheObj* pCacheObj, int64_t time, __cache_free_fn_t fp) { +static void doCacheRefresh(SCacheObj* pCacheObj, int64_t time, __cache_free_fn_t fp, void* param1) { assert(pCacheObj != NULL); - SHashTravSupp sup = {.pCacheObj = pCacheObj, .fp = fp, .time = time}; + SHashTravSupp sup = {.pCacheObj = 
pCacheObj, .fp = fp, .time = time, .param1 = param1}; taosHashCondTraverse(pCacheObj->pHashTable, travHashTableFn, &sup); } @@ -736,7 +737,7 @@ void* taosCacheTimedRefresh(void *handle) { // refresh data in hash table if (elemInHash > 0) { int64_t now = taosGetTimestampMs(); - doCacheRefresh(pCacheObj, now, NULL); + doCacheRefresh(pCacheObj, now, NULL, NULL); } taosTrashcanEmpty(pCacheObj, false); @@ -753,13 +754,13 @@ void* taosCacheTimedRefresh(void *handle) { return NULL; } -void taosCacheRefresh(SCacheObj *pCacheObj, __cache_free_fn_t fp) { +void taosCacheRefresh(SCacheObj *pCacheObj, __cache_free_fn_t fp, void* param1) { if (pCacheObj == NULL) { return; } int64_t now = taosGetTimestampMs(); - doCacheRefresh(pCacheObj, now, fp); + doCacheRefresh(pCacheObj, now, fp, param1); } void taosStopCacheRefreshWorker() { From 661fff6929d7d32c14d269732bee1eaeff46d810 Mon Sep 17 00:00:00 2001 From: AlexDuan <417921451@qq.com> Date: Mon, 23 Aug 2021 17:46:48 +0800 Subject: [PATCH 07/60] finished elastic block test --- src/inc/tsdb.h | 1 - src/query/src/queryMain.c | 30 ++++++++++++------------------ src/tsdb/src/tsdbBuffer.c | 5 ++++- src/tsdb/src/tsdbCommit.c | 1 - src/tsdb/src/tsdbHealth.c | 10 +++++----- 5 files changed, 21 insertions(+), 26 deletions(-) diff --git a/src/inc/tsdb.h b/src/inc/tsdb.h index b2219a53a7..089e30ac37 100644 --- a/src/inc/tsdb.h +++ b/src/inc/tsdb.h @@ -193,7 +193,6 @@ typedef struct { SList * bufBlockList; int64_t pointsAdd; // TODO int64_t storageAdd; // TODO - int64_t commitedMs; // commited ms time , zero is no commit. } SMemTable; typedef struct { diff --git a/src/query/src/queryMain.c b/src/query/src/queryMain.c index a409d955ad..7eeaac421a 100644 --- a/src/query/src/queryMain.c +++ b/src/query/src/queryMain.c @@ -249,7 +249,7 @@ int waitMoment(SQInfo* pQInfo){ taosMsleep(1000); used_ms += 1000; if(isQueryKilled(pQInfo)){ - printf(" check query is canceled, sleep break... \n"); + printf(" check query is canceled, sleep break... 
%s\n", pQInfo->sql); break; } } @@ -626,7 +626,7 @@ int32_t qKillQueryByQId(void* pMgmt, int64_t qId, int32_t waitMs, int32_t waitCo if (pQInfo == NULL || !isValidQInfo(pQInfo)) { return TSDB_CODE_QRY_INVALID_QHANDLE; } - qWarn("QId:0x%"PRIx64" query killed becase no memory commit.", pQInfo->qId); + qWarn("QId:0x%"PRIx64" be killed(no memory commit).", pQInfo->qId); setQueryKilled(pQInfo); // wait query stop @@ -647,20 +647,19 @@ int32_t qKillQueryByQId(void* pMgmt, int64_t qId, int32_t waitMs, int32_t waitCo typedef struct { int64_t qId; int64_t startExecTs; - int64_t commitedMs; } SLongQuery; // callbark for sort compare static int compareLongQuery(const void* p1, const void* p2) { // sort desc - SLongQuery* plq1 = (SLongQuery*)p1; - SLongQuery* plq2 = (SLongQuery*)p2; + SLongQuery* plq1 = *(SLongQuery**)p1; + SLongQuery* plq2 = *(SLongQuery**)p2; if(plq1->startExecTs == plq2->startExecTs) { return 0; } else if(plq1->startExecTs > plq2->startExecTs) { - return -1; - } else { return 1; + } else { + return -1; } } @@ -686,15 +685,7 @@ static void cbFoundItem(void* handle, void* param1) { // push to qids SLongQuery* plq = (SLongQuery*)malloc(sizeof(SLongQuery)); plq->qId = qInfo->qId; - plq->startExecTs = qInfo->startExecTs; - - // commitedMs - if(imem) { - plq->commitedMs = imem->commitedMs; - } else { - plq->commitedMs = 0; - } - + plq->startExecTs = qInfo->startExecTs; taosArrayPush(qids, &plq); } @@ -735,11 +726,13 @@ bool qFixedNoBlock(void* pRepo, void* pMgmt, int32_t longQueryMs) { SLongQuery* plq; for(i=0; i < cnt; i++) { plq = (SLongQuery* )taosArrayGetP(qids, i); + printf(" sort i=%d span=%d qid=0x%"PRIx64" exeTime=0x%"PRIx64". 
\n",(int)i, (int)(now - plq->startExecTs), plq->qId, plq->startExecTs); if(plq->startExecTs > now) continue; if(now - plq->startExecTs >= longQueryMs) { - qKillQueryByQId(pMgmt, plq->qId, 100, 30); // wait 50*100 ms + qKillQueryByQId(pMgmt, plq->qId, 500, 10); // wait 50*100 ms if(tsdbNoProblem(pRepo)) { fixed = true; + qWarn("QId:0x%"PRIx64" fixed problem after kill this query.", plq->qId); break; } } @@ -755,8 +748,9 @@ bool qFixedNoBlock(void* pRepo, void* pMgmt, int32_t longQueryMs) { //solve tsdb no block to commit bool qSolveCommitNoBlock(void* pRepo, void* pMgmt) { + qWarn("start solve no block problem."); if(qFixedNoBlock(pRepo, pMgmt, 20*1000)) { return true; } return qFixedNoBlock(pRepo, pMgmt, 5*1000); -} \ No newline at end of file +} diff --git a/src/tsdb/src/tsdbBuffer.c b/src/tsdb/src/tsdbBuffer.c index af75fc45fd..2ba41dca2a 100644 --- a/src/tsdb/src/tsdbBuffer.c +++ b/src/tsdb/src/tsdbBuffer.c @@ -216,7 +216,10 @@ void tsdbRecycleBufferBlock(STsdbBufPool* pPool, SListNode *pNode, bool bELastic tsdbFreeBufBlock(pBufBlock); free(pNode); if(bELastic) - pPool->nElasticBlocks--; + { + pPool->nElasticBlocks--; + printf(" elastic block reduce one ok. 
current blocks=%d \n", pPool->nElasticBlocks); + } else pPool->nBufBlocks--; } \ No newline at end of file diff --git a/src/tsdb/src/tsdbCommit.c b/src/tsdb/src/tsdbCommit.c index 6fae5c6555..5514fa80c8 100644 --- a/src/tsdb/src/tsdbCommit.c +++ b/src/tsdb/src/tsdbCommit.c @@ -548,7 +548,6 @@ static void tsdbEndCommit(STsdbRepo *pRepo, int eno) { pRepo->imem = NULL; (void)tsdbUnlockRepo(pRepo); //save commited time - pIMem->commitedMs = taosGetTimestampMs(); tsdbUnRefMemTable(pRepo, pIMem); tsem_post(&(pRepo->readyToCommit)); } diff --git a/src/tsdb/src/tsdbHealth.c b/src/tsdb/src/tsdbHealth.c index b590df28fe..dddf40d963 100644 --- a/src/tsdb/src/tsdbHealth.c +++ b/src/tsdb/src/tsdbHealth.c @@ -40,6 +40,7 @@ int32_t tsdbInsertNewBlock(STsdbRepo * pRepo) { } else { pPool->nElasticBlocks ++; cnt ++ ; + printf(" elastic block add one ok. current blocks=%d \n", pPool->nElasticBlocks); } } } @@ -68,7 +69,7 @@ bool tsdbUrgeQueryFree(STsdbRepo * pRepo) { bool tsdbIdleMemEnough() { // TODO config to taos.cfg - int32_t lowestRate = 10; // below 10% idle memory, return not enough memory + int32_t lowestRate = 5; // below 10% idle memory, return not enough memory float memoryUsedMB = 0; float memoryAvailMB; @@ -94,8 +95,9 @@ bool tsdbIdleMemEnough() { bool tsdbAllowNewBlock(STsdbRepo* pRepo) { //TODO config to taos.cfg - int32_t nMaxElastic = 0; + int32_t nMaxElastic = 1; STsdbBufPool* pPool = pRepo->pPool; + printf("tsdbAllowNewBlock nElasticBlock(%d) MaxElasticBlocks(%d)\n", pPool->nElasticBlocks, nMaxElastic); if(pPool->nElasticBlocks >= nMaxElastic) { tsdbWarn("tsdbAllowNewBlock return fasle. 
nElasticBlock(%d) >= MaxElasticBlocks(%d)", pPool->nElasticBlocks, nMaxElastic); return false; @@ -106,9 +108,7 @@ bool tsdbAllowNewBlock(STsdbRepo* pRepo) { bool tsdbNoProblem(STsdbRepo* pRepo) { if(!tsdbIdleMemEnough()) return false; - - if(listNEles(pRepo->pPool->bufBlockList)) + if(listNEles(pRepo->pPool->bufBlockList) == 0) return false; - return true; } \ No newline at end of file From 951ff0071a6af4d10d56affaadd6668347073a52 Mon Sep 17 00:00:00 2001 From: AlexDuan <417921451@qq.com> Date: Tue, 24 Aug 2021 11:36:35 +0800 Subject: [PATCH 08/60] add switch and elastic block equal totalBlocks 1/3 --- src/common/inc/tglobal.h | 2 ++ src/common/src/tglobal.c | 13 +++++++++++++ src/tsdb/src/tsdbBuffer.c | 29 ++++++++++------------------- src/tsdb/src/tsdbHealth.c | 34 ++-------------------------------- 4 files changed, 27 insertions(+), 51 deletions(-) diff --git a/src/common/inc/tglobal.h b/src/common/inc/tglobal.h index 4b8347ead0..e53c898718 100644 --- a/src/common/inc/tglobal.h +++ b/src/common/inc/tglobal.h @@ -221,6 +221,8 @@ extern uint32_t maxRange; extern uint32_t curRange; extern char Compressor[]; #endif +// long query +extern int8_t tsDeathLockKillQuery; typedef struct { char dir[TSDB_FILENAME_LEN]; diff --git a/src/common/src/tglobal.c b/src/common/src/tglobal.c index f169b07bb2..c7725dde08 100644 --- a/src/common/src/tglobal.c +++ b/src/common/src/tglobal.c @@ -276,6 +276,9 @@ uint32_t curRange = 100; // range char Compressor[32] = "ZSTD_COMPRESSOR"; // ZSTD_COMPRESSOR or GZIP_COMPRESSOR #endif +// long query death-lock +int8_t tsDeathLockKillQuery = 0; + int32_t (*monStartSystemFp)() = NULL; void (*monStopSystemFp)() = NULL; void (*monExecuteSQLFp)(char *sql) = NULL; @@ -1647,6 +1650,16 @@ static void doInitGlobalConfig(void) { taosInitConfigOption(cfg); #endif + // enable kill long query + cfg.option = "deathLockKillQuery"; + cfg.ptr = &tsDeathLockKillQuery; + cfg.valType = TAOS_CFG_VTYPE_INT8; + cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | 
TSDB_CFG_CTYPE_B_SHOW; + cfg.minValue = 0; + cfg.maxValue = 1; + cfg.ptrLength = 1; + cfg.unitType = TAOS_CFG_UTYPE_NONE; + taosInitConfigOption(cfg); } void taosInitGlobalCfg() { diff --git a/src/tsdb/src/tsdbBuffer.c b/src/tsdb/src/tsdbBuffer.c index 2ba41dca2a..ec385ef83e 100644 --- a/src/tsdb/src/tsdbBuffer.c +++ b/src/tsdb/src/tsdbBuffer.c @@ -123,22 +123,20 @@ SListNode *tsdbAllocBufBlockFromPool(STsdbRepo *pRepo) { STsdbBufPool *pBufPool = pRepo->pPool; while (POOL_IS_EMPTY(pBufPool)) { - tsdbWarn("vgId:%d Pool empty,nBufBlocks=%d nElastic=%d nRecycle=%d", REPO_ID(pRepo), pBufPool->nBufBlocks, pBufPool->nElasticBlocks, pBufPool->nRecycleBlocks); - // supply new Block - if(tsdbInsertNewBlock(pRepo) > 0) { - tsdbWarn("vgId:%d Insert new block to solve.", REPO_ID(pRepo)); - break; - } else { - // no newBlock, kill query free - if(!tsdbUrgeQueryFree(pRepo)) { - tsdbWarn("vgId:%d Urge query free thread start failed.", REPO_ID(pRepo)); + if(tsDeathLockKillQuery) { + // supply new Block + if(tsdbInsertNewBlock(pRepo) > 0) { + tsdbWarn("vgId:%d Insert elastic new block to solve.", REPO_ID(pRepo)); + break; + } else { + // no newBlock, kill query free + if(!tsdbUrgeQueryFree(pRepo)) + tsdbWarn("vgId:%d Urge query free thread start failed.", REPO_ID(pRepo)); } } pRepo->repoLocked = false; - tsdbDebug("vgId:%d wait for new block...", REPO_ID(pRepo)); pthread_cond_wait(&(pBufPool->poolNotEmpty), &(pRepo->mutex)); - tsdbDebug("vgId:%d waited new block ok.", REPO_ID(pRepo)); pRepo->repoLocked = true; } @@ -160,7 +158,7 @@ STsdbBufBlock *tsdbNewBufBlock(int bufBlockSize) { STsdbBufBlock *pBufBlock = (STsdbBufBlock *)malloc(sizeof(*pBufBlock) + bufBlockSize); if (pBufBlock == NULL) { terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; - goto _err; + return NULL; } pBufBlock->blockId = 0; @@ -168,10 +166,6 @@ STsdbBufBlock *tsdbNewBufBlock(int bufBlockSize) { pBufBlock->remain = bufBlockSize; return pBufBlock; - -_err: - tsdbFreeBufBlock(pBufBlock); - return NULL; } void 
tsdbFreeBufBlock(STsdbBufBlock *pBufBlock) { tfree(pBufBlock); } @@ -216,10 +210,7 @@ void tsdbRecycleBufferBlock(STsdbBufPool* pPool, SListNode *pNode, bool bELastic tsdbFreeBufBlock(pBufBlock); free(pNode); if(bELastic) - { pPool->nElasticBlocks--; - printf(" elastic block reduce one ok. current blocks=%d \n", pPool->nElasticBlocks); - } else pPool->nBufBlocks--; } \ No newline at end of file diff --git a/src/tsdb/src/tsdbHealth.c b/src/tsdb/src/tsdbHealth.c index dddf40d963..cc6bae02b0 100644 --- a/src/tsdb/src/tsdbHealth.c +++ b/src/tsdb/src/tsdbHealth.c @@ -31,7 +31,7 @@ int32_t tsdbInsertNewBlock(STsdbRepo * pRepo) { STsdbBufPool *pPool = pRepo->pPool; int32_t cnt = 0; - if(tsdbIdleMemEnough() && tsdbAllowNewBlock(pRepo)) { + if(tsdbAllowNewBlock(pRepo)) { STsdbBufBlock *pBufBlock = tsdbNewBufBlock(pPool->bufBlockSize); if (pBufBlock) { if (tdListAppend(pPool->bufBlockList, (void *)(&pBufBlock)) < 0) { @@ -67,37 +67,9 @@ bool tsdbUrgeQueryFree(STsdbRepo * pRepo) { return hTimer != NULL; } -bool tsdbIdleMemEnough() { - // TODO config to taos.cfg - int32_t lowestRate = 5; // below 10% idle memory, return not enough memory - float memoryUsedMB = 0; - float memoryAvailMB; - - if (!taosGetSysMemory(&memoryUsedMB)) { - tsdbWarn("tsdbHealth get memory error, return false."); - return true; - } - - if(memoryUsedMB > tsTotalMemoryMB || tsTotalMemoryMB == 0) { - tsdbWarn("tsdbHealth used memory(%d MB) large total memory(%d MB), return false.", (int)memoryUsedMB, (int)tsTotalMemoryMB); - return true; - } - - memoryAvailMB = (float)tsTotalMemoryMB - memoryUsedMB; - int32_t rate = (int32_t)(memoryAvailMB/tsTotalMemoryMB * 100); - if(rate < lowestRate){ - tsdbWarn("tsdbHealth real rate :%d less than lowest rate:%d, so return false.", rate, lowestRate); - return false; - } - - return true; -} - bool tsdbAllowNewBlock(STsdbRepo* pRepo) { - //TODO config to taos.cfg - int32_t nMaxElastic = 1; + int32_t nMaxElastic = pRepo->config.totalBlocks/3; STsdbBufPool* pPool = 
pRepo->pPool; - printf("tsdbAllowNewBlock nElasticBlock(%d) MaxElasticBlocks(%d)\n", pPool->nElasticBlocks, nMaxElastic); if(pPool->nElasticBlocks >= nMaxElastic) { tsdbWarn("tsdbAllowNewBlock return fasle. nElasticBlock(%d) >= MaxElasticBlocks(%d)", pPool->nElasticBlocks, nMaxElastic); return false; @@ -106,8 +78,6 @@ bool tsdbAllowNewBlock(STsdbRepo* pRepo) { } bool tsdbNoProblem(STsdbRepo* pRepo) { - if(!tsdbIdleMemEnough()) - return false; if(listNEles(pRepo->pPool->bufBlockList) == 0) return false; return true; From a1d8119aac003202a8d6f066385cd7c7caec8a36 Mon Sep 17 00:00:00 2001 From: AlexDuan <417921451@qq.com> Date: Tue, 24 Aug 2021 14:14:27 +0800 Subject: [PATCH 09/60] remove test information --- src/query/src/queryMain.c | 11 ++++++----- src/tsdb/src/tsdbBuffer.c | 7 +------ src/tsdb/src/tsdbHealth.c | 3 +-- 3 files changed, 8 insertions(+), 13 deletions(-) diff --git a/src/query/src/queryMain.c b/src/query/src/queryMain.c index c880dc2aaa..5e26bb534a 100644 --- a/src/query/src/queryMain.c +++ b/src/query/src/queryMain.c @@ -215,6 +215,7 @@ int32_t qCreateQueryInfo(void* tsdb, int32_t vgId, SQueryTableMsg* pQueryMsg, qi return code; } +#ifdef TEST_IMPL // wait moment int waitMoment(SQInfo* pQInfo){ if(pQInfo->sql) { @@ -239,7 +240,7 @@ int waitMoment(SQInfo* pQInfo){ } } - printf("wait sleep %dms ... sql=%s\n", ms, pQInfo->sql); + printf("wait sleep %dms. 
sql=%s\n", ms, pQInfo->sql); if(ms < 1000) { taosMsleep(ms); @@ -257,7 +258,7 @@ int waitMoment(SQInfo* pQInfo){ } return 1; } - +#endif bool qTableQuery(qinfo_t qinfo, uint64_t *qId) { SQInfo *pQInfo = (SQInfo *)qinfo; @@ -303,8 +304,9 @@ bool qTableQuery(qinfo_t qinfo, uint64_t *qId) { int64_t st = taosGetTimestampUs(); pRuntimeEnv->outputBuf = pRuntimeEnv->proot->exec(pRuntimeEnv->proot, &newgroup); pQInfo->summary.elapsedTime += (taosGetTimestampUs() - st); +#ifdef TEST_IMPL waitMoment(pQInfo); - +#endif publishOperatorProfEvent(pRuntimeEnv->proot, QUERY_PROF_AFTER_OPERATOR_EXEC); pRuntimeEnv->resultInfo.total += GET_NUM_OF_RESULTS(pRuntimeEnv); @@ -730,7 +732,6 @@ bool qFixedNoBlock(void* pRepo, void* pMgmt, int32_t longQueryMs) { SLongQuery* plq; for(i=0; i < cnt; i++) { plq = (SLongQuery* )taosArrayGetP(qids, i); - printf(" sort i=%d span=%d qid=0x%"PRIx64" exeTime=0x%"PRIx64". \n",(int)i, (int)(now - plq->startExecTs), plq->qId, plq->startExecTs); if(plq->startExecTs > now) continue; if(now - plq->startExecTs >= longQueryMs) { qKillQueryByQId(pMgmt, plq->qId, 500, 10); // wait 50*100 ms @@ -752,7 +753,7 @@ bool qFixedNoBlock(void* pRepo, void* pMgmt, int32_t longQueryMs) { //solve tsdb no block to commit bool qSolveCommitNoBlock(void* pRepo, void* pMgmt) { - qWarn("start solve no block problem."); + qWarn("pRepo=%p start solve no block problem.", pRepo); if(qFixedNoBlock(pRepo, pMgmt, 20*1000)) { return true; } diff --git a/src/tsdb/src/tsdbBuffer.c b/src/tsdb/src/tsdbBuffer.c index ec385ef83e..be64f9ee1b 100644 --- a/src/tsdb/src/tsdbBuffer.c +++ b/src/tsdb/src/tsdbBuffer.c @@ -63,11 +63,6 @@ int tsdbOpenBufPool(STsdbRepo *pRepo) { STsdbBufPool *pPool = pRepo->pPool; ASSERT(pPool != NULL); - - // debug test - pCfg->cacheBlockSize = 1; - pCfg->totalBlocks = 4; - pPool->bufBlockSize = pCfg->cacheBlockSize * 1024 * 1024; // MB pPool->tBufBlocks = pCfg->totalBlocks; pPool->nBufBlocks = 0; @@ -126,7 +121,7 @@ SListNode *tsdbAllocBufBlockFromPool(STsdbRepo 
*pRepo) { if(tsDeathLockKillQuery) { // supply new Block if(tsdbInsertNewBlock(pRepo) > 0) { - tsdbWarn("vgId:%d Insert elastic new block to solve.", REPO_ID(pRepo)); + tsdbWarn("vgId:%d add new elastic block . elasticBlocks=%d totalBlocks=%d", REPO_ID(pRepo), pBufPool->nElasticBlocks, pBufPool->nBufBlocks); break; } else { // no newBlock, kill query free diff --git a/src/tsdb/src/tsdbHealth.c b/src/tsdb/src/tsdbHealth.c index cc6bae02b0..63dffff3dd 100644 --- a/src/tsdb/src/tsdbHealth.c +++ b/src/tsdb/src/tsdbHealth.c @@ -39,8 +39,7 @@ int32_t tsdbInsertNewBlock(STsdbRepo * pRepo) { tsdbFreeBufBlock(pBufBlock); } else { pPool->nElasticBlocks ++; - cnt ++ ; - printf(" elastic block add one ok. current blocks=%d \n", pPool->nElasticBlocks); + cnt ++ ; } } } From e14c3fc7a5353857e48d673498ba7a07b13a8cdb Mon Sep 17 00:00:00 2001 From: AlexDuan <417921451@qq.com> Date: Tue, 24 Aug 2021 14:23:52 +0800 Subject: [PATCH 10/60] restore no change file --- src/tsdb/src/tsdbCommit.c | 1 - src/vnode/src/vnodeSync.c | 1 - 2 files changed, 2 deletions(-) diff --git a/src/tsdb/src/tsdbCommit.c b/src/tsdb/src/tsdbCommit.c index 084dd62da9..15fc3cc47d 100644 --- a/src/tsdb/src/tsdbCommit.c +++ b/src/tsdb/src/tsdbCommit.c @@ -702,7 +702,6 @@ static void tsdbEndCommit(STsdbRepo *pRepo, int eno) { (void)tsdbLockRepo(pRepo); pRepo->imem = NULL; (void)tsdbUnlockRepo(pRepo); - //save commited time tsdbUnRefMemTable(pRepo, pIMem); tsem_post(&(pRepo->readyToCommit)); } diff --git a/src/vnode/src/vnodeSync.c b/src/vnode/src/vnodeSync.c index 7a886cf758..2bdfd2ead3 100644 --- a/src/vnode/src/vnodeSync.c +++ b/src/vnode/src/vnodeSync.c @@ -178,4 +178,3 @@ void vnodeConfirmForward(void *vparam, uint64_t version, int32_t code, bool forc SVnodeObj *pVnode = vparam; syncConfirmForward(pVnode->sync, version, code, force); } - From 3a1773f565843cdbe3059cb26cf0346bea25bc4c Mon Sep 17 00:00:00 2001 From: AlexDuan <417921451@qq.com> Date: Tue, 24 Aug 2021 17:02:44 +0800 Subject: [PATCH 11/60] long 
query first kill 10minutes , second kill 2minutes --- src/query/src/queryMain.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/query/src/queryMain.c b/src/query/src/queryMain.c index 5e26bb534a..b92a4625f6 100644 --- a/src/query/src/queryMain.c +++ b/src/query/src/queryMain.c @@ -754,8 +754,8 @@ bool qFixedNoBlock(void* pRepo, void* pMgmt, int32_t longQueryMs) { //solve tsdb no block to commit bool qSolveCommitNoBlock(void* pRepo, void* pMgmt) { qWarn("pRepo=%p start solve no block problem.", pRepo); - if(qFixedNoBlock(pRepo, pMgmt, 20*1000)) { + if(qFixedNoBlock(pRepo, pMgmt, 10*60*1000)) { return true; } - return qFixedNoBlock(pRepo, pMgmt, 5*1000); + return qFixedNoBlock(pRepo, pMgmt, 2*60*1000); } From a55df5e92765efcc74e8f76738e3e0bc1665eba6 Mon Sep 17 00:00:00 2001 From: AlexDuan <417921451@qq.com> Date: Tue, 24 Aug 2021 18:14:14 +0800 Subject: [PATCH 12/60] fixed deadlock missspelling and printf replace with log api --- src/common/inc/tglobal.h | 2 +- src/common/src/tglobal.c | 6 +++--- src/query/src/queryMain.c | 6 +++--- src/tsdb/src/tsdbBuffer.c | 2 +- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/src/common/inc/tglobal.h b/src/common/inc/tglobal.h index e53c898718..26f9faecca 100644 --- a/src/common/inc/tglobal.h +++ b/src/common/inc/tglobal.h @@ -222,7 +222,7 @@ extern uint32_t curRange; extern char Compressor[]; #endif // long query -extern int8_t tsDeathLockKillQuery; +extern int8_t tsDeadLockKillQuery; typedef struct { char dir[TSDB_FILENAME_LEN]; diff --git a/src/common/src/tglobal.c b/src/common/src/tglobal.c index c7725dde08..1677c6b52a 100644 --- a/src/common/src/tglobal.c +++ b/src/common/src/tglobal.c @@ -277,7 +277,7 @@ char Compressor[32] = "ZSTD_COMPRESSOR"; // ZSTD_COMPRESSOR or GZIP_COMPRESS #endif // long query death-lock -int8_t tsDeathLockKillQuery = 0; +int8_t tsDeadLockKillQuery = 0; int32_t (*monStartSystemFp)() = NULL; void (*monStopSystemFp)() = NULL; @@ -1651,8 +1651,8 @@ static void 
doInitGlobalConfig(void) { #endif // enable kill long query - cfg.option = "deathLockKillQuery"; - cfg.ptr = &tsDeathLockKillQuery; + cfg.option = "deadLockKillQuery"; + cfg.ptr = &tsDeadLockKillQuery; cfg.valType = TAOS_CFG_VTYPE_INT8; cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW; cfg.minValue = 0; diff --git a/src/query/src/queryMain.c b/src/query/src/queryMain.c index b92a4625f6..77fcdb0825 100644 --- a/src/query/src/queryMain.c +++ b/src/query/src/queryMain.c @@ -239,8 +239,8 @@ int waitMoment(SQInfo* pQInfo){ ms *= 1000; } } - - printf("wait sleep %dms. sql=%s\n", ms, pQInfo->sql); + if(ms == 0) return 0; + qWarn("wait sleep %dms. sql=%s", ms, pQInfo->sql); if(ms < 1000) { taosMsleep(ms); @@ -250,7 +250,7 @@ int waitMoment(SQInfo* pQInfo){ taosMsleep(1000); used_ms += 1000; if(isQueryKilled(pQInfo)){ - printf(" check query is canceled, sleep break... %s\n", pQInfo->sql); + qWarn("check query is canceled, sleep break... %s", pQInfo->sql); break; } } diff --git a/src/tsdb/src/tsdbBuffer.c b/src/tsdb/src/tsdbBuffer.c index be64f9ee1b..3dae2c6f4a 100644 --- a/src/tsdb/src/tsdbBuffer.c +++ b/src/tsdb/src/tsdbBuffer.c @@ -118,7 +118,7 @@ SListNode *tsdbAllocBufBlockFromPool(STsdbRepo *pRepo) { STsdbBufPool *pBufPool = pRepo->pPool; while (POOL_IS_EMPTY(pBufPool)) { - if(tsDeathLockKillQuery) { + if(tsDeadLockKillQuery) { // supply new Block if(tsdbInsertNewBlock(pRepo) > 0) { tsdbWarn("vgId:%d add new elastic block . 
elasticBlocks=%d totalBlocks=%d", REPO_ID(pRepo), pBufPool->nElasticBlocks, pBufPool->nBufBlocks); From 34691fb6c797c6449beaac8fd04ededc5d204f6e Mon Sep 17 00:00:00 2001 From: AlexDuan <417921451@qq.com> Date: Wed, 25 Aug 2021 10:51:28 +0800 Subject: [PATCH 13/60] modify pytest case proxy chaopingwu --- .../functions/showOfflineThresholdIs864000.py | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/tests/pytest/functions/showOfflineThresholdIs864000.py b/tests/pytest/functions/showOfflineThresholdIs864000.py index 8ec25cef26..7462d4cd72 100644 --- a/tests/pytest/functions/showOfflineThresholdIs864000.py +++ b/tests/pytest/functions/showOfflineThresholdIs864000.py @@ -12,6 +12,8 @@ # -*- coding: utf-8 -*- import sys +import numpy as np + from util.log import * from util.cases import * from util.sql import * @@ -24,8 +26,17 @@ class TDTestCase: tdSql.init(conn.cursor(), logSql) def run(self): + # tdSql.query("show variables") + # tdSql.checkData(54, 1, 864000) + tdSql.execute("show variables") + res = tdSql.cursor.fetchall() + resList = np.array(res) + index = np.where(resList == "offlineThreshold") + index_value = np.dstack((index[0])).squeeze() tdSql.query("show variables") - tdSql.checkData(55, 1, 864000) + tdSql.checkData(index_value, 1, 864000) + pass + def stop(self): tdSql.close() From ca43acde1e7ee1fce13657ddd1525e89f83a2588 Mon Sep 17 00:00:00 2001 From: AlexDuan <417921451@qq.com> Date: Fri, 27 Aug 2021 18:08:16 +0800 Subject: [PATCH 14/60] use new thread replace timer --- src/tsdb/inc/tsdbint.h | 2 +- src/tsdb/src/tsdbHealth.c | 31 +++++++++++++++----- src/tsdb/src/tsdbMain.c | 9 +++--- src/util/inc/tthread.h | 37 +++++++++++++++++++++++ src/util/src/tthread.c | 62 +++++++++++++++++++++++++++++++++++++++ 5 files changed, 128 insertions(+), 13 deletions(-) create mode 100644 src/util/inc/tthread.h create mode 100644 src/util/src/tthread.c diff --git a/src/tsdb/inc/tsdbint.h b/src/tsdb/inc/tsdbint.h index 3bbc4bd111..80e9297579 
100644 --- a/src/tsdb/inc/tsdbint.h +++ b/src/tsdb/inc/tsdbint.h @@ -97,7 +97,7 @@ struct STsdbRepo { SMergeBuf mergeBuf; //used when update=2 int8_t compactState; // compact state: inCompact/noCompact/waitingCompact? - void* tmrCtrl; + pthread_t* pthread; }; #define REPO_ID(r) (r)->config.tsdbId diff --git a/src/tsdb/src/tsdbHealth.c b/src/tsdb/src/tsdbHealth.c index 63dffff3dd..44cc3ff423 100644 --- a/src/tsdb/src/tsdbHealth.c +++ b/src/tsdb/src/tsdbHealth.c @@ -24,6 +24,7 @@ #include "tsdbLog.h" #include "tsdbHealth.h" #include "ttimer.h" +#include "tthread.h" // return malloc new block count @@ -47,30 +48,44 @@ int32_t tsdbInsertNewBlock(STsdbRepo * pRepo) { } // switch anther thread to run -void cbKillQueryFree(void* param1, void* param2) { +void* cbKillQueryFree(void* param1) { STsdbRepo* pRepo = (STsdbRepo*)param1; // vnode if(pRepo->appH.notifyStatus) { pRepo->appH.notifyStatus(pRepo->appH.appH, TSDB_STATUS_COMMIT_NOBLOCK, TSDB_CODE_SUCCESS); } + + // free + if(pRepo->pthread){ + void* p = pRepo->pthread; + pRepo->pthread = NULL; + free(p); + } + + return NULL; } // return true do free , false do nothing bool tsdbUrgeQueryFree(STsdbRepo * pRepo) { - // 1 start timer - if(pRepo->tmrCtrl == NULL){ - pRepo->tmrCtrl = taosTmrInit(0, 0, 0, "REPO"); + // check previous running + if(pRepo->pthread && taosThreadRunning(pRepo->pthread)) { + tsdbWarn("vgId:%d pre urge thread is runing. 
nBlocks=%d nElasticBlocks=%d", REPO_ID(pRepo), pRepo->pPool->nBufBlocks, pRepo->pPool->nElasticBlocks); + return false; } - - tmr_h hTimer = taosTmrStart(cbKillQueryFree, 1, pRepo, pRepo->tmrCtrl); - return hTimer != NULL; + // create new + pRepo->pthread = taosCreateThread(cbKillQueryFree, pRepo); + if(pRepo->pthread == NULL) { + tsdbError("vgId:%d create urge thread error.", REPO_ID(pRepo)); + return false; + } + return true; } bool tsdbAllowNewBlock(STsdbRepo* pRepo) { int32_t nMaxElastic = pRepo->config.totalBlocks/3; STsdbBufPool* pPool = pRepo->pPool; if(pPool->nElasticBlocks >= nMaxElastic) { - tsdbWarn("tsdbAllowNewBlock return fasle. nElasticBlock(%d) >= MaxElasticBlocks(%d)", pPool->nElasticBlocks, nMaxElastic); + tsdbWarn("vgId:%d tsdbAllowNewBlock return fasle. nElasticBlock(%d) >= MaxElasticBlocks(%d)", REPO_ID(pRepo), pPool->nElasticBlocks, nMaxElastic); return false; } return true; diff --git a/src/tsdb/src/tsdbMain.c b/src/tsdb/src/tsdbMain.c index 099e369de9..c2021963e0 100644 --- a/src/tsdb/src/tsdbMain.c +++ b/src/tsdb/src/tsdbMain.c @@ -17,6 +17,7 @@ #include "taosdef.h" #include "tsdbint.h" #include "ttimer.h" +#include "tthread.h" #define IS_VALID_PRECISION(precision) \ (((precision) >= TSDB_TIME_PRECISION_MILLI) && ((precision) <= TSDB_TIME_PRECISION_NANO)) @@ -127,9 +128,9 @@ int tsdbCloseRepo(STsdbRepo *repo, int toCommit) { terrno = TSDB_CODE_SUCCESS; tsdbStopStream(pRepo); - if(pRepo->tmrCtrl){ - taosTmrCleanUp(pRepo->tmrCtrl); - pRepo->tmrCtrl = NULL; + if(pRepo->pthread){ + taosDestoryThread(pRepo->pthread); + pRepo->pthread = NULL; } if (toCommit) { @@ -552,7 +553,7 @@ static STsdbRepo *tsdbNewRepo(STsdbCfg *pCfg, STsdbAppH *pAppH) { pRepo->appH = *pAppH; } pRepo->repoLocked = false; - pRepo->tmrCtrl = NULL; + pRepo->pthread = NULL; int code = pthread_mutex_init(&(pRepo->mutex), NULL); if (code != 0) { diff --git a/src/util/inc/tthread.h b/src/util/inc/tthread.h new file mode 100644 index 0000000000..7443ad706d --- /dev/null +++ 
b/src/util/inc/tthread.h @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#ifndef TDENGINE_TTHREAD_H +#define TDENGINE_TTHREAD_H + +#ifdef __cplusplus +extern "C" { +#endif + +#include "os.h" +#include "taosdef.h" + +// create new thread +pthread_t* taosCreateThread( void *(*__start_routine) (void *), void* param); +// destory thread +bool taosDestoryThread(pthread_t* pthread); +// thread running return true +bool taosThreadRunning(pthread_t* pthread); + +#ifdef __cplusplus +} +#endif + +#endif // TDENGINE_TTHREAD_H diff --git a/src/util/src/tthread.c b/src/util/src/tthread.c new file mode 100644 index 0000000000..043b2de2f2 --- /dev/null +++ b/src/util/src/tthread.c @@ -0,0 +1,62 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#include "os.h" +#include "tthread.h" +#include "tglobal.h" +#include "taosdef.h" +#include "tutil.h" +#include "tulog.h" +#include "taoserror.h" + +// create new thread +pthread_t* taosCreateThread( void *(*__start_routine) (void *), void* param) { + pthread_t* pthread = (pthread_t*)malloc(sizeof(pthread_t)); + pthread_attr_t thattr; + pthread_attr_init(&thattr); + pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_JOINABLE); + int32_t ret = pthread_create(pthread, &thattr, __start_routine, param); + pthread_attr_destroy(&thattr); + + if (ret != 0) { + free(pthread); + return NULL; + } + return pthread; +} + +// destory thread +bool taosDestoryThread(pthread_t* pthread) { + if(pthread == NULL) return false; + if(taosThreadRunning(pthread)) { + pthread_cancel(*pthread); + pthread_join(*pthread, NULL); + } + + free(pthread); + return true; +} + +// thread running return true +bool taosThreadRunning(pthread_t* pthread) { + if(pthread == NULL) return false; + int ret = pthread_kill(*pthread, 0); + if(ret == ESRCH) + return false; + if(ret == EINVAL) + return false; + // alive + return true; +} From 27f9f9e43574188704d8b0b4174d34b25dc73350 Mon Sep 17 00:00:00 2001 From: AlexDuan <417921451@qq.com> Date: Sat, 28 Aug 2021 12:25:51 +0800 Subject: [PATCH 15/60] reduce to 30s for long query --- src/query/src/queryMain.c | 15 +++++++++++---- src/tsdb/src/tsdbBuffer.c | 7 +++++-- src/tsdb/src/tsdbMemTable.c | 2 +- 3 files changed, 17 insertions(+), 7 deletions(-) diff --git a/src/query/src/queryMain.c b/src/query/src/queryMain.c index 77fcdb0825..902fa34722 100644 --- a/src/query/src/queryMain.c +++ b/src/query/src/queryMain.c @@ -240,7 +240,7 @@ int waitMoment(SQInfo* pQInfo){ } } if(ms == 0) return 0; - qWarn("wait sleep %dms. sql=%s", ms, pQInfo->sql); + printf("test wait sleep %dms. 
sql=%s ...\n", ms, pQInfo->sql); if(ms < 1000) { taosMsleep(ms); @@ -250,7 +250,7 @@ int waitMoment(SQInfo* pQInfo){ taosMsleep(1000); used_ms += 1000; if(isQueryKilled(pQInfo)){ - qWarn("check query is canceled, sleep break... %s", pQInfo->sql); + printf("test check query is canceled, sleep break.%s\n", pQInfo->sql); break; } } @@ -753,9 +753,16 @@ bool qFixedNoBlock(void* pRepo, void* pMgmt, int32_t longQueryMs) { //solve tsdb no block to commit bool qSolveCommitNoBlock(void* pRepo, void* pMgmt) { - qWarn("pRepo=%p start solve no block problem.", pRepo); + qWarn("pRepo=%p start solve problem.", pRepo); if(qFixedNoBlock(pRepo, pMgmt, 10*60*1000)) { return true; } - return qFixedNoBlock(pRepo, pMgmt, 2*60*1000); + if(qFixedNoBlock(pRepo, pMgmt, 2*60*1000)){ + return true; + } + if(qFixedNoBlock(pRepo, pMgmt, 30*1000)){ + return true; + } + qWarn("pRepo=%p solve problem failed.", pRepo); + return false; } diff --git a/src/tsdb/src/tsdbBuffer.c b/src/tsdb/src/tsdbBuffer.c index 3dae2c6f4a..70589031f6 100644 --- a/src/tsdb/src/tsdbBuffer.c +++ b/src/tsdb/src/tsdbBuffer.c @@ -121,7 +121,7 @@ SListNode *tsdbAllocBufBlockFromPool(STsdbRepo *pRepo) { if(tsDeadLockKillQuery) { // supply new Block if(tsdbInsertNewBlock(pRepo) > 0) { - tsdbWarn("vgId:%d add new elastic block . elasticBlocks=%d totalBlocks=%d", REPO_ID(pRepo), pBufPool->nElasticBlocks, pBufPool->nBufBlocks); + tsdbWarn("vgId:%d add new elastic block . elasticBlocks=%d cur free Blocks=%d", REPO_ID(pRepo), pBufPool->nElasticBlocks, pBufPool->bufBlockList->numOfEles); break; } else { // no newBlock, kill query free @@ -205,7 +205,10 @@ void tsdbRecycleBufferBlock(STsdbBufPool* pPool, SListNode *pNode, bool bELastic tsdbFreeBufBlock(pBufBlock); free(pNode); if(bELastic) - pPool->nElasticBlocks--; + { + pPool->nElasticBlocks--; + tsdbWarn("pPool=%p elastic block reduce one . 
nElasticBlocks=%d cur free Blocks=%d", pPool, pPool->nElasticBlocks, pPool->bufBlockList->numOfEles); + } else pPool->nBufBlocks--; } \ No newline at end of file diff --git a/src/tsdb/src/tsdbMemTable.c b/src/tsdb/src/tsdbMemTable.c index 02c9946704..3890dca5b9 100644 --- a/src/tsdb/src/tsdbMemTable.c +++ b/src/tsdb/src/tsdbMemTable.c @@ -106,7 +106,7 @@ int tsdbUnRefMemTable(STsdbRepo *pRepo, SMemTable *pMemTable) { tsdbRecycleBufferBlock(pBufPool, pNode, false); pBufPool->nRecycleBlocks -= 1; } else { - if(pBufPool->nElasticBlocks > 0 && listNEles(pBufPool->bufBlockList) > 0) { + if(pBufPool->nElasticBlocks > 0 && listNEles(pBufPool->bufBlockList) > 2) { tsdbRecycleBufferBlock(pBufPool, pNode, true); } else { tdListAppendNode(pBufPool->bufBlockList, pNode); From f1b44426aed05dced005a3bc6dca7c2aa620d7da Mon Sep 17 00:00:00 2001 From: AlexDuan <417921451@qq.com> Date: Mon, 30 Aug 2021 11:22:07 +0800 Subject: [PATCH 16/60] modify set startExecTs only once --- src/query/src/qExecutor.c | 1 + src/query/src/queryMain.c | 3 ++- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index 1b2d72d5e1..9d6a69019f 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -8389,6 +8389,7 @@ SQInfo* createQInfoImpl(SQueryTableMsg* pQueryMsg, SGroupbyExpr* pGroupbyExpr, S } pQInfo->qId = qId; + pQInfo->startExecTs = 0; pQInfo->runtimeEnv.pUdfInfo = pUdfInfo; diff --git a/src/query/src/queryMain.c b/src/query/src/queryMain.c index 902fa34722..dd7d77adb0 100644 --- a/src/query/src/queryMain.c +++ b/src/query/src/queryMain.c @@ -273,7 +273,8 @@ bool qTableQuery(qinfo_t qinfo, uint64_t *qId) { } *qId = pQInfo->qId; - pQInfo->startExecTs = taosGetTimestampMs(); + if(pQInfo->startExecTs == 0) + pQInfo->startExecTs = taosGetTimestampMs(); if (isQueryKilled(pQInfo)) { qDebug("QInfo:0x%"PRIx64" it is already killed, abort", pQInfo->qId); From e415d0f359182ed45b0c55d6e09fab698cb48927 Mon Sep 17 00:00:00 2001 
From: AlexDuan <417921451@qq.com> Date: Mon, 30 Aug 2021 11:29:09 +0800 Subject: [PATCH 17/60] change startExecTs used to seconds --- src/query/src/qExecutor.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index 9d6a69019f..da42b54255 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -2395,7 +2395,7 @@ bool isQueryKilled(SQInfo *pQInfo) { // query has been executed more than tsShellActivityTimer, and the retrieve has not arrived // abort current query execution. - if (pQInfo->owner != 0 && ((taosGetTimestampSec() - pQInfo->startExecTs) > getMaximumIdleDurationSec()) && + if (pQInfo->owner != 0 && ((taosGetTimestampSec() - pQInfo->startExecTs/1000) > getMaximumIdleDurationSec()) && (!needBuildResAfterQueryComplete(pQInfo))) { assert(pQInfo->startExecTs != 0); From f1fe388881a7128c6c2ebb5bef5e1ed5df342b21 Mon Sep 17 00:00:00 2001 From: AlexDuan <417921451@qq.com> Date: Tue, 31 Aug 2021 10:36:52 +0800 Subject: [PATCH 18/60] restore showOffline....py file --- .../functions/showOfflineThresholdIs864000.py | 13 +------------ 1 file changed, 1 insertion(+), 12 deletions(-) diff --git a/tests/pytest/functions/showOfflineThresholdIs864000.py b/tests/pytest/functions/showOfflineThresholdIs864000.py index 7462d4cd72..8ec25cef26 100644 --- a/tests/pytest/functions/showOfflineThresholdIs864000.py +++ b/tests/pytest/functions/showOfflineThresholdIs864000.py @@ -12,8 +12,6 @@ # -*- coding: utf-8 -*- import sys -import numpy as np - from util.log import * from util.cases import * from util.sql import * @@ -26,17 +24,8 @@ class TDTestCase: tdSql.init(conn.cursor(), logSql) def run(self): - # tdSql.query("show variables") - # tdSql.checkData(54, 1, 864000) - tdSql.execute("show variables") - res = tdSql.cursor.fetchall() - resList = np.array(res) - index = np.where(resList == "offlineThreshold") - index_value = np.dstack((index[0])).squeeze() tdSql.query("show variables") - 
tdSql.checkData(index_value, 1, 864000) - pass - + tdSql.checkData(55, 1, 864000) def stop(self): tdSql.close() From 281a63af72b7bc17f31406ddad4d503b32186071 Mon Sep 17 00:00:00 2001 From: AlexDuan <417921451@qq.com> Date: Tue, 31 Aug 2021 10:43:58 +0800 Subject: [PATCH 19/60] add++ TSDB_CFG_MAX_NUM --- src/util/inc/tconfig.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/util/inc/tconfig.h b/src/util/inc/tconfig.h index d03ce6e0f1..00dc65aa3d 100644 --- a/src/util/inc/tconfig.h +++ b/src/util/inc/tconfig.h @@ -20,7 +20,7 @@ extern "C" { #endif -#define TSDB_CFG_MAX_NUM 116 // 110 + 6 with lossy option +#define TSDB_CFG_MAX_NUM 117 // 110 + 6(lossy) + 1(deadlock) #define TSDB_CFG_PRINT_LEN 23 #define TSDB_CFG_OPTION_LEN 24 #define TSDB_CFG_VALUE_LEN 41 From afb9962c2f86003b7f58be1357bfcf7d3e4271ec Mon Sep 17 00:00:00 2001 From: AlexDuan <417921451@qq.com> Date: Tue, 31 Aug 2021 11:48:54 +0800 Subject: [PATCH 20/60] showOffline needed --- .../functions/showOfflineThresholdIs864000.py | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/tests/pytest/functions/showOfflineThresholdIs864000.py b/tests/pytest/functions/showOfflineThresholdIs864000.py index 8ec25cef26..7462d4cd72 100644 --- a/tests/pytest/functions/showOfflineThresholdIs864000.py +++ b/tests/pytest/functions/showOfflineThresholdIs864000.py @@ -12,6 +12,8 @@ # -*- coding: utf-8 -*- import sys +import numpy as np + from util.log import * from util.cases import * from util.sql import * @@ -24,8 +26,17 @@ class TDTestCase: tdSql.init(conn.cursor(), logSql) def run(self): + # tdSql.query("show variables") + # tdSql.checkData(54, 1, 864000) + tdSql.execute("show variables") + res = tdSql.cursor.fetchall() + resList = np.array(res) + index = np.where(resList == "offlineThreshold") + index_value = np.dstack((index[0])).squeeze() tdSql.query("show variables") - tdSql.checkData(55, 1, 864000) + tdSql.checkData(index_value, 1, 864000) + pass + def stop(self): 
tdSql.close() From 85be9550016048474e05fcfd67bce73abca4fdf1 Mon Sep 17 00:00:00 2001 From: AlexDuan <417921451@qq.com> Date: Wed, 1 Sep 2021 10:54:51 +0800 Subject: [PATCH 21/60] cache use trav_fn callback --- src/client/src/tscSystem.c | 2 +- src/mnode/src/mnodeProfile.c | 4 ++-- src/mnode/src/mnodeShow.c | 4 ++-- src/plugins/http/src/httpContext.c | 4 ++-- src/plugins/http/src/httpSession.c | 2 +- src/query/src/queryMain.c | 2 +- src/tsdb/src/tsdbHealth.c | 4 ++-- src/util/inc/tcache.h | 5 +++-- src/util/src/tcache.c | 14 +++++++------- 9 files changed, 21 insertions(+), 20 deletions(-) diff --git a/src/client/src/tscSystem.c b/src/client/src/tscSystem.c index dc4a32cc13..c04765b065 100644 --- a/src/client/src/tscSystem.c +++ b/src/client/src/tscSystem.c @@ -59,7 +59,7 @@ void tscCheckDiskUsage(void *UNUSED_PARAM(para), void *UNUSED_PARAM(param)) { taosTmrReset(tscCheckDiskUsage, 20 * 1000, NULL, tscTmr, &tscCheckDiskUsageTmr); } -void tscFreeRpcObj(void *param, void* param1) { +void tscFreeRpcObj(void *param) { assert(param); SRpcObj *pRpcObj = (SRpcObj *)(param); tscDebug("free rpcObj:%p and free pDnodeConn: %p", pRpcObj, pRpcObj->pDnodeConn); diff --git a/src/mnode/src/mnodeProfile.c b/src/mnode/src/mnodeProfile.c index 5c3063128c..15438fc234 100644 --- a/src/mnode/src/mnodeProfile.c +++ b/src/mnode/src/mnodeProfile.c @@ -46,7 +46,7 @@ static int32_t mnodeRetrieveConns(SShowObj *pShow, char *data, int32_t rows, voi static void mnodeCancelGetNextConn(void *pIter); static int32_t mnodeGetStreamMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pConn); static int32_t mnodeRetrieveStreams(SShowObj *pShow, char *data, int32_t rows, void *pConn); -static void mnodeFreeConn(void *data, void* param1); +static void mnodeFreeConn(void *data); static int32_t mnodeProcessKillQueryMsg(SMnodeMsg *pMsg); static int32_t mnodeProcessKillStreamMsg(SMnodeMsg *pMsg); static int32_t mnodeProcessKillConnectionMsg(SMnodeMsg *pMsg); @@ -135,7 +135,7 @@ SConnObj 
*mnodeAccquireConn(int32_t connId, char *user, uint32_t ip, uint16_t po return pConn; } -static void mnodeFreeConn(void *data, void* param1) { +static void mnodeFreeConn(void *data) { SConnObj *pConn = data; tfree(pConn->pQueries); tfree(pConn->pStreams); diff --git a/src/mnode/src/mnodeShow.c b/src/mnode/src/mnodeShow.c index 4e3c4797ac..bbfdb52e05 100644 --- a/src/mnode/src/mnodeShow.c +++ b/src/mnode/src/mnodeShow.c @@ -46,7 +46,7 @@ static int32_t mnodeProcessHeartBeatMsg(SMnodeMsg *mnodeMsg); static int32_t mnodeProcessConnectMsg(SMnodeMsg *mnodeMsg); static int32_t mnodeProcessUseMsg(SMnodeMsg *mnodeMsg); -static void mnodeFreeShowObj(void *data, void* param1); +static void mnodeFreeShowObj(void *data); static bool mnodeAccquireShowObj(SShowObj *pShow); static bool mnodeCheckShowFinished(SShowObj *pShow); static void *mnodePutShowObj(SShowObj *pShow); @@ -420,7 +420,7 @@ static void* mnodePutShowObj(SShowObj *pShow) { return NULL; } -static void mnodeFreeShowObj(void *data, void* param1) { +static void mnodeFreeShowObj(void *data) { SShowObj *pShow = *(SShowObj **)data; if (tsMnodeShowFreeIterFp[pShow->type] != NULL) { if (pShow->pVgIter != NULL) { diff --git a/src/plugins/http/src/httpContext.c b/src/plugins/http/src/httpContext.c index 7631c6d668..51adef11b9 100644 --- a/src/plugins/http/src/httpContext.c +++ b/src/plugins/http/src/httpContext.c @@ -29,7 +29,7 @@ #include "httpContext.h" #include "httpParser.h" -static void httpDestroyContext(void *data, void* param1); +static void httpDestroyContext(void *data); static void httpRemoveContextFromEpoll(HttpContext *pContext) { HttpThread *pThread = pContext->pThread; @@ -44,7 +44,7 @@ static void httpRemoveContextFromEpoll(HttpContext *pContext) { } } -static void httpDestroyContext(void *data, void* param1) { +static void httpDestroyContext(void *data) { HttpContext *pContext = *(HttpContext **)data; if (pContext->fd > 0) taosCloseSocket(pContext->fd); diff --git a/src/plugins/http/src/httpSession.c 
b/src/plugins/http/src/httpSession.c index fd2415a5ab..2e1ee7df2f 100644 --- a/src/plugins/http/src/httpSession.c +++ b/src/plugins/http/src/httpSession.c @@ -95,7 +95,7 @@ void httpReleaseSession(HttpContext *pContext) { pContext->session = NULL; } -static void httpDestroySession(void *data, void* param1) { +static void httpDestroySession(void *data) { HttpSession *session = data; httpDebug("session:%p:%p, is destroyed, sessionRef:%d", session, session->taos, session->refCount); diff --git a/src/query/src/queryMain.c b/src/query/src/queryMain.c index dd7d77adb0..1460fbdc0f 100644 --- a/src/query/src/queryMain.c +++ b/src/query/src/queryMain.c @@ -40,7 +40,7 @@ static void queryMgmtKillQueryFn(void* handle, void* param1) { qKillQuery(*fp); } -static void freeqinfoFn(void *qhandle, void* param1) { +static void freeqinfoFn(void *qhandle) { void** handle = qhandle; if (handle == NULL || *handle == NULL) { return; diff --git a/src/tsdb/src/tsdbHealth.c b/src/tsdb/src/tsdbHealth.c index 44cc3ff423..8198c48033 100644 --- a/src/tsdb/src/tsdbHealth.c +++ b/src/tsdb/src/tsdbHealth.c @@ -48,8 +48,8 @@ int32_t tsdbInsertNewBlock(STsdbRepo * pRepo) { } // switch anther thread to run -void* cbKillQueryFree(void* param1) { - STsdbRepo* pRepo = (STsdbRepo*)param1; +void* cbKillQueryFree(void* param) { + STsdbRepo* pRepo = (STsdbRepo*)param; // vnode if(pRepo->appH.notifyStatus) { pRepo->appH.notifyStatus(pRepo->appH.appH, TSDB_STATUS_COMMIT_NOBLOCK, TSDB_CODE_SUCCESS); diff --git a/src/util/inc/tcache.h b/src/util/inc/tcache.h index 0e0d1759a3..40069d7d27 100644 --- a/src/util/inc/tcache.h +++ b/src/util/inc/tcache.h @@ -32,7 +32,8 @@ extern "C" { #define TSDB_CACHE_PTR_TYPE int64_t #endif -typedef void (*__cache_free_fn_t)(void*, void*); +typedef void (*__cache_free_fn_t)(void*); +typedef void (*__cache_trav_fn_t)(void*, void*); typedef struct SCacheStatis { int64_t missCount; @@ -176,7 +177,7 @@ void taosCacheCleanup(SCacheObj *pCacheObj); * @param fp * @return */ -void 
taosCacheRefresh(SCacheObj *pCacheObj, __cache_free_fn_t fp, void* param1); +void taosCacheRefresh(SCacheObj *pCacheObj, __cache_trav_fn_t fp, void* param1); /** * stop background refresh worker thread diff --git a/src/util/src/tcache.c b/src/util/src/tcache.c index 4a4f795341..0fd29a67a3 100644 --- a/src/util/src/tcache.c +++ b/src/util/src/tcache.c @@ -140,7 +140,7 @@ static FORCE_INLINE void taosCacheReleaseNode(SCacheObj *pCacheObj, SCacheDataNo pCacheObj->name, pNode->key, pNode->data, pNode->size, size - 1, pCacheObj->totalSize); if (pCacheObj->freeFp) { - pCacheObj->freeFp(pNode->data, NULL); + pCacheObj->freeFp(pNode->data); } free(pNode); @@ -174,7 +174,7 @@ static FORCE_INLINE STrashElem* doRemoveElemInTrashcan(SCacheObj* pCacheObj, STr static FORCE_INLINE void doDestroyTrashcanElem(SCacheObj* pCacheObj, STrashElem *pElem) { if (pCacheObj->freeFp) { - pCacheObj->freeFp(pElem->pData->data, NULL); + pCacheObj->freeFp(pElem->pData->data); } free(pElem->pData); @@ -249,7 +249,7 @@ void *taosCachePut(SCacheObj *pCacheObj, const void *key, size_t keyLen, const v if (ret == 0) { if (T_REF_VAL_GET(p) == 0) { if (pCacheObj->freeFp) { - pCacheObj->freeFp(p->data, NULL); + pCacheObj->freeFp(p->data); } atomic_sub_fetch_64(&pCacheObj->totalSize, p->size); @@ -458,7 +458,7 @@ void taosCacheRelease(SCacheObj *pCacheObj, void **data, bool _remove) { pCacheObj->name, pNode->key, pNode->data, pNode->size, size, pCacheObj->totalSize); if (pCacheObj->freeFp) { - pCacheObj->freeFp(pNode->data, NULL); + pCacheObj->freeFp(pNode->data); } free(pNode); @@ -503,7 +503,7 @@ void taosCacheRelease(SCacheObj *pCacheObj, void **data, bool _remove) { typedef struct SHashTravSupp { SCacheObj* pCacheObj; int64_t time; - __cache_free_fn_t fp; + __cache_trav_fn_t fp; void* param1; } SHashTravSupp; @@ -671,7 +671,7 @@ bool travHashTableFn(void* param, void* data) { return true; } -static void doCacheRefresh(SCacheObj* pCacheObj, int64_t time, __cache_free_fn_t fp, void* param1) { +static 
void doCacheRefresh(SCacheObj* pCacheObj, int64_t time, __cache_trav_fn_t fp, void* param1) { assert(pCacheObj != NULL); SHashTravSupp sup = {.pCacheObj = pCacheObj, .fp = fp, .time = time, .param1 = param1}; @@ -755,7 +755,7 @@ void* taosCacheTimedRefresh(void *handle) { return NULL; } -void taosCacheRefresh(SCacheObj *pCacheObj, __cache_free_fn_t fp, void* param1) { +void taosCacheRefresh(SCacheObj *pCacheObj, __cache_trav_fn_t fp, void* param1) { if (pCacheObj == NULL) { return; } From a445670225a5a1d3084b34738ee24fec469cb5ba Mon Sep 17 00:00:00 2001 From: AlexDuan <417921451@qq.com> Date: Thu, 2 Sep 2021 10:37:13 +0800 Subject: [PATCH 22/60] modify global item count and add assert check --- src/common/src/tglobal.c | 25 ++++++++++++++----------- src/util/inc/tconfig.h | 2 +- 2 files changed, 15 insertions(+), 12 deletions(-) diff --git a/src/common/src/tglobal.c b/src/common/src/tglobal.c index f520952712..ca1c02eee5 100644 --- a/src/common/src/tglobal.c +++ b/src/common/src/tglobal.c @@ -1614,7 +1614,17 @@ static void doInitGlobalConfig(void) { cfg.unitType = TAOS_CFG_UTYPE_NONE; taosInitConfigOption(cfg); - assert(tsGlobalConfigNum <= TSDB_CFG_MAX_NUM); + // enable kill long query + cfg.option = "deadLockKillQuery"; + cfg.ptr = &tsDeadLockKillQuery; + cfg.valType = TAOS_CFG_VTYPE_INT8; + cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW; + cfg.minValue = 0; + cfg.maxValue = 1; + cfg.ptrLength = 1; + cfg.unitType = TAOS_CFG_UTYPE_NONE; + taosInitConfigOption(cfg); + #ifdef TD_TSZ // lossy compress cfg.option = "lossyColumns"; @@ -1668,18 +1678,11 @@ static void doInitGlobalConfig(void) { cfg.ptrLength = 0; cfg.unitType = TAOS_CFG_UTYPE_NONE; taosInitConfigOption(cfg); + assert(tsGlobalConfigNum == TSDB_CFG_MAX_NUM); +#else + assert(tsGlobalConfigNum == TSDB_CFG_MAX_NUM - 5); #endif - // enable kill long query - cfg.option = "deadLockKillQuery"; - cfg.ptr = &tsDeadLockKillQuery; - cfg.valType = TAOS_CFG_VTYPE_INT8; - cfg.cfgType = 
TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW; - cfg.minValue = 0; - cfg.maxValue = 1; - cfg.ptrLength = 1; - cfg.unitType = TAOS_CFG_UTYPE_NONE; - taosInitConfigOption(cfg); } void taosInitGlobalCfg() { diff --git a/src/util/inc/tconfig.h b/src/util/inc/tconfig.h index 00dc65aa3d..462b23697f 100644 --- a/src/util/inc/tconfig.h +++ b/src/util/inc/tconfig.h @@ -20,7 +20,7 @@ extern "C" { #endif -#define TSDB_CFG_MAX_NUM 117 // 110 + 6(lossy) + 1(deadlock) +#define TSDB_CFG_MAX_NUM 122 #define TSDB_CFG_PRINT_LEN 23 #define TSDB_CFG_OPTION_LEN 24 #define TSDB_CFG_VALUE_LEN 41 From 5e5fe11f05d759980826f6c3fc542ccba9799f54 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Tue, 7 Sep 2021 14:07:45 +0800 Subject: [PATCH 23/60] [td-255] remove unused attributes of object. --- src/client/inc/tsclient.h | 15 ++------------- src/client/src/tscPrepare.c | 1 - src/client/src/tscSubquery.c | 1 - src/client/src/tscUtil.c | 17 +++-------------- 4 files changed, 5 insertions(+), 29 deletions(-) diff --git a/src/client/inc/tsclient.h b/src/client/inc/tsclient.h index b8eb0a5286..ff796cdcbf 100644 --- a/src/client/inc/tsclient.h +++ b/src/client/inc/tsclient.h @@ -234,7 +234,6 @@ typedef struct STableDataBlocks { typedef struct { STableMeta *pTableMeta; SArray *vgroupIdList; -// SVgroupsInfo *pVgroupsInfo; } STableMetaVgroupInfo; typedef struct SInsertStatementParam { @@ -286,20 +285,14 @@ typedef struct { int32_t resColumnId; } SSqlCmd; -typedef struct SResRec { - int numOfRows; - int numOfTotal; -} SResRec; - typedef struct { int32_t numOfRows; // num of results in current retrieval - int64_t numOfRowsGroup; // num of results of current group int64_t numOfTotal; // num of total results int64_t numOfClauseTotal; // num of total result in current subclause char * pRsp; int32_t rspType; int32_t rspLen; - uint64_t qId; + uint64_t qId; // query id of SQInfo int64_t useconds; int64_t offset; // offset value from vnode during projection query of stable int32_t row; @@ -307,8 +300,6 @@ 
typedef struct { int16_t precision; bool completed; int32_t code; - int32_t numOfGroups; - SResRec * pGroupRec; char * data; TAOS_ROW tsrow; TAOS_ROW urow; @@ -316,8 +307,7 @@ typedef struct { char ** buffer; // Buffer used to put multibytes encoded using unicode (wchar_t) SColumnIndex* pColumnIndex; - TAOS_FIELD* final; - SArithmeticSupport *pArithSup; // support the arithmetic expression calculation on agg functions + TAOS_FIELD* final; struct SGlobalMerger *pMerger; } SSqlRes; @@ -377,7 +367,6 @@ typedef struct SSqlObj { tsem_t rspSem; SSqlCmd cmd; SSqlRes res; - bool isBind; SSubqueryState subState; struct SSqlObj **pSubs; diff --git a/src/client/src/tscPrepare.c b/src/client/src/tscPrepare.c index bbddc4bff9..d0ac0ccf4e 100644 --- a/src/client/src/tscPrepare.c +++ b/src/client/src/tscPrepare.c @@ -1491,7 +1491,6 @@ TAOS_STMT* taos_stmt_init(TAOS* taos) { pSql->signature = pSql; pSql->pTscObj = pObj; pSql->maxRetry = TSDB_MAX_REPLICA; - pSql->isBind = true; pStmt->pSql = pSql; pStmt->last = STMT_INIT; diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c index edc3dbfc82..5be623dc94 100644 --- a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -2879,7 +2879,6 @@ static void tscAllDataRetrievedFromDnode(SRetrieveSupport *trsupport, SSqlObj* p pParentSql->res.precision = pSql->res.precision; pParentSql->res.numOfRows = 0; pParentSql->res.row = 0; - pParentSql->res.numOfGroups = 0; tscFreeRetrieveSup(pSql); diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index fe3e330aa9..d66eb64fb5 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -1347,14 +1347,7 @@ static void tscDestroyResPointerInfo(SSqlRes* pRes) { tfree(pRes->buffer); tfree(pRes->urow); - tfree(pRes->pGroupRec); tfree(pRes->pColumnIndex); - - if (pRes->pArithSup != NULL) { - tfree(pRes->pArithSup->data); - tfree(pRes->pArithSup); - } - tfree(pRes->final); pRes->data = NULL; // pRes->data points to the buffer of pRsp, no need to free 
@@ -3464,13 +3457,9 @@ STableMetaInfo* tscAddTableMetaInfo(SQueryInfo* pQueryInfo, SName* name, STableM } pTableMetaInfo->pTableMeta = pTableMeta; - if (pTableMetaInfo->pTableMeta == NULL) { - pTableMetaInfo->tableMetaSize = 0; - } else { - pTableMetaInfo->tableMetaSize = tscGetTableMetaSize(pTableMeta); - } + pTableMetaInfo->tableMetaSize = (pTableMetaInfo->pTableMeta == NULL)? 0:tscGetTableMetaSize(pTableMeta); + pTableMetaInfo->tableMetaCapacity = (size_t)(pTableMetaInfo->tableMetaSize); - if (vgroupList != NULL) { pTableMetaInfo->vgroupList = tscVgroupInfoClone(vgroupList); @@ -3718,8 +3707,8 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t terrno = TSDB_CODE_TSC_OUT_OF_MEMORY; goto _error; } - pNewQueryInfo->numOfFillVal = pQueryInfo->fieldsInfo.numOfOutput; + pNewQueryInfo->numOfFillVal = pQueryInfo->fieldsInfo.numOfOutput; memcpy(pNewQueryInfo->fillVal, pQueryInfo->fillVal, pQueryInfo->fieldsInfo.numOfOutput * sizeof(int64_t)); } From f86ab16b8798a315136de0034976917589b8688a Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Tue, 7 Sep 2021 14:58:12 +0800 Subject: [PATCH 24/60] [td-255] refactor for experiment purpose. 
--- src/client/inc/tscUtil.h | 6 ++++- src/client/src/tscSQLParser.c | 18 +++++++-------- src/client/src/tscServer.c | 22 ++++++++++++------- src/client/src/tscSql.c | 4 ++-- src/client/src/tscSubquery.c | 37 +++++++++++++++++-------------- src/client/src/tscUtil.c | 41 ++++++++++++++++++++--------------- src/inc/taosmsg.h | 12 +++++----- 7 files changed, 81 insertions(+), 59 deletions(-) diff --git a/src/client/inc/tscUtil.h b/src/client/inc/tscUtil.h index ebd5de1ab3..cf2aadc107 100644 --- a/src/client/inc/tscUtil.h +++ b/src/client/inc/tscUtil.h @@ -92,7 +92,7 @@ typedef struct SMergeTsCtx { }SMergeTsCtx; typedef struct SVgroupTableInfo { - SVgroupInfo vgInfo; + SVgroupMsg vgInfo; SArray *itemList; // SArray } SVgroupTableInfo; @@ -288,7 +288,11 @@ void doExecuteQuery(SSqlObj* pSql, SQueryInfo* pQueryInfo); SVgroupsInfo* tscVgroupInfoClone(SVgroupsInfo *pInfo); void* tscVgroupInfoClear(SVgroupsInfo *pInfo); + +#if 0 void tscSVgroupInfoCopy(SVgroupInfo* dst, const SVgroupInfo* src); +#endif + /** * The create object function must be successful expect for the out of memory issue. 
* diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index 87b6b07652..20d14958c8 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -8685,7 +8685,7 @@ static int32_t doLoadAllTableMeta(SSqlObj* pSql, SQueryInfo* pQueryInfo, SSqlNod if (p->vgroupIdList != NULL) { size_t s = taosArrayGetSize(p->vgroupIdList); - size_t vgroupsz = sizeof(SVgroupInfo) * s + sizeof(SVgroupsInfo); + size_t vgroupsz = sizeof(SVgroupMsg) * s + sizeof(SVgroupsInfo); pTableMetaInfo->vgroupList = calloc(1, vgroupsz); if (pTableMetaInfo->vgroupList == NULL) { return TSDB_CODE_TSC_OUT_OF_MEMORY; @@ -8700,14 +8700,14 @@ static int32_t doLoadAllTableMeta(SSqlObj* pSql, SQueryInfo* pQueryInfo, SSqlNod taosHashGetClone(tscVgroupMap, id, sizeof(*id), NULL, &existVgroupInfo); assert(existVgroupInfo.inUse >= 0); - SVgroupInfo *pVgroup = &pTableMetaInfo->vgroupList->vgroups[j]; - - pVgroup->numOfEps = existVgroupInfo.numOfEps; - pVgroup->vgId = existVgroupInfo.vgId; - for (int32_t k = 0; k < existVgroupInfo.numOfEps; ++k) { - pVgroup->epAddr[k].port = existVgroupInfo.ep[k].port; - pVgroup->epAddr[k].fqdn = strndup(existVgroupInfo.ep[k].fqdn, TSDB_FQDN_LEN); - } + SVgroupMsg *pVgroup = &pTableMetaInfo->vgroupList->vgroups[j]; + memcpy(pVgroup, &existVgroupInfo, sizeof(SVgroupMsg)); +// pVgroup->numOfEps = existVgroupInfo.numOfEps; +// pVgroup->vgId = existVgroupInfo.vgId; +// for (int32_t k = 0; k < existVgroupInfo.numOfEps; ++k) { +// pVgroup->epAddr[k].port = existVgroupInfo.ep[k].port; +// pVgroup->epAddr[k].fqdn = strndup(existVgroupInfo.ep[k].fqdn, TSDB_FQDN_LEN); +// } } } } diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c index 9d523f2730..b713eeb858 100644 --- a/src/client/src/tscServer.c +++ b/src/client/src/tscServer.c @@ -73,7 +73,7 @@ static int32_t removeDupVgid(int32_t *src, int32_t sz) { return ret; } -static void tscSetDnodeEpSet(SRpcEpSet* pEpSet, SVgroupInfo* pVgroupInfo) { +static void 
tscSetDnodeEpSet(SRpcEpSet* pEpSet, SVgroupMsg* pVgroupInfo) { assert(pEpSet != NULL && pVgroupInfo != NULL && pVgroupInfo->numOfEps > 0); // Issue the query to one of the vnode among a vgroup randomly. @@ -93,6 +93,7 @@ static void tscSetDnodeEpSet(SRpcEpSet* pEpSet, SVgroupInfo* pVgroupInfo) { existed = true; } } + assert(existed); } @@ -723,7 +724,7 @@ static char *doSerializeTableInfo(SQueryTableMsg *pQueryMsg, SSqlObj *pSql, STab int32_t index = pTableMetaInfo->vgroupIndex; assert(index >= 0); - SVgroupInfo* pVgroupInfo = NULL; + SVgroupMsg* pVgroupInfo = NULL; if (pTableMetaInfo->vgroupList && pTableMetaInfo->vgroupList->numOfVgroups > 0) { assert(index < pTableMetaInfo->vgroupList->numOfVgroups); pVgroupInfo = &pTableMetaInfo->vgroupList->vgroups[index]; @@ -880,6 +881,10 @@ static int32_t serializeSqlExpr(SSqlExpr* pExpr, STableMetaInfo* pTableMetaInfo, int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) { SSqlCmd *pCmd = &pSql->cmd; + SQueryInfo *pQueryInfo = NULL; + STableMeta *pTableMeta = NULL; + STableMetaInfo *pTableMetaInfo = NULL; + int32_t code = TSDB_CODE_SUCCESS; int32_t size = tscEstimateQueryMsgSize(pSql); @@ -888,9 +893,9 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) { return TSDB_CODE_TSC_INVALID_OPERATION; // todo add test for this } - SQueryInfo *pQueryInfo = tscGetQueryInfo(pCmd); - STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); - STableMeta * pTableMeta = pTableMetaInfo->pTableMeta; + pQueryInfo = tscGetQueryInfo(pCmd); + pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); + pTableMeta = pTableMetaInfo->pTableMeta; SQueryAttr query = {{0}}; tscCreateQueryFromQueryInfo(pQueryInfo, &query, pSql); @@ -2146,7 +2151,7 @@ static SVgroupsInfo* createVgroupInfoFromMsg(char* pMsg, int32_t* size, uint64_t *size = (int32_t)(sizeof(SVgroupMsg) * pVgroupMsg->numOfVgroups + sizeof(SVgroupsMsg)); - size_t vgroupsz = sizeof(SVgroupInfo) * pVgroupMsg->numOfVgroups + sizeof(SVgroupsInfo); + size_t vgroupsz = sizeof(SVgroupMsg) * 
pVgroupMsg->numOfVgroups + sizeof(SVgroupsInfo); SVgroupsInfo *pVgroupInfo = calloc(1, vgroupsz); assert(pVgroupInfo != NULL); @@ -2156,7 +2161,7 @@ static SVgroupsInfo* createVgroupInfoFromMsg(char* pMsg, int32_t* size, uint64_t } else { for (int32_t j = 0; j < pVgroupInfo->numOfVgroups; ++j) { // just init, no need to lock - SVgroupInfo *pVgroup = &pVgroupInfo->vgroups[j]; + SVgroupMsg *pVgroup = &pVgroupInfo->vgroups[j]; SVgroupMsg *vmsg = &pVgroupMsg->vgroups[j]; vmsg->vgId = htonl(vmsg->vgId); @@ -2168,7 +2173,8 @@ static SVgroupsInfo* createVgroupInfoFromMsg(char* pMsg, int32_t* size, uint64_t pVgroup->vgId = vmsg->vgId; for (int32_t k = 0; k < vmsg->numOfEps; ++k) { pVgroup->epAddr[k].port = vmsg->epAddr[k].port; - pVgroup->epAddr[k].fqdn = strndup(vmsg->epAddr[k].fqdn, TSDB_FQDN_LEN); + tstrncpy(pVgroup->epAddr[k].fqdn, vmsg->epAddr[k].fqdn, TSDB_FQDN_LEN); +// pVgroup->epAddr[k].fqdn = strndup(vmsg->epAddr[k].fqdn, TSDB_FQDN_LEN); } doUpdateVgroupInfo(pVgroup->vgId, vmsg); diff --git a/src/client/src/tscSql.c b/src/client/src/tscSql.c index 5fdaad0d66..faa1c2ff41 100644 --- a/src/client/src/tscSql.c +++ b/src/client/src/tscSql.c @@ -588,8 +588,8 @@ static bool tscKillQueryInDnode(SSqlObj* pSql) { pCmd->command = (pCmd->command > TSDB_SQL_MGMT) ? 
TSDB_SQL_RETRIEVE : TSDB_SQL_FETCH; tscDebug("0x%"PRIx64" send msg to dnode to free qhandle ASAP before free sqlObj, command:%s", pSql->self, sqlCmd[pCmd->command]); - tscBuildAndSendRequest(pSql, NULL); - return false; +// tscBuildAndSendRequest(pSql, NULL); +// return false; } return true; diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c index 5be623dc94..1a88270b27 100644 --- a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -623,13 +623,12 @@ static int32_t tscLaunchRealSubqueries(SSqlObj* pSql) { int16_t colId = tscGetJoinTagColIdByUid(&pQueryInfo->tagCond, pTableMetaInfo->pTableMeta->id.uid); // set the tag column id for executor to extract correct tag value -#ifndef _TD_NINGSI_60 - pExpr->base.param[0] = (tVariant) {.i64 = colId, .nType = TSDB_DATA_TYPE_BIGINT, .nLen = sizeof(int64_t)}; -#else - pExpr->base.param[0].i64 = colId; - pExpr->base.param[0].nType = TSDB_DATA_TYPE_BIGINT; - pExpr->base.param[0].nLen = sizeof(int64_t); -#endif + tVariant* pVariant = &pExpr->base.param[0]; + + pVariant->i64 = colId; + pVariant->nType = TSDB_DATA_TYPE_BIGINT; + pVariant->nLen = sizeof(int64_t); + pExpr->base.numOfParams = 1; } @@ -748,10 +747,12 @@ void tscBuildVgroupTableInfo(SSqlObj* pSql, STableMetaInfo* pTableMetaInfo, SArr SVgroupTableInfo info = {{0}}; for (int32_t m = 0; m < pvg->numOfVgroups; ++m) { if (tt->vgId == pvg->vgroups[m].vgId) { - tscSVgroupInfoCopy(&info.vgInfo, &pvg->vgroups[m]); + memcpy(&info.vgInfo, &pvg->vgroups[m], sizeof(info.vgInfo)); +// tscSVgroupInfoCopy(&info.vgInfo, &pvg->vgroups[m]); break; } } + assert(info.vgInfo.numOfEps != 0); vgTables = taosArrayInit(4, sizeof(STableIdInfo)); @@ -2459,7 +2460,7 @@ static void doSendQueryReqs(SSchedMsg* pSchedMsg) { tfree(p); } -static void doConcurrentlySendSubQueries(SSqlObj* pSql) { +static UNUSED_FUNC void doConcurrentlySendSubQueries(SSqlObj* pSql) { SSubqueryState *pState = &pSql->subState; // concurrently sent the query requests. 
@@ -2550,13 +2551,14 @@ int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) { trs->pExtMemBuffer = pMemoryBuf; trs->pOrderDescriptor = pDesc; - trs->localBuffer = (tFilePage *)calloc(1, nBufferSize + sizeof(tFilePage)); + trs->localBuffer = (tFilePage *)malloc(nBufferSize + sizeof(tFilePage)); if (trs->localBuffer == NULL) { tscError("0x%"PRIx64" failed to malloc buffer for local buffer, orderOfSub:%d, reason:%s", pSql->self, i, strerror(errno)); tfree(trs); break; } - + + trs->localBuffer->num = 0; trs->subqueryIndex = i; trs->pParentSql = pSql; @@ -2577,6 +2579,9 @@ int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) { tscDebug("0x%"PRIx64" sub:0x%"PRIx64" create subquery success. orderOfSub:%d", pSql->self, pNew->self, trs->subqueryIndex); + + tfree(trs->localBuffer); + tfree(trs); } if (i < pState->numOfSub) { @@ -2594,7 +2599,8 @@ int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) { return pRes->code; } - doConcurrentlySendSubQueries(pSql); + pSql->fp(pSql->param, pSql, 0); +// doConcurrentlySendSubQueries(pSql); return TSDB_CODE_SUCCESS; } @@ -2651,7 +2657,7 @@ static int32_t tscReissueSubquery(SRetrieveSupport *oriTrs, SSqlObj *pSql, int32 int32_t subqueryIndex = trsupport->subqueryIndex; STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, 0); - SVgroupInfo* pVgroup = &pTableMetaInfo->vgroupList->vgroups[0]; + SVgroupMsg* pVgroup = &pTableMetaInfo->vgroupList->vgroups[0]; tExtMemBufferClear(trsupport->pExtMemBuffer[subqueryIndex]); @@ -2929,7 +2935,7 @@ static void tscRetrieveFromDnodeCallBack(void *param, TAOS_RES *tres, int numOfR SSubqueryState* pState = &pParentSql->subState; STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, 0); - SVgroupInfo *pVgroup = &pTableMetaInfo->vgroupList->vgroups[0]; + SVgroupMsg *pVgroup = &pTableMetaInfo->vgroupList->vgroups[0]; if (pParentSql->res.code != TSDB_CODE_SUCCESS) { trsupport->numOfRetry = MAX_NUM_OF_SUBQUERY_RETRY; @@ -3057,7 +3063,7 @@ void tscRetrieveDataRes(void *param, 
TAOS_RES *tres, int code) { assert(pQueryInfo->numOfTables == 1); STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, 0); - SVgroupInfo* pVgroup = &pTableMetaInfo->vgroupList->vgroups[trsupport->subqueryIndex]; + SVgroupMsg* pVgroup = &pTableMetaInfo->vgroupList->vgroups[trsupport->subqueryIndex]; // stable query killed or other subquery failed, all query stopped if (pParentSql->res.code != TSDB_CODE_SUCCESS) { @@ -3403,7 +3409,6 @@ static void doBuildResFromSubqueries(SSqlObj* pSql) { return; } -// tscRestoreFuncForSTableQuery(pQueryInfo); int32_t rowSize = tscGetResRowLength(pQueryInfo->exprList); assert(numOfRes * rowSize > 0); diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index d66eb64fb5..a83d3e62f5 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -3362,11 +3362,11 @@ void tscFreeVgroupTableInfo(SArray* pVgroupTables) { size_t num = taosArrayGetSize(pVgroupTables); for (size_t i = 0; i < num; i++) { SVgroupTableInfo* pInfo = taosArrayGet(pVgroupTables, i); - +#if 0 for(int32_t j = 0; j < pInfo->vgInfo.numOfEps; ++j) { tfree(pInfo->vgInfo.epAddr[j].fqdn); } - +#endif taosArrayDestroy(pInfo->itemList); } @@ -3380,9 +3380,9 @@ void tscRemoveVgroupTableGroup(SArray* pVgroupTable, int32_t index) { assert(size > index); SVgroupTableInfo* pInfo = taosArrayGet(pVgroupTable, index); - for(int32_t j = 0; j < pInfo->vgInfo.numOfEps; ++j) { - tfree(pInfo->vgInfo.epAddr[j].fqdn); - } +// for(int32_t j = 0; j < pInfo->vgInfo.numOfEps; ++j) { +// tfree(pInfo->vgInfo.epAddr[j].fqdn); +// } taosArrayDestroy(pInfo->itemList); taosArrayRemove(pVgroupTable, index); @@ -3392,9 +3392,12 @@ void tscVgroupTableCopy(SVgroupTableInfo* info, SVgroupTableInfo* pInfo) { memset(info, 0, sizeof(SVgroupTableInfo)); info->vgInfo = pInfo->vgInfo; + +#if 0 for(int32_t j = 0; j < pInfo->vgInfo.numOfEps; ++j) { info->vgInfo.epAddr[j].fqdn = strdup(pInfo->vgInfo.epAddr[j].fqdn); } +#endif if (pInfo->itemList) { info->itemList = 
taosArrayDup(pInfo->itemList); @@ -3615,7 +3618,6 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t pNew->pTscObj = pSql->pTscObj; pNew->signature = pNew; - pNew->sqlstr = strdup(pSql->sqlstr); tsem_init(&pNew->rspSem, 0, 0); SSqlCmd* pnCmd = &pNew->cmd; @@ -3749,7 +3751,6 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t pFinalInfo = tscAddTableMetaInfo(pNewQueryInfo, &pTableMetaInfo->name, pTableMeta, pTableMetaInfo->vgroupList, pTableMetaInfo->tagColList, pTableMetaInfo->pVgroupTables); - } else { // transfer the ownership of pTableMeta to the newly create sql object. STableMetaInfo* pPrevInfo = tscGetTableMetaInfoFromCmd(&pPrevSql->cmd, 0); if (pPrevInfo->pTableMeta && pPrevInfo->pTableMeta->tableType < 0) { @@ -3759,8 +3760,8 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t STableMeta* pPrevTableMeta = tscTableMetaDup(pPrevInfo->pTableMeta); SVgroupsInfo* pVgroupsInfo = pPrevInfo->vgroupList; - pFinalInfo = tscAddTableMetaInfo(pNewQueryInfo, &pTableMetaInfo->name, pPrevTableMeta, pVgroupsInfo, pTableMetaInfo->tagColList, - pTableMetaInfo->pVgroupTables); + pFinalInfo = tscAddTableMetaInfo(pNewQueryInfo, &pTableMetaInfo->name, pPrevTableMeta, pVgroupsInfo, + pTableMetaInfo->tagColList, pTableMetaInfo->pVgroupTables); } // this case cannot be happened @@ -4404,7 +4405,7 @@ SVgroupsInfo* tscVgroupInfoClone(SVgroupsInfo *vgroupList) { return NULL; } - size_t size = sizeof(SVgroupsInfo) + sizeof(SVgroupInfo) * vgroupList->numOfVgroups; + size_t size = sizeof(SVgroupsInfo) + sizeof(SVgroupMsg) * vgroupList->numOfVgroups; SVgroupsInfo* pNew = calloc(1, size); if (pNew == NULL) { return NULL; @@ -4413,15 +4414,16 @@ SVgroupsInfo* tscVgroupInfoClone(SVgroupsInfo *vgroupList) { pNew->numOfVgroups = vgroupList->numOfVgroups; for(int32_t i = 0; i < vgroupList->numOfVgroups; ++i) { - SVgroupInfo* pNewVInfo = &pNew->vgroups[i]; + SVgroupMsg* pNewVInfo = &pNew->vgroups[i]; - 
SVgroupInfo* pvInfo = &vgroupList->vgroups[i]; + SVgroupMsg* pvInfo = &vgroupList->vgroups[i]; pNewVInfo->vgId = pvInfo->vgId; pNewVInfo->numOfEps = pvInfo->numOfEps; for(int32_t j = 0; j < pvInfo->numOfEps; ++j) { - pNewVInfo->epAddr[j].fqdn = strdup(pvInfo->epAddr[j].fqdn); +// pNewVInfo->epAddr[j].fqdn = strdup(pvInfo->epAddr[j].fqdn); pNewVInfo->epAddr[j].port = pvInfo->epAddr[j].port; + tstrncpy(pNewVInfo->epAddr[j].fqdn, pvInfo->epAddr[j].fqdn, TSDB_FQDN_LEN); } } @@ -4433,8 +4435,9 @@ void* tscVgroupInfoClear(SVgroupsInfo *vgroupList) { return NULL; } +#if 0 for(int32_t i = 0; i < vgroupList->numOfVgroups; ++i) { - SVgroupInfo* pVgroupInfo = &vgroupList->vgroups[i]; + SVgroupMsg* pVgroupInfo = &vgroupList->vgroups[i]; for(int32_t j = 0; j < pVgroupInfo->numOfEps; ++j) { tfree(pVgroupInfo->epAddr[j].fqdn); @@ -4445,10 +4448,11 @@ void* tscVgroupInfoClear(SVgroupsInfo *vgroupList) { } } +#endif tfree(vgroupList); return NULL; } - +# if 0 void tscSVgroupInfoCopy(SVgroupInfo* dst, const SVgroupInfo* src) { dst->vgId = src->vgId; dst->numOfEps = src->numOfEps; @@ -4461,6 +4465,8 @@ void tscSVgroupInfoCopy(SVgroupInfo* dst, const SVgroupInfo* src) { } } +#endif + char* serializeTagData(STagData* pTagData, char* pMsg) { int32_t n = (int32_t) strlen(pTagData->name); *(int32_t*) pMsg = htonl(n); @@ -4601,11 +4607,12 @@ STableMeta* tscTableMetaDup(STableMeta* pTableMeta) { SVgroupsInfo* tscVgroupsInfoDup(SVgroupsInfo* pVgroupsInfo) { assert(pVgroupsInfo != NULL); - size_t size = sizeof(SVgroupInfo) * pVgroupsInfo->numOfVgroups + sizeof(SVgroupsInfo); + size_t size = sizeof(SVgroupMsg) * pVgroupsInfo->numOfVgroups + sizeof(SVgroupsInfo); SVgroupsInfo* pInfo = calloc(1, size); pInfo->numOfVgroups = pVgroupsInfo->numOfVgroups; for (int32_t m = 0; m < pVgroupsInfo->numOfVgroups; ++m) { - tscSVgroupInfoCopy(&pInfo->vgroups[m], &pVgroupsInfo->vgroups[m]); + memcpy(&pInfo->vgroups[m], &pVgroupsInfo->vgroups[m], sizeof(SVgroupMsg)); +// tscSVgroupInfoCopy(&pInfo->vgroups[m], 
&pVgroupsInfo->vgroups[m]); } return pInfo; } diff --git a/src/inc/taosmsg.h b/src/inc/taosmsg.h index 8f5269c158..616ee1d972 100644 --- a/src/inc/taosmsg.h +++ b/src/inc/taosmsg.h @@ -766,11 +766,11 @@ typedef struct SSTableVgroupMsg { int32_t numOfTables; } SSTableVgroupMsg, SSTableVgroupRspMsg; -typedef struct { - int32_t vgId; - int8_t numOfEps; - SEpAddr1 epAddr[TSDB_MAX_REPLICA]; -} SVgroupInfo; +//typedef struct { +// int32_t vgId; +// int8_t numOfEps; +// SEpAddr1 epAddr[TSDB_MAX_REPLICA]; +//} SVgroupInfo; typedef struct { int32_t vgId; @@ -780,7 +780,7 @@ typedef struct { typedef struct { int32_t numOfVgroups; - SVgroupInfo vgroups[]; + SVgroupMsg vgroups[]; } SVgroupsInfo; typedef struct { From 626cd4cc58810249bb65d53209543bd4bff537fb Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Tue, 7 Sep 2021 17:06:07 +0800 Subject: [PATCH 25/60] [td-255] merge develop. --- src/client/src/tscUtil.c | 1 + src/util/src/tarray.c | 3 ++- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index a83d3e62f5..75b7aecb21 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -3618,6 +3618,7 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t pNew->pTscObj = pSql->pTscObj; pNew->signature = pNew; + pNew->sqlstr = strdup(pSql->sqlstr); tsem_init(&pNew->rspSem, 0, 0); SSqlCmd* pnCmd = &pNew->cmd; diff --git a/src/util/src/tarray.c b/src/util/src/tarray.c index 2d6c513cb5..007ce06829 100644 --- a/src/util/src/tarray.c +++ b/src/util/src/tarray.c @@ -24,11 +24,12 @@ void* taosArrayInit(size_t size, size_t elemSize) { size = TARRAY_MIN_SIZE; } - SArray* pArray = calloc(1, sizeof(SArray)); + SArray* pArray = malloc(sizeof(SArray)); if (pArray == NULL) { return NULL; } + pArray->size = 0; pArray->pData = calloc(size, elemSize); if (pArray->pData == NULL) { free(pArray); From 258c221a44a2dd7db74a57362d58c483a40f55e0 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Tue, 7 Sep 
2021 17:11:00 +0800 Subject: [PATCH 26/60] [td-255] refactor code. --- src/client/src/tscSQLParser.c | 11 ++++------- src/client/src/tscSql.c | 4 ++-- src/client/src/tscSubquery.c | 7 +------ src/inc/taosmsg.h | 6 ------ 4 files changed, 7 insertions(+), 21 deletions(-) diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index 20d14958c8..e4728a410a 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -8701,13 +8701,10 @@ static int32_t doLoadAllTableMeta(SSqlObj* pSql, SQueryInfo* pQueryInfo, SSqlNod assert(existVgroupInfo.inUse >= 0); SVgroupMsg *pVgroup = &pTableMetaInfo->vgroupList->vgroups[j]; - memcpy(pVgroup, &existVgroupInfo, sizeof(SVgroupMsg)); -// pVgroup->numOfEps = existVgroupInfo.numOfEps; -// pVgroup->vgId = existVgroupInfo.vgId; -// for (int32_t k = 0; k < existVgroupInfo.numOfEps; ++k) { -// pVgroup->epAddr[k].port = existVgroupInfo.ep[k].port; -// pVgroup->epAddr[k].fqdn = strndup(existVgroupInfo.ep[k].fqdn, TSDB_FQDN_LEN); -// } + + pVgroup->numOfEps = existVgroupInfo.numOfEps; + pVgroup->vgId = existVgroupInfo.vgId; + memcpy(&pVgroup->epAddr, &existVgroupInfo.ep, sizeof(pVgroup->epAddr)); } } } diff --git a/src/client/src/tscSql.c b/src/client/src/tscSql.c index faa1c2ff41..5fdaad0d66 100644 --- a/src/client/src/tscSql.c +++ b/src/client/src/tscSql.c @@ -588,8 +588,8 @@ static bool tscKillQueryInDnode(SSqlObj* pSql) { pCmd->command = (pCmd->command > TSDB_SQL_MGMT) ? 
TSDB_SQL_RETRIEVE : TSDB_SQL_FETCH; tscDebug("0x%"PRIx64" send msg to dnode to free qhandle ASAP before free sqlObj, command:%s", pSql->self, sqlCmd[pCmd->command]); -// tscBuildAndSendRequest(pSql, NULL); -// return false; + tscBuildAndSendRequest(pSql, NULL); + return false; } return true; diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c index 1a88270b27..8a52bc776d 100644 --- a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -748,7 +748,6 @@ void tscBuildVgroupTableInfo(SSqlObj* pSql, STableMetaInfo* pTableMetaInfo, SArr for (int32_t m = 0; m < pvg->numOfVgroups; ++m) { if (tt->vgId == pvg->vgroups[m].vgId) { memcpy(&info.vgInfo, &pvg->vgroups[m], sizeof(info.vgInfo)); -// tscSVgroupInfoCopy(&info.vgInfo, &pvg->vgroups[m]); break; } } @@ -2579,9 +2578,6 @@ int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) { tscDebug("0x%"PRIx64" sub:0x%"PRIx64" create subquery success. orderOfSub:%d", pSql->self, pNew->self, trs->subqueryIndex); - - tfree(trs->localBuffer); - tfree(trs); } if (i < pState->numOfSub) { @@ -2599,8 +2595,7 @@ int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) { return pRes->code; } - pSql->fp(pSql->param, pSql, 0); -// doConcurrentlySendSubQueries(pSql); + doConcurrentlySendSubQueries(pSql); return TSDB_CODE_SUCCESS; } diff --git a/src/inc/taosmsg.h b/src/inc/taosmsg.h index 616ee1d972..3c1d89134c 100644 --- a/src/inc/taosmsg.h +++ b/src/inc/taosmsg.h @@ -766,12 +766,6 @@ typedef struct SSTableVgroupMsg { int32_t numOfTables; } SSTableVgroupMsg, SSTableVgroupRspMsg; -//typedef struct { -// int32_t vgId; -// int8_t numOfEps; -// SEpAddr1 epAddr[TSDB_MAX_REPLICA]; -//} SVgroupInfo; - typedef struct { int32_t vgId; int8_t numOfEps; From c0aff8098d64cb807682a8ce450d1d9ca87aee2a Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Tue, 7 Sep 2021 17:16:56 +0800 Subject: [PATCH 27/60] [td-255]refactor. 
--- src/client/src/tscUtil.c | 3 +-- src/inc/taosmsg.h | 7 +------ 2 files changed, 2 insertions(+), 8 deletions(-) diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index 75b7aecb21..a9b74ecb86 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -4407,7 +4407,7 @@ SVgroupsInfo* tscVgroupInfoClone(SVgroupsInfo *vgroupList) { } size_t size = sizeof(SVgroupsInfo) + sizeof(SVgroupMsg) * vgroupList->numOfVgroups; - SVgroupsInfo* pNew = calloc(1, size); + SVgroupsInfo* pNew = malloc(size); if (pNew == NULL) { return NULL; } @@ -4422,7 +4422,6 @@ SVgroupsInfo* tscVgroupInfoClone(SVgroupsInfo *vgroupList) { pNewVInfo->numOfEps = pvInfo->numOfEps; for(int32_t j = 0; j < pvInfo->numOfEps; ++j) { -// pNewVInfo->epAddr[j].fqdn = strdup(pvInfo->epAddr[j].fqdn); pNewVInfo->epAddr[j].port = pvInfo->epAddr[j].port; tstrncpy(pNewVInfo->epAddr[j].fqdn, pvInfo->epAddr[j].fqdn, TSDB_FQDN_LEN); } diff --git a/src/inc/taosmsg.h b/src/inc/taosmsg.h index 3c1d89134c..bb93c52142 100644 --- a/src/inc/taosmsg.h +++ b/src/inc/taosmsg.h @@ -775,12 +775,7 @@ typedef struct { typedef struct { int32_t numOfVgroups; SVgroupMsg vgroups[]; -} SVgroupsInfo; - -typedef struct { - int32_t numOfVgroups; - SVgroupMsg vgroups[]; -} SVgroupsMsg; +} SVgroupsMsg, SVgroupsInfo; typedef struct STableMetaMsg { int32_t contLen; From cc68a1414c47caf3c9bd7b77f5e4bf2c726c5df2 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Tue, 7 Sep 2021 17:52:30 +0800 Subject: [PATCH 28/60] [td-255] update the threshold of concurrent launch query to be 4. 
--- src/client/src/tscSubquery.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c index 8a52bc776d..275042a238 100644 --- a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -2459,11 +2459,11 @@ static void doSendQueryReqs(SSchedMsg* pSchedMsg) { tfree(p); } -static UNUSED_FUNC void doConcurrentlySendSubQueries(SSqlObj* pSql) { +static void doConcurrentlySendSubQueries(SSqlObj* pSql) { SSubqueryState *pState = &pSql->subState; // concurrently sent the query requests. - const int32_t MAX_REQUEST_PER_TASK = 8; + const int32_t MAX_REQUEST_PER_TASK = 4; int32_t numOfTasks = (pState->numOfSub + MAX_REQUEST_PER_TASK - 1)/MAX_REQUEST_PER_TASK; assert(numOfTasks >= 1); From 267e6e0ba6d292398eddfd09912250f2184bfcaa Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Wed, 8 Sep 2021 14:03:00 +0800 Subject: [PATCH 29/60] [td-255] avoid memset the allocated memory to improve the query performance. --- src/client/inc/tscUtil.h | 4 +++- src/client/src/tscServer.c | 3 ++- src/client/src/tscUtil.c | 30 ++++++++++++++++-------------- 3 files changed, 21 insertions(+), 16 deletions(-) diff --git a/src/client/inc/tscUtil.h b/src/client/inc/tscUtil.h index cf2aadc107..c858bd5867 100644 --- a/src/client/inc/tscUtil.h +++ b/src/client/inc/tscUtil.h @@ -174,7 +174,9 @@ void tscClearInterpInfo(SQueryInfo* pQueryInfo); bool tscIsInsertData(char* sqlstr); -int tscAllocPayload(SSqlCmd* pCmd, int size); +// the memory is not reset in case of fast allocate payload function +int32_t tscAllocPayloadFast(SSqlCmd *pCmd, size_t size); +int32_t tscAllocPayload(SSqlCmd* pCmd, int size); TAOS_FIELD tscCreateField(int8_t type, const char* name, int16_t bytes); diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c index b713eeb858..6133bc4a9c 100644 --- a/src/client/src/tscServer.c +++ b/src/client/src/tscServer.c @@ -887,8 +887,9 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) { int32_t 
code = TSDB_CODE_SUCCESS; int32_t size = tscEstimateQueryMsgSize(pSql); + assert(size > 0); - if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, size)) { + if (TSDB_CODE_SUCCESS != tscAllocPayloadFast(pCmd, size)) { tscError("%p failed to malloc for query msg", pSql); return TSDB_CODE_TSC_INVALID_OPERATION; // todo add test for this } diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index a9b74ecb86..b68bf6d83a 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -2080,32 +2080,34 @@ bool tscIsInsertData(char* sqlstr) { } while (1); } -int tscAllocPayload(SSqlCmd* pCmd, int size) { +int32_t tscAllocPayloadFast(SSqlCmd *pCmd, size_t size) { if (pCmd->payload == NULL) { assert(pCmd->allocSize == 0); - pCmd->payload = (char*)calloc(1, size); - if (pCmd->payload == NULL) { + pCmd->payload = malloc(size); + } else if (pCmd->allocSize < size) { + char* tmp = realloc(pCmd->payload, size); + if (tmp == NULL) { return TSDB_CODE_TSC_OUT_OF_MEMORY; } + pCmd->payload = tmp; pCmd->allocSize = size; - } else { - if (pCmd->allocSize < (uint32_t)size) { - char* b = realloc(pCmd->payload, size); - if (b == NULL) { - return TSDB_CODE_TSC_OUT_OF_MEMORY; - } + } - pCmd->payload = b; - pCmd->allocSize = size; - } + assert(pCmd->allocSize >= size); + return TSDB_CODE_SUCCESS; +} +int32_t tscAllocPayload(SSqlCmd* pCmd, int size) { + assert(size > 0); + + int32_t code = tscAllocPayloadFast(pCmd, (size_t) size); + if (code == TSDB_CODE_SUCCESS) { memset(pCmd->payload, 0, pCmd->allocSize); } - assert(pCmd->allocSize >= (uint32_t)size && size > 0); - return TSDB_CODE_SUCCESS; + return code; } TAOS_FIELD tscCreateField(int8_t type, const char* name, int16_t bytes) { From f10ef79e5ef0d754e0c821bf223494cf9384ac19 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Wed, 8 Sep 2021 14:06:17 +0800 Subject: [PATCH 30/60] [td-255] fix a typo. 
--- src/client/src/tscUtil.c | 1 + 1 file changed, 1 insertion(+) diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index b68bf6d83a..6a75ff5f09 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -2085,6 +2085,7 @@ int32_t tscAllocPayloadFast(SSqlCmd *pCmd, size_t size) { assert(pCmd->allocSize == 0); pCmd->payload = malloc(size); + pCmd->allocSize = size; } else if (pCmd->allocSize < size) { char* tmp = realloc(pCmd->payload, size); if (tmp == NULL) { From da545a9974c3f0891892c69f392a7dfb1ce81913 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Wed, 8 Sep 2021 17:20:03 +0800 Subject: [PATCH 31/60] [td-6563] refactor the code to improve the client-side performance. --- src/client/src/tscServer.c | 8 +++++--- src/query/src/qExecutor.c | 8 ++++---- 2 files changed, 9 insertions(+), 7 deletions(-) diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c index 6133bc4a9c..788574a837 100644 --- a/src/client/src/tscServer.c +++ b/src/client/src/tscServer.c @@ -862,8 +862,8 @@ static int32_t serializeSqlExpr(SSqlExpr* pExpr, STableMetaInfo* pTableMetaInfo, (*pMsg) += sizeof(SSqlExpr); for (int32_t j = 0; j < pExpr->numOfParams; ++j) { // todo add log - pSqlExpr->param[j].nType = htons((uint16_t)pExpr->param[j].nType); - pSqlExpr->param[j].nLen = htons(pExpr->param[j].nLen); + pSqlExpr->param[j].nType = htonl(pExpr->param[j].nType); + pSqlExpr->param[j].nLen = htonl(pExpr->param[j].nLen); if (pExpr->param[j].nType == TSDB_DATA_TYPE_BINARY) { memcpy((*pMsg), pExpr->param[j].pz, pExpr->param[j].nLen); @@ -954,7 +954,7 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) { pQueryMsg->sw.primaryColId = htonl(PRIMARYKEY_TIMESTAMP_COL_INDEX); pQueryMsg->secondStageOutput = htonl(query.numOfExpr2); - pQueryMsg->numOfOutput = htons((int16_t)query.numOfOutput); // this is the stage one output column number + pQueryMsg->numOfOutput = htons((int16_t)query.numOfOutput); // this is the stage one output column number 
pQueryMsg->numOfGroupCols = htons(pQueryInfo->groupbyExpr.numOfGroupCols); pQueryMsg->tagNameRelType = htons(pQueryInfo->tagCond.relType); @@ -987,6 +987,8 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) { pMsg += pCond->len; } + } else { + pQueryMsg->colCondLen = 0; } for (int32_t i = 0; i < query.numOfOutput; ++i) { diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index 8fefed51c8..727837012b 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -7600,8 +7600,8 @@ int32_t convertQueryMsg(SQueryTableMsg *pQueryMsg, SQueryParam* param) { pMsg += sizeof(SSqlExpr); for (int32_t j = 0; j < pExprMsg->numOfParams; ++j) { - pExprMsg->param[j].nType = htons(pExprMsg->param[j].nType); - pExprMsg->param[j].nLen = htons(pExprMsg->param[j].nLen); + pExprMsg->param[j].nType = htonl(pExprMsg->param[j].nType); + pExprMsg->param[j].nLen = htonl(pExprMsg->param[j].nLen); if (pExprMsg->param[j].nType == TSDB_DATA_TYPE_BINARY) { pExprMsg->param[j].pz = pMsg; @@ -7648,8 +7648,8 @@ int32_t convertQueryMsg(SQueryTableMsg *pQueryMsg, SQueryParam* param) { pMsg += sizeof(SSqlExpr); for (int32_t j = 0; j < pExprMsg->numOfParams; ++j) { - pExprMsg->param[j].nType = htons(pExprMsg->param[j].nType); - pExprMsg->param[j].nLen = htons(pExprMsg->param[j].nLen); + pExprMsg->param[j].nType = htonl(pExprMsg->param[j].nType); + pExprMsg->param[j].nLen = htonl(pExprMsg->param[j].nLen); if (pExprMsg->param[j].nType == TSDB_DATA_TYPE_BINARY) { pExprMsg->param[j].pz = pMsg; From f14fd21aa560b7780efbc1c16920d2ee52544f71 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Wed, 8 Sep 2021 19:31:42 +0800 Subject: [PATCH 32/60] [td-6563]remove duplicated code. 
--- src/tsdb/src/tsdbRead.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/tsdb/src/tsdbRead.c b/src/tsdb/src/tsdbRead.c index c1b935e0ee..747b22a7a8 100644 --- a/src/tsdb/src/tsdbRead.c +++ b/src/tsdb/src/tsdbRead.c @@ -288,8 +288,6 @@ static SArray* createCheckInfoFromTableGroup(STsdbQueryHandle* pQueryHandle, STa STableKeyInfo* pKeyInfo = (STableKeyInfo*) taosArrayGet(group, j); STableCheckInfo info = { .lastKey = pKeyInfo->lastKey, .pTableObj = pKeyInfo->pTable }; - info.tableId = ((STable*)(pKeyInfo->pTable))->tableId; - assert(info.pTableObj != NULL && (info.pTableObj->type == TSDB_NORMAL_TABLE || info.pTableObj->type == TSDB_CHILD_TABLE || info.pTableObj->type == TSDB_STREAM_TABLE)); From 7ed1aa261822f00cc0d0c98ef99b94c5d88c1cbb Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Wed, 8 Sep 2021 19:35:29 +0800 Subject: [PATCH 33/60] [td-6563] --- src/tsdb/src/tsdbRead.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/tsdb/src/tsdbRead.c b/src/tsdb/src/tsdbRead.c index 747b22a7a8..5a91d8f790 100644 --- a/src/tsdb/src/tsdbRead.c +++ b/src/tsdb/src/tsdbRead.c @@ -2216,7 +2216,7 @@ static int32_t createDataBlocksInfo(STsdbQueryHandle* pQueryHandle, int32_t numO SBlock* pBlock = pTableCheck->pCompInfo->blocks; sup.numOfBlocksPerTable[numOfQualTables] = pTableCheck->numOfBlocks; - char* buf = calloc(1, sizeof(STableBlockInfo) * pTableCheck->numOfBlocks); + char* buf = malloc(sizeof(STableBlockInfo) * pTableCheck->numOfBlocks); if (buf == NULL) { cleanBlockOrderSupporter(&sup, numOfQualTables); return TSDB_CODE_TDB_OUT_OF_MEMORY; From a4260aeb32b2a64f2ac305e4555d9512fa17a2e1 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 9 Sep 2021 10:31:34 +0800 Subject: [PATCH 34/60] [td-6563] --- src/tsdb/inc/tsdbMeta.h | 2 +- src/tsdb/src/tsdbRead.c | 2 -- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/src/tsdb/inc/tsdbMeta.h b/src/tsdb/inc/tsdbMeta.h index 51801c843c..8ce5e7ade8 100644 --- a/src/tsdb/inc/tsdbMeta.h +++ 
b/src/tsdb/inc/tsdbMeta.h @@ -100,7 +100,7 @@ static FORCE_INLINE int tsdbCompareSchemaVersion(const void *key1, const void *k } static FORCE_INLINE STSchema* tsdbGetTableSchemaImpl(STable* pTable, bool lock, bool copy, int16_t _version) { - STable* pDTable = (TABLE_TYPE(pTable) == TSDB_CHILD_TABLE) ? pTable->pSuper : pTable; + STable* pDTable = (pTable->pSuper != NULL) ? pTable->pSuper : pTable; // for performance purpose STSchema* pSchema = NULL; STSchema* pTSchema = NULL; diff --git a/src/tsdb/src/tsdbRead.c b/src/tsdb/src/tsdbRead.c index 5a91d8f790..14c5a04ece 100644 --- a/src/tsdb/src/tsdbRead.c +++ b/src/tsdb/src/tsdbRead.c @@ -3616,8 +3616,6 @@ SArray* createTableGroup(SArray* pTableList, STSchema* pTagSchema, SColIndex* pC for(int32_t i = 0; i < size; ++i) { STableKeyInfo *pKeyInfo = taosArrayGet(pTableList, i); - assert(((STable*)pKeyInfo->pTable)->type == TSDB_CHILD_TABLE); - tsdbRefTable(pKeyInfo->pTable); STableKeyInfo info = {.pTable = pKeyInfo->pTable, .lastKey = skey}; From 616f32e018a7ca40b9617177f7ef7cb74be456a9 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 9 Sep 2021 15:36:17 +0800 Subject: [PATCH 35/60] [td-6563] remove the calloc to improve the query performance. 
--- src/util/src/hash.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/src/util/src/hash.c b/src/util/src/hash.c index a22ce34a0e..6577a0a0f4 100644 --- a/src/util/src/hash.c +++ b/src/util/src/hash.c @@ -741,17 +741,19 @@ void taosHashTableResize(SHashObj *pHashObj) { } SHashNode *doCreateHashNode(const void *key, size_t keyLen, const void *pData, size_t dsize, uint32_t hashVal) { - SHashNode *pNewNode = calloc(1, sizeof(SHashNode) + keyLen + dsize); + SHashNode *pNewNode = malloc(sizeof(SHashNode) + keyLen + dsize); if (pNewNode == NULL) { uError("failed to allocate memory, reason:%s", strerror(errno)); return NULL; } - pNewNode->keyLen = (uint32_t)keyLen; + pNewNode->keyLen = (uint32_t)keyLen; pNewNode->hashVal = hashVal; pNewNode->dataLen = (uint32_t) dsize; - pNewNode->count = 1; + pNewNode->count = 1; + pNewNode->removed = 0; + pNewNode->next = NULL; memcpy(GET_HASH_NODE_DATA(pNewNode), pData, dsize); memcpy(GET_HASH_NODE_KEY(pNewNode), key, keyLen); From ad0b62328cfa15773dc6a2684024135dba316f82 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 9 Sep 2021 15:45:32 +0800 Subject: [PATCH 36/60] [td-6563] --- src/util/inc/tlosertree.h | 5 ++--- src/util/src/tlosertree.c | 5 +++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/src/util/inc/tlosertree.h b/src/util/inc/tlosertree.h index 4c731625dd..58f2ca8c5c 100644 --- a/src/util/inc/tlosertree.h +++ b/src/util/inc/tlosertree.h @@ -26,7 +26,7 @@ typedef int (*__merge_compare_fn_t)(const void *, const void *, void *param); typedef struct SLoserTreeNode { int32_t index; - void * pData; + void *pData; } SLoserTreeNode; typedef struct SLoserTreeInfo { @@ -34,8 +34,7 @@ typedef struct SLoserTreeInfo { int32_t totalEntries; __merge_compare_fn_t comparFn; void * param; - - SLoserTreeNode *pNode; + SLoserTreeNode *pNode; } SLoserTreeInfo; uint32_t tLoserTreeCreate(SLoserTreeInfo **pTree, int32_t numOfEntries, void *param, __merge_compare_fn_t compareFn); diff --git 
a/src/util/src/tlosertree.c b/src/util/src/tlosertree.c index e793548407..0f104c4b63 100644 --- a/src/util/src/tlosertree.c +++ b/src/util/src/tlosertree.c @@ -90,12 +90,13 @@ void tLoserTreeAdjust(SLoserTreeInfo* pTree, int32_t idx) { SLoserTreeNode kLeaf = pTree->pNode[idx]; while (parentId > 0) { - if (pTree->pNode[parentId].index == -1) { + SLoserTreeNode* pCur = &pTree->pNode[parentId]; + if (pCur->index == -1) { pTree->pNode[parentId] = kLeaf; return; } - int32_t ret = pTree->comparFn(&pTree->pNode[parentId], &kLeaf, pTree->param); + int32_t ret = pTree->comparFn(pCur, &kLeaf, pTree->param); if (ret < 0) { SLoserTreeNode t = pTree->pNode[parentId]; pTree->pNode[parentId] = kLeaf; From 671c99683b0282ff88febeb96581420f80852409 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 9 Sep 2021 15:48:38 +0800 Subject: [PATCH 37/60] [td-6563] --- src/query/src/qExecutor.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index 727837012b..ddc14f4cef 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -2107,7 +2107,7 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int32_t numOf pRuntimeEnv->pQueryAttr = pQueryAttr; pRuntimeEnv->pResultRowHashTable = taosHashInit(numOfTables, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK); - pRuntimeEnv->pResultRowListSet = taosHashInit(numOfTables, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK); + pRuntimeEnv->pResultRowListSet = taosHashInit(numOfTables * 10, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK); pRuntimeEnv->keyBuf = malloc(pQueryAttr->maxTableColumnWidth + sizeof(int64_t) + POINTER_BYTES); pRuntimeEnv->pool = initResultRowPool(getResultRowSize(pRuntimeEnv)); From 555208eb06852806fd5813209789d872b9397ac2 Mon Sep 17 00:00:00 2001 From: wenzhouwww Date: Thu, 9 Sep 2021 16:59:43 +0800 Subject: [PATCH 38/60] [TD-5661]: let all connector test 
case for nano seconds running --- Jenkinsfile | 7 + .../C#Test/nanosupport/TDengineDriver.cs | 170 +++++ .../C#Test/nanosupport/nanotest.cs | 502 +++++++++++++++ .../nodejsTest/nanosupport/nanosecondTest.js | 290 +++++++++ .../nodejsTest/nodetaos/cinterface.js | 587 ++++++++++++++++++ .../nodejsTest/nodetaos/connection.js | 84 +++ .../nodejsTest/nodetaos/constants.js | 76 +++ .../nodejsTest/nodetaos/cursor.js | 476 ++++++++++++++ .../nodejsTest/nodetaos/error.js | 96 +++ .../nodejsTest/nodetaos/globalfunc.js | 14 + .../nodejsTest/nodetaos/taosobjects.js | 152 +++++ .../nodejsTest/nodetaos/taosquery.js | 112 ++++ .../nodejsTest/nodetaos/taosresult.js | 85 +++ tests/connectorTest/nodejsTest/readme.md | 161 +++++ tests/connectorTest/nodejsTest/tdengine.js | 4 + .../nodejsTest/test/nanosecondTest.js | 351 +++++++++++ .../nodejsTest/test/performance.js | 89 +++ tests/connectorTest/nodejsTest/test/test.js | 170 +++++ .../nodejsTest/test/testMicroseconds.js | 49 ++ .../nodejsTest/test/testNanoseconds.js | 49 ++ .../nodejsTest/test/testSubscribe.js | 16 + .../odbcTest/nanosupport/nanoTest_odbc.py | 111 ++++ .../odbcTest/nanosupport/odbc.go | 84 +++ .../odbcTest/nanosupport/odbc.py | 115 ++++ tests/gotest/batchtest.bat | 8 + tests/gotest/batchtest.sh | 1 + tests/gotest/case001/case001.go | 1 - tests/gotest/case001/case001.sh | 5 +- tests/gotest/case002/case002.bat | 2 +- tests/gotest/case002/case002.go | 5 +- tests/gotest/case002/case002.sh | 2 +- .../gotest/nanosupport/connector/executor.go | 208 +++++++ tests/gotest/nanosupport/nanoCase.bat | 9 + tests/gotest/nanosupport/nanoCase.sh | 22 + tests/gotest/nanosupport/nanosupport.go | 269 ++++++++ 35 files changed, 4373 insertions(+), 9 deletions(-) create mode 100644 tests/connectorTest/C#Test/nanosupport/TDengineDriver.cs create mode 100644 tests/connectorTest/C#Test/nanosupport/nanotest.cs create mode 100644 tests/connectorTest/nodejsTest/nanosupport/nanosecondTest.js create mode 100644 
tests/connectorTest/nodejsTest/nodetaos/cinterface.js create mode 100644 tests/connectorTest/nodejsTest/nodetaos/connection.js create mode 100644 tests/connectorTest/nodejsTest/nodetaos/constants.js create mode 100644 tests/connectorTest/nodejsTest/nodetaos/cursor.js create mode 100644 tests/connectorTest/nodejsTest/nodetaos/error.js create mode 100644 tests/connectorTest/nodejsTest/nodetaos/globalfunc.js create mode 100644 tests/connectorTest/nodejsTest/nodetaos/taosobjects.js create mode 100644 tests/connectorTest/nodejsTest/nodetaos/taosquery.js create mode 100644 tests/connectorTest/nodejsTest/nodetaos/taosresult.js create mode 100644 tests/connectorTest/nodejsTest/readme.md create mode 100644 tests/connectorTest/nodejsTest/tdengine.js create mode 100644 tests/connectorTest/nodejsTest/test/nanosecondTest.js create mode 100644 tests/connectorTest/nodejsTest/test/performance.js create mode 100644 tests/connectorTest/nodejsTest/test/test.js create mode 100644 tests/connectorTest/nodejsTest/test/testMicroseconds.js create mode 100644 tests/connectorTest/nodejsTest/test/testNanoseconds.js create mode 100644 tests/connectorTest/nodejsTest/test/testSubscribe.js create mode 100644 tests/connectorTest/odbcTest/nanosupport/nanoTest_odbc.py create mode 100644 tests/connectorTest/odbcTest/nanosupport/odbc.go create mode 100644 tests/connectorTest/odbcTest/nanosupport/odbc.py create mode 100644 tests/gotest/nanosupport/connector/executor.go create mode 100644 tests/gotest/nanosupport/nanoCase.bat create mode 100644 tests/gotest/nanosupport/nanoCase.sh create mode 100644 tests/gotest/nanosupport/nanosupport.go diff --git a/Jenkinsfile b/Jenkinsfile index 91855a92fb..4a584fbb35 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -235,11 +235,18 @@ pipeline { npm install td2.0-connector > /dev/null 2>&1 node nodejsChecker.js host=localhost node test1970.js + cd ${WKC}/tests/connectorTest/nodejsTest/nanosupport + npm install td2.0-connector > /dev/null 2>&1 + node nanosecondTest.js + 
''' sh ''' cd ${WKC}/tests/examples/C#/taosdemo mcs -out:taosdemo *.cs > /dev/null 2>&1 echo '' |./taosdemo -c /etc/taos + cd ${WKC}/tests/connectorTest/C#Test/nanosupport + mcs -out:nano *.cs > /dev/null 2>&1 + echo '' |./nano ''' sh ''' cd ${WKC}/tests/gotest diff --git a/tests/connectorTest/C#Test/nanosupport/TDengineDriver.cs b/tests/connectorTest/C#Test/nanosupport/TDengineDriver.cs new file mode 100644 index 0000000000..e6c3a598ad --- /dev/null +++ b/tests/connectorTest/C#Test/nanosupport/TDengineDriver.cs @@ -0,0 +1,170 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +using System; +using System.Collections.Generic; +using System.Runtime.InteropServices; + +namespace TDengineDriver +{ + enum TDengineDataType + { + TSDB_DATA_TYPE_NULL = 0, // 1 bytes + TSDB_DATA_TYPE_BOOL = 1, // 1 bytes + TSDB_DATA_TYPE_TINYINT = 2, // 1 bytes + TSDB_DATA_TYPE_SMALLINT = 3, // 2 bytes + TSDB_DATA_TYPE_INT = 4, // 4 bytes + TSDB_DATA_TYPE_BIGINT = 5, // 8 bytes + TSDB_DATA_TYPE_FLOAT = 6, // 4 bytes + TSDB_DATA_TYPE_DOUBLE = 7, // 8 bytes + TSDB_DATA_TYPE_BINARY = 8, // string + TSDB_DATA_TYPE_TIMESTAMP = 9,// 8 bytes + TSDB_DATA_TYPE_NCHAR = 10, // unicode string + TSDB_DATA_TYPE_UTINYINT = 11,// 1 byte + TSDB_DATA_TYPE_USMALLINT= 12,// 2 bytes + TSDB_DATA_TYPE_UINT = 13, // 4 bytes + TSDB_DATA_TYPE_UBIGINT= 14 // 8 bytes + } + + enum TDengineInitOption + { + TSDB_OPTION_LOCALE = 0, + TSDB_OPTION_CHARSET = 1, + TSDB_OPTION_TIMEZONE = 2, + TDDB_OPTION_CONFIGDIR = 3, + TDDB_OPTION_SHELL_ACTIVITY_TIMER = 4 + } + + class TDengineMeta + { + public string name; + public short size; + public byte type; + public string TypeName() + { + switch ((TDengineDataType)type) + { + case TDengineDataType.TSDB_DATA_TYPE_BOOL: + return "BOOL"; + case TDengineDataType.TSDB_DATA_TYPE_TINYINT: + return "TINYINT"; + case TDengineDataType.TSDB_DATA_TYPE_SMALLINT: + return "SMALLINT"; + case TDengineDataType.TSDB_DATA_TYPE_INT: + return "INT"; + case TDengineDataType.TSDB_DATA_TYPE_BIGINT: + return "BIGINT"; + case TDengineDataType.TSDB_DATA_TYPE_UTINYINT: + return "TINYINT UNSIGNED"; + case TDengineDataType.TSDB_DATA_TYPE_USMALLINT: + return "SMALLINT UNSIGNED"; + case TDengineDataType.TSDB_DATA_TYPE_UINT: + return "INT UNSIGNED"; + case TDengineDataType.TSDB_DATA_TYPE_UBIGINT: + return "BIGINT UNSIGNED"; + case TDengineDataType.TSDB_DATA_TYPE_FLOAT: + return "FLOAT"; + case TDengineDataType.TSDB_DATA_TYPE_DOUBLE: + return "DOUBLE"; + case TDengineDataType.TSDB_DATA_TYPE_BINARY: + return "STRING"; + case TDengineDataType.TSDB_DATA_TYPE_TIMESTAMP: + return 
"TIMESTAMP"; + case TDengineDataType.TSDB_DATA_TYPE_NCHAR: + return "NCHAR"; + default: + return "undefine"; + } + } + } + + class TDengine + { + public const int TSDB_CODE_SUCCESS = 0; + + [DllImport("taos", EntryPoint = "taos_init", CallingConvention = CallingConvention.Cdecl)] + static extern public void Init(); + + [DllImport("taos", EntryPoint = "taos_cleanup", CallingConvention = CallingConvention.Cdecl)] + static extern public void Cleanup(); + + [DllImport("taos", EntryPoint = "taos_options", CallingConvention = CallingConvention.Cdecl)] + static extern public void Options(int option, string value); + + [DllImport("taos", EntryPoint = "taos_connect", CallingConvention = CallingConvention.Cdecl)] + static extern public IntPtr Connect(string ip, string user, string password, string db, short port); + + [DllImport("taos", EntryPoint = "taos_errstr", CallingConvention = CallingConvention.Cdecl)] + static extern private IntPtr taos_errstr(IntPtr res); + static public string Error(IntPtr res) + { + IntPtr errPtr = taos_errstr(res); + return Marshal.PtrToStringAnsi(errPtr); + } + + [DllImport("taos", EntryPoint = "taos_errno", CallingConvention = CallingConvention.Cdecl)] + static extern public int ErrorNo(IntPtr res); + + [DllImport("taos", EntryPoint = "taos_query", CallingConvention = CallingConvention.Cdecl)] + static extern public IntPtr Query(IntPtr conn, string sqlstr); + + [DllImport("taos", EntryPoint = "taos_affected_rows", CallingConvention = CallingConvention.Cdecl)] + static extern public int AffectRows(IntPtr res); + + [DllImport("taos", EntryPoint = "taos_field_count", CallingConvention = CallingConvention.Cdecl)] + static extern public int FieldCount(IntPtr res); + + [DllImport("taos", EntryPoint = "taos_fetch_fields", CallingConvention = CallingConvention.Cdecl)] + static extern private IntPtr taos_fetch_fields(IntPtr res); + static public List FetchFields(IntPtr res) + { + const int fieldSize = 68; + + List metas = new List(); + if (res == 
IntPtr.Zero) + { + return metas; + } + + int fieldCount = FieldCount(res); + IntPtr fieldsPtr = taos_fetch_fields(res); + + for (int i = 0; i < fieldCount; ++i) + { + int offset = i * fieldSize; + + TDengineMeta meta = new TDengineMeta(); + meta.name = Marshal.PtrToStringAnsi(fieldsPtr + offset); + meta.type = Marshal.ReadByte(fieldsPtr + offset + 65); + meta.size = Marshal.ReadInt16(fieldsPtr + offset + 66); + metas.Add(meta); + } + + return metas; + } + + [DllImport("taos", EntryPoint = "taos_fetch_row", CallingConvention = CallingConvention.Cdecl)] + static extern public IntPtr FetchRows(IntPtr res); + + [DllImport("taos", EntryPoint = "taos_free_result", CallingConvention = CallingConvention.Cdecl)] + static extern public IntPtr FreeResult(IntPtr res); + + [DllImport("taos", EntryPoint = "taos_close", CallingConvention = CallingConvention.Cdecl)] + static extern public int Close(IntPtr taos); + //get precisionin parameter restultset + [DllImport("taos", EntryPoint = "taos_result_precision", CallingConvention = CallingConvention.Cdecl)] + static extern public int ResultPrecision(IntPtr taos); + } +} diff --git a/tests/connectorTest/C#Test/nanosupport/nanotest.cs b/tests/connectorTest/C#Test/nanosupport/nanotest.cs new file mode 100644 index 0000000000..b9eaefef8c --- /dev/null +++ b/tests/connectorTest/C#Test/nanosupport/nanotest.cs @@ -0,0 +1,502 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +using System; +using System.Text; +using System.Collections.Generic; +using System.Runtime.InteropServices; +using System.Collections; +namespace TDengineDriver +{ + class TDengineNanoTest + { + //connect parameters + private string host="localhost"; + private string configDir="/etc/taos"; + private string user="root"; + private string password="taosdata"; + private short port = 0; + + //sql parameters + private string dbName; + private string tbName; + private string precision; + + private bool isInsertData; + private bool isQueryData; + + private long tableCount; + private long totalRows; + private long batchRows; + private long beginTimestamp = 1551369600000L; + + private IntPtr conn = IntPtr.Zero; + private long rowsInserted = 0; + + static void Main(string[] args) + { + TDengineNanoTest tester = new TDengineNanoTest(); + //tester.ReadArgument(args); + + tester.InitTDengine(); + tester.ConnectTDengine(); + tester.execute("reset query cache"); + tester.execute("drop database if exists db"); + tester.execute("create database db precision 'ns'"); + tester.executeQuery("show databases;"); + //tester.checkData(0,16,"ns"); + tester.execute("use db"); + + Console.WriteLine("testing nanosecond support in 1st timestamp"); + tester.execute("create table tb (ts timestamp, speed int)"); + tester.execute("insert into tb values('2021-06-10 0:00:00.100000001', 1);"); + tester.execute("insert into tb values(1623254400150000000, 2);"); + tester.execute("import into tb values(1623254400300000000, 3);"); + tester.execute("import into tb values(1623254400299999999, 4);"); + tester.execute("insert into tb values(1623254400300000001, 5);"); + tester.execute("insert into tb values(1623254400999999999, 7);"); + tester.executeQuery("select * from tb;"); + + Console.WriteLine("expect data is "); + + tester.executeQuery("select * from tb;"); + + // Console.WriteLine("expected is : {0}", width); + // tdSql.checkData(0,0,"2021-06-10 0:00:00.100000001"); + // 
tdSql.checkData(1,0,"2021-06-10 0:00:00.150000000"); + // tdSql.checkData(2,0,"2021-06-10 0:00:00.299999999"); + // tdSql.checkData(3,1,3); + // tdSql.checkData(4,1,5); + // tdSql.checkData(5,1,7); + // tdSql.checkRows(6); + + tester.executeQuery("select count(*) from tb where ts > 1623254400100000000 and ts < 1623254400100000002;"); + Console.WriteLine("expected is : 1 " ); + tester.executeQuery("select count(*) from tb where ts > '2021-06-10 0:00:00.100000001' and ts < '2021-06-10 0:00:00.160000000';"); + Console.WriteLine("expected is : 1 " ); + + tester.executeQuery("select count(*) from tb where ts > 1623254400100000000 and ts < 1623254400150000000;"); + Console.WriteLine("expected is : 1 " ); + tester.executeQuery("select count(*) from tb where ts > '2021-06-10 0:00:00.100000000' and ts < '2021-06-10 0:00:00.150000000';"); + Console.WriteLine("expected is : 1 " ); + + tester.executeQuery("select count(*) from tb where ts > 1623254400400000000;"); + Console.WriteLine("expected is : 1 " ); + tester.executeQuery("select count(*) from tb where ts < '2021-06-10 00:00:00.400000000';"); + Console.WriteLine("expected is : 5 " ); + + tester.executeQuery("select count(*) from tb where ts > now + 400000000b;"); + Console.WriteLine("expected is : 0 " ); + + tester.executeQuery("select count(*) from tb where ts >= '2021-06-10 0:00:00.100000001';"); + Console.WriteLine("expected is : 6 " ); + + tester.executeQuery("select count(*) from tb where ts <= 1623254400300000000;"); + Console.WriteLine("expected is : 4 " ); + + tester.executeQuery("select count(*) from tb where ts = '2021-06-10 0:00:00.000000000';"); + Console.WriteLine("expected is : 0 " ); + + tester.executeQuery("select count(*) from tb where ts = 1623254400150000000;"); + Console.WriteLine("expected is : 1 " ); + + tester.executeQuery("select count(*) from tb where ts = '2021-06-10 0:00:00.100000001';"); + Console.WriteLine("expected is : 1 " ); + + tester.executeQuery("select count(*) from tb where ts between 
1623254400000000000 and 1623254400400000000;"); + Console.WriteLine("expected is : 5 " ); + + tester.executeQuery("select count(*) from tb where ts between '2021-06-10 0:00:00.299999999' and '2021-06-10 0:00:00.300000001';"); + Console.WriteLine("expected is : 3 " ); + + tester.executeQuery("select avg(speed) from tb interval(5000000000b);"); + Console.WriteLine("expected is : 1 " ); + + tester.executeQuery("select avg(speed) from tb interval(100000000b)"); + Console.WriteLine("expected is : 4 " ); + + // tdSql.error("select avg(speed) from tb interval(1b);") + // tdSql.error("select avg(speed) from tb interval(999b);") + + tester.executeQuery("select avg(speed) from tb interval(1000b);"); + Console.WriteLine("expected is : 5 rows " ); + + tester.executeQuery("select avg(speed) from tb interval(1u);"); + Console.WriteLine("expected is : 5 rows " ); + + tester.executeQuery("select avg(speed) from tb interval(100000000b) sliding (100000000b);"); + Console.WriteLine("expected is : 4 rows " ); + + tester.executeQuery("select last(*) from tb"); + Console.WriteLine("expected is :1623254400999999999 " ); + + // tdSql.checkData(0,0, "2021-06-10 0:00:00.999999999") + // tdSql.checkData(0,0, 1623254400999999999) + + tester.executeQuery("select first(*) from tb"); + Console.WriteLine("expected is : 1623254400100000001" ); + // tdSql.checkData(0,0, 1623254400100000001); + // tdSql.checkData(0,0, "2021-06-10 0:00:00.100000001"); + + tester.execute("insert into tb values(now + 500000000b, 6);"); + tester.executeQuery("select * from tb;"); + // tdSql.checkRows(7); + + tester.execute("create table tb2 (ts timestamp, speed int, ts2 timestamp);"); + tester.execute("insert into tb2 values('2021-06-10 0:00:00.100000001', 1, '2021-06-11 0:00:00.100000001');"); + tester.execute("insert into tb2 values(1623254400150000000, 2, 1623340800150000000);"); + tester.execute("import into tb2 values(1623254400300000000, 3, 1623340800300000000);"); + tester.execute("import into tb2 
values(1623254400299999999, 4, 1623340800299999999);"); + tester.execute("insert into tb2 values(1623254400300000001, 5, 1623340800300000001);"); + tester.execute("insert into tb2 values(1623254400999999999, 7, 1623513600999999999);"); + + tester.executeQuery("select * from tb2;"); + // tdSql.checkData(0,0,"2021-06-10 0:00:00.100000001"); + // tdSql.checkData(1,0,"2021-06-10 0:00:00.150000000"); + // tdSql.checkData(2,1,4); + // tdSql.checkData(3,1,3); + // tdSql.checkData(4,2,"2021-06-11 00:00:00.300000001"); + // tdSql.checkData(5,2,"2021-06-13 00:00:00.999999999"); + // tdSql.checkRows(6); + tester.executeQuery("select count(*) from tb2 where ts2 > 1623340800000000000 and ts2 < 1623340800150000000;"); + Console.WriteLine("expected is : 1 " ); + // tdSql.checkData(0,0,1); + + tester.executeQuery("select count(*) from tb2 where ts2 > '2021-06-11 0:00:00.100000000' and ts2 < '2021-06-11 0:00:00.100000002';"); + Console.WriteLine("expected is : 1 " ); + // tdSql.checkData(0,0,1); + + tester.executeQuery("select count(*) from tb2 where ts2 > 1623340800500000000;"); + Console.WriteLine("expected is : 1 " ); + // tdSql.checkData(0,0,1); + tester.executeQuery("select count(*) from tb2 where ts2 < '2021-06-11 0:00:00.400000000';"); + Console.WriteLine("expected is : 5 " ); + // tdSql.checkData(0,0,5); + + tester.executeQuery("select count(*) from tb2 where ts2 > now + 400000000b;"); + Console.WriteLine("expected is : 0 " ); + // tdSql.checkRows(0); + + tester.executeQuery("select count(*) from tb2 where ts2 >= '2021-06-11 0:00:00.100000001';"); + Console.WriteLine("expected is : 6 " ); + // tdSql.checkData(0,0,6); + + tester.executeQuery("select count(*) from tb2 where ts2 <= 1623340800400000000;"); + Console.WriteLine("expected is : 5 " ); + // tdSql.checkData(0,0,5); + + tester.executeQuery("select count(*) from tb2 where ts2 = '2021-06-11 0:00:00.000000000';"); + Console.WriteLine("expected is : 0 " ); + // tdSql.checkRows(0); + + tester.executeQuery("select count(*) 
from tb2 where ts2 = '2021-06-11 0:00:00.300000001';"); + Console.WriteLine("expected is : 1 " ); + // tdSql.checkData(0,0,1); + + tester.executeQuery("select count(*) from tb2 where ts2 = 1623340800300000001;"); + Console.WriteLine("expected is : 1 " ); + // tdSql.checkData(0,0,1); + + tester.executeQuery("select count(*) from tb2 where ts2 between 1623340800000000000 and 1623340800450000000;"); + Console.WriteLine("expected is : 5 " ); + // tdSql.checkData(0,0,5); + + tester.executeQuery("select count(*) from tb2 where ts2 between '2021-06-11 0:00:00.299999999' and '2021-06-11 0:00:00.300000001';"); + Console.WriteLine("expected is : 3 " ); + // tdSql.checkData(0,0,3); + + tester.executeQuery("select count(*) from tb2 where ts2 <> 1623513600999999999;"); + Console.WriteLine("expected is : 5 " ); + // tdSql.checkData(0,0,5); + + tester.executeQuery("select count(*) from tb2 where ts2 <> '2021-06-11 0:00:00.100000001';"); + Console.WriteLine("expected is : 5 " ); + // tdSql.checkData(0,0,5); + + tester.executeQuery("select count(*) from tb2 where ts2 <> '2021-06-11 0:00:00.100000000';"); + Console.WriteLine("expected is : 6 " ); + // tdSql.checkData(0,0,6); + + tester.executeQuery("select count(*) from tb2 where ts2 != 1623513600999999999;"); + Console.WriteLine("expected is : 5 " ); + // tdSql.checkData(0,0,5); + + tester.executeQuery("select count(*) from tb2 where ts2 != '2021-06-11 0:00:00.100000001';"); + Console.WriteLine("expected is : 5 " ); + // tdSql.checkData(0,0,5); + + tester.executeQuery("select count(*) from tb2 where ts2 != '2021-06-11 0:00:00.100000000';"); + Console.WriteLine("expected is : 6 " ); + // tdSql.checkData(0,0,6); + + tester.execute("insert into tb2 values(now + 500000000b, 6, now +2d);"); + tester.executeQuery("select * from tb2;"); + Console.WriteLine("expected is : 7 rows" ); + // tdSql.checkRows(7); + + // tdLog.debug("testing ill nanosecond format handling"); + tester.execute("create table tb3 (ts timestamp, speed int);"); + // 
tdSql.error("insert into tb3 values(16232544001500000, 2);"); + tester.execute("insert into tb3 values('2021-06-10 0:00:00.123456', 2);"); + tester.executeQuery("select * from tb3 where ts = '2021-06-10 0:00:00.123456000';"); + // tdSql.checkRows(1); + Console.WriteLine("expected is : 1 rows " ); + + tester.execute("insert into tb3 values('2021-06-10 0:00:00.123456789000', 2);"); + tester.executeQuery("select * from tb3 where ts = '2021-06-10 0:00:00.123456789';"); + // tdSql.checkRows(1); + Console.WriteLine("expected is : 1 rows " ); + + // check timezone support + Console.WriteLine("nsdb" ); + tester.execute("drop database if exists nsdb;"); + tester.execute("create database nsdb precision 'ns';"); + tester.execute("use nsdb;"); + tester.execute("create stable st (ts timestamp ,speed float ) tags(time timestamp ,id int);"); + tester.execute("insert into tb1 using st tags('2021-06-10 0:00:00.123456789' , 1 ) values('2021-06-10T0:00:00.123456789+07:00' , 1.0);" ); + tester.executeQuery("select first(*) from tb1;"); + Console.WriteLine("expected is : 1623258000123456789 " ); + // tdSql.checkData(0,0,1623258000123456789); + + + + Console.WriteLine("usdb" ); + tester.execute("create database usdb precision 'us';"); + tester.execute("use usdb;"); + tester.execute("create stable st (ts timestamp ,speed float ) tags(time timestamp ,id int);"); + tester.execute("insert into tb1 using st tags('2021-06-10 0:00:00.123456' , 1 ) values('2021-06-10T0:00:00.123456+07:00' , 1.0);" ); + tester.executeQuery("select first(*) from tb1;"); + + Console.WriteLine("expected is : 1623258000123456 " ); + + Console.WriteLine("msdb" ); + tester.execute("drop database if exists msdb;"); + tester.execute("create database msdb precision 'ms';"); + tester.execute("use msdb;"); + tester.execute("create stable st (ts timestamp ,speed float ) tags(time timestamp ,id int);"); + tester.execute("insert into tb1 using st tags('2021-06-10 0:00:00.123' , 1 ) values('2021-06-10T0:00:00.123+07:00' , 
1.0);" ); + tester.executeQuery("select first(*) from tb1;"); + Console.WriteLine("expected is : 1623258000123 " ); + + + + tester.CloseConnection(); + tester.cleanup(); + + + } + + public void InitTDengine() + { + TDengine.Options((int)TDengineInitOption.TDDB_OPTION_CONFIGDIR, this.configDir); + TDengine.Options((int)TDengineInitOption.TDDB_OPTION_SHELL_ACTIVITY_TIMER, "60"); + Console.WriteLine("init..."); + TDengine.Init(); + Console.WriteLine("get connection starting..."); + } + + public void ConnectTDengine() + { + string db = ""; + this.conn = TDengine.Connect(this.host, this.user, this.password, db, this.port); + if (this.conn == IntPtr.Zero) + { + Console.WriteLine("connection failed: " + this.host); + ExitProgram(); + } + else + { + Console.WriteLine("[ OK ] Connection established."); + } + } + //EXECUTE SQL + public void execute(string sql) + { + DateTime dt1 = DateTime.Now; + IntPtr res = TDengine.Query(this.conn, sql.ToString()); + DateTime dt2 = DateTime.Now; + TimeSpan span = dt2 - dt1; + + if ((res == IntPtr.Zero) || (TDengine.ErrorNo(res) != 0)) + { + Console.Write(sql.ToString() + " failure, "); + if (res != IntPtr.Zero) { + Console.Write("reason: " + TDengine.Error(res)); + } + Console.WriteLine(""); + ExitProgram(); + } + else + { + Console.WriteLine(sql.ToString() + " success"); + } + TDengine.FreeResult(res); + } + //EXECUTE QUERY + public void executeQuery(string sql) + { + + DateTime dt1 = DateTime.Now; + long queryRows = 0; + IntPtr res = TDengine.Query(conn, sql); + getPrecision(res); + if ((res == IntPtr.Zero) || (TDengine.ErrorNo(res) != 0)) + { + Console.Write(sql.ToString() + " failure, "); + if (res != IntPtr.Zero) { + Console.Write("reason: " + TDengine.Error(res)); + } + Console.WriteLine(""); + ExitProgram(); + } + DateTime dt2 = DateTime.Now; + TimeSpan span = dt2 - dt1; + Console.WriteLine("[OK] time cost: " + span.ToString() + "ms, execute statement ====> " + sql.ToString()); + int fieldCount = TDengine.FieldCount(res); + + List 
metas = TDengine.FetchFields(res); + for (int j = 0; j < metas.Count; j++) + { + TDengineMeta meta = (TDengineMeta)metas[j]; + } + + IntPtr rowdata; + StringBuilder builder = new StringBuilder(); + while ((rowdata = TDengine.FetchRows(res)) != IntPtr.Zero) + { + queryRows++; + for (int fields = 0; fields < fieldCount; ++fields) + { + TDengineMeta meta = metas[fields]; + int offset = IntPtr.Size * fields; + IntPtr data = Marshal.ReadIntPtr(rowdata, offset); + + builder.Append("---"); + + if (data == IntPtr.Zero) + { + builder.Append("NULL"); + continue; + } + + switch ((TDengineDataType)meta.type) + { + case TDengineDataType.TSDB_DATA_TYPE_BOOL: + bool v1 = Marshal.ReadByte(data) == 0 ? false : true; + builder.Append(v1); + break; + case TDengineDataType.TSDB_DATA_TYPE_TINYINT: + byte v2 = Marshal.ReadByte(data); + builder.Append(v2); + break; + case TDengineDataType.TSDB_DATA_TYPE_SMALLINT: + short v3 = Marshal.ReadInt16(data); + builder.Append(v3); + break; + case TDengineDataType.TSDB_DATA_TYPE_INT: + int v4 = Marshal.ReadInt32(data); + builder.Append(v4); + break; + case TDengineDataType.TSDB_DATA_TYPE_BIGINT: + long v5 = Marshal.ReadInt64(data); + builder.Append(v5); + break; + case TDengineDataType.TSDB_DATA_TYPE_FLOAT: + float v6 = (float)Marshal.PtrToStructure(data, typeof(float)); + builder.Append(v6); + break; + case TDengineDataType.TSDB_DATA_TYPE_DOUBLE: + double v7 = (double)Marshal.PtrToStructure(data, typeof(double)); + builder.Append(v7); + break; + case TDengineDataType.TSDB_DATA_TYPE_BINARY: + string v8 = Marshal.PtrToStringAnsi(data); + builder.Append(v8); + break; + case TDengineDataType.TSDB_DATA_TYPE_TIMESTAMP: + long v9 = Marshal.ReadInt64(data); + builder.Append(v9); + break; + case TDengineDataType.TSDB_DATA_TYPE_NCHAR: + string v10 = Marshal.PtrToStringAnsi(data); + builder.Append(v10); + break; + } + } + builder.Append("---"); + + if (queryRows <= 10) + { + Console.WriteLine(builder.ToString()); + } + builder.Clear(); + } + + if 
(TDengine.ErrorNo(res) != 0) + { + Console.Write("Query is not complete, Error {0:G}", TDengine.ErrorNo(res), TDengine.Error(res)); + } + Console.WriteLine(""); + + TDengine.FreeResult(res); + + } + + public void CloseConnection() + { + if (this.conn != IntPtr.Zero) + { + TDengine.Close(this.conn); + Console.WriteLine("connection closed."); + } + } + + static void ExitProgram() + { + System.Environment.Exit(0); + } + + public void cleanup() + { + Console.WriteLine("clean up..."); + System.Environment.Exit(0); + } + + // method to get db precision + public void getPrecision(IntPtr res) + { + int psc=TDengine.ResultPrecision(res); + switch(psc) + { + case 0: + Console.WriteLine("db:[{0:G}]'s precision is {1:G}",this.dbName,"millisecond"); + break; + case 1: + Console.WriteLine("db:[{0:G}]'s precision is {1:G}",this.dbName,"microsecond"); + break; + case 2: + Console.WriteLine("db:[{0:G}]'s precision is {1:G}",this.dbName,"nanosecond"); + break; + } + + } + + // public void checkData(int x ,int y , long ts ){ + + // } + + } +} + diff --git a/tests/connectorTest/nodejsTest/nanosupport/nanosecondTest.js b/tests/connectorTest/nodejsTest/nanosupport/nanosecondTest.js new file mode 100644 index 0000000000..11812ac84b --- /dev/null +++ b/tests/connectorTest/nodejsTest/nanosupport/nanosecondTest.js @@ -0,0 +1,290 @@ +const taos = require('td2.0-connector'); +var conn = taos.connect({host:"localhost", user:"root", password:"taosdata", config:"/etc/taos",port:6030}) +var c1 = conn.cursor(); + + +function checkData(sql,row,col,data){ + + + console.log(sql) + c1.execute(sql) + var d = c1.fetchall(); + let checkdata = d[row][col]; + if (checkdata == data) { + + console.log('check pass') + } + else{ + console.log('check failed') + console.log('checked is :',checkdata) + console.log("expected is :",data) + + + } +} + + +// nano basic case + +c1.execute('reset query cache') +c1.execute('drop database if exists db') +c1.execute('create database db precision "ns";') +c1.execute('use 
db'); +c1.execute('create table tb (ts timestamp, speed int)') +c1.execute('insert into tb values(\'2021-06-10 00:00:00.100000001\', 1);') +c1.execute('insert into tb values(1623254400150000000, 2);') +c1.execute('import into tb values(1623254400300000000, 3);') +c1.execute('import into tb values(1623254400299999999, 4);') +c1.execute('insert into tb values(1623254400300000001, 5);') +c1.execute('insert into tb values(1623254400999999999, 7);') +c1.execute('insert into tb values(1623254400123456789, 8);') +sql = 'select * from tb;' + +console.log('*******************************************') +console.log('this is area about checkdata result') +//check data about insert data +checkData(sql,0,0,'2021-06-10 00:00:00.100000001') +checkData(sql,1,0,'2021-06-10 00:00:00.123456789') +checkData(sql,2,0,'2021-06-10 00:00:00.150000000') +checkData(sql,3,0,'2021-06-10 00:00:00.299999999') +checkData(sql,4,0,'2021-06-10 00:00:00.300000000') +checkData(sql,5,0,'2021-06-10 00:00:00.300000001') +checkData(sql,6,0,'2021-06-10 00:00:00.999999999') +checkData(sql,0,1,1) +checkData(sql,1,1,8) +checkData(sql,2,1,2) +checkData(sql,5,1,5) + + + +// us basic case + +c1.execute('reset query cache') +c1.execute('drop database if exists usdb') +c1.execute('create database usdb precision "us";') +c1.execute('use usdb'); +c1.execute('create table tb (ts timestamp, speed int)') +c1.execute('insert into tb values(\'2021-06-10 00:00:00.100001\', 1);') +c1.execute('insert into tb values(1623254400150000, 2);') +c1.execute('import into tb values(1623254400300000, 3);') +c1.execute('import into tb values(1623254400299999, 4);') +c1.execute('insert into tb values(1623254400300001, 5);') +c1.execute('insert into tb values(1623254400999999, 7);') +c1.execute('insert into tb values(1623254400123789, 8);') +sql = 'select * from tb;' + +console.log('*******************************************') + +//check data about insert data +checkData(sql,0,0,'2021-06-10 00:00:00.100001') 
+checkData(sql,1,0,'2021-06-10 00:00:00.123789') +checkData(sql,2,0,'2021-06-10 00:00:00.150000') +checkData(sql,3,0,'2021-06-10 00:00:00.299999') +checkData(sql,4,0,'2021-06-10 00:00:00.300000') +checkData(sql,5,0,'2021-06-10 00:00:00.300001') +checkData(sql,6,0,'2021-06-10 00:00:00.999999') +checkData(sql,0,1,1) +checkData(sql,1,1,8) +checkData(sql,2,1,2) +checkData(sql,5,1,5) + +console.log('*******************************************') + +// ms basic case + +c1.execute('reset query cache') +c1.execute('drop database if exists msdb') +c1.execute('create database msdb precision "ms";') +c1.execute('use msdb'); +c1.execute('create table tb (ts timestamp, speed int)') +c1.execute('insert into tb values(\'2021-06-10 00:00:00.101\', 1);') +c1.execute('insert into tb values(1623254400150, 2);') +c1.execute('import into tb values(1623254400300, 3);') +c1.execute('import into tb values(1623254400299, 4);') +c1.execute('insert into tb values(1623254400301, 5);') +c1.execute('insert into tb values(1623254400789, 7);') +c1.execute('insert into tb values(1623254400999, 8);') +sql = 'select * from tb;' + +console.log('*******************************************') +console.log('this is area about checkdata result') +//check data about insert data +checkData(sql,0,0,'2021-06-10 00:00:00.101') +checkData(sql,1,0,'2021-06-10 00:00:00.150') +checkData(sql,2,0,'2021-06-10 00:00:00.299') +checkData(sql,3,0,'2021-06-10 00:00:00.300') +checkData(sql,4,0,'2021-06-10 00:00:00.301') +checkData(sql,5,0,'2021-06-10 00:00:00.789') +checkData(sql,6,0,'2021-06-10 00:00:00.999') +checkData(sql,0,1,1) +checkData(sql,1,1,2) +checkData(sql,2,1,4) +checkData(sql,5,1,7) + +console.log('*******************************************') + +// offfical query result to show +// console.log('this is area about fetch all data') +// var query = c1.query(sql) +// var promise = query.execute(); +// promise.then(function(result) { +// result.pretty(); +// }); + 
+console.log('*******************************************') +c1.execute('use db') + +sql2 = 'select count(*) from tb where ts > 1623254400100000000 and ts < 1623254400100000002;' +checkData(sql2,0,0,1) + +sql3 = 'select count(*) from tb where ts > \'2021-06-10 0:00:00.100000001\' and ts < \'2021-06-10 0:00:00.160000000\';' +checkData(sql3,0,0,2) + +sql4 = 'select count(*) from tb where ts > 1623254400100000000 and ts < 1623254400150000000;' +checkData(sql4,0,0,2) + +sql5 = 'select count(*) from tb where ts > \'2021-06-10 0:00:00.100000000\' and ts < \'2021-06-10 0:00:00.150000000\';' +checkData(sql5,0,0,2) + +sql6 = 'select count(*) from tb where ts > 1623254400400000000;' +checkData(sql6,0,0,1) + +sql7 = 'select count(*) from tb where ts < \'2021-06-10 00:00:00.400000000\';' +checkData(sql7,0,0,6) + +sql8 = 'select count(*) from tb where ts > now + 400000000b;' +c1.execute(sql8) + +sql9 = 'select count(*) from tb where ts >= \'2021-06-10 0:00:00.100000001\';' +checkData(sql9,0,0,7) + +sql10 = 'select count(*) from tb where ts <= 1623254400300000000;' +checkData(sql10,0,0,5) + +sql11 = 'select count(*) from tb where ts = \'2021-06-10 0:00:00.000000000\';' +c1.execute(sql11) + +sql12 = 'select count(*) from tb where ts = 1623254400150000000;' +checkData(sql12,0,0,1) + +sql13 = 'select count(*) from tb where ts = \'2021-06-10 0:00:00.100000001\';' +checkData(sql13,0,0,1) + +sql14 = 'select count(*) from tb where ts between 1623254400000000000 and 1623254400400000000;' +checkData(sql14,0,0,6) + +sql15 = 'select count(*) from tb where ts between \'2021-06-10 0:00:00.299999999\' and \'2021-06-10 0:00:00.300000001\';' +checkData(sql15,0,0,3) + +sql16 = 'select avg(speed) from tb interval(5000000000b);' +checkData(sql16,0,0,'2021-06-10 00:00:00.000000000') + +sql17 = 'select avg(speed) from tb interval(100000000b)' +checkData(sql17,0,1,3.6666666666666665) +checkData(sql17,1,1,4.000000000) + +checkData(sql17,2,0,'2021-06-10 00:00:00.300000000') 
+checkData(sql17,3,0,'2021-06-10 00:00:00.900000000') + +console.log("print break ") + +// sql18 = 'select avg(speed) from tb interval(999b)' +// c1.execute(sql18) + +console.log("print break2 ") +sql19 = 'select avg(speed) from tb interval(1u);' +checkData(sql19,2,1,2.000000000) +checkData(sql19,3,0,'2021-06-10 00:00:00.299999000') + +sql20 = 'select avg(speed) from tb interval(100000000b) sliding (100000000b);' +checkData(sql20,2,1,4.000000000) +checkData(sql20,3,0,'2021-06-10 00:00:00.900000000') + +sql21 = 'select last(*) from tb;' +checkData(sql21,0,0,'2021-06-10 00:00:00.999999999') + +sql22 = 'select first(*) from tb;' +checkData(sql22,0,0,'2021-06-10 00:00:00.100000001') + +// timezone support + +console.log('testing nanosecond support in other timestamps') + +c1.execute('create table tb2 (ts timestamp, speed int, ts2 timestamp);') +c1.execute('insert into tb2 values(\'2021-06-10 0:00:00.100000001\', 1, \'2021-06-11 0:00:00.100000001\');') +c1.execute('insert into tb2 values(1623254400150000000, 2, 1623340800150000000);') +c1.execute('import into tb2 values(1623254400300000000, 3, 1623340800300000000);') +c1.execute('import into tb2 values(1623254400299999999, 4, 1623340800299999999);') +c1.execute('insert into tb2 values(1623254400300000001, 5, 1623340800300000001);') +c1.execute('insert into tb2 values(1623254400999999999, 7, 1623513600999999999);') + +sql23 = 'select * from tb2;' +checkData(sql23,0,0,'2021-06-10 00:00:00.100000001') +checkData(sql23,1,0,'2021-06-10 00:00:00.150000000') +checkData(sql23,2,1,4) +checkData(sql23,3,1,3) +checkData(sql23,4,2,'2021-06-11 00:00:00.300000001') +checkData(sql23,5,2,'2021-06-13 00:00:00.999999999') + +sql24 = 'select count(*) from tb2 where ts2 >= \'2021-06-11 0:00:00.100000001\';' +checkData(sql24,0,0,6) + +sql25 = 'select count(*) from tb2 where ts2 <= 1623340800400000000;' +checkData(sql25,0,0,5) + +sql26 = 'select count(*) from tb2 where ts2 = \'2021-06-11 0:00:00.300000001\';' +checkData(sql26,0,0,1) + +sql27 
= 'select count(*) from tb2 where ts2 = 1623340800300000001;' +checkData(sql27,0,0,1) + +sql28 = 'select count(*) from tb2 where ts2 between 1623340800000000000 and 1623340800450000000;' +checkData(sql28,0,0,5) + +sql29 = 'select count(*) from tb2 where ts2 between \'2021-06-11 0:00:00.299999999\' and \'2021-06-11 0:00:00.300000001\';' +checkData(sql29,0,0,3) + +sql30 = 'select count(*) from tb2 where ts2 <> 1623513600999999999;' +checkData(sql30,0,0,5) + +sql31 = 'select count(*) from tb2 where ts2 <> \'2021-06-11 0:00:00.100000001\';' +checkData(sql31,0,0,5) + +sql32 = 'select count(*) from tb2 where ts2 != 1623513600999999999;' +checkData(sql32,0,0,5) + +sql33 = 'select count(*) from tb2 where ts2 != \'2021-06-11 0:00:00.100000001\';' +checkData(sql33,0,0,5) + +c1.execute('insert into tb2 values(now + 500000000b, 6, now +2d);') + +sql34 = 'select count(*) from tb2;' +checkData(sql34,0,0,7) + + +// check timezone support + +c1.execute('use db;') +c1.execute('create stable st (ts timestamp ,speed float ) tags(time timestamp ,id int);') +c1.execute('insert into stb1 using st tags("2021-06-10 0:00:00.123456789" , 1 ) values("2021-06-10T0:00:00.123456789+07:00" , 1.0);' ) +sql35 = 'select first(*) from stb1;' +checkData(sql35,0,0,'2021-06-10 01:00:00.123456789') + +c1.execute('use usdb;') +c1.execute('create stable st (ts timestamp ,speed float ) tags(time timestamp ,id int);') +c1.execute('insert into stb1 using st tags("2021-06-10 0:00:00.123456" , 1 ) values("2021-06-10T0:00:00.123456+07:00" , 1.0);' ) +sql36 = 'select first(*) from stb1;' +checkData(sql36,0,0,'2021-06-10 01:00:00.123456') + +c1.execute('use msdb;') +c1.execute('create stable st (ts timestamp ,speed float ) tags(time timestamp ,id int);') +c1.execute('insert into stb1 using st tags("2021-06-10 0:00:00.123456" , 1 ) values("2021-06-10T0:00:00.123456+07:00" , 1.0);' ) +sql36 = 'select first(*) from stb1;' +checkData(sql36,0,0,'2021-06-10 01:00:00.123') + + + + + + + diff --git 
a/tests/connectorTest/nodejsTest/nodetaos/cinterface.js b/tests/connectorTest/nodejsTest/nodetaos/cinterface.js new file mode 100644 index 0000000000..03d27e5593 --- /dev/null +++ b/tests/connectorTest/nodejsTest/nodetaos/cinterface.js @@ -0,0 +1,587 @@ +/** + * C Interface with TDengine Module + * @module CTaosInterface + */ + +const ref = require('ref-napi'); +const os = require('os'); +const ffi = require('ffi-napi'); +const ArrayType = require('ref-array-napi'); +const Struct = require('ref-struct-napi'); +const FieldTypes = require('./constants'); +const errors = require('./error'); +const TaosObjects = require('./taosobjects'); +const { NULL_POINTER } = require('ref-napi'); + +module.exports = CTaosInterface; + +function convertTimestamp(data, num_of_rows, nbytes = 0, offset = 0, precision = 0) { + data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset); + let res = []; + let currOffset = 0; + while (currOffset < data.length) { + let time = data.readInt64LE(currOffset); + currOffset += nbytes; + res.push(new TaosObjects.TaosTimestamp(time, precision)); + } + return res; +} +function convertBool(data, num_of_rows, nbytes = 0, offset = 0, precision = 0) { + data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset); + let res = new Array(data.length); + for (let i = 0; i < data.length; i++) { + if (data[i] == 0) { + res[i] = false; + } + else if (data[i] == 1) { + res[i] = true; + } + else if (data[i] == FieldTypes.C_BOOL_NULL) { + res[i] = null; + } + } + return res; +} +function convertTinyint(data, num_of_rows, nbytes = 0, offset = 0, precision = 0) { + data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset); + let res = []; + let currOffset = 0; + while (currOffset < data.length) { + let d = data.readIntLE(currOffset, 1); + res.push(d == FieldTypes.C_TINYINT_NULL ? 
null : d); + currOffset += nbytes; + } + return res; +} +function convertSmallint(data, num_of_rows, nbytes = 0, offset = 0, precision = 0) { + data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset); + let res = []; + let currOffset = 0; + while (currOffset < data.length) { + let d = data.readIntLE(currOffset, 2); + res.push(d == FieldTypes.C_SMALLINT_NULL ? null : d); + currOffset += nbytes; + } + return res; +} +function convertInt(data, num_of_rows, nbytes = 0, offset = 0, precision = 0) { + data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset); + let res = []; + let currOffset = 0; + while (currOffset < data.length) { + let d = data.readInt32LE(currOffset); + res.push(d == FieldTypes.C_INT_NULL ? null : d); + currOffset += nbytes; + } + return res; +} +function convertBigint(data, num_of_rows, nbytes = 0, offset = 0, precision = 0) { + data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset); + let res = []; + let currOffset = 0; + while (currOffset < data.length) { + let d = data.readInt64LE(currOffset); + res.push(d == FieldTypes.C_BIGINT_NULL ? null : BigInt(d)); + currOffset += nbytes; + } + return res; +} +function convertFloat(data, num_of_rows, nbytes = 0, offset = 0, precision = 0) { + data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset); + let res = []; + let currOffset = 0; + while (currOffset < data.length) { + let d = parseFloat(data.readFloatLE(currOffset).toFixed(5)); + res.push(isNaN(d) ? null : d); + currOffset += nbytes; + } + return res; +} +function convertDouble(data, num_of_rows, nbytes = 0, offset = 0, precision = 0) { + data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset); + let res = []; + let currOffset = 0; + while (currOffset < data.length) { + let d = parseFloat(data.readDoubleLE(currOffset).toFixed(16)); + res.push(isNaN(d) ? 
null : d); + currOffset += nbytes; + } + return res; +} + +function convertNchar(data, num_of_rows, nbytes = 0, offset = 0, precision = 0) { + data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset); + let res = []; + + let currOffset = 0; + while (currOffset < data.length) { + let len = data.readIntLE(currOffset, 2); + let dataEntry = data.slice(currOffset + 2, currOffset + len + 2); //one entry in a row under a column; + res.push(dataEntry.toString("utf-8")); + currOffset += nbytes; + } + return res; +} + +// Object with all the relevant converters from pblock data to javascript readable data +let convertFunctions = { + [FieldTypes.C_BOOL]: convertBool, + [FieldTypes.C_TINYINT]: convertTinyint, + [FieldTypes.C_SMALLINT]: convertSmallint, + [FieldTypes.C_INT]: convertInt, + [FieldTypes.C_BIGINT]: convertBigint, + [FieldTypes.C_FLOAT]: convertFloat, + [FieldTypes.C_DOUBLE]: convertDouble, + [FieldTypes.C_BINARY]: convertNchar, + [FieldTypes.C_TIMESTAMP]: convertTimestamp, + [FieldTypes.C_NCHAR]: convertNchar +} + +// Define TaosField structure +var char_arr = ArrayType(ref.types.char); +var TaosField = Struct({ + 'name': char_arr, +}); +TaosField.fields.name.type.size = 65; +TaosField.defineProperty('type', ref.types.char); +TaosField.defineProperty('bytes', ref.types.short); + + +/** + * + * @param {Object} config - Configuration options for the interface + * @return {CTaosInterface} + * @class CTaosInterface + * @classdesc The CTaosInterface is the interface through which Node.JS communicates data back and forth with TDengine. It is not advised to + * access this class directly and use it unless you understand what these functions do. 
+ */ +function CTaosInterface(config = null, pass = false) { + ref.types.char_ptr = ref.refType(ref.types.char); + ref.types.void_ptr = ref.refType(ref.types.void); + ref.types.void_ptr2 = ref.refType(ref.types.void_ptr); + /*Declare a bunch of functions first*/ + /* Note, pointers to TAOS_RES, TAOS, are ref.types.void_ptr. The connection._conn buffer is supplied for pointers to TAOS * */ + + if ('win32' == os.platform()) { + taoslibname = 'taos'; + } else { + taoslibname = 'libtaos'; + } + this.libtaos = ffi.Library(taoslibname, { + 'taos_options': [ref.types.int, [ref.types.int, ref.types.void_ptr]], + 'taos_init': [ref.types.void, []], + //TAOS *taos_connect(char *ip, char *user, char *pass, char *db, int port) + 'taos_connect': [ref.types.void_ptr, [ref.types.char_ptr, ref.types.char_ptr, ref.types.char_ptr, ref.types.char_ptr, ref.types.int]], + //void taos_close(TAOS *taos) + 'taos_close': [ref.types.void, [ref.types.void_ptr]], + //int *taos_fetch_lengths(TAOS_RES *res); + 'taos_fetch_lengths': [ref.types.void_ptr, [ref.types.void_ptr]], + //int taos_query(TAOS *taos, char *sqlstr) + 'taos_query': [ref.types.void_ptr, [ref.types.void_ptr, ref.types.char_ptr]], + //int taos_affected_rows(TAOS_RES *res) + 'taos_affected_rows': [ref.types.int, [ref.types.void_ptr]], + //int taos_fetch_block(TAOS_RES *res, TAOS_ROW *rows) + 'taos_fetch_block': [ref.types.int, [ref.types.void_ptr, ref.types.void_ptr]], + //int taos_num_fields(TAOS_RES *res); + 'taos_num_fields': [ref.types.int, [ref.types.void_ptr]], + //TAOS_ROW taos_fetch_row(TAOS_RES *res) + //TAOS_ROW is void **, but we set the return type as a reference instead to get the row + 'taos_fetch_row': [ref.refType(ref.types.void_ptr2), [ref.types.void_ptr]], + 'taos_print_row': [ref.types.int, [ref.types.char_ptr, ref.types.void_ptr, ref.types.void_ptr, ref.types.int]], + //int taos_result_precision(TAOS_RES *res) + 'taos_result_precision': [ref.types.int, [ref.types.void_ptr]], + //void taos_free_result(TAOS_RES 
*res) + 'taos_free_result': [ref.types.void, [ref.types.void_ptr]], + //int taos_field_count(TAOS *taos) + 'taos_field_count': [ref.types.int, [ref.types.void_ptr]], + //TAOS_FIELD *taos_fetch_fields(TAOS_RES *res) + 'taos_fetch_fields': [ref.refType(TaosField), [ref.types.void_ptr]], + //int taos_errno(TAOS *taos) + 'taos_errno': [ref.types.int, [ref.types.void_ptr]], + //char *taos_errstr(TAOS *taos) + 'taos_errstr': [ref.types.char_ptr, [ref.types.void_ptr]], + //void taos_stop_query(TAOS_RES *res); + 'taos_stop_query': [ref.types.void, [ref.types.void_ptr]], + //char *taos_get_server_info(TAOS *taos); + 'taos_get_server_info': [ref.types.char_ptr, [ref.types.void_ptr]], + //char *taos_get_client_info(); + 'taos_get_client_info': [ref.types.char_ptr, []], + + // ASYNC + // void taos_query_a(TAOS *taos, char *sqlstr, void (*fp)(void *, TAOS_RES *, int), void *param) + 'taos_query_a': [ref.types.void, [ref.types.void_ptr, ref.types.char_ptr, ref.types.void_ptr, ref.types.void_ptr]], + // void taos_fetch_rows_a(TAOS_RES *res, void (*fp)(void *param, TAOS_RES *, int numOfRows), void *param); + 'taos_fetch_rows_a': [ref.types.void, [ref.types.void_ptr, ref.types.void_ptr, ref.types.void_ptr]], + + // Subscription + //TAOS_SUB *taos_subscribe(TAOS* taos, int restart, const char* topic, const char *sql, TAOS_SUBSCRIBE_CALLBACK fp, void *param, int interval) + 'taos_subscribe': [ref.types.void_ptr, [ref.types.void_ptr, ref.types.int, ref.types.char_ptr, ref.types.char_ptr, ref.types.void_ptr, ref.types.void_ptr, ref.types.int]], + // TAOS_RES *taos_consume(TAOS_SUB *tsub) + 'taos_consume': [ref.types.void_ptr, [ref.types.void_ptr]], + //void taos_unsubscribe(TAOS_SUB *tsub); + 'taos_unsubscribe': [ref.types.void, [ref.types.void_ptr]], + + // Continuous Query + //TAOS_STREAM *taos_open_stream(TAOS *taos, char *sqlstr, void (*fp)(void *param, TAOS_RES *, TAOS_ROW row), + // int64_t stime, void *param, void (*callback)(void *)); + 'taos_open_stream': [ref.types.void_ptr, 
[ref.types.void_ptr, ref.types.char_ptr, ref.types.void_ptr, ref.types.int64, ref.types.void_ptr, ref.types.void_ptr]], + //void taos_close_stream(TAOS_STREAM *tstr); + 'taos_close_stream': [ref.types.void, [ref.types.void_ptr]] + + }); + if (pass == false) { + if (config == null) { + this._config = ref.alloc(ref.types.char_ptr, ref.NULL); + } + else { + try { + this._config = ref.allocCString(config); + } + catch (err) { + throw "Attribute Error: config is expected as a str"; + } + } + if (config != null) { + this.libtaos.taos_options(3, this._config); + } + this.libtaos.taos_init(); + } + return this; +} +CTaosInterface.prototype.config = function config() { + return this._config; +} +CTaosInterface.prototype.connect = function connect(host = null, user = "root", password = "taosdata", db = null, port = 0) { + let _host, _user, _password, _db, _port; + try { + _host = host != null ? ref.allocCString(host) : ref.NULL; + } + catch (err) { + throw "Attribute Error: host is expected as a str"; + } + try { + _user = ref.allocCString(user) + } + catch (err) { + throw "Attribute Error: user is expected as a str"; + } + try { + _password = ref.allocCString(password); + } + catch (err) { + throw "Attribute Error: password is expected as a str"; + } + try { + _db = db != null ? 
ref.allocCString(db) : ref.NULL; + } + catch (err) { + throw "Attribute Error: db is expected as a str"; + } + try { + _port = ref.alloc(ref.types.int, port); + } + catch (err) { + throw TypeError("port is expected as an int") + } + let connection = this.libtaos.taos_connect(_host, _user, _password, _db, _port); + if (ref.isNull(connection)) { + throw new errors.TDError('Failed to connect to TDengine'); + } + else { + console.log('Successfully connected to TDengine'); + } + return connection; +} +CTaosInterface.prototype.close = function close(connection) { + this.libtaos.taos_close(connection); + console.log("Connection is closed"); +} +CTaosInterface.prototype.query = function query(connection, sql) { + return this.libtaos.taos_query(connection, ref.allocCString(sql)); +} +CTaosInterface.prototype.affectedRows = function affectedRows(result) { + return this.libtaos.taos_affected_rows(result); +} +CTaosInterface.prototype.useResult = function useResult(result) { + + let fields = []; + let pfields = this.fetchFields(result); + if (ref.isNull(pfields) == false) { + pfields = ref.reinterpret(pfields, this.fieldsCount(result) * 68, 0); + for (let i = 0; i < pfields.length; i += 68) { + //0 - 63 = name //64 - 65 = bytes, 66 - 67 = type + fields.push({ + name: ref.readCString(ref.reinterpret(pfields, 65, i)), + type: pfields[i + 65], + bytes: pfields[i + 66] + }) + } + } + return fields; +} +CTaosInterface.prototype.fetchBlock = function fetchBlock(result, fields) { + let pblock = ref.NULL_POINTER; + let num_of_rows = this.libtaos.taos_fetch_block(result, pblock); + if (ref.isNull(pblock.deref()) == true) { + return { block: null, num_of_rows: 0 }; + } + + var fieldL = this.libtaos.taos_fetch_lengths(result); + let precision = this.libtaos.taos_result_precision(result); + + var fieldlens = []; + + if (ref.isNull(fieldL) == false) { + for (let i = 0; i < fields.length; i++) { + let plen = ref.reinterpret(fieldL, 4, i * 4); + let len = plen.readInt32LE(0); + 
fieldlens.push(len); + } + } + + let blocks = new Array(fields.length); + blocks.fill(null); + num_of_rows = Math.abs(num_of_rows); + let offset = 0; + let ptr = pblock.deref(); + + for (let i = 0; i < fields.length; i++) { + pdata = ref.reinterpret(ptr, 8, i * 8); + if (ref.isNull(pdata.readPointer())) { + blocks[i] = new Array(); + } else { + pdata = ref.ref(pdata.readPointer()); + if (!convertFunctions[fields[i]['type']]) { + throw new errors.DatabaseError("Invalid data type returned from database"); + } + blocks[i] = convertFunctions[fields[i]['type']](pdata, num_of_rows, fieldlens[i], offset, precision); + } + } + return { blocks: blocks, num_of_rows } +} +CTaosInterface.prototype.fetchRow = function fetchRow(result, fields) { + let row = this.libtaos.taos_fetch_row(result); + return row; +} +CTaosInterface.prototype.freeResult = function freeResult(result) { + this.libtaos.taos_free_result(result); + result = null; +} +/** Number of fields returned in this result handle, must use with async */ +CTaosInterface.prototype.numFields = function numFields(result) { + return this.libtaos.taos_num_fields(result); +} +// Fetch fields count by connection, the latest query +CTaosInterface.prototype.fieldsCount = function fieldsCount(result) { + return this.libtaos.taos_field_count(result); +} +CTaosInterface.prototype.fetchFields = function fetchFields(result) { + return this.libtaos.taos_fetch_fields(result); +} +CTaosInterface.prototype.errno = function errno(result) { + return this.libtaos.taos_errno(result); +} +CTaosInterface.prototype.errStr = function errStr(result) { + return ref.readCString(this.libtaos.taos_errstr(result)); +} +// Async +CTaosInterface.prototype.query_a = function query_a(connection, sql, callback, param = ref.ref(ref.NULL)) { + // void taos_query_a(TAOS *taos, char *sqlstr, void (*fp)(void *param, TAOS_RES *, int), void *param) + callback = ffi.Callback(ref.types.void, [ref.types.void_ptr, ref.types.void_ptr, ref.types.int], callback); + 
this.libtaos.taos_query_a(connection, ref.allocCString(sql), callback, param); + return param; +} +/** Asynchrnously fetches the next block of rows. Wraps callback and transfers a 4th argument to the cursor, the row data as blocks in javascript form + * Note: This isn't a recursive function, in order to fetch all data either use the TDengine cursor object, TaosQuery object, or implement a recrusive + * function yourself using the libtaos.taos_fetch_rows_a function + */ +CTaosInterface.prototype.fetch_rows_a = function fetch_rows_a(result, callback, param = ref.ref(ref.NULL)) { + // void taos_fetch_rows_a(TAOS_RES *res, void (*fp)(void *param, TAOS_RES *, int numOfRows), void *param); + var cti = this; + // wrap callback with a function so interface can access the numOfRows value, needed in order to properly process the binary data + let asyncCallbackWrapper = function (param2, result2, numOfRows2) { + // Data preparation to pass to cursor. Could be bottleneck in query execution callback times. 
+ let row = cti.libtaos.taos_fetch_row(result2); + let fields = cti.fetchFields_a(result2); + + let precision = cti.libtaos.taos_result_precision(result2); + let blocks = new Array(fields.length); + blocks.fill(null); + numOfRows2 = Math.abs(numOfRows2); + let offset = 0; + var fieldL = cti.libtaos.taos_fetch_lengths(result); + var fieldlens = []; + if (ref.isNull(fieldL) == false) { + + for (let i = 0; i < fields.length; i++) { + let plen = ref.reinterpret(fieldL, 8, i * 8); + let len = ref.get(plen, 0, ref.types.int32); + fieldlens.push(len); + } + } + if (numOfRows2 > 0) { + for (let i = 0; i < fields.length; i++) { + if (ref.isNull(pdata.readPointer())) { + blocks[i] = new Array(); + } else { + if (!convertFunctions[fields[i]['type']]) { + throw new errors.DatabaseError("Invalid data type returned from database"); + } + let prow = ref.reinterpret(row, 8, i * 8); + prow = prow.readPointer(); + prow = ref.ref(prow); + blocks[i] = convertFunctions[fields[i]['type']](prow, 1, fieldlens[i], offset, precision); + //offset += fields[i]['bytes'] * numOfRows2; + } + } + } + callback(param2, result2, numOfRows2, blocks); + } + asyncCallbackWrapper = ffi.Callback(ref.types.void, [ref.types.void_ptr, ref.types.void_ptr, ref.types.int], asyncCallbackWrapper); + this.libtaos.taos_fetch_rows_a(result, asyncCallbackWrapper, param); + return param; +} +// Fetch field meta data by result handle +CTaosInterface.prototype.fetchFields_a = function fetchFields_a(result) { + let pfields = this.fetchFields(result); + let pfieldscount = this.numFields(result); + let fields = []; + if (ref.isNull(pfields) == false) { + pfields = ref.reinterpret(pfields, 68 * pfieldscount, 0); + for (let i = 0; i < pfields.length; i += 68) { + //0 - 64 = name //65 = type, 66 - 67 = bytes + fields.push({ + name: ref.readCString(ref.reinterpret(pfields, 65, i)), + type: pfields[i + 65], + bytes: pfields[i + 66] + }) + } + } + return fields; +} +// Stop a query by result handle 
+CTaosInterface.prototype.stopQuery = function stopQuery(result) { + if (result != null) { + this.libtaos.taos_stop_query(result); + } + else { + throw new errors.ProgrammingError("No result handle passed to stop query"); + } +} +CTaosInterface.prototype.getServerInfo = function getServerInfo(connection) { + return ref.readCString(this.libtaos.taos_get_server_info(connection)); +} +CTaosInterface.prototype.getClientInfo = function getClientInfo() { + return ref.readCString(this.libtaos.taos_get_client_info()); +} + +// Subscription +CTaosInterface.prototype.subscribe = function subscribe(connection, restart, topic, sql, interval) { + let topicOrig = topic; + let sqlOrig = sql; + try { + sql = sql != null ? ref.allocCString(sql) : ref.alloc(ref.types.char_ptr, ref.NULL); + } + catch (err) { + throw "Attribute Error: sql is expected as a str"; + } + try { + topic = topic != null ? ref.allocCString(topic) : ref.alloc(ref.types.char_ptr, ref.NULL); + } + catch (err) { + throw TypeError("topic is expected as a str"); + } + + restart = ref.alloc(ref.types.int, restart); + + let subscription = this.libtaos.taos_subscribe(connection, restart, topic, sql, null, null, interval); + if (ref.isNull(subscription)) { + throw new errors.TDError('Failed to subscribe to TDengine | Database: ' + dbOrig + ', Table: ' + tableOrig); + } + else { + console.log('Successfully subscribed to TDengine - Topic: ' + topicOrig); + } + return subscription; +} + +CTaosInterface.prototype.consume = function consume(subscription) { + let result = this.libtaos.taos_consume(subscription); + let fields = []; + let pfields = this.fetchFields(result); + if (ref.isNull(pfields) == false) { + pfields = ref.reinterpret(pfields, this.numFields(result) * 68, 0); + for (let i = 0; i < pfields.length; i += 68) { + //0 - 63 = name //64 - 65 = bytes, 66 - 67 = type + fields.push({ + name: ref.readCString(ref.reinterpret(pfields, 64, i)), + bytes: pfields[i + 64], + type: pfields[i + 66] + }) + } + } + + let data 
= []; + while (true) { + let { blocks, num_of_rows } = this.fetchBlock(result, fields); + if (num_of_rows == 0) { + break; + } + for (let i = 0; i < num_of_rows; i++) { + data.push([]); + let rowBlock = new Array(fields.length); + for (let j = 0; j < fields.length; j++) { + rowBlock[j] = blocks[j][i]; + } + data[data.length - 1] = (rowBlock); + } + } + return { data: data, fields: fields, result: result }; +} +CTaosInterface.prototype.unsubscribe = function unsubscribe(subscription) { + //void taos_unsubscribe(TAOS_SUB *tsub); + this.libtaos.taos_unsubscribe(subscription); +} + +// Continuous Query +CTaosInterface.prototype.openStream = function openStream(connection, sql, callback, stime, stoppingCallback, param = ref.ref(ref.NULL)) { + try { + sql = ref.allocCString(sql); + } + catch (err) { + throw "Attribute Error: sql string is expected as a str"; + } + var cti = this; + let asyncCallbackWrapper = function (param2, result2, row) { + let fields = cti.fetchFields_a(result2); + let precision = cti.libtaos.taos_result_precision(result2); + let blocks = new Array(fields.length); + blocks.fill(null); + let numOfRows2 = 1; + let offset = 0; + if (numOfRows2 > 0) { + for (let i = 0; i < fields.length; i++) { + if (!convertFunctions[fields[i]['type']]) { + throw new errors.DatabaseError("Invalid data type returned from database"); + } + blocks[i] = convertFunctions[fields[i]['type']](row, numOfRows2, fields[i]['bytes'], offset, precision); + offset += fields[i]['bytes'] * numOfRows2; + } + } + callback(param2, result2, blocks, fields); + } + asyncCallbackWrapper = ffi.Callback(ref.types.void, [ref.types.void_ptr, ref.types.void_ptr, ref.refType(ref.types.void_ptr2)], asyncCallbackWrapper); + asyncStoppingCallbackWrapper = ffi.Callback(ref.types.void, [ref.types.void_ptr], stoppingCallback); + let streamHandle = this.libtaos.taos_open_stream(connection, sql, asyncCallbackWrapper, stime, param, asyncStoppingCallbackWrapper); + if (ref.isNull(streamHandle)) { + throw new 
errors.TDError('Failed to open a stream with TDengine'); + return false; + } + else { + console.log("Succesfully opened stream"); + return streamHandle; + } +} +CTaosInterface.prototype.closeStream = function closeStream(stream) { + this.libtaos.taos_close_stream(stream); + console.log("Closed stream"); +} diff --git a/tests/connectorTest/nodejsTest/nodetaos/connection.js b/tests/connectorTest/nodejsTest/nodetaos/connection.js new file mode 100644 index 0000000000..08186f8705 --- /dev/null +++ b/tests/connectorTest/nodejsTest/nodetaos/connection.js @@ -0,0 +1,84 @@ +const TDengineCursor = require('./cursor') +const CTaosInterface = require('./cinterface') +module.exports = TDengineConnection; + +/** + * TDengine Connection Class + * @param {object} options - Options for configuring the connection with TDengine + * @return {TDengineConnection} + * @class TDengineConnection + * @constructor + * @example + * //Initialize a new connection + * var conn = new TDengineConnection({host:"127.0.0.1", user:"root", password:"taosdata", config:"/etc/taos",port:0}) + * + */ +function TDengineConnection(options) { + this._conn = null; + this._host = null; + this._user = "root"; //The default user + this._password = "taosdata"; //The default password + this._database = null; + this._port = 0; + this._config = null; + this._chandle = null; + this._configConn(options) + return this; +} +/** + * Configure the connection to TDengine + * @private + * @memberof TDengineConnection + */ +TDengineConnection.prototype._configConn = function _configConn(options) { + if (options['host']) { + this._host = options['host']; + } + if (options['user']) { + this._user = options['user']; + } + if (options['password']) { + this._password = options['password']; + } + if (options['database']) { + this._database = options['database']; + } + if (options['port']) { + this._port = options['port']; + } + if (options['config']) { + this._config = options['config']; + } + this._chandle = new 
CTaosInterface(this._config); + this._conn = this._chandle.connect(this._host, this._user, this._password, this._database, this._port); +} +/** Close the connection to TDengine */ +TDengineConnection.prototype.close = function close() { + this._chandle.close(this._conn); +} +/** + * Initialize a new cursor to interact with TDengine with + * @return {TDengineCursor} + */ +TDengineConnection.prototype.cursor = function cursor() { + //Pass the connection object to the cursor + return new TDengineCursor(this); +} +TDengineConnection.prototype.commit = function commit() { + return this; +} +TDengineConnection.prototype.rollback = function rollback() { + return this; +} +/** + * Clear the results from connector + * @private + */ +/* + TDengineConnection.prototype._clearResultSet = function _clearResultSet() { + var result = this._chandle.useResult(this._conn).result; + if (result) { + this._chandle.freeResult(result) + } +} +*/ diff --git a/tests/connectorTest/nodejsTest/nodetaos/constants.js b/tests/connectorTest/nodejsTest/nodetaos/constants.js new file mode 100644 index 0000000000..cd6a0c9fba --- /dev/null +++ b/tests/connectorTest/nodejsTest/nodetaos/constants.js @@ -0,0 +1,76 @@ +/** + * Contains the definitions/values assigned to various field types + * @module FieldTypes + */ +/** + * TDengine Field Types and their type codes + * @typedef {Object} FieldTypes + * @global + * @property {number} C_NULL - Null + * @property {number} C_BOOL - Boolean. Note, 0x02 is the C_BOOL_NULL value. + * @property {number} C_TINYINT - Tiny Int, values in the range [-2^7+1, 2^7-1]. Note, -2^7 has been used as the C_TINYINT_NULL value + * @property {number} C_SMALLINT - Small Int, values in the range [-2^15+1, 2^15-1]. Note, -2^15 has been used as the C_SMALLINT_NULL value + * @property {number} C_INT - Int, values in the range [-2^31+1, 2^31-1]. Note, -2^31 has been used as the C_INT_NULL value + * @property {number} C_BIGINT - Big Int, values in the range [-2^59, 2^59]. 
+ * @property {number} C_FLOAT - Float, values in the range [-3.4E38, 3.4E38], accurate up to 6-7 decimal places. + * @property {number} C_DOUBLE - Double, values in the range [-1.7E308, 1.7E308], accurate up to 15-16 decimal places. + * @property {number} C_BINARY - Binary, encoded in utf-8. + * @property {number} C_TIMESTAMP - Timestamp in format "YYYY:MM:DD HH:MM:SS.MMM". Measured in number of milliseconds passed after + 1970-01-01 08:00:00.000 GMT. + * @property {number} C_NCHAR - NChar field type encoded in ASCII, a wide string. + * + * + * + * @property {number} C_TIMESTAMP_MILLI - The code for millisecond timestamps, as returned by libtaos.taos_result_precision(result). + * @property {number} C_TIMESTAMP_MICRO - The code for microsecond timestamps, as returned by libtaos.taos_result_precision(result). + */ +module.exports = { + C_NULL : 0, + C_BOOL : 1, + C_TINYINT : 2, + C_SMALLINT : 3, + C_INT : 4, + C_BIGINT : 5, + C_FLOAT : 6, + C_DOUBLE : 7, + C_BINARY : 8, + C_TIMESTAMP : 9, + C_NCHAR : 10, + // NULL value definition + // NOTE: These values should change according to C definition in tsdb.h + C_BOOL_NULL : 2, + C_TINYINT_NULL : -128, + C_SMALLINT_NULL : -32768, + C_INT_NULL : -2147483648, + C_BIGINT_NULL : -9223372036854775808, + C_FLOAT_NULL : 2146435072, + C_DOUBLE_NULL : -9223370937343148032, + C_NCHAR_NULL : 4294967295, + C_BINARY_NULL : 255, + C_TIMESTAMP_MILLI : 0, + C_TIMESTAMP_MICRO : 1, + getType, +} + +const typeCodesToName = { + 0 : 'Null', + 1 : 'Boolean', + 2 : 'Tiny Int', + 3 : 'Small Int', + 4 : 'Int', + 5 : 'Big Int', + 6 : 'Float', + 7 : 'Double', + 8 : 'Binary', + 9 : 'Timestamp', + 10 : 'Nchar', +} + +/** + * @function + * @param {number} typecode - The code to get the name of the type for + * @return {string} Name of the field type + */ +function getType(typecode) { + return typeCodesToName[typecode]; +} diff --git a/tests/connectorTest/nodejsTest/nodetaos/cursor.js b/tests/connectorTest/nodejsTest/nodetaos/cursor.js new file mode 
100644 index 0000000000..f879d89d48 --- /dev/null +++ b/tests/connectorTest/nodejsTest/nodetaos/cursor.js @@ -0,0 +1,476 @@ +const ref = require('ref-napi'); +require('./globalfunc.js') +const CTaosInterface = require('./cinterface') +const errors = require('./error') +const TaosQuery = require('./taosquery') +const { PerformanceObserver, performance } = require('perf_hooks'); +module.exports = TDengineCursor; + +/** + * @typedef {Object} Buffer - A Node.js buffer. Please refer to {@link https://nodejs.org/api/buffer.html} for more details + * @global + */ + +/** + * @class TDengineCursor + * @classdesc The TDengine Cursor works directly with the C Interface which works with TDengine. It refrains from + * returning parsed data and majority of functions return the raw data such as cursor.fetchall() as compared to the TaosQuery class which + * has functions that "prettify" the data and add more functionality and can be used through cursor.query("your query"). Instead of + * promises, the class and its functions use callbacks. + * @param {TDengineConnection} - The TDengine Connection this cursor uses to interact with TDengine + * @property {data} - Latest retrieved data from query execution. It is an empty array by default + * @property {fields} - Array of the field objects in order from left to right of the latest data retrieved + * @since 1.0.0 + */ +function TDengineCursor(connection = null) { + //All parameters are store for sync queries only. + this._rowcount = -1; + this._connection = null; + this._result = null; + this._fields = null; + this.data = []; + this.fields = null; + if (connection != null) { + this._connection = connection + this._chandle = connection._chandle //pass through, just need library loaded. 
 + } + else { + throw new errors.ProgrammingError("A TDengineConnection object is required to be passed to the TDengineCursor"); + } + +} +/** + * Get the row counts of the latest query + * @since 1.0.0 + * @return {number} Rowcount + */ +TDengineCursor.prototype.rowcount = function rowcount() { + return this._rowcount; +} +/** + * Close the cursor by setting its connection to null and freeing results from the connection and resetting the results it has stored + * @return {boolean} Whether or not the cursor was successfully closed + * @since 1.0.0 + */ +TDengineCursor.prototype.close = function close() { + if (this._connection == null) { + return false; + } + this._connection._clearResultSet(); + this._reset_result(); + this._connection = null; + return true; +} +/** + * Create a TaosQuery object to perform a query to TDengine and retrieve data. + * @param {string} operation - The operation string to perform a query on + * @param {boolean} execute - Whether or not to immediately perform the query. Default is false. + * @return {TaosQuery | Promise} A TaosQuery object + * @example + * var query = cursor.query("select count(*) from meterinfo.meters"); + * query.execute(); + * @since 1.0.6 + */ +TDengineCursor.prototype.query = function query(operation, execute = false) { + return new TaosQuery(operation, this, execute); +} + +/** + * Execute a query. Also stores all the field meta data returned from the query into cursor.fields. It is preferable to use cursor.query() to create + * queries and execute them instead of using the cursor object directly. + * @param {string} operation - The query operation to execute in the taos shell + * @param {Object} options - Execution options object. quiet : true turns off logging from queries + * @param {boolean} options.quiet - True if you want to suppress logging such as "Query OK, 1 row(s) ..." 
+ * @param {function} callback - A callback function to execute after the query is made to TDengine + * @return {number | Buffer} Number of affected rows or a Buffer that points to the results of the query + * @since 1.0.0 + */ +TDengineCursor.prototype.execute = function execute(operation, options, callback) { + if (operation == undefined) { + throw new errors.ProgrammingError('No operation passed as argument'); + return null; + } + + if (typeof options == 'function') { + callback = options; + } + if (typeof options != 'object') options = {} + if (this._connection == null) { + throw new errors.ProgrammingError('Cursor is not connected'); + } + + this._reset_result(); + + let stmt = operation; + let time = 0; + let res; + if (options['quiet'] != true) { + const obs = new PerformanceObserver((items) => { + time = items.getEntries()[0].duration; + performance.clearMarks(); + }); + obs.observe({ entryTypes: ['measure'] }); + performance.mark('A'); + this._result = this._chandle.query(this._connection._conn, stmt); + performance.mark('B'); + performance.measure('query', 'A', 'B'); + } + else { + this._result = this._chandle.query(this._connection._conn, stmt); + } + res = this._chandle.errno(this._result); + if (res == 0) { + let fieldCount = this._chandle.fieldsCount(this._result); + if (fieldCount == 0) { + let affectedRowCount = this._chandle.affectedRows(this._result); + let response = this._createAffectedResponse(affectedRowCount, time) + if (options['quiet'] != true) { + console.log(response); + } + wrapCB(callback); + return affectedRowCount; //return num of affected rows, common with insert, use statements + } + else { + this._fields = this._chandle.useResult(this._result); + this.fields = this._fields; + wrapCB(callback); + + return this._result; //return a pointer to the result + } + } + else { + throw new errors.ProgrammingError(this._chandle.errStr(this._result)) + } + +} +TDengineCursor.prototype._createAffectedResponse = function (num, time) { + return 
"Query OK, " + num + " row(s) affected (" + (time * 0.001).toFixed(8) + "s)"; +} +TDengineCursor.prototype._createSetResponse = function (num, time) { + return "Query OK, " + num + " row(s) in set (" + (time * 0.001).toFixed(8) + "s)"; +} +TDengineCursor.prototype.executemany = function executemany() { + +} +TDengineCursor.prototype.fetchone = function fetchone() { + +} +TDengineCursor.prototype.fetchmany = function fetchmany() { + +} +/** + * Fetches all results from a query and also stores results into cursor.data. It is preferable to use cursor.query() to create + * queries and execute them instead of using the cursor object directly. + * @param {function} callback - callback function executing on the complete fetched data + * @return {Array} The resultant array, with entries corresponding to each retrieved row from the query results, sorted in + * order by the field name ordering in the table. + * @since 1.0.0 + * @example + * cursor.execute('select * from db.table'); + * var data = cursor.fetchall(function(results) { + * results.forEach(row => console.log(row)); + * }) + */ +TDengineCursor.prototype.fetchall = function fetchall(options, callback) { + if (this._result == null || this._fields == null) { + throw new errors.OperationalError("Invalid use of fetchall, either result or fields from query are null. 
First execute a query first"); + } + + let num_of_rows = this._chandle.affectedRows(this._result); + let data = new Array(num_of_rows); + + this._rowcount = 0; + + let time = 0; + const obs = new PerformanceObserver((items) => { + time += items.getEntries()[0].duration; + performance.clearMarks(); + }); + obs.observe({ entryTypes: ['measure'] }); + performance.mark('A'); + while (true) { + let blockAndRows = this._chandle.fetchBlock(this._result, this._fields); + // console.log(blockAndRows); + // break; + let block = blockAndRows.blocks; + let num_of_rows = blockAndRows.num_of_rows; + if (num_of_rows == 0) { + break; + } + this._rowcount += num_of_rows; + let numoffields = this._fields.length; + for (let i = 0; i < num_of_rows; i++) { + // data.push([]); + + let rowBlock = new Array(numoffields); + for (let j = 0; j < numoffields; j++) { + rowBlock[j] = block[j][i]; + } + data[this._rowcount - num_of_rows + i] = (rowBlock); + // data.push(rowBlock); + } + + } + + performance.mark('B'); + performance.measure('query', 'A', 'B'); + let response = this._createSetResponse(this._rowcount, time) + console.log(response); + + // this._connection._clearResultSet(); + let fields = this.fields; + this._reset_result(); + this.data = data; + this.fields = fields; + + wrapCB(callback, data); + + return data; +} +/** + * Asynchrnously execute a query to TDengine. NOTE, insertion requests must be done in sync if on the same table. + * @param {string} operation - The query operation to execute in the taos shell + * @param {Object} options - Execution options object. quiet : true turns off logging from queries + * @param {boolean} options.quiet - True if you want to surpress logging such as "Query OK, 1 row(s) ..." 
+ * @param {function} callback - A callback function to execute after the query is made to TDengine + * @return {number | Buffer} Number of affected rows or a Buffer that points to the results of the query + * @since 1.0.0 + */ +TDengineCursor.prototype.execute_a = function execute_a(operation, options, callback, param) { + if (operation == undefined) { + throw new errors.ProgrammingError('No operation passed as argument'); + return null; + } + if (typeof options == 'function') { + //we expect the parameter after callback to be param + param = callback; + callback = options; + } + if (typeof options != 'object') options = {} + if (this._connection == null) { + throw new errors.ProgrammingError('Cursor is not connected'); + } + if (typeof callback != 'function') { + throw new errors.ProgrammingError("No callback function passed to execute_a function"); + } + // Async wrapper for callback; + var cr = this; + + let asyncCallbackWrapper = function (param2, res2, resCode) { + if (typeof callback == 'function') { + callback(param2, res2, resCode); + } + + if (resCode >= 0) { + // let fieldCount = cr._chandle.numFields(res2); + // if (fieldCount == 0) { + // //cr._chandle.freeResult(res2); + // return res2; + // } + // else { + // return res2; + // } + return res2; + + } + else { + throw new errors.ProgrammingError("Error occuring with use of execute_a async function. Status code was returned with failure"); + } + } + + let stmt = operation; + let time = 0; + + // Use ref module to write to buffer in cursor.js instead of taosquery to maintain a difference in levels. 
Have taosquery stay high level + // through letting it pass an object as param + var buf = ref.alloc('Object'); + ref.writeObject(buf, 0, param); + const obs = new PerformanceObserver((items) => { + time = items.getEntries()[0].duration; + performance.clearMarks(); + }); + obs.observe({ entryTypes: ['measure'] }); + performance.mark('A'); + this._chandle.query_a(this._connection._conn, stmt, asyncCallbackWrapper, buf); + performance.mark('B'); + performance.measure('query', 'A', 'B'); + return param; + + +} +/** + * Fetches all results from an async query. It is preferable to use cursor.query_a() to create + * async queries and execute them instead of using the cursor object directly. + * @param {Object} options - An options object containing options for this function + * @param {function} callback - callback function that is callbacked on the COMPLETE fetched data (it is calledback only once!). + * Must be of form function (param, result, rowCount, rowData) + * @param {Object} param - A parameter that is also passed to the main callback function. Important! Param must be an object, and the key "data" cannot be used + * @return {{param:Object, result:Buffer}} An object with the passed parameters object and the buffer instance that is a pointer to the result handle. 
 + @since 1.2.0 + * @example + * cursor.execute('select * from db.table'); + * var data = cursor.fetchall(function(results) { + * results.forEach(row => console.log(row)); + * }) + */ +TDengineCursor.prototype.fetchall_a = function fetchall_a(result, options, callback, param = {}) { + if (typeof options == 'function') { + //we expect the parameter after callback to be param + param = callback; + callback = options; + } + if (typeof options != 'object') options = {} + if (this._connection == null) { + throw new errors.ProgrammingError('Cursor is not connected'); + } + if (typeof callback != 'function') { + throw new errors.ProgrammingError('No callback function passed to fetchall_a function') + } + if (param.data) { + throw new errors.ProgrammingError("You aren't allowed to set the key 'data' for the parameters object"); + } + let buf = ref.alloc('Object'); + param.data = []; + var cr = this; + + // This callback wrapper accumulates the data from the fetch_rows_a function from the cinterface. It is accumulated by passing the param2 + // object which holds accumulated data in the data key. + let asyncCallbackWrapper = function asyncCallbackWrapper(param2, result2, numOfRows2, rowData) { + param2 = ref.readObject(param2); //return the object back from the pointer + if (numOfRows2 > 0 && rowData.length != 0) { + // Keep fetching until no rows left. 
+ let buf2 = ref.alloc('Object'); + param2.data.push(rowData); + ref.writeObject(buf2, 0, param2); + cr._chandle.fetch_rows_a(result2, asyncCallbackWrapper, buf2); + } + else { + let finalData = param2.data; + let fields = cr._chandle.fetchFields_a(result2); + let data = []; + for (let i = 0; i < finalData.length; i++) { + let num_of_rows = finalData[i][0].length; //fetched block number i; + let block = finalData[i]; + for (let j = 0; j < num_of_rows; j++) { + data.push([]); + let rowBlock = new Array(fields.length); + for (let k = 0; k < fields.length; k++) { + rowBlock[k] = block[k][j]; + } + data[data.length - 1] = rowBlock; + } + } + cr._chandle.freeResult(result2); // free result, avoid seg faults and mem leaks! + callback(param2, result2, numOfRows2, { data: data, fields: fields }); + + } + } + ref.writeObject(buf, 0, param); + param = this._chandle.fetch_rows_a(result, asyncCallbackWrapper, buf); //returned param + return { param: param, result: result }; +} +/** + * Stop a query given the result handle. + * @param {Buffer} result - The buffer that acts as the result handle + * @since 1.3.0 + */ +TDengineCursor.prototype.stopQuery = function stopQuery(result) { + this._chandle.stopQuery(result); +} +TDengineCursor.prototype._reset_result = function _reset_result() { + this._rowcount = -1; + if (this._result != null) { + this._chandle.freeResult(this._result); + } + this._result = null; + this._fields = null; + this.data = []; + this.fields = null; +} +/** + * Get server info such as version number + * @return {string} + * @since 1.3.0 + */ +TDengineCursor.prototype.getServerInfo = function getServerInfo() { + return this._chandle.getServerInfo(this._connection._conn); +} +/** + * Get client info such as version number + * @return {string} + * @since 1.3.0 + */ +TDengineCursor.prototype.getClientInfo = function getClientInfo() { + return this._chandle.getClientInfo(); +} +/** + * Subscribe to a table from a database in TDengine. 
 + @param {Object} config - A configuration object containing the configuration options for the subscription + * @param {string} config.restart - whether or not to continue a subscription if it already exists, otherwise start from beginning + * @param {string} config.topic - The unique identifier of a subscription + * @param {string} config.sql - A sql statement for data query + * @param {string} config.interval - The pulling interval + * @return {Buffer} A buffer pointing to the subscription session handle + * @since 1.3.0 + */ +TDengineCursor.prototype.subscribe = function subscribe(config) { + let restart = config.restart ? 1 : 0; + return this._chandle.subscribe(this._connection._conn, restart, config.topic, config.sql, config.interval); +}; +/** + * An infinite loop that consumes the latest data and calls a callback function that is provided. + * @param {Buffer} subscription - A buffer object pointing to the subscription session handle + * @param {function} callback - The callback function that takes the row data, field/column meta data, and the subscription session handle as input + * @since 1.3.0 + */ +TDengineCursor.prototype.consumeData = async function consumeData(subscription, callback) { + while (true) { + let { data, fields, result } = this._chandle.consume(subscription); + callback(data, fields, result); + } +} +/** + * Unsubscribe the provided buffer object pointing to the subscription session handle + * @param {Buffer} subscription - A buffer object pointing to the subscription session handle that is to be unsubscribed + * @since 1.3.0 + */ +TDengineCursor.prototype.unsubscribe = function unsubscribe(subscription) { + this._chandle.unsubscribe(subscription); +} +/** + * Open a stream with TDengine to run the sql query periodically in the background + * @param {string} sql - The query to run + * @param {function} callback - The callback function to run after each query, accepting inputs as param, result handle, data, fields meta data + * @param 
{number} stime - The time of the stream starts in the form of epoch milliseconds. If 0 is given, the start time is set as the current time. + * @param {function} stoppingCallback - The callback function to run when the continuous query stops. It takes no inputs + * @param {object} param - A parameter that is passed to the main callback function + * @return {Buffer} A buffer pointing to the stream handle + * @since 1.3.0 + */ +TDengineCursor.prototype.openStream = function openStream(sql, callback, stime = 0, stoppingCallback, param = {}) { + let buf = ref.alloc('Object'); + ref.writeObject(buf, 0, param); + + let asyncCallbackWrapper = function (param2, result2, blocks, fields) { + let data = []; + let num_of_rows = blocks[0].length; + for (let j = 0; j < num_of_rows; j++) { + data.push([]); + let rowBlock = new Array(fields.length); + for (let k = 0; k < fields.length; k++) { + rowBlock[k] = blocks[k][j]; + } + data[data.length - 1] = rowBlock; + } + callback(param2, result2, blocks, fields); + } + return this._chandle.openStream(this._connection._conn, sql, asyncCallbackWrapper, stime, stoppingCallback, buf); +} +/** + * Close a stream + * @param {Buffer} - A buffer pointing to the handle of the stream to be closed + * @since 1.3.0 + */ +TDengineCursor.prototype.closeStream = function closeStream(stream) { + this._chandle.closeStream(stream); +} diff --git a/tests/connectorTest/nodejsTest/nodetaos/error.js b/tests/connectorTest/nodejsTest/nodetaos/error.js new file mode 100644 index 0000000000..8ab91a50c7 --- /dev/null +++ b/tests/connectorTest/nodejsTest/nodetaos/error.js @@ -0,0 +1,96 @@ + +/** + * TDengine Error Class + * @ignore + */ +class TDError extends Error { + constructor(args) { + super(args) + this.name = "TDError"; + } +} +/** Exception raised for important warnings like data truncations while inserting. 
+ * @ignore + */ +class Warning extends Error { + constructor(args) { + super(args) + this.name = "Warning"; + } +} +/** Exception raised for errors that are related to the database interface rather than the database itself. + * @ignore + */ +class InterfaceError extends TDError { + constructor(args) { + super(args) + this.name = "TDError.InterfaceError"; + } +} +/** Exception raised for errors that are related to the database. + * @ignore + */ +class DatabaseError extends TDError { + constructor(args) { + super(args) + this.name = "TDError.DatabaseError"; + } +} +/** Exception raised for errors that are due to problems with the processed data like division by zero, numeric value out of range. + * @ignore + */ +class DataError extends DatabaseError { + constructor(args) { + super(args) + this.name = "TDError.DatabaseError.DataError"; + } +} +/** Exception raised for errors that are related to the database's operation and not necessarily under the control of the programmer + * @ignore + */ +class OperationalError extends DatabaseError { + constructor(args) { + super(args) + this.name = "TDError.DatabaseError.OperationalError"; + } +} +/** Exception raised when the relational integrity of the database is affected. + * @ignore + */ +class IntegrityError extends DatabaseError { + constructor(args) { + super(args) + this.name = "TDError.DatabaseError.IntegrityError"; + } +} +/** Exception raised when the database encounters an internal error. + * @ignore + */ +class InternalError extends DatabaseError { + constructor(args) { + super(args) + this.name = "TDError.DatabaseError.InternalError"; + } +} +/** Exception raised for programming errors. + * @ignore + */ +class ProgrammingError extends DatabaseError { + constructor(args) { + super(args) + this.name = "TDError.DatabaseError.ProgrammingError"; + } +} +/** Exception raised in case a method or database API was used which is not supported by the database. 
+ * @ignore + */ +class NotSupportedError extends DatabaseError { + constructor(args) { + super(args) + this.name = "TDError.DatabaseError.NotSupportedError"; + } +} + +module.exports = { + TDError, Warning, InterfaceError, DatabaseError, DataError, OperationalError, IntegrityError, InternalError, ProgrammingError, NotSupportedError +}; diff --git a/tests/connectorTest/nodejsTest/nodetaos/globalfunc.js b/tests/connectorTest/nodejsTest/nodetaos/globalfunc.js new file mode 100644 index 0000000000..cf7344c868 --- /dev/null +++ b/tests/connectorTest/nodejsTest/nodetaos/globalfunc.js @@ -0,0 +1,14 @@ +/* Wrap a callback, reduce code amount */ +function wrapCB(callback, input) { + if (typeof callback === 'function') { + callback(input); + } + return; +} +global.wrapCB = wrapCB; +function toTaosTSString(date) { + date = new Date(date); + let tsArr = date.toISOString().split("T") + return tsArr[0] + " " + tsArr[1].substring(0, tsArr[1].length-1); +} +global.toTaosTSString = toTaosTSString; diff --git a/tests/connectorTest/nodejsTest/nodetaos/taosobjects.js b/tests/connectorTest/nodejsTest/nodetaos/taosobjects.js new file mode 100644 index 0000000000..3bc0fe0aca --- /dev/null +++ b/tests/connectorTest/nodejsTest/nodetaos/taosobjects.js @@ -0,0 +1,152 @@ +const FieldTypes = require('./constants'); +const util = require('util'); +/** + * Various objects such as TaosRow and TaosColumn that help make parsing data easier + * @module TaosObjects + * + */ + +/** + * The TaosRow object. Contains the data from a retrieved row from a database and functions that parse the data. + * @typedef {Object} TaosRow - A row of data retrieved from a table. + * @global + * @example + * var trow = new TaosRow(row); + * console.log(trow.data); + */ +function TaosRow(row) { + this.data = row; + this.length = row.length; + return this; +} + +/** + * @typedef {Object} TaosField - A field/column's metadata from a table. 
+ * @global + * @example + * var tfield = new TaosField(field); + * console.log(tfield.name); + */ + +function TaosField(field) { + this._field = field; + this.name = field.name; + this.type = FieldTypes.getType(field.type); + return this; +} + +/** + * A TaosTimestamp object, which is the standard date object with added functionality + * @global + * @memberof TaosObjects + * @param {Date} date - A Javascript date time object or the time in milliseconds past 1970-1-1 00:00:00.000 + */ +class TaosTimestamp extends Date { + constructor(date, precision = 0) { + if (precision === 1) { + super(Math.floor(date / 1000)); + this.precisionExtras = date % 1000; + } else if (precision === 2) { + // use BigInt to fix: 1623254400999999999 / 1000000 = 1623254401000 which not expected + super(parseInt(BigInt(date) / 1000000n)); + // use BigInt to fix: 1625801548423914405 % 1000000 = 914496 which not expected (914405) + this.precisionExtras = parseInt(BigInt(date) % 1000000n); + } else { + super(parseInt(date)); + } + this.precision = precision; + } + + /** + * TDengine raw timestamp. + * @returns raw taos timestamp (int64) + */ + taosTimestamp() { + if (this.precision == 1) { + return (this * 1000 + this.precisionExtras); + } else if (this.precision == 2) { + return (this * 1000000 + this.precisionExtras); + } else { + return Math.floor(this); + } + } + + /** + * Gets the microseconds of a Date. + * @return {Int} A microseconds integer + */ + getMicroseconds() { + if (this.precision == 1) { + return this.getMilliseconds() * 1000 + this.precisionExtras; + } else if (this.precision == 2) { + return this.getMilliseconds() * 1000 + this.precisionExtras / 1000; + } else { + return 0; + } + } + /** + * Gets the nanoseconds of a TaosTimestamp. 
+ * @return {Int} A nanoseconds integer + */ + getNanoseconds() { + if (this.precision == 1) { + return this.getMilliseconds() * 1000000 + this.precisionExtras * 1000; + } else if (this.precision == 2) { + return this.getMilliseconds() * 1000000 + this.precisionExtras; + } else { + return 0; + } + } + + /** + * @returns {String} a string for timestamp string format + */ + _precisionExtra() { + if (this.precision == 1) { + return String(this.precisionExtras).padStart(3, '0'); + } else if (this.precision == 2) { + return String(this.precisionExtras).padStart(6, '0'); + } else { + return ''; + } + } + /** + * @function Returns the date into a string usable by TDengine + * @return {string} A Taos Timestamp String + */ + toTaosString() { + var tzo = -this.getTimezoneOffset(), + dif = tzo >= 0 ? '+' : '-', + pad = function (num) { + var norm = Math.floor(Math.abs(num)); + return (norm < 10 ? '0' : '') + norm; + }, + pad2 = function (num) { + var norm = Math.floor(Math.abs(num)); + if (norm < 10) return '00' + norm; + if (norm < 100) return '0' + norm; + if (norm < 1000) return norm; + }; + return this.getFullYear() + + '-' + pad(this.getMonth() + 1) + + '-' + pad(this.getDate()) + + ' ' + pad(this.getHours()) + + ':' + pad(this.getMinutes()) + + ':' + pad(this.getSeconds()) + + '.' 
+ pad2(this.getMilliseconds()) + + '' + this._precisionExtra(); + } + + /** + * Custom console.log + * @returns {String} string format for debug + */ + [util.inspect.custom](depth, opts) { + return this.toTaosString() + JSON.stringify({ precision: this.precision, precisionExtras: this.precisionExtras }, opts); + } + toString() { + return this.toTaosString(); + } +} + +module.exports = { TaosRow, TaosField, TaosTimestamp } diff --git a/tests/connectorTest/nodejsTest/nodetaos/taosquery.js b/tests/connectorTest/nodejsTest/nodetaos/taosquery.js new file mode 100644 index 0000000000..eeede3ff68 --- /dev/null +++ b/tests/connectorTest/nodejsTest/nodetaos/taosquery.js @@ -0,0 +1,112 @@ +var TaosResult = require('./taosresult') +require('./globalfunc.js') +module.exports = TaosQuery; + + +/** + * @class TaosQuery + * @classdesc The TaosQuery class is one level above the TDengine Cursor in that it makes sure to generally return promises from functions, and wrap + * all data with objects such as wrapping a row of data with Taos Row. This is meant to enable an higher level API that allows additional + * functionality and save time whilst also making it easier to debug and enter less problems with the use of promises. + * @param {string} query - Query to construct object from + * @param {TDengineCursor} cursor - The cursor from which this query will execute from + * @param {boolean} execute - Whether or not to immedietely execute the query synchronously and fetch all results. Default is false. 
+ * @property {string} query - The current query in string format the TaosQuery object represents + * @return {TaosQuery} + * @since 1.0.6 + */ +function TaosQuery(query = "", cursor = null, execute = false) { + this.query = query; + this._cursor = cursor; + if (execute == true) { + return this.execute(); + } + return this; +} + +/** + * Executes the query object and returns a Promise + * @memberof TaosQuery + * @return {Promise} A promise that resolves with a TaosResult object, or rejects with an error + * @since 1.0.6 + */ +TaosQuery.prototype.execute = async function execute() { + var taosQuery = this; //store the current instance of taosQuery to avoid async issues? + var executionPromise = new Promise(function(resolve, reject) { + let data = []; + let fields = []; + let result; + try { + taosQuery._cursor.execute(taosQuery.query); + if (taosQuery._cursor._fields) fields = taosQuery._cursor._fields; + if (taosQuery._cursor._result != null) data = taosQuery._cursor.fetchall(); + result = new TaosResult(data, fields) + } + catch(err) { + reject(err); + } + resolve(result) + + }); + return executionPromise; +} + +/** + * Executes the query object asynchronously and returns a Promise. Completes query to completion. 
+ * @memberof TaosQuery + * @param {Object} options - Execution options + * @return {Promise} A promise that resolves with a TaosResult object, or rejects with an error + * @since 1.2.0 + */ +TaosQuery.prototype.execute_a = async function execute_a(options = {}) { + var executionPromise = new Promise( (resolve, reject) => { + + }); + var fres; + var frej; + var fetchPromise = new Promise( (resolve, reject) => { + fres = resolve; + frej = reject; + }); + let asyncCallbackFetchall = async function(param, res, numOfRows, blocks) { + if (numOfRows > 0) { + // Likely a query like insert + fres(); + } + else { + fres(new TaosResult(blocks.data, blocks.fields)); + } + } + let asyncCallback = async function(param, res, code) { + //upon success, we fetchall results + this._cursor.fetchall_a(res, options, asyncCallbackFetchall, {}); + } + this._cursor.execute_a(this.query, asyncCallback.bind(this), {}); + return fetchPromise; +} + +/** + * Bind arguments to the query and automatically parses them into the right format + * @param {array | ...args} args - A number of arguments to bind to each ? in the query + * @return {TaosQuery} + * @example + * // An example of binding a javascript date and a number to a query + * var query = cursor.query("select count(*) from meterinfo.meters where ts <= ? and areaid = ?").bind(new Date(), 3); + * var promise1 = query.execute(); + * promise1.then(function(result) { + * result.pretty(); // Log the prettified version of the results. 
+ * }); + * @since 1.0.6 + */ +TaosQuery.prototype.bind = function bind(f, ...args) { + if (typeof f == 'object' && f.constructor.name != 'Array') args.unshift(f); //param is not an array object + else if (typeof f != 'object') args.unshift(f); + else { args = f; } + args.forEach(function(arg) { + if (arg.constructor.name == 'TaosTimestamp') arg = "\"" + arg.toTaosString() + "\""; + else if (arg.constructor.name == 'Date') arg = "\"" + toTaosTSString(arg) + "\""; + else if (typeof arg == 'string') arg = "\"" + arg + "\""; + this.query = this.query.replace(/\?/,arg); + }, this); + return this; +} diff --git a/tests/connectorTest/nodejsTest/nodetaos/taosresult.js b/tests/connectorTest/nodejsTest/nodetaos/taosresult.js new file mode 100644 index 0000000000..4138ebbec6 --- /dev/null +++ b/tests/connectorTest/nodejsTest/nodetaos/taosresult.js @@ -0,0 +1,85 @@ +require('./globalfunc.js') +const TaosObjects = require('./taosobjects'); +const TaosRow = TaosObjects.TaosRow; +const TaosField = TaosObjects.TaosField; + +module.exports = TaosResult; +/** + * @class TaosResult + * @classdesc A TaosResult class consts of the row data and the fields metadata, all wrapped under various objects for higher functionality. + * @param {Array} data - Array of result rows + * @param {Array} fields - Array of field meta data + * @property {Array} data - Array of TaosRows forming the result data (this does not include field meta data) + * @property {Array} fields - Array of TaosFields forming the fields meta data array. 
+ * @return {TaosResult} + * @since 1.0.6 + */ +function TaosResult(data, fields) { + this.data = data.map(row => new TaosRow(row)); + this.rowcount = this.data.length; + this.fields = fields.map(field => new TaosField(field)); +} +/** + * Pretty print data and the fields meta data as if you were using the taos shell + * @memberof TaosResult + * @function pretty + * @since 1.0.6 + */ + +TaosResult.prototype.pretty = function pretty() { + let fieldsStr = ""; + let sizing = []; + this.fields.forEach((field,i) => { + if (field._field.type == 8 || field._field.type == 10){ + sizing.push(Math.max(field.name.length, field._field.bytes)); + } + else { + sizing.push(Math.max(field.name.length, suggestedMinWidths[field._field.type])); + } + fieldsStr += fillEmpty(Math.floor(sizing[i]/2 - field.name.length / 2)) + field.name + fillEmpty(Math.ceil(sizing[i]/2 - field.name.length / 2)) + " | "; + }); + var sumLengths = sizing.reduce((a,b)=> a+=b,(0)) + sizing.length * 3; + + console.log("\n" + fieldsStr); + console.log(printN("=",sumLengths)); + this.data.forEach(row => { + let rowStr = ""; + row.data.forEach((entry, i) => { + if (this.fields[i]._field.type == 9) { + entry = entry.toTaosString(); + } else { + entry = entry == null ? 
'null' : entry.toString(); + } + rowStr += entry + rowStr += fillEmpty(sizing[i] - entry.length) + " | "; + }); + console.log(rowStr); + }); +} +const suggestedMinWidths = { + 0: 4, + 1: 4, + 2: 4, + 3: 6, + 4: 11, + 5: 12, + 6: 24, + 7: 24, + 8: 10, + 9: 25, + 10: 10, +} +function printN(s, n) { + let f = ""; + for (let i = 0; i < n; i ++) { + f += s; + } + return f; +} +function fillEmpty(n) { + let str = ""; + for (let i = 0; i < n; i++) { + str += " "; + } + return str; +} diff --git a/tests/connectorTest/nodejsTest/readme.md b/tests/connectorTest/nodejsTest/readme.md new file mode 100644 index 0000000000..26a28afbdd --- /dev/null +++ b/tests/connectorTest/nodejsTest/readme.md @@ -0,0 +1,161 @@ +# TDengine Node.js connector +[![minzip](https://img.shields.io/bundlephobia/minzip/td2.0-connector.svg)](https://github.com/taosdata/TDengine/tree/master/src/connector/nodejs) [![NPM](https://img.shields.io/npm/l/td2.0-connector.svg)](https://github.com/taosdata/TDengine/#what-is-tdengine) + +This is the Node.js library that lets you connect to [TDengine](https://www.github.com/taosdata/tdengine) 2.0 version. It is built so that you can use as much of it as you want or as little of it as you want through providing an extensive API. If you want the raw data in the form of an array of arrays for the row data retrieved from a table, you can do that. If you want to wrap that data with objects that allow you easily manipulate and display data such as using a prettifier function, you can do that! + +## Installation + +To get started, just type in the following to install the connector through [npm](https://www.npmjs.com/) + +```cmd +npm install td2.0-connector +``` + +To interact with TDengine, we make use of the [node-gyp](https://github.com/nodejs/node-gyp) library. 
To install, you will need to install the following depending on platform (the following instructions are quoted from node-gyp)
+
+### On Linux
+
+- `python` (`v2.7` recommended, `v3.x.x` is **not** supported)
+- `make`
+- A proper C/C++ compiler toolchain, like [GCC](https://gcc.gnu.org)
+- `node` (between `v10.x` and `v11.x`, other versions have some dependency compatibility problems)
+
+### On macOS
+
+- `python` (`v2.7` recommended, `v3.x.x` is **not** supported) (already installed on macOS)
+
+- Xcode
+
+  - You also need to install the
+
+    ```
+    Command Line Tools
+    ```
+
+    via Xcode. You can find this under the menu
+
+    ```
+    Xcode -> Preferences -> Locations
+    ```
+
+    (or by running
+
+    ```
+    xcode-select --install
+    ```
+
+    in your Terminal)
+
+  - This step will install `gcc` and the related toolchain containing `make`
+
+### On Windows
+
+#### Option 1
+
+Install all the required tools and configurations using Microsoft's [windows-build-tools](https://github.com/felixrieseberg/windows-build-tools) using `npm install --global --production windows-build-tools` from an elevated PowerShell or CMD.exe (run as Administrator).
+
+#### Option 2
+
+Install tools and configuration manually:
+
+- Install Visual C++ Build Environment: [Visual Studio Build Tools](https://visualstudio.microsoft.com/thank-you-downloading-visual-studio/?sku=BuildTools) (using "Visual C++ build tools" workload) or [Visual Studio 2017 Community](https://visualstudio.microsoft.com/pl/thank-you-downloading-visual-studio/?sku=Community) (using the "Desktop development with C++" workload)
+- Install [Python 2.7](https://www.python.org/downloads/) (`v3.x.x` is not supported), and run `npm config set python python2.7` (or see below for further instructions on specifying the proper Python version and path.)
+- Launch cmd, `npm config set msvs_version 2017` + +If the above steps didn't work for you, please visit [Microsoft's Node.js Guidelines for Windows](https://github.com/Microsoft/nodejs-guidelines/blob/master/windows-environment.md#compiling-native-addon-modules) for additional tips. + +To target native ARM64 Node.js on Windows 10 on ARM, add the components "Visual C++ compilers and libraries for ARM64" and "Visual C++ ATL for ARM64". + +## Usage + +The following is a short summary of the basic usage of the connector, the full api and documentation can be found [here](http://docs.taosdata.com/node) + +### Connection + +To use the connector, first require the library ```td2.0-connector```. Running the function ```taos.connect``` with the connection options passed in as an object will return a TDengine connection object. The required connection option is ```host```, other options if not set, will be the default values as shown below. + +A cursor also needs to be initialized in order to interact with TDengine from Node.js. + +```javascript +const taos = require('td2.0-connector'); +var conn = taos.connect({host:"127.0.0.1", user:"root", password:"taosdata", config:"/etc/taos",port:0}) +var cursor = conn.cursor(); // Initializing a new cursor +``` + +Close a connection + +```javascript +conn.close(); +``` + +### Queries + +We can now start executing simple queries through the ```cursor.query``` function, which returns a TaosQuery object. + +```javascript +var query = cursor.query('show databases;') +``` + +We can get the results of the queries through the ```query.execute()``` function, which returns a promise that resolves with a TaosResult object, which contains the raw data and additional functionalities such as pretty printing the results. 
+
+```javascript
+var promise = query.execute();
+promise.then(function(result) {
+ result.pretty(); //logs the results to the console as if you were in the taos shell
+});
+```
+
+You can also query by binding parameters to a query by filling in the question marks in a string as so. The query will automatically parse what was bound and convert it to the proper format for use with TDengine
+```javascript
+var query = cursor.query('select * from meterinfo.meters where ts <= ? and areaid = ?;').bind(new Date(), 5);
+query.execute().then(function(result) {
+ result.pretty();
+})
+```
+
+The TaosQuery object can also be immediately executed upon creation by passing true as the second argument, returning a promise instead of a TaosQuery.
+```javascript
+var promise = cursor.query('select * from meterinfo.meters where v1 = 30;', true)
+promise.then(function(result) {
+ result.pretty();
+})
+```
+
+If you want to execute queries without objects being wrapped around the data, use ```cursor.execute()``` directly and ```cursor.fetchall()``` to retrieve data if there is any.
+```javascript
+cursor.execute('select count(*), avg(v1), min(v2) from meterinfo.meters where ts >= \"2019-07-20 00:00:00.000\";');
+var data = cursor.fetchall();
+console.log(cursor.fields); // Latest query's Field metadata is stored in cursor.fields
+console.log(cursor.data); // Latest query's result data is stored in cursor.data, also returned by fetchall.
+```
+
+### Async functionality
+
+Async queries can be performed using the same functions such as `cursor.execute`, `TaosQuery.query`, but now with `_a` appended to them.
+
+Say you want to execute two async queries on two separate tables, using `cursor.query`, you can do that and get a TaosQuery object, which upon executing with the `execute_a` function, returns a promise that resolves with a TaosResult object.
+ +```javascript +var promise1 = cursor.query('select count(*), avg(v1), avg(v2) from meter1;').execute_a() +var promise2 = cursor.query('select count(*), avg(v1), avg(v2) from meter2;').execute_a(); +promise1.then(function(result) { + result.pretty(); +}) +promise2.then(function(result) { + result.pretty(); +}) +``` + +## Example + +An example of using the NodeJS connector to create a table with weather data and create and execute queries can be found [here](https://github.com/taosdata/TDengine/tree/master/tests/examples/nodejs/node-example.js) (The preferred method for using the connector) + +An example of using the NodeJS connector to achieve the same things but without all the object wrappers that wrap around the data returned to achieve higher functionality can be found [here](https://github.com/taosdata/TDengine/tree/master/tests/examples/nodejs/node-example-raw.js) + +## Contributing to TDengine + +Please follow the [contribution guidelines](https://github.com/taosdata/TDengine/blob/master/CONTRIBUTING.md) to contribute to the project. 
+ +## License + +[GNU AGPL v3.0](http://www.gnu.org/licenses/agpl-3.0.html) diff --git a/tests/connectorTest/nodejsTest/tdengine.js b/tests/connectorTest/nodejsTest/tdengine.js new file mode 100644 index 0000000000..047c744a4f --- /dev/null +++ b/tests/connectorTest/nodejsTest/tdengine.js @@ -0,0 +1,4 @@ +var TDengineConnection = require('./nodetaos/connection.js') +module.exports.connect = function (connection={}) { + return new TDengineConnection(connection); +} diff --git a/tests/connectorTest/nodejsTest/test/nanosecondTest.js b/tests/connectorTest/nodejsTest/test/nanosecondTest.js new file mode 100644 index 0000000000..36fb398ea4 --- /dev/null +++ b/tests/connectorTest/nodejsTest/test/nanosecondTest.js @@ -0,0 +1,351 @@ +const taos = require('../tdengine'); +var conn = taos.connect({config:"/etc/taos"}); +var c1 = conn.cursor(); + + +function checkData(sql,row,col,data){ + + c1.execute(sql) + var d = c1.fetchall(); + let checkdata = d[row][col]; + if (checkdata == data) { + + // console.log('check pass') + } + else{ + console.log('check failed') + console.log(checkdata) + console.log(data) + + + } +} + + +// nano basic case + +c1.execute('reset query cache') +c1.execute('drop database if exists db') +c1.execute('create database db precision "ns";') +c1.execute('use db'); +c1.execute('create table tb (ts timestamp, speed int)') +c1.execute('insert into tb values(\'2021-06-10 00:00:00.100000001\', 1);') +c1.execute('insert into tb values(1623254400150000000, 2);') +c1.execute('import into tb values(1623254400300000000, 3);') +c1.execute('import into tb values(1623254400299999999, 4);') +c1.execute('insert into tb values(1623254400300000001, 5);') +c1.execute('insert into tb values(1623254400999999999, 7);') +c1.execute('insert into tb values(1623254400123456789, 8);') +sql = 'select * from tb;' + +console.log('*******************************************') +console.log('this is area about checkdata result') +//check data about insert data 
+checkData(sql,0,0,'2021-06-10 00:00:00.100000001') +checkData(sql,1,0,'2021-06-10 00:00:00.123456789') +checkData(sql,2,0,'2021-06-10 00:00:00.150000000') +checkData(sql,3,0,'2021-06-10 00:00:00.299999999') //error +checkData(sql,4,0,'2021-06-10 00:00:00.300000000') +checkData(sql,5,0,'2021-06-10 00:00:00.300000001') +checkData(sql,6,0,'2021-06-10 00:00:00.999999999') //error + +// // us basic case + +// c1.execute('reset query cache') +// c1.execute('drop database if exists db') +// c1.execute('create database db precision "us";') +// c1.execute('use db'); +// c1.execute('create table tb (ts timestamp, speed int)') +// c1.execute('insert into tb values(\'2021-06-10 00:00:00.100001\', 1);') +// c1.execute('insert into tb values(1623254400150000, 2);') +// c1.execute('import into tb values(1623254400300000, 3);') +// c1.execute('import into tb values(1623254400299999, 4);') +// c1.execute('insert into tb values(1623254400300001, 5);') +// c1.execute('insert into tb values(1623254400999999, 7);') +// c1.execute('insert into tb values(1623254400123789, 8);') +// sql = 'select * from tb;' + +// console.log('*******************************************') + +// //check data about insert data +// checkData(sql,0,0,'2021-06-10 00:00:00.100001') +// checkData(sql,1,0,'2021-06-10 00:00:00.123789') +// checkData(sql,2,0,'2021-06-10 00:00:00.150000') +// checkData(sql,3,0,'2021-06-10 00:00:00.299999') +// checkData(sql,4,0,'2021-06-10 00:00:00.300000') +// checkData(sql,5,0,'2021-06-10 00:00:00.300001') +// checkData(sql,6,0,'2021-06-10 00:00:00.999999') + +// console.log('*******************************************') + +// // ms basic case + +// c1.execute('reset query cache') +// c1.execute('drop database if exists db') +// c1.execute('create database db precision "ms";') +// c1.execute('use db'); +// c1.execute('create table tb (ts timestamp, speed int)') +// c1.execute('insert into tb values(\'2021-06-10 00:00:00.101\', 1);') +// c1.execute('insert into tb 
values(1623254400150, 2);') +// c1.execute('import into tb values(1623254400300, 3);') +// c1.execute('import into tb values(1623254400299, 4);') +// c1.execute('insert into tb values(1623254400301, 5);') +// c1.execute('insert into tb values(1623254400789, 7);') +// c1.execute('insert into tb values(1623254400999, 8);') +// sql = 'select * from tb;' + +// console.log('*******************************************') +// console.log('this is area about checkdata result') +// //check data about insert data +// checkData(sql,0,0,'2021-06-10 00:00:00.101') +// checkData(sql,1,0,'2021-06-10 00:00:00.150') +// checkData(sql,2,0,'2021-06-10 00:00:00.299') +// checkData(sql,3,0,'2021-06-10 00:00:00.300') +// checkData(sql,4,0,'2021-06-10 00:00:00.301') +// checkData(sql,5,0,'2021-06-10 00:00:00.789') +// checkData(sql,6,0,'2021-06-10 00:00:00.999') + +console.log('*******************************************') + +// offfical query result to show +// console.log('this is area about fetch all data') +// var query = c1.query(sql) +// var promise = query.execute(); +// promise.then(function(result) { +// result.pretty(); +// }); + +console.log('*******************************************') +// checkData(sql,3,1,3) +// checkData(sql,4,1,5) +// checkData(sql,5,1,7) + + + + + + +// checkData(3,1,3) +// checkData(4,1,5) +// checkData(5,1,7) + +// tdSql.query('select count(*) from tb where ts > 1623254400100000000 and ts < 1623254400100000002;') +// tdSql.checkData(0,0,1) +// tdSql.query('select count(*) from tb where ts > \'2021-06-10 0:00:00.100000001\' and ts < \'2021-06-10 0:00:00.160000000\';') +// tdSql.checkData(0,0,1) + +// tdSql.query('select count(*) from tb where ts > 1623254400100000000 and ts < 1623254400150000000;') +// tdSql.checkData(0,0,1) +// tdSql.query('select count(*) from tb where ts > \'2021-06-10 0:00:00.100000000\' and ts < \'2021-06-10 0:00:00.150000000\';') +// tdSql.checkData(0,0,1) + +// tdSql.query('select count(*) from tb where ts > 
1623254400400000000;') +// tdSql.checkData(0,0,1) +// tdSql.query('select count(*) from tb where ts < \'2021-06-10 00:00:00.400000000\';') +// tdSql.checkData(0,0,5) + +// tdSql.query('select count(*) from tb where ts > now + 400000000b;') +// tdSql.checkRows(0) + +// tdSql.query('select count(*) from tb where ts >= \'2021-06-10 0:00:00.100000001\';') +// tdSql.checkData(0,0,6) + +// tdSql.query('select count(*) from tb where ts <= 1623254400300000000;') +// tdSql.checkData(0,0,4) + +// tdSql.query('select count(*) from tb where ts = \'2021-06-10 0:00:00.000000000\';') +// tdSql.checkRows(0) + +// tdSql.query('select count(*) from tb where ts = 1623254400150000000;') +// tdSql.checkData(0,0,1) + +// tdSql.query('select count(*) from tb where ts = \'2021-06-10 0:00:00.100000001\';') +// tdSql.checkData(0,0,1) + +// tdSql.query('select count(*) from tb where ts between 1623254400000000000 and 1623254400400000000;') +// tdSql.checkData(0,0,5) + +// tdSql.query('select count(*) from tb where ts between \'2021-06-10 0:00:00.299999999\' and \'2021-06-10 0:00:00.300000001\';') +// tdSql.checkData(0,0,3) + +// tdSql.query('select avg(speed) from tb interval(5000000000b);') +// tdSql.checkRows(1) + +// tdSql.query('select avg(speed) from tb interval(100000000b)') +// tdSql.checkRows(4) + +// tdSql.error('select avg(speed) from tb interval(1b);') +// tdSql.error('select avg(speed) from tb interval(999b);') + +// tdSql.query('select avg(speed) from tb interval(1000b);') +// tdSql.checkRows(5) + +// tdSql.query('select avg(speed) from tb interval(1u);') +// tdSql.checkRows(5) + +// tdSql.query('select avg(speed) from tb interval(100000000b) sliding (100000000b);') +// tdSql.checkRows(4) + +// tdSql.query('select last(*) from tb') +// tdSql.checkData(0,0, '2021-06-10 0:00:00.999999999') +// tdSql.checkData(0,0, 1623254400999999999) + +// tdSql.query('select first(*) from tb') +// tdSql.checkData(0,0, 1623254400100000001) +// tdSql.checkData(0,0, '2021-06-10 0:00:00.100000001') 
+ +// c1.execute('insert into tb values(now + 500000000b, 6);') +// tdSql.query('select * from tb;') +// tdSql.checkRows(7) + +// tdLog.debug('testing nanosecond support in other timestamps') +// c1.execute('create table tb2 (ts timestamp, speed int, ts2 timestamp);') +// c1.execute('insert into tb2 values(\'2021-06-10 0:00:00.100000001\', 1, \'2021-06-11 0:00:00.100000001\');') +// c1.execute('insert into tb2 values(1623254400150000000, 2, 1623340800150000000);') +// c1.execute('import into tb2 values(1623254400300000000, 3, 1623340800300000000);') +// c1.execute('import into tb2 values(1623254400299999999, 4, 1623340800299999999);') +// c1.execute('insert into tb2 values(1623254400300000001, 5, 1623340800300000001);') +// c1.execute('insert into tb2 values(1623254400999999999, 7, 1623513600999999999);') + +// tdSql.query('select * from tb2;') +// tdSql.checkData(0,0,'2021-06-10 0:00:00.100000001') +// tdSql.checkData(1,0,'2021-06-10 0:00:00.150000000') +// tdSql.checkData(2,1,4) +// tdSql.checkData(3,1,3) +// tdSql.checkData(4,2,'2021-06-11 00:00:00.300000001') +// tdSql.checkData(5,2,'2021-06-13 00:00:00.999999999') +// tdSql.checkRows(6) +// tdSql.query('select count(*) from tb2 where ts2 > 1623340800000000000 and ts2 < 1623340800150000000;') +// tdSql.checkData(0,0,1) +// tdSql.query('select count(*) from tb2 where ts2 > \'2021-06-11 0:00:00.100000000\' and ts2 < \'2021-06-11 0:00:00.100000002\';') +// tdSql.checkData(0,0,1) + +// tdSql.query('select count(*) from tb2 where ts2 > 1623340800500000000;') +// tdSql.checkData(0,0,1) +// tdSql.query('select count(*) from tb2 where ts2 < \'2021-06-11 0:00:00.400000000\';') +// tdSql.checkData(0,0,5) + +// tdSql.query('select count(*) from tb2 where ts2 > now + 400000000b;') +// tdSql.checkRows(0) + +// tdSql.query('select count(*) from tb2 where ts2 >= \'2021-06-11 0:00:00.100000001\';') +// tdSql.checkData(0,0,6) + +// tdSql.query('select count(*) from tb2 where ts2 <= 1623340800400000000;') +// 
tdSql.checkData(0,0,5) + +// tdSql.query('select count(*) from tb2 where ts2 = \'2021-06-11 0:00:00.000000000\';') +// tdSql.checkRows(0) + +// tdSql.query('select count(*) from tb2 where ts2 = \'2021-06-11 0:00:00.300000001\';') +// tdSql.checkData(0,0,1) + +// tdSql.query('select count(*) from tb2 where ts2 = 1623340800300000001;') +// tdSql.checkData(0,0,1) + +// tdSql.query('select count(*) from tb2 where ts2 between 1623340800000000000 and 1623340800450000000;') +// tdSql.checkData(0,0,5) + +// tdSql.query('select count(*) from tb2 where ts2 between \'2021-06-11 0:00:00.299999999\' and \'2021-06-11 0:00:00.300000001\';') +// tdSql.checkData(0,0,3) + +// tdSql.query('select count(*) from tb2 where ts2 <> 1623513600999999999;') +// tdSql.checkData(0,0,5) + +// tdSql.query('select count(*) from tb2 where ts2 <> \'2021-06-11 0:00:00.100000001\';') +// tdSql.checkData(0,0,5) + +// tdSql.query('select count(*) from tb2 where ts2 <> \'2021-06-11 0:00:00.100000000\';') +// tdSql.checkData(0,0,6) + +// tdSql.query('select count(*) from tb2 where ts2 != 1623513600999999999;') +// tdSql.checkData(0,0,5) + +// tdSql.query('select count(*) from tb2 where ts2 != \'2021-06-11 0:00:00.100000001\';') +// tdSql.checkData(0,0,5) + +// tdSql.query('select count(*) from tb2 where ts2 != \'2021-06-11 0:00:00.100000000\';') +// tdSql.checkData(0,0,6) + +// c1.execute('insert into tb2 values(now + 500000000b, 6, now +2d);') +// tdSql.query('select * from tb2;') +// tdSql.checkRows(7) + +// tdLog.debug('testing ill nanosecond format handling') +// c1.execute('create table tb3 (ts timestamp, speed int);') + +// tdSql.error('insert into tb3 values(16232544001500000, 2);') +// c1.execute('insert into tb3 values(\'2021-06-10 0:00:00.123456\', 2);') +// tdSql.query('select * from tb3 where ts = \'2021-06-10 0:00:00.123456000\';') +// tdSql.checkRows(1) + +// c1.execute('insert into tb3 values(\'2021-06-10 0:00:00.123456789000\', 2);') +// tdSql.query('select * from tb3 where ts = 
\'2021-06-10 0:00:00.123456789\';') +// tdSql.checkRows(1) + +// # check timezone support + +// c1.execute('use db;') +// c1.execute('create stable st (ts timestamp ,speed float ) tags(time timestamp ,id int);') +// c1.execute('insert into tb1 using st tags("2021-06-10 0:00:00.123456789" , 1 ) values("2021-06-10 0:00:00.123456789+07:00" , 1.0);' ) +// tdSql.query("select first(*) from tb1;") +// tdSql.checkData(0,0,1623258000123456789) +// c1.execute('insert into tb1 using st tags("2021-06-10 0:00:00.123456789" , 1 ) values("2021-06-10T0:00:00.123456789+06:00" , 2.0);' ) +// tdSql.query("select last(*) from tb1;") +// tdSql.checkData(0,0,1623261600123456789) + +// c1.execute('create database usdb precision "us";') +// c1.execute('use usdb;') +// c1.execute('create stable st (ts timestamp ,speed float ) tags(time timestamp ,id int);') +// c1.execute('insert into tb1 using st tags("2021-06-10 0:00:00.123456" , 1 ) values("2021-06-10 0:00:00.123456+07:00" , 1.0);' ) +// res = tdSql.getResult("select first(*) from tb1;") +// print(res) +// if res == [(datetime.datetime(2021, 6, 10, 1, 0, 0, 123456), 1.0)]: +// tdLog.info('check timezone pass about us database') + +// c1.execute('create database msdb precision "ms";') +// c1.execute('use msdb;') +// c1.execute('create stable st (ts timestamp ,speed float ) tags(time timestamp ,id int);') +// c1.execute('insert into tb1 using st tags("2021-06-10 0:00:00.123" , 1 ) values("2021-06-10 0:00:00.123+07:00" , 1.0);' ) +// res = tdSql.getResult("select first(*) from tb1;") +// print(res) +// if res ==[(datetime.datetime(2021, 6, 10, 1, 0, 0, 123000), 1.0)]: +// tdLog.info('check timezone pass about ms database') + + + + + + + + + + +// c1.execute('create database if not exists ' + dbname + ' precision "ns"'); +// c1.execute('use ' + dbname) +// c1.execute('create table if not exists tstest (ts timestamp, _int int);'); +// c1.execute('insert into tstest values(1625801548423914405, 0)'); +// // Select +// console.log('select * 
from tstest'); +// c1.execute('select * from tstest'); + +// var d = c1.fetchall(); +// console.log(c1.fields); +// let ts = d[0][0]; +// console.log(ts); + +// if (ts.taosTimestamp() != 1625801548423914405) { +// throw "nanosecond not match!"; +// } +// if (ts.getNanoseconds() % 1000000 !== 914405) { +// throw "nanosecond precision error"; +// } +// setTimeout(function () { +// c1.query('drop database nodejs_ns_test;'); +// }, 200); + +// setTimeout(function () { +// conn.close(); +// }, 2000); + + diff --git a/tests/connectorTest/nodejsTest/test/performance.js b/tests/connectorTest/nodejsTest/test/performance.js new file mode 100644 index 0000000000..ea197f0344 --- /dev/null +++ b/tests/connectorTest/nodejsTest/test/performance.js @@ -0,0 +1,89 @@ +function memoryUsageData() { + let s = process.memoryUsage() + for (key in s) { + s[key] = (s[key]/1000000).toFixed(3) + "MB"; + } + return s; +} +console.log("initial mem usage:", memoryUsageData()); + +const { PerformanceObserver, performance } = require('perf_hooks'); +const taos = require('../tdengine'); +var conn = taos.connect({host:"127.0.0.1", user:"root", password:"taosdata", config:"/etc/taos",port:0}); +var c1 = conn.cursor(); + +// Initialize env +c1.execute('create database if not exists td_connector_test;'); +c1.execute('use td_connector_test;') +c1.execute('create table if not exists all_types (ts timestamp, _int int, _bigint bigint, _float float, _double double, _binary binary(40), _smallint smallint, _tinyint tinyint, _bool bool, _nchar nchar(40));'); +c1.execute('create table if not exists stabletest (ts timestamp, v1 int, v2 int, v3 int, v4 double) tags (id int, location binary(20));') + + +// Insertion into single table Performance Test +var dataPrepTime = 0; +var insertTime = 0; +var insertTime5000 = 0; +var avgInsert5ktime = 0; +const obs = new PerformanceObserver((items) => { + let entry = items.getEntries()[0]; + + if (entry.name == 'Data Prep') { + dataPrepTime += entry.duration; + } + else if 
(entry.name == 'Insert'){ + insertTime += entry.duration + } + else { + console.log(entry.name + ': ' + (entry.duration/1000).toFixed(8) + 's'); + } + performance.clearMarks(); +}); +obs.observe({ entryTypes: ['measure'] }); + +function R(l,r) { + return Math.random() * (r - l) - r; +} +function randomBool() { + if (Math.random() < 0.5) { + return true; + } + return false; +} +function insertN(n) { + for (let i = 0; i < n; i++) { + performance.mark('A3'); + let insertData = ["now + " + i + "m", // Timestamp + parseInt( R(-Math.pow(2,31) + 1 , Math.pow(2,31) - 1) ), // Int + parseInt( R(-Math.pow(2,31) + 1 , Math.pow(2,31) - 1) ), // BigInt + parseFloat( R(-3.4E38, 3.4E38) ), // Float + parseFloat( R(-1.7E308, 1.7E308) ), // Double + "\"Long Binary\"", // Binary + parseInt( R(-32767, 32767) ), // Small Int + parseInt( R(-127, 127) ), // Tiny Int + randomBool(), + "\"Nchars 一些中文字幕\""]; // Bool + let query = 'insert into td_connector_test.all_types values(' + insertData.join(',') + ' );'; + performance.mark('B3'); + performance.measure('Data Prep', 'A3', 'B3'); + performance.mark('A2'); + c1.execute(query, {quiet:true}); + performance.mark('B2'); + performance.measure('Insert', 'A2', 'B2'); + if ( i % 5000 == 4999) { + console.log("Insert # " + (i+1)); + console.log('Insert 5k records: ' + ((insertTime - insertTime5000)/1000).toFixed(8) + 's'); + insertTime5000 = insertTime; + avgInsert5ktime = (avgInsert5ktime/1000 * Math.floor(i / 5000) + insertTime5000/1000) / Math.ceil( i / 5000); + console.log('DataPrepTime So Far: ' + (dataPrepTime/1000).toFixed(8) + 's | Inserting time So Far: ' + (insertTime/1000).toFixed(8) + 's | Avg. 
Insert 5k time: ' + avgInsert5ktime.toFixed(8)); + + + } + } +} +performance.mark('insert 1E5') +insertN(1E5); +performance.mark('insert 1E5 2') +performance.measure('Insert With Logs', 'insert 1E5', 'insert 1E5 2'); +console.log('DataPrepTime: ' + (dataPrepTime/1000).toFixed(8) + 's | Inserting time: ' + (insertTime/1000).toFixed(8) + 's'); +dataPrepTime = 0; insertTime = 0; +//'insert into td_connector_test.all_types values (now, null,null,null,null,null,null,null,null,null);' diff --git a/tests/connectorTest/nodejsTest/test/test.js b/tests/connectorTest/nodejsTest/test/test.js new file mode 100644 index 0000000000..caf05955da --- /dev/null +++ b/tests/connectorTest/nodejsTest/test/test.js @@ -0,0 +1,170 @@ +const taos = require('../tdengine'); +var conn = taos.connect(); +var c1 = conn.cursor(); +let stime = new Date(); +let interval = 1000; + +function convertDateToTS(date) { + let tsArr = date.toISOString().split("T") + return "\"" + tsArr[0] + " " + tsArr[1].substring(0, tsArr[1].length-1) + "\""; +} +function R(l,r) { + return Math.random() * (r - l) - r; +} +function randomBool() { + if (Math.random() < 0.5) { + return true; + } + return false; +} + +// Initialize +//c1.execute('drop database td_connector_test;'); +c1.execute('create database if not exists td_connector_test;'); +c1.execute('use td_connector_test;') +c1.execute('create table if not exists all_types (ts timestamp, _int int, _bigint bigint, _float float, _double double, _binary binary(40), _smallint smallint, _tinyint tinyint, _bool bool, _nchar nchar(40));'); +c1.execute('create table if not exists stabletest (ts timestamp, v1 int, v2 int, v3 int, v4 double) tags (id int, location binary(20));') + +// Shell Test : The following uses the cursor to imitate the taos shell + +// Insert +for (let i = 0; i < 10000; i++) { + let insertData = ["now+" + i + "s", // Timestamp + parseInt( R(-Math.pow(2,31) + 1 , Math.pow(2,31) - 1) ), // Int + parseInt( R(-Math.pow(2,31) + 1 , Math.pow(2,31) - 1) ), // 
BigInt + parseFloat( R(-3.4E38, 3.4E38) ), // Float + parseFloat( R(-1.7E30, 1.7E30) ), // Double + "\"Long Binary\"", // Binary + parseInt( R(-32767, 32767) ), // Small Int + parseInt( R(-127, 127) ), // Tiny Int + randomBool(), + "\"Nchars\""]; // Bool + c1.execute('insert into td_connector_test.all_types values(' + insertData.join(',') + ' );', {quiet:true}); + if (i % 1000 == 0) { + console.log("Insert # " , i); + } +} + +// Select +console.log('select * from td_connector_test.all_types limit 3 offset 100;'); +c1.execute('select * from td_connector_test.all_types limit 2 offset 100;'); + +var d = c1.fetchall(); +console.log(c1.fields); +console.log(d); + +// Functions +console.log('select count(*), avg(_int), sum(_float), max(_bigint), min(_double) from td_connector_test.all_types;') +c1.execute('select count(*), avg(_int), sum(_float), max(_bigint), min(_double) from td_connector_test.all_types;'); +var d = c1.fetchall(); +console.log(c1.fields); +console.log(d); + +// Immediate Execution like the Shell + +c1.query('select count(*), stddev(_double), min(_tinyint) from all_types where _tinyint > 50 and _int < 0;', true).then(function(result){ + result.pretty(); +}) + +c1.query('select _tinyint, _bool from all_types where _tinyint > 50 and _int < 0 limit 50;', true).then(function(result){ + result.pretty(); +}) + +c1.query('select stddev(_double), stddev(_bigint), stddev(_float) from all_types;', true).then(function(result){ + result.pretty(); +}) +c1.query('select stddev(_double), stddev(_bigint), stddev(_float) from all_types interval(1m) limit 100;', true).then(function(result){ + result.pretty(); +}) + +// Binding arguments, and then using promise +var q = c1.query('select _nchar from td_connector_test.all_types where ts >= ? and _int > ? 
limit 100 offset 40;').bind(new Date(1231), 100) +console.log(q.query); +q.execute().then(function(r) { + r.pretty(); +}); + + +// test query null value +c1.execute("create table if not exists td_connector_test.weather(ts timestamp, temperature float, humidity int) tags(location nchar(64))"); +c1.execute("insert into t1 using weather tags('北京') values(now, 11.11, 11)"); +c1.execute("insert into t1(ts, temperature) values(now, 22.22)"); +c1.execute("insert into t1(ts, humidity) values(now, 33)"); +c1.query('select * from test.t1', true).then(function (result) { + result.pretty(); +}); + +var q = c1.query('select * from td_connector_test.weather'); +console.log(q.query); +q.execute().then(function(r) { + r.pretty(); +}); + +function sleep(sleepTime) { + for(var start = +new Date; +new Date - start <= sleepTime; ) { } +} + +sleep(10000); + +// Raw Async Testing (Callbacks, not promises) +function cb2(param, result, rowCount, rd) { + console.log('CB2 Callbacked!'); + console.log("RES *", result); + console.log("Async fetched", rowCount, " rows"); + console.log("Passed Param: ", param); + console.log("Fields ", rd.fields); + console.log("Data ", rd.data); +} +function cb1(param,result,code) { + console.log('CB1 Callbacked!'); + console.log("RES * ", result); + console.log("Status: ", code); + console.log("Passed Param ", param); + c1.fetchall_a(result, cb2, param); +} + +c1.execute_a("describe td_connector_test.all_types;", cb1, {myparam:3.141}); + +function cb4(param, result, rowCount, rd) { + console.log('CB4 Callbacked!'); + console.log("RES *", result); + console.log("Async fetched", rowCount, "rows"); + console.log("Passed Param: ", param); + console.log("Fields", rd.fields); + console.log("Data", rd.data); +} +// Without directly calling fetchall_a +var thisRes; +function cb3(param,result,code) { + console.log('CB3 Callbacked!'); + console.log("RES *", result); + console.log("Status:", code); + console.log("Passed Param", param); + thisRes = result; +} +//Test 
calling execute and fetchall seperately and not through callbacks +var param = c1.execute_a("describe td_connector_test.all_types;", cb3, {e:2.718}); +console.log("Passed Param outside of callback: ", param); +console.log(param); +setTimeout(function(){ + c1.fetchall_a(thisRes, cb4, param); +},100); + + +// Async through promises +var aq = c1.query('select count(*) from td_connector_test.all_types;',false); +aq.execute_a().then(function(data) { + data.pretty(); +}); + +c1.query('describe td_connector_test.stabletest').execute_a().then(function(r){ + r.pretty() +}); + +setTimeout(function(){ + c1.query('drop database td_connector_test;'); +},200); + +setTimeout(function(){ + conn.close(); +},2000); diff --git a/tests/connectorTest/nodejsTest/test/testMicroseconds.js b/tests/connectorTest/nodejsTest/test/testMicroseconds.js new file mode 100644 index 0000000000..cc65b3d919 --- /dev/null +++ b/tests/connectorTest/nodejsTest/test/testMicroseconds.js @@ -0,0 +1,49 @@ +const taos = require('../tdengine'); +var conn = taos.connect(); +var c1 = conn.cursor(); +let stime = new Date(); +let interval = 1000; + +function convertDateToTS(date) { + let tsArr = date.toISOString().split("T") + return "\"" + tsArr[0] + " " + tsArr[1].substring(0, tsArr[1].length - 1) + "\""; +} +function R(l, r) { + return Math.random() * (r - l) - r; +} +function randomBool() { + if (Math.random() < 0.5) { + return true; + } + return false; +} + +// Initialize +//c1.execute('drop database td_connector_test;'); +const dbname = 'nodejs_test_us'; +c1.execute('create database if not exists ' + dbname + ' precision "us"'); +c1.execute('use ' + dbname) +c1.execute('create table if not exists tstest (ts timestamp, _int int);'); +c1.execute('insert into tstest values(1625801548423914, 0)'); +// Select +console.log('select * from tstest'); +c1.execute('select * from tstest'); + +var d = c1.fetchall(); +console.log(c1.fields); +let ts = d[0][0]; +console.log(ts); + +if (ts.taosTimestamp() != 
1625801548423914) { + throw "microseconds not match!"; +} +if (ts.getMicroseconds() % 1000 !== 914) { + throw "micronsecond precision error"; +} +setTimeout(function () { + c1.query('drop database nodejs_us_test;'); +}, 200); + +setTimeout(function () { + conn.close(); +}, 2000); diff --git a/tests/connectorTest/nodejsTest/test/testNanoseconds.js b/tests/connectorTest/nodejsTest/test/testNanoseconds.js new file mode 100644 index 0000000000..85a7600b01 --- /dev/null +++ b/tests/connectorTest/nodejsTest/test/testNanoseconds.js @@ -0,0 +1,49 @@ +const taos = require('../tdengine'); +var conn = taos.connect(); +var c1 = conn.cursor(); +let stime = new Date(); +let interval = 1000; + +function convertDateToTS(date) { + let tsArr = date.toISOString().split("T") + return "\"" + tsArr[0] + " " + tsArr[1].substring(0, tsArr[1].length - 1) + "\""; +} +function R(l, r) { + return Math.random() * (r - l) - r; +} +function randomBool() { + if (Math.random() < 0.5) { + return true; + } + return false; +} + +// Initialize +//c1.execute('drop database td_connector_test;'); +const dbname = 'nodejs_test_ns'; +c1.execute('create database if not exists ' + dbname + ' precision "ns"'); +c1.execute('use ' + dbname) +c1.execute('create table if not exists tstest (ts timestamp, _int int);'); +c1.execute('insert into tstest values(1625801548423914405, 0)'); +// Select +console.log('select * from tstest'); +c1.execute('select * from tstest'); + +var d = c1.fetchall(); +console.log(c1.fields); +let ts = d[0][0]; +console.log(ts); + +if (ts.taosTimestamp() != 1625801548423914405) { + throw "nanosecond not match!"; +} +if (ts.getNanoseconds() % 1000000 !== 914405) { + throw "nanosecond precision error"; +} +setTimeout(function () { + c1.query('drop database nodejs_ns_test;'); +}, 200); + +setTimeout(function () { + conn.close(); +}, 2000); diff --git a/tests/connectorTest/nodejsTest/test/testSubscribe.js b/tests/connectorTest/nodejsTest/test/testSubscribe.js new file mode 100644 index 
0000000000..30fb3f4256 --- /dev/null +++ b/tests/connectorTest/nodejsTest/test/testSubscribe.js @@ -0,0 +1,16 @@ +const taos = require('../tdengine'); +var conn = taos.connect({host:"127.0.0.1", user:"root", password:"taosdata", config:"/etc/taos",port:10}); +var c1 = conn.cursor(); +let stime = new Date(); +let interval = 1000; +c1.execute('use td_connector_test'); +let sub = c1.subscribe({ + restart: true, + sql: "select AVG(_int) from td_connector_test.all_Types;", + topic: 'all_Types', + interval: 1000 +}); + +c1.consumeData(sub, (data, fields) => { + console.log(data); +}); \ No newline at end of file diff --git a/tests/connectorTest/odbcTest/nanosupport/nanoTest_odbc.py b/tests/connectorTest/odbcTest/nanosupport/nanoTest_odbc.py new file mode 100644 index 0000000000..e6a4bc73ae --- /dev/null +++ b/tests/connectorTest/odbcTest/nanosupport/nanoTest_odbc.py @@ -0,0 +1,111 @@ +import pyodbc +import argparse +import sys + +parser = argparse.ArgumentParser(description='Access TDengine via ODBC.') +parser.add_argument('--DSN', help='DSN to use') +parser.add_argument('--UID', help='UID to use') +parser.add_argument('--PWD', help='PWD to use') +parser.add_argument('--Server', help='Server to use') +parser.add_argument('-C', metavar='CONNSTR', help='Connection string to use') + +args = parser.parse_args() + +a = 'DSN=%s'%args.DSN if args.DSN else None +b = 'UID=%s'%args.UID if args.UID else None +c = 'PWD=%s'%args.PWD if args.PWD else None +d = 'Server=%s'%args.Server if args.Server else None +conn_str = ';'.join(filter(None, [a,b,c,d])) if args.DSN else None +conn_str = conn_str if conn_str else args.C +if not conn_str: + parser.print_help(file=sys.stderr) + exit() + +print('connecting: [%s]' % conn_str) +cnxn = pyodbc.connect(conn_str, autocommit=True) +cnxn.setdecoding(pyodbc.SQL_CHAR, encoding='utf-8') + +cursor = cnxn.cursor() +cursor.execute("drop database if exists db"); +cursor.close() + +cursor = cnxn.cursor() +cursor.execute("create database db"); 
+cursor.close() + +cursor = cnxn.cursor() +cursor.execute("create table db.mt (ts timestamp, b bool, v1 tinyint, v2 smallint, v4 int, v8 bigint, f4 float, f8 double, bin binary(10), blob nchar(10))"); +cursor.close() + +cursor = cnxn.cursor() +cursor.execute("insert into db.mt values('2020-10-13 06:44:00.123', 1, 127, 32767, 2147483647, 32769, 123.456, 789.987, 'hello', 'helloworld')") +cursor.close() + +cursor = cnxn.cursor() +cursor.execute("insert into db.mt values(?,?,?,?,?,?,?,?,?,?)", "2020-10-13 07:06:00.234", 0, 127, 32767, 32768, 32769, 123.456, 789.987, "hel后lo".encode('utf-8'), "wo哈rlxd129") +##cursor.execute("insert into db.mt values(?,?,?,?,?,?,?,?,?,?)", 1502535178128, 9223372036854775807, 127, 32767, 32768, 32769, 123.456, 789.987, "hel后lo".encode('utf-8'), "wo哈rlxd123"); +cursor.close() + +cursor = cnxn.cursor() +cursor.execute(""" +INSERT INTO db.mt (ts,b,v1,v2,v4,v8,f4,f8,bin,blob) values (?,?,?,?,?,?,?,?,?,?) +""", +"2020-12-12 00:00:00", +'true', +'-127', +'-32767', +'-2147483647', +'-9223372036854775807', +'-1.23e10', +'-11.23e6', +'abcdefghij'.encode('utf-8'), +"人啊大发测试及abc") +cursor.close() + +cursor = cnxn.cursor() +cursor.execute("drop database if exists db"); +cursor.close() + +cursor = cnxn.cursor() +cursor.execute("create database db"); +cursor.close() + +cursor = cnxn.cursor() +cursor.execute("create table db.t (ts timestamp, b bool, v1 tinyint, v2 smallint, v4 int, v8 bigint, f4 float, f8 double, bin binary(4), blob nchar(4))"); +cursor.close() + +cursor = cnxn.cursor() +cursor.execute("insert into db.t values('2020-10-13 06:44:00', 1, 127, 32767, 32768, 32769, 123.456, 789.987, 'hell', 'w我你z')") +cursor.close() + +cursor = cnxn.cursor() +cursor.execute("create table db.v (ts timestamp, v1 tinyint, v2 smallint, name nchar(10), ts2 timestamp)") +cursor.close() + +params = [ ('2020-10-16 00:00:00.123', 19, '2111-01-02 01:02:03.123'), + ('2020-10-16 00:00:01', 41, '2111-01-02 01:02:03.423'), + ('2020-10-16 00:00:02', 57, '2111-01-02 
01:02:03.153'), + ('2020-10-16 00:00:03.009', 26, '2111-01-02 01:02:03.623') ] +cursor = cnxn.cursor() +cursor.fast_executemany = True +print('py:...................') +cursor.executemany("insert into db.v (ts, v1, ts2) values (?, ?, ?)", params) +print('py:...................') +cursor.close() + +## cursor = cnxn.cursor() +## cursor.execute("SELECT * from db.v where v1 > ?", 4) +## row = cursor.fetchone() +## while row: +## print(row) +## row = cursor.fetchone() +## cursor.close() +## +## cursor = cnxn.cursor() +## cursor.execute("SELECT * from db.v where v1 > ?", '5') +## row = cursor.fetchone() +## while row: +## print(row) +## row = cursor.fetchone() +## cursor.close() + diff --git a/tests/connectorTest/odbcTest/nanosupport/odbc.go b/tests/connectorTest/odbcTest/nanosupport/odbc.go new file mode 100644 index 0000000000..4d9c760c4e --- /dev/null +++ b/tests/connectorTest/odbcTest/nanosupport/odbc.go @@ -0,0 +1,84 @@ +package main + +import ( + "context" + "database/sql" + "flag" + "log" + "os" + "os/signal" + "time" + _ "github.com/alexbrainman/odbc" +) + +var pool *sql.DB // Database connection pool. + +func main() { + id := flag.Int64("id", 32768, "person ID to find") + dsn := flag.String("dsn", os.Getenv("DSN"), "connection data source name") + flag.Parse() + + if len(*dsn) == 0 { + log.Fatal("missing dsn flag") + } + if *id == 0 { + log.Fatal("missing person ID") + } + var err error + + // Opening a driver typically will not attempt to connect to the database. + pool, err = sql.Open("odbc", *dsn) + if err != nil { + // This will not be a connection error, but a DSN parse error or + // another initialization error. 
+ log.Fatal("unable to use data source name", err) + } + defer pool.Close() + + pool.SetConnMaxLifetime(0) + pool.SetMaxIdleConns(3) + pool.SetMaxOpenConns(3) + + ctx, stop := context.WithCancel(context.Background()) + defer stop() + + appSignal := make(chan os.Signal, 3) + signal.Notify(appSignal, os.Interrupt) + + go func() { + select { + case <-appSignal: + stop() + } + }() + + Ping(ctx) + + Query(ctx, *id) +} + +// Ping the database to verify DSN provided by the user is valid and the +// server accessible. If the ping fails exit the program with an error. +func Ping(ctx context.Context) { + ctx, cancel := context.WithTimeout(ctx, 1*time.Second) + defer cancel() + + if err := pool.PingContext(ctx); err != nil { + log.Fatalf("unable to connect to database: %v", err) + } +} + +// Query the database for the information requested and prints the results. +// If the query fails exit the program with an error. +func Query(ctx context.Context, id int64) { + ctx, cancel := context.WithTimeout(ctx, 5*time.Second) + defer cancel() + + var name string + err := pool.QueryRowContext(ctx, "select name from m.t").Scan(&name) + if err != nil { + log.Fatal("unable to execute search query", err) + } + log.Println("name=", name) +} + diff --git a/tests/connectorTest/odbcTest/nanosupport/odbc.py b/tests/connectorTest/odbcTest/nanosupport/odbc.py new file mode 100644 index 0000000000..cee0cf1a13 --- /dev/null +++ b/tests/connectorTest/odbcTest/nanosupport/odbc.py @@ -0,0 +1,115 @@ +import pyodbc +import argparse +import sys + +parser = argparse.ArgumentParser(description='Access TDengine via ODBC.') +parser.add_argument('--DSN', help='DSN to use') +parser.add_argument('--UID', help='UID to use') +parser.add_argument('--PWD', help='PWD to use') +parser.add_argument('--Server', help='Server to use') +parser.add_argument('-C', metavar='CONNSTR', help='Connection string to use') + +args = parser.parse_args() + +a = 'DSN=%s'%args.DSN if args.DSN else None +b = 'UID=%s'%args.UID if 
args.UID else None +c = 'PWD=%s'%args.PWD if args.PWD else None +d = 'Server=%s'%args.Server if args.Server else None +conn_str = ';'.join(filter(None, [a,b,c,d])) if args.DSN else None +conn_str = conn_str if conn_str else args.C +if not conn_str: + parser.print_help(file=sys.stderr) + exit() + +print('connecting: [%s]' % conn_str) +cnxn = pyodbc.connect(conn_str, autocommit=True) +cnxn.setdecoding(pyodbc.SQL_CHAR, encoding='utf-8') + +cursor = cnxn.cursor() +cursor.execute("drop database if exists db"); +cursor.close() + +cursor = cnxn.cursor() +cursor.execute("create database db"); +cursor.close() + +cursor = cnxn.cursor() +cursor.execute("create table db.mt (ts timestamp, b bool, v1 tinyint, v2 smallint, v4 int, v8 bigint, f4 float, f8 double, bin binary(10), blob nchar(10))"); +cursor.close() + +cursor = cnxn.cursor() +cursor.execute("insert into db.mt values('2020-10-13 06:44:00.123', 1, 127, 32767, 2147483647, 32769, 123.456, 789.987, 'hello', 'helloworld')") +cursor.close() + +cursor = cnxn.cursor() +cursor.execute("insert into db.mt values(?,?,?,?,?,?,?,?,?,?)", "2020-10-13 07:06:00.234", 0, 127, 32767, 32768, 32769, 123.456, 789.987, "hel后lo".encode('utf-8'), "wo哈rlxd129") +##cursor.execute("insert into db.mt values(?,?,?,?,?,?,?,?,?,?)", 1502535178128, 9223372036854775807, 127, 32767, 32768, 32769, 123.456, 789.987, "hel后lo".encode('utf-8'), "wo哈rlxd123"); +cursor.close() + +cursor = cnxn.cursor() +cursor.execute(""" +INSERT INTO db.mt (ts,b,v1,v2,v4,v8,f4,f8,bin,blob) values (?,?,?,?,?,?,?,?,?,?) 
+""", +"2020-12-12 00:00:00", +'true', +'-127', +'-32767', +'-2147483647', +'-9223372036854775807', +'-1.23e10', +'-11.23e6', +'abcdefghij'.encode('utf-8'), +"人啊大发测试及abc") +cursor.close() + +cursor = cnxn.cursor() +cursor.execute("drop database if exists db"); +cursor.close() + +cursor = cnxn.cursor() +cursor.execute("create database db"); +cursor.close() + +cursor = cnxn.cursor() +cursor.execute("create table db.t (ts timestamp, b bool, v1 tinyint, v2 smallint, v4 int, v8 bigint, f4 float, f8 double, bin binary(4), blob nchar(4))"); +cursor.close() + +cursor = cnxn.cursor() +cursor.execute("insert into db.t values('2020-10-13 06:44:00', 1, 127, 32767, 32768, 32769, 123.456, 789.987, 'hell', 'w我你z')") +cursor.close() + +cursor = cnxn.cursor() +cursor.execute("create table db.v (ts timestamp, v1 tinyint, v2 smallint, name nchar(10), ts2 timestamp)") +cursor.close() + +cursor = cnxn.cursor() +cursor.execute("select * from db.v") +cursor.close() + +params = [ ('2020-10-16 00:00:00.123', 19, '2111-01-02 01:02:03.123'), + ('2020-10-16 00:00:01', 41, '2111-01-02 01:02:03.423'), + ('2020-10-16 00:00:02', 57, '2111-01-02 01:02:03.153'), + ('2020-10-16 00:00:03.009', 26, '2111-01-02 01:02:03.623') ] +cursor = cnxn.cursor() +cursor.fast_executemany = True +print('py:...................') +cursor.executemany("insert into db.v (ts, v1, ts2) values (?, ?, ?)", params) +print('py:...................') +cursor.close() + +## cursor = cnxn.cursor() +## cursor.execute("SELECT * from db.v where v1 > ?", 4) +## row = cursor.fetchone() +## while row: +## print(row) +## row = cursor.fetchone() +## cursor.close() +## +## cursor = cnxn.cursor() +## cursor.execute("SELECT * from db.v where v1 > ?", '5') +## row = cursor.fetchone() +## while row: +## print(row) +## row = cursor.fetchone() +## cursor.close() + diff --git a/tests/gotest/batchtest.bat b/tests/gotest/batchtest.bat index efd8961bb0..2a96ee31eb 100755 --- a/tests/gotest/batchtest.bat +++ b/tests/gotest/batchtest.bat @@ -1,3 +1,4 
@@ + @echo off echo ==== start Go connector test cases test ==== cd /d %~dp0 @@ -18,3 +19,10 @@ rem case002.bat :: cd case002 :: case002.bat + + +rem cd nanosupport +rem nanoCase.bat + +:: cd nanosupport +:: nanoCase.bat \ No newline at end of file diff --git a/tests/gotest/batchtest.sh b/tests/gotest/batchtest.sh index 8f5a7fe8f0..503d77b226 100755 --- a/tests/gotest/batchtest.sh +++ b/tests/gotest/batchtest.sh @@ -19,3 +19,4 @@ go env -w GOPROXY=https://goproxy.io,direct bash ./case001/case001.sh $severIp $serverPort bash ./case002/case002.sh $severIp $serverPort #bash ./case003/case003.sh $severIp $serverPort +bash ./nanosupport/nanoCase.sh $severIp $serverPort diff --git a/tests/gotest/case001/case001.go b/tests/gotest/case001/case001.go index 9d35888f31..29bc92f2a0 100644 --- a/tests/gotest/case001/case001.go +++ b/tests/gotest/case001/case001.go @@ -12,7 +12,6 @@ * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . */ - package main import ( diff --git a/tests/gotest/case001/case001.sh b/tests/gotest/case001/case001.sh index 94e5bb44e0..831e9f83ac 100644 --- a/tests/gotest/case001/case001.sh +++ b/tests/gotest/case001/case001.sh @@ -15,8 +15,7 @@ script_dir="$(dirname $(readlink -f $0))" ###### step 3: start build cd $script_dir rm -f go.* -go mod init demotest > /dev/null 2>&1 -go mod tidy > /dev/null 2>&1 -go build > /dev/null 2>&1 +go mod init demotest +go build sleep 1s ./demotest -h $1 -p $2 diff --git a/tests/gotest/case002/case002.bat b/tests/gotest/case002/case002.bat index ebec576e72..385677acae 100644 --- a/tests/gotest/case002/case002.bat +++ b/tests/gotest/case002/case002.bat @@ -1,5 +1,5 @@ @echo off -echo ==== start run cases001.go +echo ==== start run cases002.go del go.* go mod init demotest diff --git a/tests/gotest/case002/case002.go b/tests/gotest/case002/case002.go index c69da04cb2..e2ba5ea28e 100644 --- a/tests/gotest/case002/case002.go +++ b/tests/gotest/case002/case002.go @@ 
-43,10 +43,9 @@ func main() { os.Exit(1) } defer db.Close() - db.Exec("drop if exists database test") - db.Exec("create if not exists database test") + db.Exec("drop database if exists test") + db.Exec("create database if not exists test ") db.Exec("use test") - db.Exec("drop if exists database test") db.Exec("create table test (ts timestamp ,level int)") for i := 0; i < 10; i++ { sqlcmd := fmt.Sprintf("insert into test values(%d,%d)", ts+i, i) diff --git a/tests/gotest/case002/case002.sh b/tests/gotest/case002/case002.sh index 94e5bb44e0..d98337cce7 100644 --- a/tests/gotest/case002/case002.sh +++ b/tests/gotest/case002/case002.sh @@ -1,6 +1,6 @@ #!/bin/bash -echo "==== start run cases001.go" +echo "==== start run cases002.go" set +e #set -x diff --git a/tests/gotest/nanosupport/connector/executor.go b/tests/gotest/nanosupport/connector/executor.go new file mode 100644 index 0000000000..218ea29af3 --- /dev/null +++ b/tests/gotest/nanosupport/connector/executor.go @@ -0,0 +1,208 @@ +package connector + +import ( + "context" + "fmt" + "reflect" + "time" + + "github.com/taosdata/go-utils/log" + "github.com/taosdata/go-utils/tdengine/config" + "github.com/taosdata/go-utils/tdengine/connector" + tdengineExecutor "github.com/taosdata/go-utils/tdengine/executor" +) + +type Executor struct { + executor *tdengineExecutor.Executor + ctx context.Context +} + +var Logger = log.NewLogger("taos test") + +func NewExecutor(conf *config.TDengineGo, db string, showSql bool) (*Executor, error) { + tdengineConnector, err := connector.NewTDengineConnector("go", conf) + if err != nil { + return nil, err + } + executor := tdengineExecutor.NewExecutor(tdengineConnector, db, showSql, Logger) + return &Executor{ + executor: executor, + ctx: context.Background(), + }, nil +} + +func (e *Executor) Execute(sql string) (int64, error) { + return e.executor.DoExec(e.ctx, sql) +} +func (e *Executor) Query(sql string) (*connector.Data, error) { + fmt.Println("query :", sql) + return 
e.executor.DoQuery(e.ctx, sql) +} +func (e *Executor) CheckData(row, col int, value interface{}, data *connector.Data) (bool, error) { + if data == nil { + return false, fmt.Errorf("data is nil") + } + if col >= len(data.Head) { + return false, fmt.Errorf("col out of data") + } + if row >= len(data.Data) { + return false, fmt.Errorf("row out of data") + } + dataValue := data.Data[row][col] + + if dataValue == nil && value != nil { + return false, fmt.Errorf("dataValue is nil but value is not nil") + } + if dataValue == nil && value == nil { + return true, nil + } + if reflect.TypeOf(dataValue) != reflect.TypeOf(value) { + return false, fmt.Errorf("type not match expect %s got %s", reflect.TypeOf(value), reflect.TypeOf(dataValue)) + } + switch value.(type) { + case time.Time: + t, _ := dataValue.(time.Time) + if value.(time.Time).Nanosecond() != t.Nanosecond() { + return false, fmt.Errorf("value not match expect %d got %d", value.(time.Time).Nanosecond(), t.Nanosecond()) + } + case string: + if value.(string) != dataValue.(string) { + return false, fmt.Errorf("value not match expect %s got %s", value.(string), dataValue.(string)) + } + case int8: + if value.(int8) != dataValue.(int8) { + return false, fmt.Errorf("value not match expect %d got %d", value.(int8), dataValue.(int8)) + } + case int16: + if value.(int16) != dataValue.(int16) { + return false, fmt.Errorf("value not match expect %d got %d", value.(int16), dataValue.(int16)) + } + case int32: + if value.(int32) != dataValue.(int32) { + return false, fmt.Errorf("value not match expect %d got %d", value.(int32), dataValue.(int32)) + } + case int64: + if value.(int64) != dataValue.(int64) { + return false, fmt.Errorf("value not match expect %d got %d", value.(int64), dataValue.(int64)) + } + case float32: + if value.(float32) != dataValue.(float32) { + return false, fmt.Errorf("value not match expect %f got %f", value.(float32), dataValue.(float32)) + } + case float64: + if value.(float64) != 
dataValue.(float64) { + return false, fmt.Errorf("value not match expect %f got %f", value.(float32), dataValue.(float32)) + } + case bool: + if value.(bool) != dataValue.(bool) { + return false, fmt.Errorf("value not match expect %t got %t", value.(bool), dataValue.(bool)) + } + default: + return false, fmt.Errorf("unsupport type %v", reflect.TypeOf(value)) + } + return true, nil +} + +func (e *Executor) CheckData2(row, col int, value interface{}, data *connector.Data) { + + match, err := e.CheckData(row, col, value, data) + fmt.Println("expect data is :", value) + fmt.Println("go got data is :", data.Data[row][col]) + if err != nil { + fmt.Println(err) + } + if !match { + fmt.Println(" data not match") + + } + + /* + fmt.Println(value) + if data == nil { + // return false, fmt.Errorf("data is nil") + // fmt.Println("check failed") + } + if col >= len(data.Head) { + // return false, fmt.Errorf("col out of data") + // fmt.Println("check failed") + } + if row >= len(data.Data) { + // return false, fmt.Errorf("row out of data") + // fmt.Println("check failed") + } + dataValue := data.Data[row][col] + + if dataValue == nil && value != nil { + // return false, fmt.Errorf("dataValue is nil but value is not nil") + // fmt.Println("check failed") + } + if dataValue == nil && value == nil { + // return true, nil + fmt.Println("check pass") + } + if reflect.TypeOf(dataValue) != reflect.TypeOf(value) { + // return false, fmt.Errorf("type not match expect %s got %s", reflect.TypeOf(value), reflect.TypeOf(dataValue)) + fmt.Println("check failed") + } + switch value.(type) { + case time.Time: + t, _ := dataValue.(time.Time) + if value.(time.Time).Nanosecond() != t.Nanosecond() { + // return false, fmt.Errorf("value not match expect %d got %d", value.(time.Time).Nanosecond(), t.Nanosecond()) + // fmt.Println("check failed") + } + case string: + if value.(string) != dataValue.(string) { + // return false, fmt.Errorf("value not match expect %s got %s", value.(string), 
dataValue.(string)) + // fmt.Println("check failed") + } + case int8: + if value.(int8) != dataValue.(int8) { + // return false, fmt.Errorf("value not match expect %d got %d", value.(int8), dataValue.(int8)) + // fmt.Println("check failed") + } + case int16: + if value.(int16) != dataValue.(int16) { + // return false, fmt.Errorf("value not match expect %d got %d", value.(int16), dataValue.(int16)) + // fmt.Println("check failed") + } + case int32: + if value.(int32) != dataValue.(int32) { + // return false, fmt.Errorf("value not match expect %d got %d", value.(int32), dataValue.(int32)) + // fmt.Println("check failed") + } + case int64: + if value.(int64) != dataValue.(int64) { + // return false, fmt.Errorf("value not match expect %d got %d", value.(int64), dataValue.(int64)) + // fmt.Println("check failed") + } + case float32: + if value.(float32) != dataValue.(float32) { + // return false, fmt.Errorf("value not match expect %f got %f", value.(float32), dataValue.(float32)) + // fmt.Println("check failed") + } + case float64: + if value.(float64) != dataValue.(float64) { + // return false, fmt.Errorf("value not match expect %f got %f", value.(float32), dataValue.(float32)) + // fmt.Println("check failed") + } + case bool: + if value.(bool) != dataValue.(bool) { + // return false, fmt.Errorf("value not match expect %t got %t", value.(bool), dataValue.(bool)) + // fmt.Println("check failed") + } + default: + // return false, fmt.Errorf("unsupport type %v", reflect.TypeOf(value)) + // fmt.Println("check failed") + } + // return true, nil + // fmt.Println("check pass") + */ +} + +func (e *Executor) CheckRow(count int, data *connector.Data) { + + if len(data.Data) != count { + fmt.Println("check failed !") + } +} diff --git a/tests/gotest/nanosupport/nanoCase.bat b/tests/gotest/nanosupport/nanoCase.bat new file mode 100644 index 0000000000..86bddd5b02 --- /dev/null +++ b/tests/gotest/nanosupport/nanoCase.bat @@ -0,0 +1,9 @@ +@echo off +echo ==== start run 
nanosupport.go + +del go.* +go mod init nano +go mod tidy +go build +nano.exe -h %1 -p %2 +cd .. diff --git a/tests/gotest/nanosupport/nanoCase.sh b/tests/gotest/nanosupport/nanoCase.sh new file mode 100644 index 0000000000..bec8929f14 --- /dev/null +++ b/tests/gotest/nanosupport/nanoCase.sh @@ -0,0 +1,22 @@ +#!/bin/bash + +echo "==== start run nanosupport.go " + +set +e +#set -x + +script_dir="$(dirname $(readlink -f $0))" +#echo "pwd: $script_dir, para0: $0" + +#execName=$0 +#execName=`echo ${execName##*/}` +#goName=`echo ${execName%.*}` + +###### step 3: start build +cd $script_dir +rm -f go.* +go mod init nano +go mod tidy +go build +sleep 10s +./nano -h $1 -p $2 diff --git a/tests/gotest/nanosupport/nanosupport.go b/tests/gotest/nanosupport/nanosupport.go new file mode 100644 index 0000000000..e2f24a73c0 --- /dev/null +++ b/tests/gotest/nanosupport/nanosupport.go @@ -0,0 +1,269 @@ +package main + +import ( + "fmt" + "log" + "nano/connector" + "time" + + "github.com/taosdata/go-utils/tdengine/config" +) + +func main() { + e, err := connector.NewExecutor(&config.TDengineGo{ + Address: "root:taosdata@/tcp(127.0.0.1:6030)/", + MaxIdle: 20, + MaxOpen: 30, + MaxLifetime: 30, + }, "db", false) + if err != nil { + panic(err) + } + prepareData(e) + data, err := e.Query("select * from tb") + if err != nil { + panic(err) + } + + layout := "2006-01-02 15:04:05.999999999" + t0, _ := time.Parse(layout, "2021-06-10 00:00:00.100000001") + t1, _ := time.Parse(layout, "2021-06-10 00:00:00.150000000") + t2, _ := time.Parse(layout, "2021-06-10 00:00:00.299999999") + t3, _ := time.Parse(layout, "2021-06-10 00:00:00.300000000") + t4, _ := time.Parse(layout, "2021-06-10 00:00:00.300000001") + t5, _ := time.Parse(layout, "2021-06-10 00:00:00.999999999") + + e.CheckData2(0, 0, t0, data) + e.CheckData2(1, 0, t1, data) + e.CheckData2(2, 0, t2, data) + e.CheckData2(3, 0, t3, data) + e.CheckData2(4, 0, t4, data) + e.CheckData2(5, 0, t5, data) + e.CheckData2(3, 1, int32(3), data) + 
e.CheckData2(4, 1, int32(5), data) + e.CheckData2(5, 1, int32(7), data) + + fmt.Println(" start check nano support!") + + data, _ = e.Query("select count(*) from tb where ts > 1623254400100000000 and ts < 1623254400100000002;") + e.CheckData2(0, 0, int64(1), data) + + data, _ = e.Query("select count(*) from tb where ts > \"2021-06-10 0:00:00.100000001\" and ts < \"2021-06-10 0:00:00.160000000\";") + e.CheckData2(0, 0, int64(1), data) + + data, _ = e.Query("select count(*) from tb where ts > 1623254400100000000 and ts < 1623254400150000000;") + e.CheckData2(0, 0, int64(1), data) + data, _ = e.Query("select count(*) from tb where ts > \"2021-06-10 0:00:00.100000000\" and ts < \"2021-06-10 0:00:00.150000000\";") + e.CheckData2(0, 0, int64(1), data) + + data, _ = e.Query("select count(*) from tb where ts > 1623254400400000000;") + e.CheckData2(0, 0, int64(1), data) + data, _ = e.Query("select count(*) from tb where ts < \"2021-06-10 00:00:00.400000000\";") + e.CheckData2(0, 0, int64(5), data) + + data, _ = e.Query("select count(*) from tb where ts < now + 400000000b;") + e.CheckData2(0, 0, int64(6), data) + + data, _ = e.Query("select count(*) from tb where ts >= \"2021-06-10 0:00:00.100000001\";") + e.CheckData2(0, 0, int64(6), data) + + data, _ = e.Query("select count(*) from tb where ts <= 1623254400300000000;") + e.CheckData2(0, 0, int64(4), data) + + data, _ = e.Query("select count(*) from tb where ts = \"2021-06-10 0:00:00.000000000\";") + + data, _ = e.Query("select count(*) from tb where ts = 1623254400150000000;") + e.CheckData2(0, 0, int64(1), data) + + data, _ = e.Query("select count(*) from tb where ts = \"2021-06-10 0:00:00.100000001\";") + e.CheckData2(0, 0, int64(1), data) + + data, _ = e.Query("select count(*) from tb where ts between 1623254400000000000 and 1623254400400000000;") + e.CheckData2(0, 0, int64(5), data) + + data, _ = e.Query("select count(*) from tb where ts between \"2021-06-10 0:00:00.299999999\" and \"2021-06-10 0:00:00.300000001\";") + 
e.CheckData2(0, 0, int64(3), data) + + data, _ = e.Query("select avg(speed) from tb interval(5000000000b);") + e.CheckRow(1, data) + + data, _ = e.Query("select avg(speed) from tb interval(100000000b)") + e.CheckRow(4, data) + + data, _ = e.Query("select avg(speed) from tb interval(1000b);") + e.CheckRow(5, data) + + data, _ = e.Query("select avg(speed) from tb interval(1u);") + e.CheckRow(5, data) + + data, _ = e.Query("select avg(speed) from tb interval(100000000b) sliding (100000000b);") + e.CheckRow(4, data) + + data, _ = e.Query("select last(*) from tb") + tt, _ := time.Parse(layout, "2021-06-10 0:00:00.999999999") + e.CheckData2(0, 0, tt, data) + + data, _ = e.Query("select first(*) from tb") + tt1, _ := time.Parse(layout, "2021-06-10 0:00:00.100000001") + e.CheckData2(0, 0, tt1, data) + + e.Execute("insert into tb values(now + 500000000b, 6);") + data, _ = e.Query("select * from tb;") + e.CheckRow(7, data) + + e.Execute("create table tb2 (ts timestamp, speed int, ts2 timestamp);") + e.Execute("insert into tb2 values(\"2021-06-10 0:00:00.100000001\", 1, \"2021-06-11 0:00:00.100000001\");") + e.Execute("insert into tb2 values(1623254400150000000, 2, 1623340800150000000);") + e.Execute("import into tb2 values(1623254400300000000, 3, 1623340800300000000);") + e.Execute("import into tb2 values(1623254400299999999, 4, 1623340800299999999);") + e.Execute("insert into tb2 values(1623254400300000001, 5, 1623340800300000001);") + e.Execute("insert into tb2 values(1623254400999999999, 7, 1623513600999999999);") + + data, _ = e.Query("select * from tb2;") + tt2, _ := time.Parse(layout, "2021-06-10 0:00:00.100000001") + tt3, _ := time.Parse(layout, "2021-06-10 0:00:00.150000000") + + e.CheckData2(0, 0, tt2, data) + e.CheckData2(1, 0, tt3, data) + e.CheckData2(2, 1, int32(4), data) + e.CheckData2(3, 1, int32(3), data) + tt4, _ := time.Parse(layout, "2021-06-11 00:00:00.300000001") + e.CheckData2(4, 2, tt4, data) + e.CheckRow(6, data) + + data, _ = e.Query("select count(*) 
from tb2 where ts2 > 1623340800000000000 and ts2 < 1623340800150000000;") + e.CheckData2(0, 0, int64(1), data) + + data, _ = e.Query("select count(*) from tb2 where ts2 > \"2021-06-11 0:00:00.100000000\" and ts2 < \"2021-06-11 0:00:00.100000002\";") + e.CheckData2(0, 0, int64(1), data) + + data, _ = e.Query("select count(*) from tb2 where ts2 > 1623340800500000000;") + e.CheckData2(0, 0, int64(1), data) + data, _ = e.Query("select count(*) from tb2 where ts2 < \"2021-06-11 0:00:00.400000000\";") + e.CheckData2(0, 0, int64(5), data) + + data, _ = e.Query("select count(*) from tb2 where ts2 < now + 400000000b;") + e.CheckData2(0, 0, int64(6), data) + + data, _ = e.Query("select count(*) from tb2 where ts2 >= \"2021-06-11 0:00:00.100000001\";") + e.CheckData2(0, 0, int64(6), data) + + data, _ = e.Query("select count(*) from tb2 where ts2 <= 1623340800400000000;") + e.CheckData2(0, 0, int64(5), data) + + data, _ = e.Query("select count(*) from tb2 where ts2 = \"2021-06-11 0:00:00.000000000\";") + + data, _ = e.Query("select count(*) from tb2 where ts2 = \"2021-06-11 0:00:00.300000001\";") + e.CheckData2(0, 0, int64(1), data) + + data, _ = e.Query("select count(*) from tb2 where ts2 = 1623340800300000001;") + e.CheckData2(0, 0, int64(1), data) + + data, _ = e.Query("select count(*) from tb2 where ts2 between 1623340800000000000 and 1623340800450000000;") + e.CheckData2(0, 0, int64(5), data) + + data, _ = e.Query("select count(*) from tb2 where ts2 between \"2021-06-11 0:00:00.299999999\" and \"2021-06-11 0:00:00.300000001\";") + e.CheckData2(0, 0, int64(3), data) + + data, _ = e.Query("select count(*) from tb2 where ts2 <> 1623513600999999999;") + e.CheckData2(0, 0, int64(5), data) + + data, _ = e.Query("select count(*) from tb2 where ts2 <> \"2021-06-11 0:00:00.100000001\";") + e.CheckData2(0, 0, int64(5), data) + + data, _ = e.Query("select count(*) from tb2 where ts2 <> \"2021-06-11 0:00:00.100000000\";") + e.CheckData2(0, 0, int64(6), data) + + data, _ = 
e.Query("select count(*) from tb2 where ts2 != 1623513600999999999;") + e.CheckData2(0, 0, int64(5), data) + + data, _ = e.Query("select count(*) from tb2 where ts2 != \"2021-06-11 0:00:00.100000001\";") + e.CheckData2(0, 0, int64(5), data) + + data, _ = e.Query("select count(*) from tb2 where ts2 != \"2021-06-11 0:00:00.100000000\";") + e.CheckData2(0, 0, int64(6), data) + + e.Execute("insert into tb2 values(now + 500000000b, 6, now +2d);") + data, _ = e.Query("select * from tb2;") + e.CheckRow(7, data) + + e.Execute("create table tb3 (ts timestamp, speed int);") + _, err = e.Execute("insert into tb3 values(16232544001500000, 2);") + if err != nil { + fmt.Println("check pass! ") + } + + e.Execute("insert into tb3 values(\"2021-06-10 0:00:00.123456\", 2);") + data, _ = e.Query("select * from tb3 where ts = \"2021-06-10 0:00:00.123456000\";") + e.CheckRow(1, data) + + e.Execute("insert into tb3 values(\"2021-06-10 0:00:00.123456789000\", 2);") + data, _ = e.Query("select * from tb3 where ts = \"2021-06-10 0:00:00.123456789\";") + e.CheckRow(1, data) + + // check timezone support + + e.Execute("drop database if exists nsdb;") + e.Execute("create database nsdb precision 'ns';") + e.Execute("use nsdb;") + e.Execute("create stable st (ts timestamp ,speed float ) tags(time timestamp ,id int);") + e.Execute("insert into tb1 using st tags('2021-06-10 0:00:00.123456789' , 1 ) values('2021-06-10T0:00:00.123456789+07:00' , 1.0);") + data, _ = e.Query("select first(*) from tb1;") + + ttt, _ := time.Parse(layout, "2021-06-10 01:00:00.123456789") + e.CheckData2(0, 0, ttt, data) + + e.Execute("create database usdb precision 'us';") + e.Execute("use usdb;") + e.Execute("create stable st (ts timestamp ,speed float ) tags(time timestamp ,id int);") + e.Execute("insert into tb1 using st tags('2021-06-10 0:00:00.123456' , 1 ) values('2021-06-10T0:00:00.123456+07:00' , 1.0);") + data, _ = e.Query("select first(*) from tb1;") + ttt2, _ := time.Parse(layout, "2021-06-10 01:00:00.123456") 
+ e.CheckData2(0, 0, ttt2, data) + + e.Execute("drop database if exists msdb;") + e.Execute("create database msdb precision 'ms';") + e.Execute("use msdb;") + e.Execute("create stable st (ts timestamp ,speed float ) tags(time timestamp ,id int);") + e.Execute("insert into tb1 using st tags('2021-06-10 0:00:00.123' , 1 ) values('2021-06-10T0:00:00.123+07:00' , 1.0);") + data, _ = e.Query("select first(*) from tb1;") + ttt3, _ := time.Parse(layout, "2021-06-10 01:00:00.123") + e.CheckData2(0, 0, ttt3, data) + fmt.Println("all test done!") + +} + +func prepareData(e *connector.Executor) { + sqlList := []string{ + "reset query cache;", + "drop database if exists db;", + "create database db;", + "use db;", + "reset query cache;", + "drop database if exists db;", + "create database db precision 'ns';", + "show databases;", + "use db;", + "create table tb (ts timestamp, speed int);", + "insert into tb values('2021-06-10 0:00:00.100000001', 1);", + "insert into tb values(1623254400150000000, 2);", + "import into tb values(1623254400300000000, 3);", + "import into tb values(1623254400299999999, 4);", + "insert into tb values(1623254400300000001, 5);", + "insert into tb values(1623254400999999999, 7);", + } + for _, sql := range sqlList { + err := executeSql(e, sql) + if err != nil { + log.Fatalf("prepare data error:%v, sql:%s", err, sql) + } + } +} + +func executeSql(e *connector.Executor, sql string) error { + _, err := e.Execute(sql) + if err != nil { + return err + } + return nil +} From a6fd024d04bfec06391833a09c60650c6f568716 Mon Sep 17 00:00:00 2001 From: wenzhouwww Date: Thu, 9 Sep 2021 17:07:04 +0800 Subject: [PATCH 39/60] remove codes not used --- .../nodejsTest/test/nanosecondTest.js | 351 ------------------ 1 file changed, 351 deletions(-) delete mode 100644 tests/connectorTest/nodejsTest/test/nanosecondTest.js diff --git a/tests/connectorTest/nodejsTest/test/nanosecondTest.js b/tests/connectorTest/nodejsTest/test/nanosecondTest.js deleted file mode 100644 index 
36fb398ea4..0000000000 --- a/tests/connectorTest/nodejsTest/test/nanosecondTest.js +++ /dev/null @@ -1,351 +0,0 @@ -const taos = require('../tdengine'); -var conn = taos.connect({config:"/etc/taos"}); -var c1 = conn.cursor(); - - -function checkData(sql,row,col,data){ - - c1.execute(sql) - var d = c1.fetchall(); - let checkdata = d[row][col]; - if (checkdata == data) { - - // console.log('check pass') - } - else{ - console.log('check failed') - console.log(checkdata) - console.log(data) - - - } -} - - -// nano basic case - -c1.execute('reset query cache') -c1.execute('drop database if exists db') -c1.execute('create database db precision "ns";') -c1.execute('use db'); -c1.execute('create table tb (ts timestamp, speed int)') -c1.execute('insert into tb values(\'2021-06-10 00:00:00.100000001\', 1);') -c1.execute('insert into tb values(1623254400150000000, 2);') -c1.execute('import into tb values(1623254400300000000, 3);') -c1.execute('import into tb values(1623254400299999999, 4);') -c1.execute('insert into tb values(1623254400300000001, 5);') -c1.execute('insert into tb values(1623254400999999999, 7);') -c1.execute('insert into tb values(1623254400123456789, 8);') -sql = 'select * from tb;' - -console.log('*******************************************') -console.log('this is area about checkdata result') -//check data about insert data -checkData(sql,0,0,'2021-06-10 00:00:00.100000001') -checkData(sql,1,0,'2021-06-10 00:00:00.123456789') -checkData(sql,2,0,'2021-06-10 00:00:00.150000000') -checkData(sql,3,0,'2021-06-10 00:00:00.299999999') //error -checkData(sql,4,0,'2021-06-10 00:00:00.300000000') -checkData(sql,5,0,'2021-06-10 00:00:00.300000001') -checkData(sql,6,0,'2021-06-10 00:00:00.999999999') //error - -// // us basic case - -// c1.execute('reset query cache') -// c1.execute('drop database if exists db') -// c1.execute('create database db precision "us";') -// c1.execute('use db'); -// c1.execute('create table tb (ts timestamp, speed int)') -// 
c1.execute('insert into tb values(\'2021-06-10 00:00:00.100001\', 1);') -// c1.execute('insert into tb values(1623254400150000, 2);') -// c1.execute('import into tb values(1623254400300000, 3);') -// c1.execute('import into tb values(1623254400299999, 4);') -// c1.execute('insert into tb values(1623254400300001, 5);') -// c1.execute('insert into tb values(1623254400999999, 7);') -// c1.execute('insert into tb values(1623254400123789, 8);') -// sql = 'select * from tb;' - -// console.log('*******************************************') - -// //check data about insert data -// checkData(sql,0,0,'2021-06-10 00:00:00.100001') -// checkData(sql,1,0,'2021-06-10 00:00:00.123789') -// checkData(sql,2,0,'2021-06-10 00:00:00.150000') -// checkData(sql,3,0,'2021-06-10 00:00:00.299999') -// checkData(sql,4,0,'2021-06-10 00:00:00.300000') -// checkData(sql,5,0,'2021-06-10 00:00:00.300001') -// checkData(sql,6,0,'2021-06-10 00:00:00.999999') - -// console.log('*******************************************') - -// // ms basic case - -// c1.execute('reset query cache') -// c1.execute('drop database if exists db') -// c1.execute('create database db precision "ms";') -// c1.execute('use db'); -// c1.execute('create table tb (ts timestamp, speed int)') -// c1.execute('insert into tb values(\'2021-06-10 00:00:00.101\', 1);') -// c1.execute('insert into tb values(1623254400150, 2);') -// c1.execute('import into tb values(1623254400300, 3);') -// c1.execute('import into tb values(1623254400299, 4);') -// c1.execute('insert into tb values(1623254400301, 5);') -// c1.execute('insert into tb values(1623254400789, 7);') -// c1.execute('insert into tb values(1623254400999, 8);') -// sql = 'select * from tb;' - -// console.log('*******************************************') -// console.log('this is area about checkdata result') -// //check data about insert data -// checkData(sql,0,0,'2021-06-10 00:00:00.101') -// checkData(sql,1,0,'2021-06-10 00:00:00.150') -// checkData(sql,2,0,'2021-06-10 
00:00:00.299') -// checkData(sql,3,0,'2021-06-10 00:00:00.300') -// checkData(sql,4,0,'2021-06-10 00:00:00.301') -// checkData(sql,5,0,'2021-06-10 00:00:00.789') -// checkData(sql,6,0,'2021-06-10 00:00:00.999') - -console.log('*******************************************') - -// offfical query result to show -// console.log('this is area about fetch all data') -// var query = c1.query(sql) -// var promise = query.execute(); -// promise.then(function(result) { -// result.pretty(); -// }); - -console.log('*******************************************') -// checkData(sql,3,1,3) -// checkData(sql,4,1,5) -// checkData(sql,5,1,7) - - - - - - -// checkData(3,1,3) -// checkData(4,1,5) -// checkData(5,1,7) - -// tdSql.query('select count(*) from tb where ts > 1623254400100000000 and ts < 1623254400100000002;') -// tdSql.checkData(0,0,1) -// tdSql.query('select count(*) from tb where ts > \'2021-06-10 0:00:00.100000001\' and ts < \'2021-06-10 0:00:00.160000000\';') -// tdSql.checkData(0,0,1) - -// tdSql.query('select count(*) from tb where ts > 1623254400100000000 and ts < 1623254400150000000;') -// tdSql.checkData(0,0,1) -// tdSql.query('select count(*) from tb where ts > \'2021-06-10 0:00:00.100000000\' and ts < \'2021-06-10 0:00:00.150000000\';') -// tdSql.checkData(0,0,1) - -// tdSql.query('select count(*) from tb where ts > 1623254400400000000;') -// tdSql.checkData(0,0,1) -// tdSql.query('select count(*) from tb where ts < \'2021-06-10 00:00:00.400000000\';') -// tdSql.checkData(0,0,5) - -// tdSql.query('select count(*) from tb where ts > now + 400000000b;') -// tdSql.checkRows(0) - -// tdSql.query('select count(*) from tb where ts >= \'2021-06-10 0:00:00.100000001\';') -// tdSql.checkData(0,0,6) - -// tdSql.query('select count(*) from tb where ts <= 1623254400300000000;') -// tdSql.checkData(0,0,4) - -// tdSql.query('select count(*) from tb where ts = \'2021-06-10 0:00:00.000000000\';') -// tdSql.checkRows(0) - -// tdSql.query('select count(*) from tb where ts = 
1623254400150000000;') -// tdSql.checkData(0,0,1) - -// tdSql.query('select count(*) from tb where ts = \'2021-06-10 0:00:00.100000001\';') -// tdSql.checkData(0,0,1) - -// tdSql.query('select count(*) from tb where ts between 1623254400000000000 and 1623254400400000000;') -// tdSql.checkData(0,0,5) - -// tdSql.query('select count(*) from tb where ts between \'2021-06-10 0:00:00.299999999\' and \'2021-06-10 0:00:00.300000001\';') -// tdSql.checkData(0,0,3) - -// tdSql.query('select avg(speed) from tb interval(5000000000b);') -// tdSql.checkRows(1) - -// tdSql.query('select avg(speed) from tb interval(100000000b)') -// tdSql.checkRows(4) - -// tdSql.error('select avg(speed) from tb interval(1b);') -// tdSql.error('select avg(speed) from tb interval(999b);') - -// tdSql.query('select avg(speed) from tb interval(1000b);') -// tdSql.checkRows(5) - -// tdSql.query('select avg(speed) from tb interval(1u);') -// tdSql.checkRows(5) - -// tdSql.query('select avg(speed) from tb interval(100000000b) sliding (100000000b);') -// tdSql.checkRows(4) - -// tdSql.query('select last(*) from tb') -// tdSql.checkData(0,0, '2021-06-10 0:00:00.999999999') -// tdSql.checkData(0,0, 1623254400999999999) - -// tdSql.query('select first(*) from tb') -// tdSql.checkData(0,0, 1623254400100000001) -// tdSql.checkData(0,0, '2021-06-10 0:00:00.100000001') - -// c1.execute('insert into tb values(now + 500000000b, 6);') -// tdSql.query('select * from tb;') -// tdSql.checkRows(7) - -// tdLog.debug('testing nanosecond support in other timestamps') -// c1.execute('create table tb2 (ts timestamp, speed int, ts2 timestamp);') -// c1.execute('insert into tb2 values(\'2021-06-10 0:00:00.100000001\', 1, \'2021-06-11 0:00:00.100000001\');') -// c1.execute('insert into tb2 values(1623254400150000000, 2, 1623340800150000000);') -// c1.execute('import into tb2 values(1623254400300000000, 3, 1623340800300000000);') -// c1.execute('import into tb2 values(1623254400299999999, 4, 1623340800299999999);') -// 
c1.execute('insert into tb2 values(1623254400300000001, 5, 1623340800300000001);') -// c1.execute('insert into tb2 values(1623254400999999999, 7, 1623513600999999999);') - -// tdSql.query('select * from tb2;') -// tdSql.checkData(0,0,'2021-06-10 0:00:00.100000001') -// tdSql.checkData(1,0,'2021-06-10 0:00:00.150000000') -// tdSql.checkData(2,1,4) -// tdSql.checkData(3,1,3) -// tdSql.checkData(4,2,'2021-06-11 00:00:00.300000001') -// tdSql.checkData(5,2,'2021-06-13 00:00:00.999999999') -// tdSql.checkRows(6) -// tdSql.query('select count(*) from tb2 where ts2 > 1623340800000000000 and ts2 < 1623340800150000000;') -// tdSql.checkData(0,0,1) -// tdSql.query('select count(*) from tb2 where ts2 > \'2021-06-11 0:00:00.100000000\' and ts2 < \'2021-06-11 0:00:00.100000002\';') -// tdSql.checkData(0,0,1) - -// tdSql.query('select count(*) from tb2 where ts2 > 1623340800500000000;') -// tdSql.checkData(0,0,1) -// tdSql.query('select count(*) from tb2 where ts2 < \'2021-06-11 0:00:00.400000000\';') -// tdSql.checkData(0,0,5) - -// tdSql.query('select count(*) from tb2 where ts2 > now + 400000000b;') -// tdSql.checkRows(0) - -// tdSql.query('select count(*) from tb2 where ts2 >= \'2021-06-11 0:00:00.100000001\';') -// tdSql.checkData(0,0,6) - -// tdSql.query('select count(*) from tb2 where ts2 <= 1623340800400000000;') -// tdSql.checkData(0,0,5) - -// tdSql.query('select count(*) from tb2 where ts2 = \'2021-06-11 0:00:00.000000000\';') -// tdSql.checkRows(0) - -// tdSql.query('select count(*) from tb2 where ts2 = \'2021-06-11 0:00:00.300000001\';') -// tdSql.checkData(0,0,1) - -// tdSql.query('select count(*) from tb2 where ts2 = 1623340800300000001;') -// tdSql.checkData(0,0,1) - -// tdSql.query('select count(*) from tb2 where ts2 between 1623340800000000000 and 1623340800450000000;') -// tdSql.checkData(0,0,5) - -// tdSql.query('select count(*) from tb2 where ts2 between \'2021-06-11 0:00:00.299999999\' and \'2021-06-11 0:00:00.300000001\';') -// tdSql.checkData(0,0,3) - -// 
tdSql.query('select count(*) from tb2 where ts2 <> 1623513600999999999;') -// tdSql.checkData(0,0,5) - -// tdSql.query('select count(*) from tb2 where ts2 <> \'2021-06-11 0:00:00.100000001\';') -// tdSql.checkData(0,0,5) - -// tdSql.query('select count(*) from tb2 where ts2 <> \'2021-06-11 0:00:00.100000000\';') -// tdSql.checkData(0,0,6) - -// tdSql.query('select count(*) from tb2 where ts2 != 1623513600999999999;') -// tdSql.checkData(0,0,5) - -// tdSql.query('select count(*) from tb2 where ts2 != \'2021-06-11 0:00:00.100000001\';') -// tdSql.checkData(0,0,5) - -// tdSql.query('select count(*) from tb2 where ts2 != \'2021-06-11 0:00:00.100000000\';') -// tdSql.checkData(0,0,6) - -// c1.execute('insert into tb2 values(now + 500000000b, 6, now +2d);') -// tdSql.query('select * from tb2;') -// tdSql.checkRows(7) - -// tdLog.debug('testing ill nanosecond format handling') -// c1.execute('create table tb3 (ts timestamp, speed int);') - -// tdSql.error('insert into tb3 values(16232544001500000, 2);') -// c1.execute('insert into tb3 values(\'2021-06-10 0:00:00.123456\', 2);') -// tdSql.query('select * from tb3 where ts = \'2021-06-10 0:00:00.123456000\';') -// tdSql.checkRows(1) - -// c1.execute('insert into tb3 values(\'2021-06-10 0:00:00.123456789000\', 2);') -// tdSql.query('select * from tb3 where ts = \'2021-06-10 0:00:00.123456789\';') -// tdSql.checkRows(1) - -// # check timezone support - -// c1.execute('use db;') -// c1.execute('create stable st (ts timestamp ,speed float ) tags(time timestamp ,id int);') -// c1.execute('insert into tb1 using st tags("2021-06-10 0:00:00.123456789" , 1 ) values("2021-06-10 0:00:00.123456789+07:00" , 1.0);' ) -// tdSql.query("select first(*) from tb1;") -// tdSql.checkData(0,0,1623258000123456789) -// c1.execute('insert into tb1 using st tags("2021-06-10 0:00:00.123456789" , 1 ) values("2021-06-10T0:00:00.123456789+06:00" , 2.0);' ) -// tdSql.query("select last(*) from tb1;") -// tdSql.checkData(0,0,1623261600123456789) - -// 
c1.execute('create database usdb precision "us";') -// c1.execute('use usdb;') -// c1.execute('create stable st (ts timestamp ,speed float ) tags(time timestamp ,id int);') -// c1.execute('insert into tb1 using st tags("2021-06-10 0:00:00.123456" , 1 ) values("2021-06-10 0:00:00.123456+07:00" , 1.0);' ) -// res = tdSql.getResult("select first(*) from tb1;") -// print(res) -// if res == [(datetime.datetime(2021, 6, 10, 1, 0, 0, 123456), 1.0)]: -// tdLog.info('check timezone pass about us database') - -// c1.execute('create database msdb precision "ms";') -// c1.execute('use msdb;') -// c1.execute('create stable st (ts timestamp ,speed float ) tags(time timestamp ,id int);') -// c1.execute('insert into tb1 using st tags("2021-06-10 0:00:00.123" , 1 ) values("2021-06-10 0:00:00.123+07:00" , 1.0);' ) -// res = tdSql.getResult("select first(*) from tb1;") -// print(res) -// if res ==[(datetime.datetime(2021, 6, 10, 1, 0, 0, 123000), 1.0)]: -// tdLog.info('check timezone pass about ms database') - - - - - - - - - - -// c1.execute('create database if not exists ' + dbname + ' precision "ns"'); -// c1.execute('use ' + dbname) -// c1.execute('create table if not exists tstest (ts timestamp, _int int);'); -// c1.execute('insert into tstest values(1625801548423914405, 0)'); -// // Select -// console.log('select * from tstest'); -// c1.execute('select * from tstest'); - -// var d = c1.fetchall(); -// console.log(c1.fields); -// let ts = d[0][0]; -// console.log(ts); - -// if (ts.taosTimestamp() != 1625801548423914405) { -// throw "nanosecond not match!"; -// } -// if (ts.getNanoseconds() % 1000000 !== 914405) { -// throw "nanosecond precision error"; -// } -// setTimeout(function () { -// c1.query('drop database nodejs_ns_test;'); -// }, 200); - -// setTimeout(function () { -// conn.close(); -// }, 2000); - - From 7d81fa1a924ccdc95f8d492723726873ba3aa934 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Thu, 9 Sep 2021 18:30:33 +0800 Subject: [PATCH 40/60] [TD-6521] update table 
meta after altering table schema --- src/client/src/tscServer.c | 6 +++++- src/mnode/src/mnodeTable.c | 19 ++++++++++++++++++- 2 files changed, 23 insertions(+), 2 deletions(-) diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c index 9d523f2730..bda2dbb2cc 100644 --- a/src/client/src/tscServer.c +++ b/src/client/src/tscServer.c @@ -2618,7 +2618,11 @@ int tscProcessAlterTableMsgRsp(SSqlObj *pSql) { tfree(pTableMetaInfo->pTableMeta); if (isSuperTable) { // if it is a super table, iterate the hashTable and remove all the childTableMeta - taosHashClear(tscTableMetaMap); + if (pSql->res.pRsp == NULL) { + tscDebug("0x%"PRIx64" unexpected resp from mnode, super table: %s failed to update super table meta ", pSql->self, name); + return 0; + } + return tscProcessTableMetaRsp(pSql); } return 0; diff --git a/src/mnode/src/mnodeTable.c b/src/mnode/src/mnodeTable.c index 68529ab8a2..a6158906a7 100644 --- a/src/mnode/src/mnodeTable.c +++ b/src/mnode/src/mnodeTable.c @@ -1231,7 +1231,9 @@ static int32_t mnodeAddSuperTableTagCb(SMnodeMsg *pMsg, int32_t code) { SSTableObj *pStable = (SSTableObj *)pMsg->pTable; mLInfo("msg:%p, app:%p stable %s, add tag result:%s, numOfTags:%d", pMsg, pMsg->rpcMsg.ahandle, pStable->info.tableId, tstrerror(code), pStable->numOfTags); - + if (code == TSDB_CODE_SUCCESS) { + code = mnodeGetSuperTableMeta(pMsg); + } return code; } @@ -1287,6 +1289,9 @@ static int32_t mnodeDropSuperTableTagCb(SMnodeMsg *pMsg, int32_t code) { SSTableObj *pStable = (SSTableObj *)pMsg->pTable; mLInfo("msg:%p, app:%p stable %s, drop tag result:%s", pMsg, pMsg->rpcMsg.ahandle, pStable->info.tableId, tstrerror(code)); + if (code == TSDB_CODE_SUCCESS) { + code = mnodeGetSuperTableMeta(pMsg); + } return code; } @@ -1321,6 +1326,9 @@ static int32_t mnodeModifySuperTableTagNameCb(SMnodeMsg *pMsg, int32_t code) { SSTableObj *pStable = (SSTableObj *)pMsg->pTable; mLInfo("msg:%p, app:%p stable %s, modify tag result:%s", pMsg, pMsg->rpcMsg.ahandle, 
pStable->info.tableId, tstrerror(code)); + if (code == TSDB_CODE_SUCCESS) { + code = mnodeGetSuperTableMeta(pMsg); + } return code; } @@ -1376,6 +1384,9 @@ static int32_t mnodeAddSuperTableColumnCb(SMnodeMsg *pMsg, int32_t code) { SSTableObj *pStable = (SSTableObj *)pMsg->pTable; mLInfo("msg:%p, app:%p stable %s, add column result:%s", pMsg, pMsg->rpcMsg.ahandle, pStable->info.tableId, tstrerror(code)); + if (code == TSDB_CODE_SUCCESS) { + code = mnodeGetSuperTableMeta(pMsg); + } return code; } @@ -1444,6 +1455,9 @@ static int32_t mnodeDropSuperTableColumnCb(SMnodeMsg *pMsg, int32_t code) { SSTableObj *pStable = (SSTableObj *)pMsg->pTable; mLInfo("msg:%p, app:%p stable %s, delete column result:%s", pMsg, pMsg->rpcMsg.ahandle, pStable->info.tableId, tstrerror(code)); + if (code == TSDB_CODE_SUCCESS) { + code = mnodeGetSuperTableMeta(pMsg); + } return code; } @@ -1489,6 +1503,9 @@ static int32_t mnodeChangeSuperTableColumnCb(SMnodeMsg *pMsg, int32_t code) { SSTableObj *pStable = (SSTableObj *)pMsg->pTable; mLInfo("msg:%p, app:%p stable %s, change column result:%s", pMsg, pMsg->rpcMsg.ahandle, pStable->info.tableId, tstrerror(code)); + if (code == TSDB_CODE_SUCCESS) { + code = mnodeGetSuperTableMeta(pMsg); + } return code; } From aa79a68baaf3bf15da0091a5c30614856f76e6bd Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 9 Sep 2021 19:33:10 +0800 Subject: [PATCH 41/60] [td-6563] --- src/query/src/qUtil.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/query/src/qUtil.c b/src/query/src/qUtil.c index 4caf351799..539c292bb3 100644 --- a/src/query/src/qUtil.c +++ b/src/query/src/qUtil.c @@ -436,13 +436,13 @@ static int32_t tableResultComparFn(const void *pLeft, const void *pRight, void * } STableQueryInfo** pList = supporter->pTableQueryInfo; - - SResultRowInfo *pWindowResInfo1 = &(pList[left]->resInfo); - SResultRow * pWindowRes1 = getResultRow(pWindowResInfo1, leftPos); + SResultRow* pWindowRes1 = pList[left]->resInfo.pResult[leftPos]; 
+// SResultRow * pWindowRes1 = getResultRow(&(pList[left]->resInfo), leftPos); TSKEY leftTimestamp = pWindowRes1->win.skey; - SResultRowInfo *pWindowResInfo2 = &(pList[right]->resInfo); - SResultRow * pWindowRes2 = getResultRow(pWindowResInfo2, rightPos); +// SResultRowInfo *pWindowResInfo2 = &(pList[right]->resInfo); +// SResultRow * pWindowRes2 = getResultRow(pWindowResInfo2, rightPos); + SResultRow* pWindowRes2 = pList[right]->resInfo.pResult[rightPos]; TSKEY rightTimestamp = pWindowRes2->win.skey; if (leftTimestamp == rightTimestamp) { From e5f3730204775919329aa18b21d8d6f1e33d1b40 Mon Sep 17 00:00:00 2001 From: xywang Date: Thu, 9 Sep 2021 20:03:56 +0800 Subject: [PATCH 42/60] [TS-263]: make response of alter consistent with create and drop --- src/plugins/http/inc/httpUtil.h | 1 + src/plugins/http/src/httpRestJson.c | 23 +++++++++++++++++++---- src/plugins/http/src/httpUtil.c | 12 ++++++++++++ 3 files changed, 32 insertions(+), 4 deletions(-) diff --git a/src/plugins/http/inc/httpUtil.h b/src/plugins/http/inc/httpUtil.h index 54c95b6980..21690ebca9 100644 --- a/src/plugins/http/inc/httpUtil.h +++ b/src/plugins/http/inc/httpUtil.h @@ -17,6 +17,7 @@ #define TDENGINE_HTTP_UTIL_H bool httpCheckUsedbSql(char *sql); +bool httpCheckAlterSql(char *sql); void httpTimeToString(int32_t t, char *buf, int32_t buflen); bool httpUrlMatch(HttpContext *pContext, int32_t pos, char *cmp); diff --git a/src/plugins/http/src/httpRestJson.c b/src/plugins/http/src/httpRestJson.c index 47f2d4ff5b..13596b0e8a 100644 --- a/src/plugins/http/src/httpRestJson.c +++ b/src/plugins/http/src/httpRestJson.c @@ -16,6 +16,7 @@ #define _DEFAULT_SOURCE #include "os.h" #include "tglobal.h" +#include "tsclient.h" #include "httpLog.h" #include "httpJson.h" #include "httpRestHandle.h" @@ -62,13 +63,21 @@ void restStartSqlJson(HttpContext *pContext, HttpSqlCmd *cmd, TAOS_RES *result) httpJsonItemToken(jsonBuf); httpJsonToken(jsonBuf, JsonArrStt); + SSqlObj *pObj = (SSqlObj *) result; + bool isAlterSql = 
(pObj->sqlstr == NULL) ? false : httpCheckAlterSql(pObj->sqlstr); + if (num_fields == 0) { httpJsonItemToken(jsonBuf); httpJsonString(jsonBuf, REST_JSON_AFFECT_ROWS, REST_JSON_AFFECT_ROWS_LEN); } else { - for (int32_t i = 0; i < num_fields; ++i) { + if (isAlterSql == true) { httpJsonItemToken(jsonBuf); - httpJsonString(jsonBuf, fields[i].name, (int32_t)strlen(fields[i].name)); + httpJsonString(jsonBuf, REST_JSON_AFFECT_ROWS, REST_JSON_AFFECT_ROWS_LEN); + } else { + for (int32_t i = 0; i < num_fields; ++i) { + httpJsonItemToken(jsonBuf); + httpJsonString(jsonBuf, fields[i].name, (int32_t)strlen(fields[i].name)); + } } } @@ -99,8 +108,14 @@ void restStartSqlJson(HttpContext *pContext, HttpSqlCmd *cmd, TAOS_RES *result) httpJsonItemToken(jsonBuf); httpJsonToken(jsonBuf, JsonArrStt); - httpJsonItemToken(jsonBuf); - httpJsonString(jsonBuf, fields[i].name, (int32_t)strlen(fields[i].name)); + if (isAlterSql == true) { + httpJsonItemToken(jsonBuf); + httpJsonString(jsonBuf, REST_JSON_AFFECT_ROWS, REST_JSON_AFFECT_ROWS_LEN); + } else { + httpJsonItemToken(jsonBuf); + httpJsonString(jsonBuf, fields[i].name, (int32_t)strlen(fields[i].name)); + } + httpJsonItemToken(jsonBuf); httpJsonInt(jsonBuf, fields[i].type); httpJsonItemToken(jsonBuf); diff --git a/src/plugins/http/src/httpUtil.c b/src/plugins/http/src/httpUtil.c index ade50bdad6..f30ac7326e 100644 --- a/src/plugins/http/src/httpUtil.c +++ b/src/plugins/http/src/httpUtil.c @@ -21,6 +21,7 @@ #include "httpResp.h" #include "httpSql.h" #include "httpUtil.h" +#include "ttoken.h" bool httpCheckUsedbSql(char *sql) { if (strstr(sql, "use ") != NULL) { @@ -29,6 +30,17 @@ bool httpCheckUsedbSql(char *sql) { return false; } +bool httpCheckAlterSql(char *sql) { + int32_t index = 0; + + do { + SStrToken t0 = tStrGetToken(sql, &index, false); + if (t0.type != TK_LP) { + return t0.type == TK_ALTER; + } + } while (1); +} + void httpTimeToString(int32_t t, char *buf, int32_t buflen) { memset(buf, 0, (size_t)buflen); char ts[32] = {0}; 
From 5addac6403e98c3e4e18a5d157b365a05c7ef8f2 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Thu, 9 Sep 2021 20:42:11 +0800 Subject: [PATCH 43/60] [TD-6521] update table meta after altering table schema --- src/rpc/src/rpcMain.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rpc/src/rpcMain.c b/src/rpc/src/rpcMain.c index e958a8e5ec..9ea5fd5392 100644 --- a/src/rpc/src/rpcMain.c +++ b/src/rpc/src/rpcMain.c @@ -407,7 +407,7 @@ void rpcSendRequest(void *shandle, const SRpcEpSet *pEpSet, SRpcMsg *pMsg, int64 if (type == TSDB_MSG_TYPE_QUERY || type == TSDB_MSG_TYPE_CM_RETRIEVE || type == TSDB_MSG_TYPE_FETCH || type == TSDB_MSG_TYPE_CM_STABLE_VGROUP || type == TSDB_MSG_TYPE_CM_TABLES_META || type == TSDB_MSG_TYPE_CM_TABLE_META - || type == TSDB_MSG_TYPE_CM_SHOW || type == TSDB_MSG_TYPE_DM_STATUS) + || type == TSDB_MSG_TYPE_CM_SHOW || type == TSDB_MSG_TYPE_DM_STATUS || type == TSDB_MSG_TYPE_CM_ALTER_TABLE) pContext->connType = RPC_CONN_TCPC; pContext->rid = taosAddRef(tsRpcRefId, pContext); From 4bff5ba73f30d52e2d76579f0d5a198736bd9dd8 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Thu, 9 Sep 2021 23:17:19 +0800 Subject: [PATCH 44/60] Feature/sangshuduo/td 5844 cmdline parameters align (#7852) * [TD-5844]: make cmd line parameter similar. * fix test case align with taosdemo change. * fix windows stack overflow issue. * fix mac compile error. * fix taosdemo cmdline parameter in tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoInsert.py * fix windows compiler options. * make taos.exe use mysql style password input. * make taos shell and taosdump use mysql style password input. * determine scanf return value. * make console echo off during password input. * use one macro to define password length. * fix --password. change taos shell '-z' for timezone * fix password echo on darwin. * fix few lines. 
--- src/kit/taosdemo/taosdemo.c | 102 ++++++++++++++++++------------------ src/kit/taosdump/taosdump.c | 13 +++-- 2 files changed, 57 insertions(+), 58 deletions(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 0b742c7f1c..2358d666a9 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -125,17 +125,17 @@ enum TEST_MODE { INVAID_TEST }; -typedef enum CREATE_SUB_TALBE_MOD_EN { +typedef enum CREATE_SUB_TABLE_MOD_EN { PRE_CREATE_SUBTBL, AUTO_CREATE_SUBTBL, NO_CREATE_SUBTBL -} CREATE_SUB_TALBE_MOD_EN; +} CREATE_SUB_TABLE_MOD_EN; -typedef enum TALBE_EXISTS_EN { +typedef enum TABLE_EXISTS_EN { TBL_NO_EXISTS, TBL_ALREADY_EXISTS, TBL_EXISTS_BUTT -} TALBE_EXISTS_EN; +} TABLE_EXISTS_EN; enum enumSYNC_MODE { SYNC_MODE, @@ -251,7 +251,7 @@ typedef struct SArguments_S { int64_t insertRows; int abort; uint32_t disorderRatio; // 0: no disorder, >0: x% - int disorderRange; // ms, us or ns. accordig to database precision + int disorderRange; // ms, us or ns. 
according to database precision uint32_t method_of_delete; uint64_t totalInsertRows; uint64_t totalAffectedRows; @@ -392,7 +392,7 @@ typedef struct SDbs_S { } SDbs; typedef struct SpecifiedQueryInfo_S { - uint64_t queryInterval; // 0: unlimit > 0 loop/s + uint64_t queryInterval; // 0: unlimited > 0 loop/s uint32_t concurrent; int sqlCount; uint32_t asyncMode; // 0: sync, 1: async @@ -413,7 +413,7 @@ typedef struct SpecifiedQueryInfo_S { typedef struct SuperQueryInfo_S { char stbName[TSDB_TABLE_NAME_LEN]; - uint64_t queryInterval; // 0: unlimit > 0 loop/s + uint64_t queryInterval; // 0: unlimited > 0 loop/s uint32_t threadCnt; uint32_t asyncMode; // 0: sync, 1: async uint64_t subscribeInterval; // ms @@ -738,10 +738,10 @@ static void printVersion() { char taosdemo_status[] = TAOSDEMO_STATUS; if (strlen(taosdemo_status) == 0) { - printf("taosdemo verison %s-%s\n", + printf("taosdemo version %s-%s\n", tdengine_ver, taosdemo_ver); } else { - printf("taosdemo verison %s-%s, status:%s\n", + printf("taosdemo version %s-%s, status:%s\n", tdengine_ver, taosdemo_ver, taosdemo_status); } } @@ -818,7 +818,7 @@ static void printHelp() { "The number of records per table. 
Default is 10000."); printf("%s%s%s%s\n", indent, "-M, --random", "\t\t\t", "The value of records generated are totally random."); - printf("%s\n", "\t\t\t\tThe default is to simulate power equipment senario."); + printf("%s\n", "\t\t\t\tThe default is to simulate power equipment scenario."); printf("%s%s%s%s\n", indent, "-x, --no-insert", "\t\t", "No-insert flag."); printf("%s%s%s%s\n", indent, "-y, --answer-yes", "\t\t", "Default input yes for prompt."); @@ -863,7 +863,7 @@ static void errorWrongValue(char *program, char *wrong_arg, char *wrong_value) fprintf(stderr, "Try `taosdemo --help' or `taosdemo --usage' for more information.\n"); } -static void errorUnreconized(char *program, char *wrong_arg) +static void errorUnrecognized(char *program, char *wrong_arg) { fprintf(stderr, "%s: unrecognized options '%s'\n", program, wrong_arg); fprintf(stderr, "Try `taosdemo --help' or `taosdemo --usage' for more information.\n"); @@ -920,7 +920,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { } else if (0 == strncmp(argv[i], "--file=", strlen("--file="))) { arguments->metaFile = (char *)(argv[i] + strlen("--file=")); } else { - errorUnreconized(argv[0], argv[i]); + errorUnrecognized(argv[0], argv[i]); exit(EXIT_FAILURE); } } else if ((0 == strncmp(argv[i], "-c", strlen("-c"))) @@ -942,7 +942,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { } else if (0 == strncmp(argv[i], "--config-dir=", strlen("--config-dir="))) { tstrncpy(configDir, (char *)(argv[i] + strlen("--config-dir=")), TSDB_FILENAME_LEN); } else { - errorUnreconized(argv[0], argv[i]); + errorUnrecognized(argv[0], argv[i]); exit(EXIT_FAILURE); } } else if ((0 == strncmp(argv[i], "-h", strlen("-h"))) @@ -964,7 +964,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { } else if (0 == strncmp(argv[i], "--host=", strlen("--host="))) { arguments->host = (char *)(argv[i] + strlen("--host=")); } else { - errorUnreconized(argv[0], argv[i]); 
+ errorUnrecognized(argv[0], argv[i]); exit(EXIT_FAILURE); } } else if (strcmp(argv[i], "-PP") == 0) { @@ -998,7 +998,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { } arguments->port = atoi(argv[++i]); } else { - errorUnreconized(argv[0], argv[i]); + errorUnrecognized(argv[0], argv[i]); exit(EXIT_FAILURE); } } else if ((0 == strncmp(argv[i], "-I", strlen("-I"))) @@ -1059,7 +1059,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { } i++; } else { - errorUnreconized(argv[0], argv[i]); + errorUnrecognized(argv[0], argv[i]); exit(EXIT_FAILURE); } } else if ((0 == strncmp(argv[i], "-u", strlen("-u"))) @@ -1081,7 +1081,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { } arguments->user = argv[++i]; } else { - errorUnreconized(argv[0], argv[i]); + errorUnrecognized(argv[0], argv[i]); exit(EXIT_FAILURE); } } else if ((0 == strncmp(argv[i], "-p", strlen("-p"))) @@ -1115,7 +1115,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { } arguments->output_file = argv[++i]; } else { - errorUnreconized(argv[0], argv[i]); + errorUnrecognized(argv[0], argv[i]); exit(EXIT_FAILURE); } } else if ((0 == strncmp(argv[i], "-s", strlen("-s"))) @@ -1137,7 +1137,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { } arguments->sqlFile = argv[++i]; } else { - errorUnreconized(argv[0], argv[i]); + errorUnrecognized(argv[0], argv[i]); exit(EXIT_FAILURE); } } else if ((0 == strncmp(argv[i], "-q", strlen("-q"))) @@ -1175,7 +1175,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { } arguments->async_mode = atoi(argv[++i]); } else { - errorUnreconized(argv[0], argv[i]); + errorUnrecognized(argv[0], argv[i]); exit(EXIT_FAILURE); } } else if ((0 == strncmp(argv[i], "-T", strlen("-T"))) @@ -1213,7 +1213,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { } arguments->nthreads = atoi(argv[++i]); } else { - 
errorUnreconized(argv[0], argv[i]); + errorUnrecognized(argv[0], argv[i]); exit(EXIT_FAILURE); } } else if ((0 == strncmp(argv[i], "-i", strlen("-i"))) @@ -1251,7 +1251,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { } arguments->insert_interval = atoi(argv[++i]); } else { - errorUnreconized(argv[0], argv[i]); + errorUnrecognized(argv[0], argv[i]); exit(EXIT_FAILURE); } } else if ((0 == strncmp(argv[i], "-S", strlen("-S"))) @@ -1289,7 +1289,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { } arguments->async_mode = atoi(argv[++i]); } else { - errorUnreconized(argv[0], argv[i]); + errorUnrecognized(argv[0], argv[i]); exit(EXIT_FAILURE); } } else if (strcmp(argv[i], "-qt") == 0) { @@ -1335,7 +1335,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { } arguments->interlace_rows = atoi(argv[++i]); } else { - errorUnreconized(argv[0], argv[i]); + errorUnrecognized(argv[0], argv[i]); exit(EXIT_FAILURE); } } else if ((0 == strncmp(argv[i], "-r", strlen("-r"))) @@ -1373,7 +1373,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { } arguments->reqPerReq = atoi(argv[++i]); } else { - errorUnreconized(argv[0], argv[i]); + errorUnrecognized(argv[0], argv[i]); exit(EXIT_FAILURE); } } else if ((0 == strncmp(argv[i], "-t", strlen("-t"))) @@ -1411,7 +1411,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { } arguments->ntables = atoi(argv[++i]); } else { - errorUnreconized(argv[0], argv[i]); + errorUnrecognized(argv[0], argv[i]); exit(EXIT_FAILURE); } @@ -1451,7 +1451,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { } arguments->insertRows = atoi(argv[++i]); } else { - errorUnreconized(argv[0], argv[i]); + errorUnrecognized(argv[0], argv[i]); exit(EXIT_FAILURE); } } else if ((0 == strncmp(argv[i], "-d", strlen("-d"))) @@ -1473,7 +1473,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { } 
arguments->database = argv[++i]; } else { - errorUnreconized(argv[0], argv[i]); + errorUnrecognized(argv[0], argv[i]); exit(EXIT_FAILURE); } } else if ((0 == strncmp(argv[i], "-l", strlen("-l"))) @@ -1512,12 +1512,12 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { } arguments->columnCount = atoi(argv[++i]); } else { - errorUnreconized(argv[0], argv[i]); + errorUnrecognized(argv[0], argv[i]); exit(EXIT_FAILURE); } if (arguments->columnCount > MAX_NUM_COLUMNS) { - printf("WARNING: max acceptible columns count is %d\n", MAX_NUM_COLUMNS); + printf("WARNING: max acceptable columns count is %d\n", MAX_NUM_COLUMNS); prompt(); arguments->columnCount = MAX_NUM_COLUMNS; } @@ -1552,7 +1552,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { } dataType = argv[++i]; } else { - errorUnreconized(argv[0], argv[i]); + errorUnrecognized(argv[0], argv[i]); exit(EXIT_FAILURE); } @@ -1687,7 +1687,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { } arguments->binwidth = atoi(argv[++i]); } else { - errorUnreconized(argv[0], argv[i]); + errorUnrecognized(argv[0], argv[i]); exit(EXIT_FAILURE); } } else if ((0 == strncmp(argv[i], "-m", strlen("-m"))) @@ -1709,7 +1709,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { } arguments->tb_prefix = argv[++i]; } else { - errorUnreconized(argv[0], argv[i]); + errorUnrecognized(argv[0], argv[i]); exit(EXIT_FAILURE); } } else if ((strcmp(argv[i], "-N") == 0) @@ -1774,7 +1774,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { } arguments->disorderRange = atoi(argv[++i]); } else { - errorUnreconized(argv[0], argv[i]); + errorUnrecognized(argv[0], argv[i]); exit(EXIT_FAILURE); } } else if ((0 == strncmp(argv[i], "-O", strlen("-O"))) @@ -1812,7 +1812,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { } arguments->disorderRatio = atoi(argv[++i]); } else { - errorUnreconized(argv[0], argv[i]); + 
errorUnrecognized(argv[0], argv[i]); exit(EXIT_FAILURE); } @@ -1866,7 +1866,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { } arguments->replica = atoi(argv[++i]); } else { - errorUnreconized(argv[0], argv[i]); + errorUnrecognized(argv[0], argv[i]); exit(EXIT_FAILURE); } @@ -1878,7 +1878,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { } else if (strcmp(argv[i], "-D") == 0) { arguments->method_of_delete = atoi(argv[++i]); if (arguments->method_of_delete > 3) { - errorPrint("%s", "\n\t-D need a valud (0~3) number following!\n"); + errorPrint("%s", "\n\t-D need a value (0~3) number following!\n"); exit(EXIT_FAILURE); } } else if ((strcmp(argv[i], "--version") == 0) @@ -1893,7 +1893,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { printf(" Usage: taosdemo [-f JSONFILE] [-u USER] [-p PASSWORD] [-c CONFIG_DIR]\n\ [-h HOST] [-P PORT] [-I INTERFACE] [-d DATABASE] [-a REPLICA]\n\ [-m TABLEPREFIX] [-s SQLFILE] [-N] [-o OUTPUTFILE] [-q QUERYMODE]\n\ - [-b DATATYPES] [-w WIDTH_OF_BINARY] [-l COLUNNS] [-T THREADNUMBER]\n\ + [-b DATATYPES] [-w WIDTH_OF_BINARY] [-l COLUMNS] [-T THREADNUMBER]\n\ [-i SLEEPTIME] [-S TIME_STEP] [-B INTERLACE_ROWS] [-t TABLES]\n\ [-n RECORDS] [-M] [-x] [-y] [-O ORDERMODE] [-R RANGE] [-a REPLIcA][-g]\n\ [--help] [--usage] [--version]\n"); @@ -2466,9 +2466,9 @@ static int printfInsertMeta() { printf(" database[%d] name: \033[33m%s\033[0m\n", i, g_Dbs.db[i].dbName); if (0 == g_Dbs.db[i].drop) { - printf(" drop: \033[33mno\033[0m\n"); + printf(" drop: \033[33m no\033[0m\n"); } else { - printf(" drop: \033[33myes\033[0m\n"); + printf(" drop: \033[33m yes\033[0m\n"); } if (g_Dbs.db[i].dbCfg.blocks > 0) { @@ -2577,9 +2577,9 @@ static int printfInsertMeta() { g_Dbs.db[i].superTbls[j].insertRows); /* if (0 == g_Dbs.db[i].superTbls[j].multiThreadWriteOneTbl) { - printf(" multiThreadWriteOneTbl: \033[33mno\033[0m\n"); + printf(" multiThreadWriteOneTbl: \033[33m no\033[0m\n"); 
}else { - printf(" multiThreadWriteOneTbl: \033[33myes\033[0m\n"); + printf(" multiThreadWriteOneTbl: \033[33m yes\033[0m\n"); } */ printf(" interlaceRows: \033[33m%u\033[0m\n", @@ -5137,7 +5137,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { goto PARSE_OVER; } - // super_talbes + // super_tables cJSON *stables = cJSON_GetObjectItem(dbinfos, "super_tables"); if (!stables || stables->type != cJSON_Array) { errorPrint("%s", "failed to read json, super_tables not found\n"); @@ -5474,7 +5474,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { goto PARSE_OVER; } } else if (!insertInterval) { - verbosePrint("%s() LN%d: stable insert interval be overrided by global %"PRIu64".\n", + verbosePrint("%s() LN%d: stable insert interval be overrode by global %"PRIu64".\n", __func__, __LINE__, g_args.insert_interval); g_Dbs.db[i].superTbls[j].insertInterval = g_args.insert_interval; } else { @@ -9534,7 +9534,7 @@ static void *readTable(void *sarg) { insertRows = g_args.insertRows; // } - int64_t ntables = pThreadInfo->ntables; // rinfo->end_table_to - rinfo->start_table_from + 1; + int64_t ntables = pThreadInfo->ntables; // pThreadInfo->end_table_to - pThreadInfo->start_table_from + 1; int64_t totalData = insertRows * ntables; bool do_aggreFunc = g_Dbs.do_aggreFunc; @@ -9603,7 +9603,7 @@ static void *readMetric(void *sarg) { } int64_t insertRows = pThreadInfo->stbInfo->insertRows; - int64_t ntables = pThreadInfo->ntables; // rinfo->end_table_to - rinfo->start_table_from + 1; + int64_t ntables = pThreadInfo->ntables; // pThreadInfo->end_table_to - pThreadInfo->start_table_from + 1; int64_t totalData = insertRows * ntables; bool do_aggreFunc = g_Dbs.do_aggreFunc; @@ -9709,7 +9709,7 @@ static int insertTestProcess() { } free(cmdBuffer); - // pretreatement + // pretreatment if (prepareSampleData() != 0) { if (g_fpOfInsertResult) fclose(g_fpOfInsertResult); @@ -10141,7 +10141,7 @@ static void stable_sub_callback( if (param) fetchResult(res, (threadInfo *)param); - // 
tao_unscribe() will free result. + // tao_unsubscribe() will free result. } static void specified_sub_callback( @@ -10154,7 +10154,7 @@ static void specified_sub_callback( if (param) fetchResult(res, (threadInfo *)param); - // tao_unscribe() will free result. + // tao_unsubscribe() will free result. } static TAOS_SUB* subscribeImpl( @@ -10494,12 +10494,12 @@ static int subscribeTestProcess() { //==== create threads for query for specified table if (g_queryInfo.specifiedQueryInfo.sqlCount <= 0) { - debugPrint("%s() LN%d, sepcified query sqlCount %d.\n", + debugPrint("%s() LN%d, specified query sqlCount %d.\n", __func__, __LINE__, g_queryInfo.specifiedQueryInfo.sqlCount); } else { if (g_queryInfo.specifiedQueryInfo.concurrent <= 0) { - errorPrint2("%s() LN%d, sepcified query sqlCount %d.\n", + errorPrint2("%s() LN%d, specified query sqlCount %d.\n", __func__, __LINE__, g_queryInfo.specifiedQueryInfo.sqlCount); exit(EXIT_FAILURE); diff --git a/src/kit/taosdump/taosdump.c b/src/kit/taosdump/taosdump.c index ae2193a82e..fe7616fa17 100644 --- a/src/kit/taosdump/taosdump.c +++ b/src/kit/taosdump/taosdump.c @@ -1209,14 +1209,14 @@ _dump_db_point: fprintf(fp, "USE %s;\n\n", g_dbInfos[0]->name); - int32_t totalNumOfThread = 1; // 0: all normal talbe into .tables.tmp.0 + int32_t totalNumOfThread = 1; // 0: all normal table into .tables.tmp.0 int normalTblFd = -1; int32_t retCode; int superTblCnt = 0 ; for (int i = 1; g_args.arg_list[i]; i++) { if (taosGetTableRecordInfo(g_args.arg_list[i], &tableRecordInfo, taos) < 0) { - errorPrint("input the invalide table %s\n", + errorPrint("input the invalid table %s\n", g_args.arg_list[i]); continue; } @@ -1341,11 +1341,10 @@ static int taosGetTableDes( return count; } - // if chidl-table have tag, using select tagName from table to get tagValue + // if child-table have tag, using select tagName from table to get tagValue for (int i = 0 ; i < count; i++) { if (strcmp(stableDes->cols[i].note, "TAG") != 0) continue; - sprintf(sqlstr, 
"select %s from %s.%s", stableDes->cols[i].field, dbName, table); @@ -2443,7 +2442,7 @@ static int taosGetFilesNum(const char *directoryName, } if (fileNum <= 0) { - errorPrint("directory:%s is empry\n", directoryName); + errorPrint("directory:%s is empty\n", directoryName); exit(-1); } @@ -2620,9 +2619,9 @@ static int taosDumpInOneFile(TAOS* taos, FILE* fp, char* fcharset, memcpy(cmd + cmd_len, line, read_len); cmd[read_len + cmd_len]= '\0'; if (queryDbImpl(taos, cmd)) { - errorPrint("%s() LN%d, error sql: linenu:%d, file:%s\n", + errorPrint("%s() LN%d, error sql: lineno:%d, file:%s\n", __func__, __LINE__, lineNo, fileName); - fprintf(g_fpOfResult, "error sql: linenu:%d, file:%s\n", lineNo, fileName); + fprintf(g_fpOfResult, "error sql: lineno:%d, file:%s\n", lineNo, fileName); } memset(cmd, 0, TSDB_MAX_ALLOWED_SQL_LEN); From b19ebda76b094e8c5968641e5702aea0a9f9a14d Mon Sep 17 00:00:00 2001 From: VULKAN <65299153+CTZxVULKAN@users.noreply.github.com> Date: Fri, 10 Sep 2021 03:34:03 +0000 Subject: [PATCH 45/60] Depriciated documentaion fix. Fixed depreciated documentation for timestamp resolutions in the English documentation. --- documentation20/en/12.taos-sql/docs.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/documentation20/en/12.taos-sql/docs.md b/documentation20/en/12.taos-sql/docs.md index 2b0b4f2814..e88d757cb0 100644 --- a/documentation20/en/12.taos-sql/docs.md +++ b/documentation20/en/12.taos-sql/docs.md @@ -37,7 +37,7 @@ With TDengine, the most important thing is timestamp. When creating and insertin - Epch Time: a timestamp value can also be a long integer representing milliseconds since 1970-01-01 08:00:00.000. - Arithmetic operations can be applied to timestamp. For example: now-2h represents a timestamp which is 2 hours ago from the current server time. Units include u( microsecond), a (milliseconds), s (seconds), m (minutes), h (hours), d (days), w (weeks). 
In `select * from t1 where ts > now-2w and ts <= now-1w`, which queries data of the whole week before two weeks. To specify the interval of down sampling, you can also use n(calendar month) and y(calendar year) as time units. -Default time precision of TDengine is millisecond, you can change it to microseocnd by setting parameter enableMicrosecond. +TDengiene's default timestamp is set to be accurate to a millisecond. Starting from `v2.1.5.0` precisions upto microseconds and nanoseconds are supported, this can be achieved by setting the Precision parameters passed during CREATE DATABASE. In TDengine, the following 10 data types can be used in data model of an ordinary table. From 41537a0e637f12b5197173343e90cc6a995836ce Mon Sep 17 00:00:00 2001 From: Ganlin Zhao <36554565+glzhao89@users.noreply.github.com> Date: Fri, 10 Sep 2021 13:31:19 +0800 Subject: [PATCH 46/60] Update docs.md --- documentation20/en/12.taos-sql/docs.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/documentation20/en/12.taos-sql/docs.md b/documentation20/en/12.taos-sql/docs.md index e88d757cb0..23d8ef69dc 100644 --- a/documentation20/en/12.taos-sql/docs.md +++ b/documentation20/en/12.taos-sql/docs.md @@ -37,7 +37,7 @@ With TDengine, the most important thing is timestamp. When creating and insertin - Epch Time: a timestamp value can also be a long integer representing milliseconds since 1970-01-01 08:00:00.000. - Arithmetic operations can be applied to timestamp. For example: now-2h represents a timestamp which is 2 hours ago from the current server time. Units include u( microsecond), a (milliseconds), s (seconds), m (minutes), h (hours), d (days), w (weeks). In `select * from t1 where ts > now-2w and ts <= now-1w`, which queries data of the whole week before two weeks. To specify the interval of down sampling, you can also use n(calendar month) and y(calendar year) as time units. -TDengiene's default timestamp is set to be accurate to a millisecond. 
Starting from `v2.1.5.0` precisions upto microseconds and nanoseconds are supported, this can be achieved by setting the Precision parameters passed during CREATE DATABASE. +TDengiene's default timestamp is set to be accurate to a millisecond. Starting from `v2.1.5.0` precisions upto microseconds and nanoseconds are supported, this can be achieved by setting the PRECISION parameters passed during CREATE DATABASE. In TDengine, the following 10 data types can be used in data model of an ordinary table. From 3c6d59cefc5660a2dacf5b525966f0ad2cbb28e8 Mon Sep 17 00:00:00 2001 From: Ganlin Zhao <36554565+glzhao89@users.noreply.github.com> Date: Fri, 10 Sep 2021 13:32:54 +0800 Subject: [PATCH 47/60] Update docs.md --- documentation20/en/12.taos-sql/docs.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/documentation20/en/12.taos-sql/docs.md b/documentation20/en/12.taos-sql/docs.md index 23d8ef69dc..7f1b46c7f0 100644 --- a/documentation20/en/12.taos-sql/docs.md +++ b/documentation20/en/12.taos-sql/docs.md @@ -37,7 +37,7 @@ With TDengine, the most important thing is timestamp. When creating and insertin - Epch Time: a timestamp value can also be a long integer representing milliseconds since 1970-01-01 08:00:00.000. - Arithmetic operations can be applied to timestamp. For example: now-2h represents a timestamp which is 2 hours ago from the current server time. Units include u( microsecond), a (milliseconds), s (seconds), m (minutes), h (hours), d (days), w (weeks). In `select * from t1 where ts > now-2w and ts <= now-1w`, which queries data of the whole week before two weeks. To specify the interval of down sampling, you can also use n(calendar month) and y(calendar year) as time units. -TDengiene's default timestamp is set to be accurate to a millisecond. Starting from `v2.1.5.0` precisions upto microseconds and nanoseconds are supported, this can be achieved by setting the PRECISION parameters passed during CREATE DATABASE. 
+TDengiene's default timestamp is set to be accurate to a millisecond. Starting from `v2.1.5.0` precisions upto microseconds and nanoseconds are supported, this can be achieved by setting the PRECISION parameter passed during CREATE DATABASE. In TDengine, the following 10 data types can be used in data model of an ordinary table. From 7152a19728f0c98992b99cc373bcee8daf6aaa2b Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 10 Sep 2021 14:19:50 +0800 Subject: [PATCH 48/60] [td-6563] refactor the group result merge function. --- src/client/src/tscServer.c | 5 +++ src/query/inc/qExecutor.h | 10 ++++- src/query/src/qExecutor.c | 5 ++- src/query/src/qUtil.c | 85 +++++++++++++++++++++++++++++++++++--- 4 files changed, 97 insertions(+), 8 deletions(-) diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c index 788574a837..3a57d333ad 100644 --- a/src/client/src/tscServer.c +++ b/src/client/src/tscServer.c @@ -1097,6 +1097,9 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) { pQueryMsg->tsBuf.tsOrder = htonl(pQueryInfo->tsBuf->tsOrder); pQueryMsg->tsBuf.tsLen = htonl(pQueryMsg->tsBuf.tsLen); pQueryMsg->tsBuf.tsNumOfBlocks = htonl(pQueryMsg->tsBuf.tsNumOfBlocks); + } else { + pQueryMsg->tsBuf.tsLen = 0; + pQueryMsg->tsBuf.tsNumOfBlocks = 0; } int32_t numOfOperator = (int32_t) taosArrayGetSize(queryOperator); @@ -1134,6 +1137,8 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) { pMsg += pUdfInfo->contLen; } + } else { + pQueryMsg->udfContentOffset = 0; } memcpy(pMsg, pSql->sqlstr, sqlLen); diff --git a/src/query/inc/qExecutor.h b/src/query/inc/qExecutor.h index 31db6492f6..19ca8e7ed8 100644 --- a/src/query/inc/qExecutor.h +++ b/src/query/inc/qExecutor.h @@ -86,11 +86,18 @@ typedef struct SResultRow { char *key; // start key of current result row } SResultRow; +typedef struct SResultRowCell { + uint64_t groupId; + SResultRow *pRow; +} SResultRowCell; + typedef struct SGroupResInfo { int32_t totalGroup; int32_t currentGroup; int32_t index; 
SArray* pRows; // SArray + bool ordered; + int32_t position; } SGroupResInfo; /** @@ -284,8 +291,9 @@ typedef struct SQueryRuntimeEnv { SDiskbasedResultBuf* pResultBuf; // query result buffer based on blocked-wised disk file SHashObj* pResultRowHashTable; // quick locate the window object for each result SHashObj* pResultRowListSet; // used to check if current ResultRowInfo has ResultRow object or not + SArray* pResultRowArrayList; // The array list that contains the Result rows char* keyBuf; // window key buffer - SResultRowPool* pool; // window result object pool + SResultRowPool* pool; // The window result objects pool, all the resultRow Objects are allocated and managed by this object. char** prevRow; SArray* prevResult; // intermediate result, SArray diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index ddc14f4cef..5ca2e58cc9 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -544,6 +544,8 @@ static SResultRow* doSetResultOutBufByKey(SQueryRuntimeEnv* pRuntimeEnv, SResult // add a new result set for a new group taosHashPut(pRuntimeEnv->pResultRowHashTable, pRuntimeEnv->keyBuf, GET_RES_WINDOW_KEY_LEN(bytes), &pResult, POINTER_BYTES); + SResultRowCell cell = {.groupId = tableGroupId, .pRow = pResult}; + taosArrayPush(pRuntimeEnv->pResultRowArrayList, &cell); } else { pResult = *p1; } @@ -2110,6 +2112,7 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int32_t numOf pRuntimeEnv->pResultRowListSet = taosHashInit(numOfTables * 10, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK); pRuntimeEnv->keyBuf = malloc(pQueryAttr->maxTableColumnWidth + sizeof(int64_t) + POINTER_BYTES); pRuntimeEnv->pool = initResultRowPool(getResultRowSize(pRuntimeEnv)); + pRuntimeEnv->pResultRowArrayList = taosArrayInit(numOfTables, sizeof(SResultRowCell)); pRuntimeEnv->prevRow = malloc(POINTER_BYTES * pQueryAttr->numOfCols + pQueryAttr->srcRowSize); pRuntimeEnv->tagVal = malloc(pQueryAttr->tagLen); @@ 
-6379,6 +6382,7 @@ static SSDataBlock* hashGroupbyAggregate(void* param, bool* newgroup) { if (!pRuntimeEnv->pQueryAttr->stableQuery) { sortGroupResByOrderList(&pRuntimeEnv->groupResInfo, pRuntimeEnv, pInfo->binfo.pRes); } + toSSDataBlock(&pRuntimeEnv->groupResInfo, pRuntimeEnv, pInfo->binfo.pRes); if (pInfo->binfo.pRes->info.rows == 0 || !hasRemainDataInCurrentGroup(&pRuntimeEnv->groupResInfo)) { @@ -8647,7 +8651,6 @@ int32_t initQInfo(STsBufInfo* pTsBufInfo, void* tsdb, void* sourceOptr, SQInfo* SArray* prevResult = NULL; if (prevResultLen > 0) { prevResult = interResFromBinary(param->prevResult, prevResultLen); - pRuntimeEnv->prevResult = prevResult; } diff --git a/src/query/src/qUtil.c b/src/query/src/qUtil.c index 539c292bb3..961b388c39 100644 --- a/src/query/src/qUtil.c +++ b/src/query/src/qUtil.c @@ -456,7 +456,79 @@ static int32_t tableResultComparFn(const void *pLeft, const void *pRight, void * } } -static int32_t mergeIntoGroupResultImpl(SQueryRuntimeEnv *pRuntimeEnv, SGroupResInfo* pGroupResInfo, SArray *pTableList, +int32_t tsAscOrder(const void* p1, const void* p2) { + SResultRowCell* pc1 = (SResultRowCell*) p1; + SResultRowCell* pc2 = (SResultRowCell*) p2; + + if (pc1->groupId == pc2->groupId) { + if (pc1->pRow->win.skey == pc2->pRow->win.skey) { + return 0; + } else { + return (pc1->pRow->win.skey < pc2->pRow->win.skey)? -1:1; + } + } else { + return (pc1->groupId < pc2->groupId)? -1:1; + } +} + +int32_t tsDescOrder(const void* p1, const void* p2) { + SResultRowCell* pc1 = (SResultRowCell*) p1; + SResultRowCell* pc2 = (SResultRowCell*) p2; + + if (pc1->groupId == pc2->groupId) { + if (pc1->pRow->win.skey == pc2->pRow->win.skey) { + return 0; + } else { + return (pc1->pRow->win.skey < pc2->pRow->win.skey)? 1:-1; + } + } else { + return (pc1->groupId < pc2->groupId)? 
-1:1; + } +} + +void orderTheResultRows(SQueryRuntimeEnv* pRuntimeEnv) { + __compar_fn_t fn = NULL; + if (pRuntimeEnv->pQueryAttr->order.order == TSDB_ORDER_ASC) { + fn = tsAscOrder; + } else { + fn = tsDescOrder; + } + + taosArraySort(pRuntimeEnv->pResultRowArrayList, fn); +} + +static int32_t mergeIntoGroupResultImplRv(SQueryRuntimeEnv *pRuntimeEnv, SGroupResInfo* pGroupResInfo, uint64_t groupId, int32_t* rowCellInfoOffset) { + if (!pGroupResInfo->ordered) { + orderTheResultRows(pRuntimeEnv); + pGroupResInfo->ordered = true; + } + + if (pGroupResInfo->pRows == NULL) { + pGroupResInfo->pRows = taosArrayInit(100, POINTER_BYTES); + } + + size_t len = taosArrayGetSize(pRuntimeEnv->pResultRowArrayList); + for(; pGroupResInfo->position < len; ++pGroupResInfo->position) { + + SResultRowCell* pResultRowCell = taosArrayGet(pRuntimeEnv->pResultRowArrayList, pGroupResInfo->position); + if (pResultRowCell->groupId != groupId) { + break; + } + + int64_t num = getNumOfResultWindowRes(pRuntimeEnv, pResultRowCell->pRow, rowCellInfoOffset); + if (num <= 0) { + continue; + } + + taosArrayPush(pGroupResInfo->pRows, &pResultRowCell->pRow); + pResultRowCell->pRow->numOfRows = (uint32_t) num; + + } + + return TSDB_CODE_SUCCESS; +} + +static UNUSED_FUNC int32_t mergeIntoGroupResultImpl(SQueryRuntimeEnv *pRuntimeEnv, SGroupResInfo* pGroupResInfo, SArray *pTableList, int32_t* rowCellInfoOffset) { bool ascQuery = QUERY_IS_ASC_QUERY(pRuntimeEnv->pQueryAttr); @@ -562,12 +634,13 @@ int32_t mergeIntoGroupResult(SGroupResInfo* pGroupResInfo, SQueryRuntimeEnv* pRu int64_t st = taosGetTimestampUs(); while (pGroupResInfo->currentGroup < pGroupResInfo->totalGroup) { - SArray *group = GET_TABLEGROUP(pRuntimeEnv, pGroupResInfo->currentGroup); +// SArray *group = GET_TABLEGROUP(pRuntimeEnv, pGroupResInfo->currentGroup); - int32_t ret = mergeIntoGroupResultImpl(pRuntimeEnv, pGroupResInfo, group, offset); - if (ret != TSDB_CODE_SUCCESS) { - return ret; - } + mergeIntoGroupResultImplRv(pRuntimeEnv, 
pGroupResInfo, pGroupResInfo->currentGroup, offset); +// int32_t ret = mergeIntoGroupResultImpl(pRuntimeEnv, pGroupResInfo, group, offset); +// if (ret != TSDB_CODE_SUCCESS) { +// return ret; +// } // this group generates at least one result, return results if (taosArrayGetSize(pGroupResInfo->pRows) > 0) { From df16603548353778a36e3bfebd7339391c3aa3e9 Mon Sep 17 00:00:00 2001 From: Ganlin Zhao <36554565+glzhao89@users.noreply.github.com> Date: Fri, 10 Sep 2021 14:24:42 +0800 Subject: [PATCH 49/60] Update docs.md --- documentation20/en/12.taos-sql/docs.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/documentation20/en/12.taos-sql/docs.md b/documentation20/en/12.taos-sql/docs.md index 7f1b46c7f0..c6b934277b 100644 --- a/documentation20/en/12.taos-sql/docs.md +++ b/documentation20/en/12.taos-sql/docs.md @@ -37,7 +37,7 @@ With TDengine, the most important thing is timestamp. When creating and insertin - Epch Time: a timestamp value can also be a long integer representing milliseconds since 1970-01-01 08:00:00.000. - Arithmetic operations can be applied to timestamp. For example: now-2h represents a timestamp which is 2 hours ago from the current server time. Units include u( microsecond), a (milliseconds), s (seconds), m (minutes), h (hours), d (days), w (weeks). In `select * from t1 where ts > now-2w and ts <= now-1w`, which queries data of the whole week before two weeks. To specify the interval of down sampling, you can also use n(calendar month) and y(calendar year) as time units. -TDengiene's default timestamp is set to be accurate to a millisecond. Starting from `v2.1.5.0` precisions upto microseconds and nanoseconds are supported, this can be achieved by setting the PRECISION parameter passed during CREATE DATABASE. +TDengine's timestamp is set to millisecond resolution by default. 
Microsecond/nanosecond resolutions can be set using CREATE DATABASE with PRECISION parameter.(Nanosecond resolution is supported from version 2.1.5.0 onwards) In TDengine, the following 10 data types can be used in data model of an ordinary table. From c6be1bb809536182f7d4f27c0d8267b3b25c9354 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 10 Sep 2021 14:26:32 +0800 Subject: [PATCH 50/60] [td-6563]fix memory leak problem. --- src/query/src/qExecutor.c | 1 + 1 file changed, 1 insertion(+) diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index 5ca2e58cc9..677add2c8d 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -2387,6 +2387,7 @@ static void teardownQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv) { pRuntimeEnv->pool = destroyResultRowPool(pRuntimeEnv->pool); taosArrayDestroyEx(pRuntimeEnv->prevResult, freeInterResult); + taosArrayDestroy(pRuntimeEnv->pResultRowArrayList); pRuntimeEnv->prevResult = NULL; } From 62707345458342907bc5e38602acc8f02350745c Mon Sep 17 00:00:00 2001 From: Elias Soong Date: Fri, 10 Sep 2021 14:30:01 +0800 Subject: [PATCH 51/60] Update docs.md --- documentation20/en/12.taos-sql/docs.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/documentation20/en/12.taos-sql/docs.md b/documentation20/en/12.taos-sql/docs.md index c6b934277b..7aaeb6c32b 100644 --- a/documentation20/en/12.taos-sql/docs.md +++ b/documentation20/en/12.taos-sql/docs.md @@ -37,7 +37,7 @@ With TDengine, the most important thing is timestamp. When creating and insertin - Epch Time: a timestamp value can also be a long integer representing milliseconds since 1970-01-01 08:00:00.000. - Arithmetic operations can be applied to timestamp. For example: now-2h represents a timestamp which is 2 hours ago from the current server time. Units include u( microsecond), a (milliseconds), s (seconds), m (minutes), h (hours), d (days), w (weeks). 
In `select * from t1 where ts > now-2w and ts <= now-1w`, which queries data of the whole week before two weeks. To specify the interval of down sampling, you can also use n(calendar month) and y(calendar year) as time units. -TDengine's timestamp is set to millisecond resolution by default. Microsecond/nanosecond resolutions can be set using CREATE DATABASE with PRECISION parameter.(Nanosecond resolution is supported from version 2.1.5.0 onwards) +TDengine's timestamp is set to millisecond accuracy by default. Microsecond/nanosecond accuracy can be set using CREATE DATABASE with PRECISION parameter. (Nanosecond resolution is supported from version 2.1.5.0 onwards.) In TDengine, the following 10 data types can be used in data model of an ordinary table. From 394896cb1d2c2e1c48dd876e8a57c507c355b54c Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 10 Sep 2021 15:42:38 +0800 Subject: [PATCH 52/60] [td-6563] --- src/client/src/tscServer.c | 3 +-- src/query/src/qExecutor.c | 1 - src/query/src/qUtil.c | 9 --------- 3 files changed, 1 insertion(+), 12 deletions(-) diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c index 3a57d333ad..c583e566b9 100644 --- a/src/client/src/tscServer.c +++ b/src/client/src/tscServer.c @@ -947,7 +947,6 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) { pQueryMsg->pointInterpQuery = query.pointInterpQuery; pQueryMsg->needReverseScan = query.needReverseScan; pQueryMsg->stateWindow = query.stateWindow; - pQueryMsg->numOfTags = htonl(numOfTags); pQueryMsg->sqlstrLen = htonl(sqlLen); pQueryMsg->sw.gap = htobe64(query.sw.gap); @@ -974,7 +973,7 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) { pQueryMsg->tableCols[i].type = htons(pCol->type); //pQueryMsg->tableCols[i].flist.numOfFilters = htons(pCol->flist.numOfFilters); pQueryMsg->tableCols[i].flist.numOfFilters = 0; - + pQueryMsg->tableCols[i].flist.filterInfo = 0; // append the filter information after the basic column information 
//serializeColFilterInfo(pCol->flist.filterInfo, pCol->flist.numOfFilters, &pMsg); } diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index 677add2c8d..cf5142d359 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -4812,7 +4812,6 @@ int32_t doInitQInfo(SQInfo* pQInfo, STSBuf* pTsBuf, void* tsdb, void* sourceOptr SQueryAttr *pQueryAttr = pQInfo->runtimeEnv.pQueryAttr; pQueryAttr->tsdb = tsdb; - if (tsdb != NULL) { int32_t code = setupQueryHandle(tsdb, pRuntimeEnv, pQInfo->qId, pQueryAttr->stableQuery); if (code != TSDB_CODE_SUCCESS) { diff --git a/src/query/src/qUtil.c b/src/query/src/qUtil.c index 961b388c39..bc27e094db 100644 --- a/src/query/src/qUtil.c +++ b/src/query/src/qUtil.c @@ -509,7 +509,6 @@ static int32_t mergeIntoGroupResultImplRv(SQueryRuntimeEnv *pRuntimeEnv, SGroupR size_t len = taosArrayGetSize(pRuntimeEnv->pResultRowArrayList); for(; pGroupResInfo->position < len; ++pGroupResInfo->position) { - SResultRowCell* pResultRowCell = taosArrayGet(pRuntimeEnv->pResultRowArrayList, pGroupResInfo->position); if (pResultRowCell->groupId != groupId) { break; @@ -522,7 +521,6 @@ static int32_t mergeIntoGroupResultImplRv(SQueryRuntimeEnv *pRuntimeEnv, SGroupR taosArrayPush(pGroupResInfo->pRows, &pResultRowCell->pRow); pResultRowCell->pRow->numOfRows = (uint32_t) num; - } return TSDB_CODE_SUCCESS; @@ -634,13 +632,7 @@ int32_t mergeIntoGroupResult(SGroupResInfo* pGroupResInfo, SQueryRuntimeEnv* pRu int64_t st = taosGetTimestampUs(); while (pGroupResInfo->currentGroup < pGroupResInfo->totalGroup) { -// SArray *group = GET_TABLEGROUP(pRuntimeEnv, pGroupResInfo->currentGroup); - mergeIntoGroupResultImplRv(pRuntimeEnv, pGroupResInfo, pGroupResInfo->currentGroup, offset); -// int32_t ret = mergeIntoGroupResultImpl(pRuntimeEnv, pGroupResInfo, group, offset); -// if (ret != TSDB_CODE_SUCCESS) { -// return ret; -// } // this group generates at least one result, return results if (taosArrayGetSize(pGroupResInfo->pRows) > 0) { @@ 
-656,7 +648,6 @@ int32_t mergeIntoGroupResult(SGroupResInfo* pGroupResInfo, SQueryRuntimeEnv* pRu qDebug("QInfo:%"PRIu64" merge res data into group, index:%d, total group:%d, elapsed time:%" PRId64 "us", GET_QID(pRuntimeEnv), pGroupResInfo->currentGroup, pGroupResInfo->totalGroup, elapsedTime); -// pQInfo->summary.firstStageMergeTime += elapsedTime; return TSDB_CODE_SUCCESS; } From d42ed2913890d844cf9852f15752c11adfb304a5 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Fri, 10 Sep 2021 15:45:49 +0800 Subject: [PATCH 53/60] Hotfix/sangshuduo/td 5872 taosdemo stmt improve (#7853) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [TD-5872]: taosdemo stmt improve. * refactor stmt functions. * [TD-5872]: taosdemo stmt csv perf improve. * rand func back to early impl. * fix windows/mac compile error. * fix empty tag sample. * [TD-5873]add stmt’performance taosdemo testcase * add data_type enum and stmt_batch framework. * use data type enum and fix test case limit/offset. * revert thread number. * rename MAX_SAMPLES_ONCE_FROM_FILE to reflect reality. * split func for stmt interlace. 
Co-authored-by: Shuduo Sang Co-authored-by: tomchon --- src/kit/taosdemo/taosdemo.c | 616 +++++++++++++++--- .../insertInterlaceRowsLarge1M.json | 2 +- 2 files changed, 528 insertions(+), 90 deletions(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 2358d666a9..ae9289e3b0 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -244,7 +244,7 @@ typedef struct SArguments_S { uint64_t insert_interval; uint64_t timestamp_step; int64_t query_times; - uint32_t interlace_rows; + uint32_t interlaceRows; uint32_t reqPerReq; // num_of_records_per_req uint64_t max_sql_len; int64_t ntables; @@ -451,14 +451,13 @@ typedef struct SQueryMetaInfo_S { typedef struct SThreadInfo_S { TAOS * taos; TAOS_STMT *stmt; + int64_t *bind_ts; #if STMT_BIND_PARAM_BATCH == 1 - int64_t *bind_ts; int64_t *bind_ts_array; char *bindParams; char *is_null; #else - int64_t *bind_ts; char* sampleBindArray; #endif @@ -607,8 +606,8 @@ char *g_rand_current_buff = NULL; char *g_rand_phase_buff = NULL; char *g_randdouble_buff = NULL; -char *g_aggreFunc[] = {"*", "count(*)", "avg(col0)", "sum(col0)", - "max(col0)", "min(col0)", "first(col0)", "last(col0)"}; +char *g_aggreFunc[] = {"*", "count(*)", "avg(C0)", "sum(C0)", + "max(C0)", "min(C0)", "first(C0)", "last(C0)"}; SArguments g_args = { NULL, // metaFile @@ -652,7 +651,7 @@ SArguments g_args = { 0, // insert_interval DEFAULT_TIMESTAMP_STEP, // timestamp_step 1, // query_times - DEFAULT_INTERLACE_ROWS, // interlace_rows; + DEFAULT_INTERLACE_ROWS, // interlaceRows; 30000, // reqPerReq (1024*1024), // max_sql_len DEFAULT_CHILDTABLES, // ntables @@ -1310,17 +1309,17 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { errorPrintReqArg2(argv[0], "B"); exit(EXIT_FAILURE); } - arguments->interlace_rows = atoi(argv[++i]); + arguments->interlaceRows = atoi(argv[++i]); } else if (0 == strncmp(argv[i], "--interlace-rows=", strlen("--interlace-rows="))) { if (isStringNumber((char *)(argv[i] + 
strlen("--interlace-rows=")))) { - arguments->interlace_rows = atoi((char *)(argv[i]+strlen("--interlace-rows="))); + arguments->interlaceRows = atoi((char *)(argv[i]+strlen("--interlace-rows="))); } else { errorPrintReqArg2(argv[0], "--interlace-rows"); exit(EXIT_FAILURE); } } else if (0 == strncmp(argv[i], "-B", strlen("-B"))) { if (isStringNumber((char *)(argv[i] + strlen("-B")))) { - arguments->interlace_rows = atoi((char *)(argv[i]+strlen("-B"))); + arguments->interlaceRows = atoi((char *)(argv[i]+strlen("-B"))); } else { errorPrintReqArg2(argv[0], "-B"); exit(EXIT_FAILURE); @@ -1333,7 +1332,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { errorPrintReqArg2(argv[0], "--interlace-rows"); exit(EXIT_FAILURE); } - arguments->interlace_rows = atoi(argv[++i]); + arguments->interlaceRows = atoi(argv[++i]); } else { errorUnrecognized(argv[0], argv[i]); exit(EXIT_FAILURE); @@ -4859,15 +4858,15 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { cJSON* interlaceRows = cJSON_GetObjectItem(root, "interlace_rows"); if (interlaceRows && interlaceRows->type == cJSON_Number) { if (interlaceRows->valueint < 0) { - errorPrint("%s", "failed to read json, interlace_rows input mistake\n"); + errorPrint("%s", "failed to read json, interlaceRows input mistake\n"); goto PARSE_OVER; } - g_args.interlace_rows = interlaceRows->valueint; + g_args.interlaceRows = interlaceRows->valueint; } else if (!interlaceRows) { - g_args.interlace_rows = 0; // 0 means progressive mode, > 0 mean interlace mode. max value is less or equ num_of_records_per_req + g_args.interlaceRows = 0; // 0 means progressive mode, > 0 mean interlace mode. 
max value is less or equ num_of_records_per_req } else { - errorPrint("%s", "failed to read json, interlace_rows input mistake\n"); + errorPrint("%s", "failed to read json, interlaceRows input mistake\n"); goto PARSE_OVER; } @@ -4929,13 +4928,13 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { } // rows per table need be less than insert batch - if (g_args.interlace_rows > g_args.reqPerReq) { + if (g_args.interlaceRows > g_args.reqPerReq) { printf("NOTICE: interlace rows value %u > num_of_records_per_req %u\n\n", - g_args.interlace_rows, g_args.reqPerReq); + g_args.interlaceRows, g_args.reqPerReq); printf(" interlace rows value will be set to num_of_records_per_req %u\n\n", g_args.reqPerReq); prompt(); - g_args.interlace_rows = g_args.reqPerReq; + g_args.interlaceRows = g_args.reqPerReq; } cJSON* dbs = cJSON_GetObjectItem(root, "databases"); @@ -8462,13 +8461,13 @@ static void printStatPerThread(threadInfo *pThreadInfo) ); } -// sync write interlace data -static void* syncWriteInterlace(threadInfo *pThreadInfo) { - debugPrint("[%d] %s() LN%d: ### interlace write\n", +#if STMT_BIND_PARAM_BATCH == 1 +// stmt sync write interlace data +static void* syncWriteInterlaceStmtBatch(threadInfo *pThreadInfo, uint32_t interlaceRows) { + debugPrint("[%d] %s() LN%d: ### stmt interlace write\n", pThreadInfo->threadID, __func__, __LINE__); int64_t insertRows; - uint32_t interlaceRows; uint64_t maxSqlLen; int64_t nTimeStampStep; uint64_t insert_interval; @@ -8477,19 +8476,11 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { if (stbInfo) { insertRows = stbInfo->insertRows; - - if ((stbInfo->interlaceRows == 0) - && (g_args.interlace_rows > 0)) { - interlaceRows = g_args.interlace_rows; - } else { - interlaceRows = stbInfo->interlaceRows; - } maxSqlLen = stbInfo->maxSqlLen; nTimeStampStep = stbInfo->timeStampStep; insert_interval = stbInfo->insertInterval; } else { insertRows = g_args.insertRows; - interlaceRows = g_args.interlace_rows; maxSqlLen = 
g_args.max_sql_len; nTimeStampStep = g_args.timestamp_step; insert_interval = g_args.insert_interval; @@ -8500,9 +8491,456 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { pThreadInfo->start_table_from, pThreadInfo->ntables, insertRows); - if (interlaceRows > insertRows) - interlaceRows = insertRows; + uint32_t batchPerTbl = interlaceRows; + uint32_t batchPerTblTimes; + if (interlaceRows > g_args.reqPerReq) + interlaceRows = g_args.reqPerReq; + + if ((interlaceRows > 0) && (pThreadInfo->ntables > 1)) { + batchPerTblTimes = + g_args.reqPerReq / interlaceRows; + } else { + batchPerTblTimes = 1; + } + + pThreadInfo->totalInsertRows = 0; + pThreadInfo->totalAffectedRows = 0; + + uint64_t st = 0; + uint64_t et = UINT64_MAX; + + uint64_t lastPrintTime = taosGetTimestampMs(); + uint64_t startTs = taosGetTimestampMs(); + uint64_t endTs; + + uint64_t tableSeq = pThreadInfo->start_table_from; + int64_t startTime = pThreadInfo->start_time; + + uint64_t generatedRecPerTbl = 0; + bool flagSleep = true; + uint64_t sleepTimeTotal = 0; + + int percentComplete = 0; + int64_t totalRows = insertRows * pThreadInfo->ntables; + + while(pThreadInfo->totalInsertRows < pThreadInfo->ntables * insertRows) { + if ((flagSleep) && (insert_interval)) { + st = taosGetTimestampMs(); + flagSleep = false; + } + + uint32_t recOfBatch = 0; + + int32_t generated; + for (uint64_t i = 0; i < batchPerTblTimes; i ++) { + char tableName[TSDB_TABLE_NAME_LEN]; + + getTableName(tableName, pThreadInfo, tableSeq); + if (0 == strlen(tableName)) { + errorPrint2("[%d] %s() LN%d, getTableName return null\n", + pThreadInfo->threadID, __func__, __LINE__); + return NULL; + } + + if (stbInfo) { + generated = prepareStbStmtWithSample( + pThreadInfo, + tableName, + tableSeq, + batchPerTbl, + insertRows, 0, + startTime, + &(pThreadInfo->samplePos)); + } else { + debugPrint("[%d] %s() LN%d, tableName:%s, batch:%d startTime:%"PRId64"\n", + pThreadInfo->threadID, + __func__, __LINE__, + tableName, batchPerTbl, 
startTime); + generated = prepareStmtWithoutStb( + pThreadInfo, + tableName, + batchPerTbl, + insertRows, i, + startTime); + } + + debugPrint("[%d] %s() LN%d, generated records is %d\n", + pThreadInfo->threadID, __func__, __LINE__, generated); + if (generated < 0) { + errorPrint2("[%d] %s() LN%d, generated records is %d\n", + pThreadInfo->threadID, __func__, __LINE__, generated); + goto free_of_interlace_stmt; + } else if (generated == 0) { + break; + } + + tableSeq ++; + recOfBatch += batchPerTbl; + + pThreadInfo->totalInsertRows += batchPerTbl; + + verbosePrint("[%d] %s() LN%d batchPerTbl=%d recOfBatch=%d\n", + pThreadInfo->threadID, __func__, __LINE__, + batchPerTbl, recOfBatch); + + if (tableSeq == pThreadInfo->start_table_from + pThreadInfo->ntables) { + // turn to first table + tableSeq = pThreadInfo->start_table_from; + generatedRecPerTbl += batchPerTbl; + + startTime = pThreadInfo->start_time + + generatedRecPerTbl * nTimeStampStep; + + flagSleep = true; + if (generatedRecPerTbl >= insertRows) + break; + + int64_t remainRows = insertRows - generatedRecPerTbl; + if ((remainRows > 0) && (batchPerTbl > remainRows)) + batchPerTbl = remainRows; + + if (pThreadInfo->ntables * batchPerTbl < g_args.reqPerReq) + break; + } + + verbosePrint("[%d] %s() LN%d generatedRecPerTbl=%"PRId64" insertRows=%"PRId64"\n", + pThreadInfo->threadID, __func__, __LINE__, + generatedRecPerTbl, insertRows); + + if ((g_args.reqPerReq - recOfBatch) < batchPerTbl) + break; + } + + verbosePrint("[%d] %s() LN%d recOfBatch=%d totalInsertRows=%"PRIu64"\n", + pThreadInfo->threadID, __func__, __LINE__, recOfBatch, + pThreadInfo->totalInsertRows); + + startTs = taosGetTimestampUs(); + + if (recOfBatch == 0) { + errorPrint2("[%d] %s() LN%d Failed to insert records of batch %d\n", + pThreadInfo->threadID, __func__, __LINE__, + batchPerTbl); + if (batchPerTbl > 0) { + errorPrint("\tIf the batch is %d, the length of the SQL to insert a row must be less then %"PRId64"\n", + batchPerTbl, maxSqlLen / 
batchPerTbl); + } + goto free_of_interlace_stmt; + } + int64_t affectedRows = execInsert(pThreadInfo, recOfBatch); + + endTs = taosGetTimestampUs(); + uint64_t delay = endTs - startTs; + performancePrint("%s() LN%d, insert execution time is %10.2f ms\n", + __func__, __LINE__, delay / 1000.0); + verbosePrint("[%d] %s() LN%d affectedRows=%"PRId64"\n", + pThreadInfo->threadID, + __func__, __LINE__, affectedRows); + + if (delay > pThreadInfo->maxDelay) pThreadInfo->maxDelay = delay; + if (delay < pThreadInfo->minDelay) pThreadInfo->minDelay = delay; + pThreadInfo->cntDelay++; + pThreadInfo->totalDelay += delay; + + if (recOfBatch != affectedRows) { + errorPrint2("[%d] %s() LN%d execInsert insert %d, affected rows: %"PRId64"\n\n", + pThreadInfo->threadID, __func__, __LINE__, + recOfBatch, affectedRows); + goto free_of_interlace_stmt; + } + + pThreadInfo->totalAffectedRows += affectedRows; + + int currentPercent = pThreadInfo->totalAffectedRows * 100 / totalRows; + if (currentPercent > percentComplete ) { + printf("[%d]:%d%%\n", pThreadInfo->threadID, currentPercent); + percentComplete = currentPercent; + } + int64_t currentPrintTime = taosGetTimestampMs(); + if (currentPrintTime - lastPrintTime > 30*1000) { + printf("thread[%d] has currently inserted rows: %"PRIu64 ", affected rows: %"PRIu64 "\n", + pThreadInfo->threadID, + pThreadInfo->totalInsertRows, + pThreadInfo->totalAffectedRows); + lastPrintTime = currentPrintTime; + } + + if ((insert_interval) && flagSleep) { + et = taosGetTimestampMs(); + + if (insert_interval > (et - st) ) { + uint64_t sleepTime = insert_interval - (et -st); + performancePrint("%s() LN%d sleep: %"PRId64" ms for insert interval\n", + __func__, __LINE__, sleepTime); + taosMsleep(sleepTime); // ms + sleepTimeTotal += insert_interval; + } + } + } + if (percentComplete < 100) + printf("[%d]:%d%%\n", pThreadInfo->threadID, percentComplete); + +free_of_interlace_stmt: + printStatPerThread(pThreadInfo); + return NULL; +} +#else +// stmt sync write 
interlace data +static void* syncWriteInterlaceStmt(threadInfo *pThreadInfo, uint32_t interlaceRows) { + debugPrint("[%d] %s() LN%d: ### stmt interlace write\n", + pThreadInfo->threadID, __func__, __LINE__); + + int64_t insertRows; + uint64_t maxSqlLen; + int64_t nTimeStampStep; + uint64_t insert_interval; + + SSuperTable* stbInfo = pThreadInfo->stbInfo; + + if (stbInfo) { + insertRows = stbInfo->insertRows; + maxSqlLen = stbInfo->maxSqlLen; + nTimeStampStep = stbInfo->timeStampStep; + insert_interval = stbInfo->insertInterval; + } else { + insertRows = g_args.insertRows; + maxSqlLen = g_args.max_sql_len; + nTimeStampStep = g_args.timestamp_step; + insert_interval = g_args.insert_interval; + } + + debugPrint("[%d] %s() LN%d: start_table_from=%"PRIu64" ntables=%"PRId64" insertRows=%"PRIu64"\n", + pThreadInfo->threadID, __func__, __LINE__, + pThreadInfo->start_table_from, + pThreadInfo->ntables, insertRows); + + uint32_t batchPerTbl = interlaceRows; + uint32_t batchPerTblTimes; + + if (interlaceRows > g_args.reqPerReq) + interlaceRows = g_args.reqPerReq; + + if ((interlaceRows > 0) && (pThreadInfo->ntables > 1)) { + batchPerTblTimes = + g_args.reqPerReq / interlaceRows; + } else { + batchPerTblTimes = 1; + } + + pThreadInfo->totalInsertRows = 0; + pThreadInfo->totalAffectedRows = 0; + + uint64_t st = 0; + uint64_t et = UINT64_MAX; + + uint64_t lastPrintTime = taosGetTimestampMs(); + uint64_t startTs = taosGetTimestampMs(); + uint64_t endTs; + + uint64_t tableSeq = pThreadInfo->start_table_from; + int64_t startTime = pThreadInfo->start_time; + + uint64_t generatedRecPerTbl = 0; + bool flagSleep = true; + uint64_t sleepTimeTotal = 0; + + int percentComplete = 0; + int64_t totalRows = insertRows * pThreadInfo->ntables; + + while(pThreadInfo->totalInsertRows < pThreadInfo->ntables * insertRows) { + if ((flagSleep) && (insert_interval)) { + st = taosGetTimestampMs(); + flagSleep = false; + } + + uint32_t recOfBatch = 0; + + int32_t generated; + for (uint64_t i = 0; i < 
batchPerTblTimes; i ++) { + char tableName[TSDB_TABLE_NAME_LEN]; + + getTableName(tableName, pThreadInfo, tableSeq); + if (0 == strlen(tableName)) { + errorPrint2("[%d] %s() LN%d, getTableName return null\n", + pThreadInfo->threadID, __func__, __LINE__); + return NULL; + } + + if (stbInfo) { + generated = prepareStbStmtWithSample( + pThreadInfo, + tableName, + tableSeq, + batchPerTbl, + insertRows, 0, + startTime, + &(pThreadInfo->samplePos)); + } else { + debugPrint("[%d] %s() LN%d, tableName:%s, batch:%d startTime:%"PRId64"\n", + pThreadInfo->threadID, + __func__, __LINE__, + tableName, batchPerTbl, startTime); + generated = prepareStmtWithoutStb( + pThreadInfo, + tableName, + batchPerTbl, + insertRows, i, + startTime); + } + + debugPrint("[%d] %s() LN%d, generated records is %d\n", + pThreadInfo->threadID, __func__, __LINE__, generated); + if (generated < 0) { + errorPrint2("[%d] %s() LN%d, generated records is %d\n", + pThreadInfo->threadID, __func__, __LINE__, generated); + goto free_of_interlace_stmt; + } else if (generated == 0) { + break; + } + + tableSeq ++; + recOfBatch += batchPerTbl; + + pThreadInfo->totalInsertRows += batchPerTbl; + + verbosePrint("[%d] %s() LN%d batchPerTbl=%d recOfBatch=%d\n", + pThreadInfo->threadID, __func__, __LINE__, + batchPerTbl, recOfBatch); + + if (tableSeq == pThreadInfo->start_table_from + pThreadInfo->ntables) { + // turn to first table + tableSeq = pThreadInfo->start_table_from; + generatedRecPerTbl += batchPerTbl; + + startTime = pThreadInfo->start_time + + generatedRecPerTbl * nTimeStampStep; + + flagSleep = true; + if (generatedRecPerTbl >= insertRows) + break; + + int64_t remainRows = insertRows - generatedRecPerTbl; + if ((remainRows > 0) && (batchPerTbl > remainRows)) + batchPerTbl = remainRows; + + if (pThreadInfo->ntables * batchPerTbl < g_args.reqPerReq) + break; + } + + verbosePrint("[%d] %s() LN%d generatedRecPerTbl=%"PRId64" insertRows=%"PRId64"\n", + pThreadInfo->threadID, __func__, __LINE__, + 
generatedRecPerTbl, insertRows); + + if ((g_args.reqPerReq - recOfBatch) < batchPerTbl) + break; + } + + verbosePrint("[%d] %s() LN%d recOfBatch=%d totalInsertRows=%"PRIu64"\n", + pThreadInfo->threadID, __func__, __LINE__, recOfBatch, + pThreadInfo->totalInsertRows); + + startTs = taosGetTimestampUs(); + + if (recOfBatch == 0) { + errorPrint2("[%d] %s() LN%d Failed to insert records of batch %d\n", + pThreadInfo->threadID, __func__, __LINE__, + batchPerTbl); + if (batchPerTbl > 0) { + errorPrint("\tIf the batch is %d, the length of the SQL to insert a row must be less then %"PRId64"\n", + batchPerTbl, maxSqlLen / batchPerTbl); + } + goto free_of_interlace_stmt; + } + int64_t affectedRows = execInsert(pThreadInfo, recOfBatch); + + endTs = taosGetTimestampUs(); + uint64_t delay = endTs - startTs; + performancePrint("%s() LN%d, insert execution time is %10.2f ms\n", + __func__, __LINE__, delay / 1000.0); + verbosePrint("[%d] %s() LN%d affectedRows=%"PRId64"\n", + pThreadInfo->threadID, + __func__, __LINE__, affectedRows); + + if (delay > pThreadInfo->maxDelay) pThreadInfo->maxDelay = delay; + if (delay < pThreadInfo->minDelay) pThreadInfo->minDelay = delay; + pThreadInfo->cntDelay++; + pThreadInfo->totalDelay += delay; + + if (recOfBatch != affectedRows) { + errorPrint2("[%d] %s() LN%d execInsert insert %d, affected rows: %"PRId64"\n\n", + pThreadInfo->threadID, __func__, __LINE__, + recOfBatch, affectedRows); + goto free_of_interlace_stmt; + } + + pThreadInfo->totalAffectedRows += affectedRows; + + int currentPercent = pThreadInfo->totalAffectedRows * 100 / totalRows; + if (currentPercent > percentComplete ) { + printf("[%d]:%d%%\n", pThreadInfo->threadID, currentPercent); + percentComplete = currentPercent; + } + int64_t currentPrintTime = taosGetTimestampMs(); + if (currentPrintTime - lastPrintTime > 30*1000) { + printf("thread[%d] has currently inserted rows: %"PRIu64 ", affected rows: %"PRIu64 "\n", + pThreadInfo->threadID, + pThreadInfo->totalInsertRows, + 
pThreadInfo->totalAffectedRows); + lastPrintTime = currentPrintTime; + } + + if ((insert_interval) && flagSleep) { + et = taosGetTimestampMs(); + + if (insert_interval > (et - st) ) { + uint64_t sleepTime = insert_interval - (et -st); + performancePrint("%s() LN%d sleep: %"PRId64" ms for insert interval\n", + __func__, __LINE__, sleepTime); + taosMsleep(sleepTime); // ms + sleepTimeTotal += insert_interval; + } + } + } + if (percentComplete < 100) + printf("[%d]:%d%%\n", pThreadInfo->threadID, percentComplete); + +free_of_interlace_stmt: + printStatPerThread(pThreadInfo); + return NULL; +} + +#endif + +// sync write interlace data +static void* syncWriteInterlace(threadInfo *pThreadInfo, uint32_t interlaceRows) { + debugPrint("[%d] %s() LN%d: ### interlace write\n", + pThreadInfo->threadID, __func__, __LINE__); + + int64_t insertRows; + uint64_t maxSqlLen; + int64_t nTimeStampStep; + uint64_t insert_interval; + + SSuperTable* stbInfo = pThreadInfo->stbInfo; + + if (stbInfo) { + insertRows = stbInfo->insertRows; + maxSqlLen = stbInfo->maxSqlLen; + nTimeStampStep = stbInfo->timeStampStep; + insert_interval = stbInfo->insertInterval; + } else { + insertRows = g_args.insertRows; + maxSqlLen = g_args.max_sql_len; + nTimeStampStep = g_args.timestamp_step; + insert_interval = g_args.insert_interval; + } + + debugPrint("[%d] %s() LN%d: start_table_from=%"PRIu64" ntables=%"PRId64" insertRows=%"PRIu64"\n", + pThreadInfo->threadID, __func__, __LINE__, + pThreadInfo->start_table_from, + pThreadInfo->ntables, insertRows); +#if 1 if (interlaceRows > g_args.reqPerReq) interlaceRows = g_args.reqPerReq; @@ -8515,7 +8953,22 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { } else { batchPerTblTimes = 1; } +#else + uint32_t batchPerTbl; + if (interlaceRows > g_args.reqPerReq) + batchPerTbl = g_args.reqPerReq; + else + batchPerTbl = interlaceRows; + uint32_t batchPerTblTimes; + + if ((interlaceRows > 0) && (pThreadInfo->ntables > 1)) { + batchPerTblTimes = + interlaceRows 
/ batchPerTbl; + } else { + batchPerTblTimes = 1; + } +#endif pThreadInfo->buffer = calloc(maxSqlLen, 1); if (NULL == pThreadInfo->buffer) { errorPrint2( "%s() LN%d, Failed to alloc %"PRIu64" Bytes, reason:%s\n", @@ -8548,6 +9001,7 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { st = taosGetTimestampMs(); flagSleep = false; } + // generate data memset(pThreadInfo->buffer, 0, maxSqlLen); uint64_t remainderBufLen = maxSqlLen; @@ -8576,47 +9030,23 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { uint64_t oldRemainderLen = remainderBufLen; if (stbInfo) { - if (stbInfo->iface == STMT_IFACE) { - generated = prepareStbStmtWithSample( - pThreadInfo, - tableName, - tableSeq, - batchPerTbl, - insertRows, 0, - startTime, - &(pThreadInfo->samplePos)); - } else { - generated = generateStbInterlaceData( - pThreadInfo, - tableName, batchPerTbl, i, - batchPerTblTimes, - tableSeq, - pstr, - insertRows, - startTime, - &remainderBufLen); - } + generated = generateStbInterlaceData( + pThreadInfo, + tableName, batchPerTbl, i, + batchPerTblTimes, + tableSeq, + pstr, + insertRows, + startTime, + &remainderBufLen); } else { - if (g_args.iface == STMT_IFACE) { - debugPrint("[%d] %s() LN%d, tableName:%s, batch:%d startTime:%"PRId64"\n", - pThreadInfo->threadID, - __func__, __LINE__, - tableName, batchPerTbl, startTime); - generated = prepareStmtWithoutStb( - pThreadInfo, - tableName, - batchPerTbl, - insertRows, i, - startTime); - } else { - generated = generateInterlaceDataWithoutStb( - tableName, batchPerTbl, - tableSeq, - pThreadInfo->db_name, pstr, - insertRows, - startTime, - &remainderBufLen); - } + generated = generateInterlaceDataWithoutStb( + tableName, batchPerTbl, + tableSeq, + pThreadInfo->db_name, pstr, + insertRows, + startTime, + &remainderBufLen); } debugPrint("[%d] %s() LN%d, generated records is %d\n", @@ -8932,23 +9362,29 @@ static void* syncWrite(void *sarg) { setThreadName("syncWrite"); - uint32_t interlaceRows; + uint32_t interlaceRows = 0; 
if (stbInfo) { - if ((stbInfo->interlaceRows == 0) - && (g_args.interlace_rows > 0)) { - interlaceRows = g_args.interlace_rows; - } else { + if (stbInfo->interlaceRows < stbInfo->insertRows) interlaceRows = stbInfo->interlaceRows; - } } else { - interlaceRows = g_args.interlace_rows; + if (g_args.interlaceRows < g_args.insertRows) + interlaceRows = g_args.interlaceRows; } if (interlaceRows > 0) { // interlace mode - return syncWriteInterlace(pThreadInfo); - } else { + if (((stbInfo) && (STMT_IFACE == stbInfo->iface)) + || (STMT_IFACE == g_args.iface)) { +#if STMT_BIND_PARAM_BATCH == 1 + return syncWriteInterlaceStmtBatch(pThreadInfo, interlaceRows); +#else + return syncWriteInterlaceStmt(pThreadInfo, interlaceRows); +#endif + } else { + return syncWriteInterlace(pThreadInfo, interlaceRows); + } + }else { // progressive mode return syncWriteProgressive(pThreadInfo); } @@ -9231,22 +9667,25 @@ static void startMultiThreadInsertData(int threads, char* db_name, assert(stmtBuffer); #if STMT_BIND_PARAM_BATCH == 1 - uint32_t interlaceRows; + uint32_t interlaceRows = 0; uint32_t batch; if (stbInfo) { if ((stbInfo->interlaceRows == 0) - && (g_args.interlace_rows > 0)) { - interlaceRows = g_args.interlace_rows; + && (g_args.interlaceRows > 0) + ) { + interlaceRows = g_args.interlaceRows; - if (interlaceRows > stbInfo->insertRows) { - interlaceRows = stbInfo->insertRows; - } } else { interlaceRows = stbInfo->interlaceRows; } + + if (interlaceRows > stbInfo->insertRows) { + interlaceRows = 0; + } } else { - interlaceRows = g_args.interlace_rows; + if (g_args.interlaceRows < g_args.insertRows) + interlaceRows = g_args.interlaceRows; } if (interlaceRows > 0) { @@ -9408,13 +9847,12 @@ static void startMultiThreadInsertData(int threads, char* db_name, taos_stmt_close(pThreadInfo->stmt); } -#if STMT_BIND_PARAM_BATCH == 1 tmfree((char *)pThreadInfo->bind_ts); +#if STMT_BIND_PARAM_BATCH == 1 tmfree((char *)pThreadInfo->bind_ts_array); tmfree(pThreadInfo->bindParams); 
tmfree(pThreadInfo->is_null); #else - tmfree((char *)pThreadInfo->bind_ts); if (pThreadInfo->sampleBindArray) { for (int k = 0; k < MAX_SAMPLES; k++) { uintptr_t *tmp = (uintptr_t *)(*(uintptr_t *)( diff --git a/tests/pytest/tools/taosdemoAllTest/insertInterlaceRowsLarge1M.json b/tests/pytest/tools/taosdemoAllTest/insertInterlaceRowsLarge1M.json index 1b56830189..197f8a208e 100644 --- a/tests/pytest/tools/taosdemoAllTest/insertInterlaceRowsLarge1M.json +++ b/tests/pytest/tools/taosdemoAllTest/insertInterlaceRowsLarge1M.json @@ -41,7 +41,7 @@ "batch_create_tbl_num": 10, "data_source": "rand", "insert_mode": "taosc", - "insert_rows": 1000, + "insert_rows": 1001, "childtable_limit": 0, "childtable_offset":0, "multi_thread_write_one_tbl": "no", From 6c1eb1ee92f5849463c5e65985b6a1c01a158692 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sat, 11 Sep 2021 15:07:50 +0800 Subject: [PATCH 54/60] [td-6563]fix compiler error. --- src/client/src/tscUtil.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index 6a75ff5f09..2bd601d812 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -2085,7 +2085,7 @@ int32_t tscAllocPayloadFast(SSqlCmd *pCmd, size_t size) { assert(pCmd->allocSize == 0); pCmd->payload = malloc(size); - pCmd->allocSize = size; + pCmd->allocSize = (uint32_t) size; } else if (pCmd->allocSize < size) { char* tmp = realloc(pCmd->payload, size); if (tmp == NULL) { @@ -2093,7 +2093,7 @@ int32_t tscAllocPayloadFast(SSqlCmd *pCmd, size_t size) { } pCmd->payload = tmp; - pCmd->allocSize = size; + pCmd->allocSize = (uint32_t) size; } assert(pCmd->allocSize >= size); From c90d48a763548d040a22fea244985e2110d7b817 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sat, 11 Sep 2021 15:45:01 +0800 Subject: [PATCH 55/60] [td-6563]set the value to be 0 if the corresponding attributes are missing in query message. 
--- src/client/src/tscServer.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c index c583e566b9..e23cd88bb7 100644 --- a/src/client/src/tscServer.c +++ b/src/client/src/tscServer.c @@ -1067,6 +1067,8 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) { pMsg += pCond->len; } + } else { + pQueryMsg->tagCondLen = 0; } if (pQueryInfo->bufLen > 0) { @@ -1138,6 +1140,7 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) { } } else { pQueryMsg->udfContentOffset = 0; + pQueryMsg->udfContentLen = 0; } memcpy(pMsg, pSql->sqlstr, sqlLen); From e9e5dc22f755af9d1f12801b001949c239c66175 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Sat, 11 Sep 2021 17:56:33 +0800 Subject: [PATCH 56/60] Hotfix/sangshuduo/td 5872 taosdemo stmt improve (#7872) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [TD-5872]: taosdemo stmt improve. * refactor stmt functions. * [TD-5872]: taosdemo stmt csv perf improve. * rand func back to early impl. * fix windows/mac compile error. * fix empty tag sample. * [TD-5873]add stmt’performance taosdemo testcase * add data_type enum and stmt_batch framework. * use data type enum and fix test case limit/offset. * revert thread number. * rename MAX_SAMPLES_ONCE_FROM_FILE to reflect reality. * split func for stmt interlace. * fix bug that get build path. 
Co-authored-by: Shuduo Sang Co-authored-by: tomchon --- .../taosdemoAllTest/NanoTestCase/taosdumpTestNanoSupport.py | 2 +- tests/pytest/tools/taosdemoPerformance.py | 2 +- tests/pytest/tools/taosdemoTest.py | 2 +- tests/pytest/tools/taosdumpTest2.py | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdumpTestNanoSupport.py b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdumpTestNanoSupport.py index ca8832170b..a2059ec924 100644 --- a/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdumpTestNanoSupport.py +++ b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdumpTestNanoSupport.py @@ -44,7 +44,7 @@ class TDTestCase: projPath = selfPath[:selfPath.find("tests")] for root, dirs, files in os.walk(projPath): - if ("taosdump" in files): + if ("taosd" in files): rootRealPath = os.path.dirname(os.path.realpath(root)) if ("packaging" not in rootRealPath): buildPath = root[:len(root) - len("/build/bin")] diff --git a/tests/pytest/tools/taosdemoPerformance.py b/tests/pytest/tools/taosdemoPerformance.py index 51b064a08e..82c57a656d 100644 --- a/tests/pytest/tools/taosdemoPerformance.py +++ b/tests/pytest/tools/taosdemoPerformance.py @@ -120,7 +120,7 @@ class taosdemoPerformace: projPath = selfPath[:selfPath.find("tests")] for root, dirs, files in os.walk(projPath): - if ("taosdemo" in files): + if ("taosd" in files): rootRealPath = os.path.dirname(os.path.realpath(root)) if ("packaging" not in rootRealPath): buildPath = root[:len(root) - len("/build/bin")] diff --git a/tests/pytest/tools/taosdemoTest.py b/tests/pytest/tools/taosdemoTest.py index 5662881031..3cdcdcef5a 100644 --- a/tests/pytest/tools/taosdemoTest.py +++ b/tests/pytest/tools/taosdemoTest.py @@ -36,7 +36,7 @@ class TDTestCase: projPath = selfPath[:selfPath.find("tests")] for root, dirs, files in os.walk(projPath): - if ("taosdemo" in files): + if ("taosd" in files): rootRealPath = os.path.dirname(os.path.realpath(root)) if 
("packaging" not in rootRealPath): buildPath = root[:len(root) - len("/build/bin")] diff --git a/tests/pytest/tools/taosdumpTest2.py b/tests/pytest/tools/taosdumpTest2.py index bed0564139..839988375b 100644 --- a/tests/pytest/tools/taosdumpTest2.py +++ b/tests/pytest/tools/taosdumpTest2.py @@ -37,7 +37,7 @@ class TDTestCase: projPath = selfPath[:selfPath.find("tests")] for root, dirs, files in os.walk(projPath): - if ("taosdump" in files): + if ("taosd" in files): rootRealPath = os.path.dirname(os.path.realpath(root)) if ("packaging" not in rootRealPath): buildPath = root[:len(root) - len("/build/bin")] From 52e92cd3edcbadd157f09d0a467be77b3a09d53a Mon Sep 17 00:00:00 2001 From: xywang Date: Sun, 12 Sep 2021 02:46:36 +0800 Subject: [PATCH 57/60] [TD-6416]: fixed memory leak and crash bug --- src/client/src/tscAsync.c | 10 +++++++++- src/plugins/http/src/httpHandle.c | 1 + src/plugins/http/src/httpParser.c | 4 ---- src/plugins/http/src/httpResp.c | 2 ++ src/plugins/http/src/httpServer.c | 8 +++++--- src/plugins/http/src/httpSql.c | 1 - 6 files changed, 17 insertions(+), 9 deletions(-) diff --git a/src/client/src/tscAsync.c b/src/client/src/tscAsync.c index 6b12cd0da0..4a621d47c0 100644 --- a/src/client/src/tscAsync.c +++ b/src/client/src/tscAsync.c @@ -60,17 +60,25 @@ void doAsyncQuery(STscObj* pObj, SSqlObj* pSql, __async_cb_func_t fp, void* para tscDebugL("0x%"PRIx64" SQL: %s", pSql->self, pSql->sqlstr); pCmd->resColumnId = TSDB_RES_COL_ID; + taosAcquireRef(tscObjRef, pSql->self); + int32_t code = tsParseSql(pSql, true); - if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) return; + + if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) { + taosReleaseRef(tscObjRef, pSql->self); + return; + } if (code != TSDB_CODE_SUCCESS) { pSql->res.code = code; tscAsyncResultOnError(pSql); + taosReleaseRef(tscObjRef, pSql->self); return; } SQueryInfo* pQueryInfo = tscGetQueryInfo(pCmd); executeQuery(pSql, pQueryInfo); + taosReleaseRef(tscObjRef, pSql->self); } // TODO return the correct 
error code to client in tscQueueAsyncError diff --git a/src/plugins/http/src/httpHandle.c b/src/plugins/http/src/httpHandle.c index d51c774ff2..9719d93824 100644 --- a/src/plugins/http/src/httpHandle.c +++ b/src/plugins/http/src/httpHandle.c @@ -35,6 +35,7 @@ bool httpProcessData(HttpContext* pContext) { if (!httpAlterContextState(pContext, HTTP_CONTEXT_STATE_READY, HTTP_CONTEXT_STATE_HANDLING)) { httpTrace("context:%p, fd:%d, state:%s not in ready state, stop process request", pContext, pContext->fd, httpContextStateStr(pContext->state)); + pContext->error = true; httpCloseContextByApp(pContext); return false; } diff --git a/src/plugins/http/src/httpParser.c b/src/plugins/http/src/httpParser.c index 62b1737f6f..7066f19769 100644 --- a/src/plugins/http/src/httpParser.c +++ b/src/plugins/http/src/httpParser.c @@ -1157,10 +1157,6 @@ static int32_t httpParseChar(HttpParser *parser, const char c, int32_t *again) { httpOnError(parser, HTTP_CODE_INTERNAL_SERVER_ERROR, TSDB_CODE_HTTP_PARSE_ERROR_STATE); } - if (ok != 0) { - pContext->error = true; - } - return ok; } diff --git a/src/plugins/http/src/httpResp.c b/src/plugins/http/src/httpResp.c index 79e728dd45..1d05b455cb 100644 --- a/src/plugins/http/src/httpResp.c +++ b/src/plugins/http/src/httpResp.c @@ -147,6 +147,8 @@ void httpSendErrorResp(HttpContext *pContext, int32_t errNo) { httpCode = pContext->parser->httpCode; } + pContext->error = true; + char *httpCodeStr = httpGetStatusDesc(httpCode); httpSendErrorRespImp(pContext, httpCode, httpCodeStr, errNo & 0XFFFF, tstrerror(errNo)); } diff --git a/src/plugins/http/src/httpServer.c b/src/plugins/http/src/httpServer.c index f02859f165..13a0835c39 100644 --- a/src/plugins/http/src/httpServer.c +++ b/src/plugins/http/src/httpServer.c @@ -191,8 +191,6 @@ static void httpProcessHttpData(void *param) { if (httpReadData(pContext)) { (*(pThread->processData))(pContext); atomic_fetch_add_32(&pServer->requestNum, 1); - } else { - httpReleaseContext(pContext/*, false*/); } } } 
@@ -402,13 +400,17 @@ static bool httpReadData(HttpContext *pContext) { } else if (nread < 0) { if (errno == EINTR || errno == EAGAIN || errno == EWOULDBLOCK) { httpDebug("context:%p, fd:%d, read from socket error:%d, wait another event", pContext, pContext->fd, errno); - return false; // later again + continue; // later again } else { httpError("context:%p, fd:%d, read from socket error:%d, close connect", pContext, pContext->fd, errno); + taosCloseSocket(pContext->fd); + httpReleaseContext(pContext/*, false */); return false; } } else { httpError("context:%p, fd:%d, nread:%d, wait another event", pContext, pContext->fd, nread); + taosCloseSocket(pContext->fd); + httpReleaseContext(pContext/*, false */); return false; } } diff --git a/src/plugins/http/src/httpSql.c b/src/plugins/http/src/httpSql.c index 0dd451f72d..602767a656 100644 --- a/src/plugins/http/src/httpSql.c +++ b/src/plugins/http/src/httpSql.c @@ -405,7 +405,6 @@ void httpProcessRequestCb(void *param, TAOS_RES *result, int32_t code) { if (pContext->session == NULL) { httpSendErrorResp(pContext, TSDB_CODE_HTTP_SESSION_FULL); - httpCloseContextByApp(pContext); } else { httpExecCmd(pContext); } From 54214fa4699a01f2fcf752cfd5929ba55a04308d Mon Sep 17 00:00:00 2001 From: Elias Soong Date: Sun, 12 Sep 2021 16:44:56 +0800 Subject: [PATCH 58/60] [TD-6404] : describe Schemaless Insert. 
--- documentation20/cn/05.insert/docs.md | 97 +++++++++++++++++++++---- documentation20/cn/08.connector/docs.md | 19 +++++ 2 files changed, 100 insertions(+), 16 deletions(-) diff --git a/documentation20/cn/05.insert/docs.md b/documentation20/cn/05.insert/docs.md index f055b0c25b..92b60be27c 100644 --- a/documentation20/cn/05.insert/docs.md +++ b/documentation20/cn/05.insert/docs.md @@ -2,7 +2,7 @@ TDengine支持多种接口写入数据,包括SQL, Prometheus, Telegraf, EMQ MQTT Broker, HiveMQ Broker, CSV文件等,后续还将提供Kafka, OPC等接口。数据可以单条插入,也可以批量插入,可以插入一个数据采集点的数据,也可以同时插入多个数据采集点的数据。支持多线程插入,支持时间乱序数据插入,也支持历史数据插入。 -## SQL写入 +## SQL 写入 应用通过C/C++、JDBC、GO、C#或Python Connector 执行SQL insert语句来插入数据,用户还可以通过TAOS Shell,手动输入SQL insert语句插入数据。比如下面这条insert 就将一条记录写入到表d1001中: ```mysql @@ -27,11 +27,73 @@ INSERT INTO d1001 VALUES (1538548685000, 10.3, 219, 0.31) (1538548695000, 12.6, - 对同一张表,如果新插入记录的时间戳已经存在,默认情形下(UPDATE=0)新记录将被直接抛弃,也就是说,在一张表里,时间戳必须是唯一的。如果应用自动生成记录,很有可能生成的时间戳是一样的,这样,成功插入的记录条数会小于应用插入的记录条数。如果在创建数据库时使用了 UPDATE 1 选项,插入相同时间戳的新记录将覆盖原有记录。 - 写入的数据的时间戳必须大于当前时间减去配置参数keep的时间。如果keep配置为3650天,那么无法写入比3650天还早的数据。写入数据的时间戳也不能大于当前时间加配置参数days。如果days为2,那么无法写入比当前时间还晚2天的数据。 -## Prometheus直接写入 +## Schemaless 写入 + +在物联网应用中,常会采集比较多的数据项,用于实现智能控制、业务分析、设备监控等。由于应用逻辑的版本升级,或者设备自身的硬件调整等原因,数据采集项就有可能比较频繁地出现变动。为了在这种情况下方便地完成数据记录工作,TDengine 从 2.2.0.0 版本开始,提供 Schemaless 写入方式,可以免于预先创建超级表/数据子表,而是随着数据写入,自动创建与数据对应的存储结构。并且在必要时,Schemaless 将自动增加必要的数据列,保证用户写入的数据可以被正确存储。目前,TDengine 的 C/C++ Connector 提供支持 Schemaless 的操作接口,详情请参见 [Schemaless 方式写入接口](https://www.taosdata.com/cn/documentation/connector#schemaless) 章节。这里对 Schemaless 的数据表达格式进行描述。 + +### Schemaless 数据行协议 + +Schemaless 采用一个字符串来表达最终存储的一个数据行(可以向 Schemaless 写入 API 中一次传入多个字符串来实现多个数据行的批量写入),其格式约定如下: +```json +measurement,tag_set field_set timestamp +``` + +其中, +* measurement 将作为数据表名。它与 tag_set 之间使用一个英文逗号来分隔。 +* tag_set 将作为标签数据,其格式形如 `=,=`,也即可以使用英文逗号来分隔多个标签数据。它与 field_set 之间使用一个半角空格来分隔。 +* field_set 将作为普通列数据,其格式形如 `=,=`,同样是使用英文逗号来分隔多个普通列的数据。它与 timestamp 之间使用一个半角空格来分隔。 +* timestamp 
即本行数据对应的主键时间戳。 + +在 Schemaless 的数据行协议中,tag_set、field_set 中的每个数据项都需要对自身的数据类型进行描述。具体来说: +* 如果两边有英文双引号,表示 BINARY(32) 类型。例如 `"abc"`。 +* 如果两边有英文双引号而且带有 L 前缀,表示 NCHAR(32) 类型。例如 `L"报错信息"`。 +* 对空格、等号(=)、逗号(,)、双引号("),前面需要使用反斜杠(\)进行转义。(都指的是英文半角符号) +* 数值类型将通过后缀来区分数据类型: + - 没有后缀,为 FLOAT 类型; + - 后缀为 f32,为 FLOAT 类型; + - 后缀为 f64,为 DOUBLE 类型; + - 后缀为 i8,表示为 TINYINT (INT8) 类型; + - 后缀为 i16,表示为 SMALLINT (INT16) 类型; + - 后缀为 i32,表示为 INT (INT32) 类型; + - 后缀为 i64,表示为 BIGINT (INT64) 类型; + - 后缀为 b,表示为 BOOL 类型。 +* t, T, true, True, TRUE, f, F, false, False 将直接作为 BOOL 型来处理。 + +timestamp 位置的时间戳通过后缀来声明时间精度,具体如下: +* 不带任何后缀的长整数会被当作微秒来处理; +* 当后缀为 s 时,表示秒时间戳; +* 当后缀为 ms 时,表示毫秒时间戳; +* 当后缀为 us 时,表示微秒时间戳; +* 当后缀为 ns 时,表示纳秒时间戳; +* 当时间戳为 0 时,表示采用客户端的当前时间(因此,同一批提交的数据中,时间戳 0 会被解释为同一个时间点,于是就有可能导致时间戳重复)。 + +例如,如下 Schemaless 数据行表示:向名为 st 的超级表下的 t1 标签为 3(BIGINT 类型)、t2 标签为 4(DOUBLE 类型)、t3 标签为 "t3"(BINARY 类型)的数据子表,写入 c1 列为 3(BIGINT 类型)、c2 列为 false(BOOL 类型)、c3 列为 "passit"(NCHAR 类型)、c4 列为 4(DOUBLE 类型)、主键时间戳为 1626006833639000000(纳秒精度)的一行数据。 +```json +st,t1=3i64,t2=4f64,t3="t3" c1=3i64,c3=L"passit",c2=false,c4=4f64 1626006833639000000ns +``` + +### Schemaless 的处理逻辑 + +Schemaless 按照如下原则来处理行数据: +1. 当 tag_set 中有 ID 字段时,该字段的值将作为数据子表的表名。 +2. 没有 ID 字段时,将使用 `measurement + tag_value1 + tag_value2 + ...` 的 md5 值来作为子表名。 +3. 如果指定的超级表名不存在,则 Schemaless 会创建这个超级表。 +4. 如果指定的数据子表不存在,则 Schemaless 会使用 tag values 创建这个数据子表。 +5. 如果数据行中指定的标签列或普通列不存在,则 Schemaless 会在超级表中增加对应的标签列或普通列(只增不减)。 +6. 如果超级表中存在一些标签列或普通列未在一个数据行中被指定取值,那么这些列的值在这一行中会被置为 NULL。 +7. 对 BINARY 或 NCHAR 列,如果数据行中所提供值的长度超出了列类型的限制,那么 Schemaless 会增加该列允许存储的字符长度上限(只增不减),以保证数据的完整保存。 +8. 如果指定的数据子表已经存在,而且本次指定的标签列取值跟已保存的值不一样,那么最新的数据行中的值会覆盖旧的标签列取值。 +9. 
整个处理过程中遇到的错误会中断写入过程,并返回错误代码。 + +**注意:**Schemaless 所有的处理逻辑,仍会遵循 TDengine 对数据结构的底层限制,例如每行数据的总长度不能超过 16k 字节。这方面的具体限制约束请参见 [TAOS SQL 边界限制](https://www.taosdata.com/cn/documentation/taos-sql#limitation) 章节。 + +关于 Schemaless 的字符串编码处理、时区设置等,均会沿用 TAOSC 客户端的设置。 + +## Prometheus 直接写入 [Prometheus](https://www.prometheus.io/)作为Cloud Native Computing Fundation毕业的项目,在性能监控以及K8S性能监控领域有着非常广泛的应用。TDengine提供一个小工具[Bailongma](https://github.com/taosdata/Bailongma),只需对Prometheus做简单配置,无需任何代码,就可将Prometheus采集的数据直接写入TDengine,并按规则在TDengine自动创建库和相关表项。博文[用Docker容器快速搭建一个Devops监控Demo](https://www.taosdata.com/blog/2020/02/03/1189.html)即是采用Bailongma将Prometheus和Telegraf的数据写入TDengine中的示例,可以参考。 -### 从源代码编译blm_prometheus +### 从源代码编译 blm_prometheus 用户需要从github下载[Bailongma](https://github.com/taosdata/Bailongma)的源码,使用Golang语言编译器编译生成可执行文件。在开始编译前,需要准备好以下条件: - Linux操作系统的服务器 @@ -46,11 +108,11 @@ go build 一切正常的情况下,就会在对应的目录下生成一个blm_prometheus的可执行程序。 -### 安装Prometheus +### 安装 Prometheus 通过Prometheus的官网下载安装。具体请见:[下载地址](https://prometheus.io/download/)。 -### 配置Prometheus +### 配置 Prometheus 参考Prometheus的[配置文档](https://prometheus.io/docs/prometheus/latest/configuration/configuration/),在Prometheus的配置文件中的部分,增加以下配置: @@ -60,7 +122,8 @@ go build 启动Prometheus后,可以通过taos客户端查询确认数据是否成功写入。 -### 启动blm_prometheus程序 +### 启动 blm_prometheus 程序 + blm_prometheus程序有以下选项,在启动blm_prometheus程序时可以通过设定这些选项来设定blm_prometheus的配置。 ```bash --tdengine-name @@ -94,7 +157,8 @@ remote_write: - url: "http://10.1.2.3:8088/receive" ``` -### 查询prometheus写入数据 +### 查询 prometheus 写入数据 + prometheus产生的数据格式如下: ```json { @@ -105,10 +169,10 @@ prometheus产生的数据格式如下: instance="192.168.99.116:8443", job="kubernetes-apiservers", le="125000", - resource="persistentvolumes", s - cope="cluster", + resource="persistentvolumes", + scope="cluster", verb="LIST", - version=“v1" + version="v1" } } ``` @@ -118,11 +182,11 @@ use prometheus; select * from apiserver_request_latencies_bucket; ``` -## Telegraf直接写入 +## Telegraf 直接写入 
[Telegraf](https://www.influxdata.com/time-series-platform/telegraf/)是一流行的IT运维数据采集开源工具,TDengine提供一个小工具[Bailongma](https://github.com/taosdata/Bailongma),只需在Telegraf做简单配置,无需任何代码,就可将Telegraf采集的数据直接写入TDengine,并按规则在TDengine自动创建库和相关表项。博文[用Docker容器快速搭建一个Devops监控Demo](https://www.taosdata.com/blog/2020/02/03/1189.html)即是采用bailongma将Prometheus和Telegraf的数据写入TDengine中的示例,可以参考。 -### 从源代码编译blm_telegraf +### 从源代码编译 blm_telegraf 用户需要从github下载[Bailongma](https://github.com/taosdata/Bailongma)的源码,使用Golang语言编译器编译生成可执行文件。在开始编译前,需要准备好以下条件: @@ -139,11 +203,11 @@ go build 一切正常的情况下,就会在对应的目录下生成一个blm_telegraf的可执行程序。 -### 安装Telegraf +### 安装 Telegraf 目前TDengine支持Telegraf 1.7.4以上的版本。用户可以根据当前的操作系统,到Telegraf官网下载安装包,并执行安装。下载地址如下:https://portal.influxdata.com/downloads 。 -### 配置Telegraf +### 配置 Telegraf 修改Telegraf配置文件/etc/telegraf/telegraf.conf中与TDengine有关的配置项。 @@ -160,7 +224,8 @@ go build 关于如何使用Telegraf采集数据以及更多有关使用Telegraf的信息,请参考Telegraf官方的[文档](https://docs.influxdata.com/telegraf/v1.11/)。 -### 启动blm_telegraf程序 +### 启动 blm_telegraf 程序 + blm_telegraf程序有以下选项,在启动blm_telegraf程序时可以通过设定这些选项来设定blm_telegraf的配置。 ```bash @@ -196,7 +261,7 @@ blm_telegraf对telegraf提供服务的端口号。 url = "http://10.1.2.3:8089/telegraf" ``` -### 查询telegraf写入数据 +### 查询 telegraf 写入数据 telegraf产生的数据格式如下: ```json diff --git a/documentation20/cn/08.connector/docs.md b/documentation20/cn/08.connector/docs.md index cd18bc9908..f6a00e64cb 100644 --- a/documentation20/cn/08.connector/docs.md +++ b/documentation20/cn/08.connector/docs.md @@ -403,6 +403,25 @@ typedef struct TAOS_MULTI_BIND { (2.1.3.0 版本新增) 用于在其他 stmt API 返回错误(返回错误码或空指针)时获取错误信息。 + +### Schemaless 方式写入接口 + +除了使用 SQL 方式或者使用参数绑定 API 写入数据外,还可以使用 Schemaless 的方式完成写入。Schemaless 可以免于预先创建超级表/数据子表的数据结构,而是可以直接写入数据,TDengine 系统会根据写入的数据内容自动创建和维护所需要的表结构。Schemaless 的使用方式详见 [Schemaless 写入](https://www.taosdata.com/cn/documentation/insert#schemaless) 章节,这里介绍与之配套使用的 C/C++ API。 + +- `int taos_insert_lines(TAOS* taos, char* lines[], int numLines)` + + (2.2.0.0 版本新增) + 以 Schemaless 格式写入多行数据。其中: + * 
taos:调用 taos_connect 返回的数据库连接。 + * lines:由 char 字符串指针组成的数组,指向本次想要写入数据库的多行数据。 + * numLines:lines 数据的总行数。 + + 返回值为 0 表示写入成功,非零值表示出错。具体错误代码请参见 [taoserror.h](https://github.com/taosdata/TDengine/blob/develop/src/inc/taoserror.h) 文件。 + + 说明: + 1. 此接口是一个同步阻塞式接口,使用时机与 `taos_query()` 一致。 + 2. 在调用此接口之前,必须先调用 `taos_select_db()` 来确定目前是在向哪个 DB 来写入。 + ### 连续查询接口 TDengine提供时间驱动的实时流式计算API。可以每隔一指定的时间段,对一张或多张数据库的表(数据流)进行各种实时聚合计算操作。操作简单,仅有打开、关闭流的API。具体如下: From 375b57ca7e59992f9ff6deb8ee5253ca2ef6f7e0 Mon Sep 17 00:00:00 2001 From: Elias Soong Date: Sun, 12 Sep 2021 20:10:22 +0800 Subject: [PATCH 59/60] [TD-4591] : describe lossy compression module TSZ for FLOAT & DOUBLE type. --- documentation20/cn/11.administrator/docs.md | 29 +++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/documentation20/cn/11.administrator/docs.md b/documentation20/cn/11.administrator/docs.md index 35eff03423..fe9417a861 100644 --- a/documentation20/cn/11.administrator/docs.md +++ b/documentation20/cn/11.administrator/docs.md @@ -568,6 +568,35 @@ COMPACT 命令对指定的一个或多个 VGroup 启动碎片重整,系统会 需要注意的是,碎片重整操作会大幅消耗磁盘 I/O。因此在重整进行期间,有可能会影响节点的写入和查询性能,甚至在极端情况下导致短时间的阻写。 + +## 浮点数有损压缩 + +在车联网等物联网智能应用场景中,经常会采集和存储海量的浮点数类型数据,如果能更高效地对此类数据进行压缩,那么不但能够节省数据存储的硬件资源,也能够因降低磁盘 I/O 数据量而提升系统性能表现。 + +从 2.1.6.0 版本开始,TDengine 提供一种名为 TSZ 的新型数据压缩算法,无论设置为有损压缩还是无损压缩,都能够显著提升浮点数类型数据的压缩率表现。目前该功能以可选模块的方式进行发布,可以通过添加特定的编译参数来启用该功能(也即常规安装包中暂未包含该功能)。 + +**需要注意的是,该功能一旦启用,效果是全局的,也即会对系统中所有的 FLOAT、DOUBLE 类型的数据生效。同时,在启用了浮点数有损压缩功能后写入的数据,也无法被未启用该功能的版本载入,并有可能因此而导致数据库服务报错退出。** + +### 创建支持 TSZ 压缩算法的 TDengine 版本 + +TSZ 模块保存在单独的代码仓库 https://github.com/taosdata/TSZ 中。可以通过以下步骤创建包含此模块的 TDengine 版本: +1. TDengine 中的插件目前只支持通过 SSH 的方式拉取和编译,所以需要自己先配置好通过 SSH 拉取 GitHub 代码的环境。 +2. `git clone git@github.com:taosdata/TDengine -b your_branchname --recurse-submodules` 通过 `--recurse-submodules` 使依赖模块的源代码可以被一并下载。 +3. `mkdir debug && cd debug` 进入单独的编译目录。 +4. `cmake .. 
-DTSZ_ENABLED=true` 其中参数 `-DTSZ_ENABLED=true` 表示在编译过程中加入对 TSZ 插件功能的支持。如果成功激活对 TSZ 模块的编译,那么 CMAKE 过程中也会显示 `build with TSZ enabled` 字样。 +5. 编译成功后,包含 TSZ 浮点压缩功能的插件便已经编译进了 TDengine 中了,可以通过调整 taos.cfg 中的配置参数来使用此功能了。 + +### 通过配置文件来启用 TSZ 压缩算法 + +如果要启用 TSZ 压缩算法,除了在 TDengine 的编译过程需要声明启用 TSZ 模块之外,还需要在 taos.cfg 配置文件中对以下参数进行设置: +* lossyColumns:配置要进行有损压缩的浮点数数据类型。参数值类型为字符串,含义为:空 - 关闭有损压缩;float - 只对 FLOAT 类型进行有损压缩;double - 只对 DOUBLE 类型进行有损压缩;float|double:对 FLOAT 和 DOUBLE 类型都进行有损压缩。默认值是“空”,也即关闭有损压缩。 +* fPrecision:设置 float 类型浮点数压缩精度,小于此值的浮点数尾数部分将被截断。参数值类型为 FLOAT,最小值为 0.0,最大值为 100,000.0。缺省值为 0.00000001(1E-8)。 +* dPrecision:设置 double 类型浮点数压缩精度,小于此值的浮点数尾数部分将被截断。参数值类型为 DOUBLE,最小值为 0.0,最大值为 100,000.0。缺省值为 0.0000000000000001(1E-16)。 +* maxRange:表示数据的最大浮动范围。一般无需调整,在数据具有特定特征时可以配合 range 参数来实现极高的数据压缩率。默认值为 500。 +* range:表示数据大体浮动范围。一般无需调整,在数据具有特定特征时可以配合 maxRange 参数来实现极高的数据压缩率。默认值为 100。 + +**注意:**对 cfg 配置文件中参数值的任何调整,都需要重新启动 taosd 才能生效。并且以上选项为全局配置选项,配置后对所有数据库中所有表的 FLOAT 及 DOUBLE 类型的字段生效。 + ## 文件目录结构 安装TDengine后,默认会在操作系统中生成下列目录或文件: From 4290c0d13c6bec6296970a9dd5e65022925b5cd1 Mon Sep 17 00:00:00 2001 From: Elias Soong Date: Sun, 12 Sep 2021 21:28:27 +0800 Subject: [PATCH 60/60] [TD-6187] : describe JOIN clause. --- documentation20/cn/12.taos-sql/docs.md | 36 +++++++++++++++++++++----- 1 file changed, 29 insertions(+), 7 deletions(-) diff --git a/documentation20/cn/12.taos-sql/docs.md b/documentation20/cn/12.taos-sql/docs.md index c53972d7ca..d4ae0be720 100644 --- a/documentation20/cn/12.taos-sql/docs.md +++ b/documentation20/cn/12.taos-sql/docs.md @@ -730,6 +730,34 @@ Query OK, 1 row(s) in set (0.001091s) 5. 从 2.0.17.0 版本开始,条件过滤开始支持 BETWEEN AND 语法,例如 `WHERE col2 BETWEEN 1.5 AND 3.25` 表示查询条件为“1.5 ≤ col2 ≤ 3.25”。 6. 
从 2.1.4.0 版本开始,条件过滤开始支持 IN 算子,例如 `WHERE city IN ('Beijing', 'Shanghai')`。说明:BOOL 类型写作 `{true, false}` 或 `{0, 1}` 均可,但不能写作 0、1 之外的整数;FLOAT 和 DOUBLE 类型会受到浮点数精度影响,集合内的值在精度范围内认为和数据行的值完全相等才能匹配成功;TIMESTAMP 类型支持非主键的列。 + +### JOIN 子句 + +从 2.2.0.0 版本开始,TDengine 对内连接(INNER JOIN)中的自然连接(Natural join)操作实现了完整的支持。也即支持“普通表与普通表之间”、“超级表与超级表之间”、“子查询与子查询之间”进行自然连接。自然连接与内连接的主要区别是,自然连接要求参与连接的字段在不同的表/超级表中必须是同名字段。也即,TDengine 在连接关系的表达中,要求必须使用同名数据列/标签列的相等关系。 + +在普通表与普通表之间的 JOIN 操作中,只能使用主键时间戳之间的相等关系。例如: +```sql +SELECT * +FROM temp_tb_1 t1, pressure_tb_1 t2 +WHERE t1.ts = t2.ts +``` + +在超级表与超级表之间的 JOIN 操作中,除了主键时间戳一致的条件外,还要求引入能实现一一对应的标签列的相等关系。例如: +```sql +SELECT * +FROM temp_stable t1, temp_stable t2 +WHERE t1.ts = t2.ts AND t1.deviceid = t2.deviceid AND t1.status=0; +``` + +类似地,也可以对多个子查询的查询结果进行 JOIN 操作。 + +注意,JOIN 操作存在如下限制要求: +1. 参与一条语句中 JOIN 操作的表/超级表最多可以有 10 个。 +2. 在包含 JOIN 操作的查询语句中不支持 FILL。 +3. 暂不支持参与 JOIN 操作的表之间聚合后的四则运算。 +4. 不支持只对其中一部分表做 GROUP BY。 +5. JOIN 查询的不同表的过滤条件之间不能为 OR。 + ### 嵌套查询 @@ -757,7 +785,7 @@ SELECT ... FROM (SELECT ... FROM ...) ...; * 外层查询不支持 GROUP BY。 -### UNION ALL 操作符 +### UNION ALL 子句 ```mysql SELECT ... @@ -1486,12 +1514,6 @@ SELECT AVG(current), MAX(current), LEASTSQUARES(current, start_val, step_val), P TAOS SQL 支持对标签、TBNAME 进行 GROUP BY 操作,也支持普通列进行 GROUP BY,前提是:仅限一列且该列的唯一值小于 10 万个。 -**JOIN 操作的限制** - -TAOS SQL 支持表之间按主键时间戳来 join 两张表的列,暂不支持两个表之间聚合后的四则运算。 - -JOIN 查询的不同表的过滤条件之间不能为 OR。 - **IS NOT NULL 与不为空的表达式适用范围** IS NOT NULL 支持所有类型的列。不为空的表达式为 <>"",仅对非数值类型的列适用。