From fcf6a1cf1f068bbc19cff6a6bffd493396d1b942 Mon Sep 17 00:00:00 2001 From: Hongze Cheng Date: Wed, 17 Mar 2021 16:08:55 +0800 Subject: [PATCH 01/17] [TD-3320]: fix sync coredump --- src/tsdb/src/tsdbSync.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/src/tsdb/src/tsdbSync.c b/src/tsdb/src/tsdbSync.c index 6b8483e4a6..ee545fbabc 100644 --- a/src/tsdb/src/tsdbSync.c +++ b/src/tsdb/src/tsdbSync.c @@ -677,13 +677,13 @@ static int32_t tsdbRecvDFileSetInfo(SSyncH *pSynch) { static int tsdbReload(STsdbRepo *pRepo, bool isMfChanged) { // TODO: may need to stop and restart stream - if (isMfChanged) { - tsdbCloseMeta(pRepo); - tsdbFreeMeta(pRepo->tsdbMeta); - pRepo->tsdbMeta = tsdbNewMeta(REPO_CFG(pRepo)); - tsdbOpenMeta(pRepo); - tsdbLoadMetaCache(pRepo, true); - } + // if (isMfChanged) { + tsdbCloseMeta(pRepo); + tsdbFreeMeta(pRepo->tsdbMeta); + pRepo->tsdbMeta = tsdbNewMeta(REPO_CFG(pRepo)); + tsdbOpenMeta(pRepo); + tsdbLoadMetaCache(pRepo, true); + // } tsdbUnRefMemTable(pRepo, pRepo->mem); tsdbUnRefMemTable(pRepo, pRepo->imem); From 363bacbe76b6711113c6ee84931925e858b2fce4 Mon Sep 17 00:00:00 2001 From: dapan1121 <89396746@qq.com> Date: Wed, 17 Mar 2021 17:42:39 +0800 Subject: [PATCH 02/17] fix bug --- src/client/src/tscAsync.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/src/client/src/tscAsync.c b/src/client/src/tscAsync.c index 5cba897b30..87f6058cec 100644 --- a/src/client/src/tscAsync.c +++ b/src/client/src/tscAsync.c @@ -281,7 +281,7 @@ void tscQueueAsyncError(void(*fp), void *param, int32_t code) { } static void tscAsyncResultCallback(SSchedMsg *pMsg) { - SSqlObj* pSql = pMsg->ahandle; + SSqlObj* pSql = (SSqlObj*)taosAcquireRef(tscObjRef, (int64_t)pMsg->ahandle); if (pSql == NULL || pSql->signature != pSql) { tscDebug("%p SqlObj is freed, not add into queue async res", pSql); return; @@ -292,23 +292,26 @@ static void tscAsyncResultCallback(SSchedMsg *pMsg) { SSqlRes *pRes = &pSql->res; if (pSql->fp == NULL || pSql->fetchFp == NULL){ + taosReleaseRef(tscObjRef, pSql->self); return; } pSql->fp = pSql->fetchFp; (*pSql->fp)(pSql->param, pSql, pRes->code); + taosReleaseRef(tscObjRef, pSql->self); } void tscAsyncResultOnError(SSqlObj* pSql) { SSchedMsg schedMsg = {0}; schedMsg.fp = tscAsyncResultCallback; - schedMsg.ahandle = pSql; + schedMsg.ahandle = (void *)pSql->self; schedMsg.thandle = (void *)1; schedMsg.msg = 0; taosScheduleTask(tscQhandle, &schedMsg); } + int tscSendMsgToServer(SSqlObj *pSql); void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) { From 282d1c10f254d391cc4693451bf2f7f926ba375e Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Thu, 18 Mar 2021 11:18:51 +0800 Subject: [PATCH 03/17] [TD-3318]add case for stddev --- tests/pytest/functions/function_stddev.py | 6 +++++- tests/test-all.sh | 2 +- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/tests/pytest/functions/function_stddev.py b/tests/pytest/functions/function_stddev.py index afc59ac83d..3ff2b82bf6 100644 --- a/tests/pytest/functions/function_stddev.py +++ b/tests/pytest/functions/function_stddev.py @@ -118,7 +118,11 @@ class TDTestCase: if i == 1 or i == 5 or i == 6 or i == 7 or i == 9 or i == 8 :continue tdSql.query('select stddev(c%d),stddev(c%d) from s group by c%d' %( i+1 , i+1 , i+1 ) ) - + #add for TD-3318 + tdSql.execute('create table t1(ts timestamp, k int, b binary(12));') + tdSql.execute("insert into t1 values(now, 1, 'abc');") + tdLog.info("select stddev(k) from t1 where b <> 'abc' interval(1s);") + tdSql.query("select 
stddev(k) from t1 where b <> 'abc' interval(1s);") def stop(self): diff --git a/tests/test-all.sh b/tests/test-all.sh index b617dd2f07..c8546f591a 100755 --- a/tests/test-all.sh +++ b/tests/test-all.sh @@ -29,7 +29,7 @@ function dohavecore(){ proc=`echo $corefile|cut -d "_" -f3` if [ -n "$corefile" ];then echo 'taosd or taos has generated core' - tar -zcvPf $corepath'taos_'`date "+%Y_%m_%d_%H_%M_%S"`.tar.gz /usr/local/taos/ + tar -zcPf $corepath'taos_'`date "+%Y_%m_%d_%H_%M_%S"`.tar.gz if [[ $1 == 1 ]];then echo '\n'|gdb /usr/local/taos/bin/$proc $core_file -ex "bt 10" -ex quit exit 8 From 53496c7844b0377153d284a6f89e694d82ba71e3 Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Thu, 18 Mar 2021 11:21:56 +0800 Subject: [PATCH 04/17] fix --- tests/test-all.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test-all.sh b/tests/test-all.sh index c8546f591a..dcc2b61f43 100755 --- a/tests/test-all.sh +++ b/tests/test-all.sh @@ -29,7 +29,7 @@ function dohavecore(){ proc=`echo $corefile|cut -d "_" -f3` if [ -n "$corefile" ];then echo 'taosd or taos has generated core' - tar -zcPf $corepath'taos_'`date "+%Y_%m_%d_%H_%M_%S"`.tar.gz + tar -zcPf $corepath'taos_'`date "+%Y_%m_%d_%H_%M_%S"`.tar.gz /usr/local/taos/ if [[ $1 == 1 ]];then echo '\n'|gdb /usr/local/taos/bin/$proc $core_file -ex "bt 10" -ex quit exit 8 From 9f00cd82bbe7190bd734e22e62c05d0bf1531bd1 Mon Sep 17 00:00:00 2001 From: Elias Soong Date: Thu, 18 Mar 2021 14:39:34 +0800 Subject: [PATCH 05/17] [TD-3352] : move "quick run" section into "install" part. --- README.md | 29 ++++++++++++++++------------- 1 file changed, 16 insertions(+), 13 deletions(-) diff --git a/README.md b/README.md index 6ad9d4a97f..97ac9dffeb 100644 --- a/README.md +++ b/README.md @@ -160,24 +160,13 @@ mkdir debug && cd debug cmake .. && cmake --build . ``` -# Quick Run - -# Quick Run -To quickly start a TDengine server after building, run the command below in terminal: -```bash -./build/bin/taosd -c test/cfg -``` -In another terminal, use the TDengine shell to connect the server: -```bash -./build/bin/taos -c test/cfg -``` -option "-c test/cfg" specifies the system configuration file directory. - # Installing + After building successfully, TDengine can be installed by: ```bash sudo make install ``` + Users can find more information about directories installed on the system in the [directory and files](https://www.taosdata.com/en/documentation/administrator/#Directory-and-Files) section. Since version 2.0, installing from source code will also configure service management for TDengine. Users can also choose to [install from packages](https://www.taosdata.com/en/getting-started/#Install-from-Package) for it. @@ -193,6 +182,20 @@ taos If TDengine shell connects the server successfully, welcome messages and version info are printed. Otherwise, an error message is shown. +## Quick Run + +If you don't want to run TDengine as a service, you can run it in current shell. For example, to quickly start a TDengine server after building, run the command below in terminal: +```bash +./build/bin/taosd -c test/cfg +``` + +In another terminal, use the TDengine shell to connect the server: +```bash +./build/bin/taos -c test/cfg +``` + +option "-c test/cfg" specifies the system configuration file directory. + # Try TDengine It is easy to run SQL commands from TDengine shell which is the same as other SQL databases. 
```sql From 2d8c6952398900567c5d2e8b8ad6b91f58741f0b Mon Sep 17 00:00:00 2001 From: Elias Soong Date: Thu, 18 Mar 2021 14:47:39 +0800 Subject: [PATCH 06/17] [TD-3363] : fix typo. --- documentation20/cn/13.faq/docs.md | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/documentation20/cn/13.faq/docs.md b/documentation20/cn/13.faq/docs.md index d3169d507a..e2285b29e2 100644 --- a/documentation20/cn/13.faq/docs.md +++ b/documentation20/cn/13.faq/docs.md @@ -16,13 +16,13 @@ ## 1. TDengine2.0之前的版本升级到2.0及以上的版本应该注意什么?☆☆☆ -2.0版本在之前版本的基础上,进行了完全的重构,配置文件和数据文件是不兼容的。在升级之前务必进行如下操作: +2.0版在之前版本的基础上,进行了完全的重构,配置文件和数据文件是不兼容的。在升级之前务必进行如下操作: -1. 删除配置文件,执行 sudo rm -rf /etc/taos/taos.cfg -2. 删除日志文件,执行 sudo rm -rf /var/log/taos/ -3. 确保数据已经不再需要的前提下,删除数据文件,执行 sudo rm -rf /var/lib/taos/ -4. 安装最新稳定版本的TDengine -5. 如果数据需要迁移数据或者数据文件损坏,请联系涛思数据官方技术支持团队,进行协助解决 +1. 删除配置文件,执行 `sudo rm -rf /etc/taos/taos.cfg` +2. 删除日志文件,执行 `sudo rm -rf /var/log/taos/` +3. 确保数据已经不再需要的前提下,删除数据文件,执行 `sudo rm -rf /var/lib/taos/` +4. 安装最新稳定版本的 TDengine +5. 如果需要迁移数据或者数据文件损坏,请联系涛思数据官方技术支持团队,进行协助解决 ## 2. Windows平台下JDBCDriver找不到动态链接库,怎么办? From 7447a8c562854d16a9fdb943b965e2f281b44bb9 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 18 Mar 2021 15:24:36 +0800 Subject: [PATCH 07/17] [td-3361]: nchar tag filter caused client crash. --- src/client/src/tscSQLParser.c | 14 +++++++++++--- tests/script/general/parser/topbot.sim | 9 +++++++++ 2 files changed, 20 insertions(+), 3 deletions(-) diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index 8b2998c0e7..3b65d0625f 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -4138,13 +4138,21 @@ static int32_t validateTagCondExpr(SSqlCmd* pCmd, tExprNode *p) { } int32_t retVal = TSDB_CODE_SUCCESS; + + int32_t bufLen = 0; + if (IS_NUMERIC_TYPE(vVariant->nType)) { + bufLen = 60; // The maximum length of string that a number is converted to. 
+ } else { + bufLen = vVariant->nLen + 1; + } + if (schemaType == TSDB_DATA_TYPE_BINARY) { - char *tmp = calloc(1, vVariant->nLen + TSDB_NCHAR_SIZE); + char *tmp = calloc(1, bufLen * TSDB_NCHAR_SIZE); retVal = tVariantDump(vVariant, tmp, schemaType, false); free(tmp); } else if (schemaType == TSDB_DATA_TYPE_NCHAR) { // pRight->value.nLen + 1 is larger than the actual nchar string length - char *tmp = calloc(1, (vVariant->nLen + 1) * TSDB_NCHAR_SIZE); + char *tmp = calloc(1, bufLen * TSDB_NCHAR_SIZE); retVal = tVariantDump(vVariant, tmp, schemaType, false); free(tmp); } else { @@ -4155,7 +4163,7 @@ static int32_t validateTagCondExpr(SSqlCmd* pCmd, tExprNode *p) { if (retVal != TSDB_CODE_SUCCESS) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2); } - }while (0); + } while (0); return TSDB_CODE_SUCCESS; } diff --git a/tests/script/general/parser/topbot.sim b/tests/script/general/parser/topbot.sim index 80a122238e..e23bbf6724 100644 --- a/tests/script/general/parser/topbot.sim +++ b/tests/script/general/parser/topbot.sim @@ -316,4 +316,13 @@ if $data13 != @20-02-02 01:01:01.000@ then return -1 endi +print ===============================>td-3361 +sql create table ttm1(ts timestamp, k int) tags(a nchar(12)); +sql create table ttm1_t1 using ttm1 tags('abcdef') +sql insert into ttm1_t1 values(now, 1) +sql select * from ttm1 where a=123456789012 +if $row != 0 then + return -1 +endi + system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file From e4e46768a64c3be441a31f64d983c8b07289e47f Mon Sep 17 00:00:00 2001 From: Hongze Cheng Date: Thu, 18 Mar 2021 15:50:55 +0800 Subject: [PATCH 08/17] [TD-3353]: solve race condition coredump --- src/inc/tsdb.h | 30 ++++++++++-- src/query/src/qExecutor.c | 2 +- src/tsdb/inc/tsdbMemTable.h | 23 ++-------- src/tsdb/src/tsdbMemTable.c | 92 +++++++++++++++++-------------------- src/tsdb/src/tsdbRead.c | 18 ++++---- 5 files changed, 82 insertions(+), 83 deletions(-) diff --git a/src/inc/tsdb.h b/src/inc/tsdb.h index 09444bb8e4..493bdbe5de 100644 --- a/src/inc/tsdb.h +++ b/src/inc/tsdb.h @@ -25,6 +25,8 @@ #include "tdataformat.h" #include "tname.h" #include "hash.h" +#include "tlockfree.h" +#include "tlist.h" #ifdef __cplusplus extern "C" { @@ -172,10 +174,32 @@ typedef struct STsdbQueryCond { int32_t type; // data block load type: } STsdbQueryCond; +typedef struct STableData STableData; +typedef struct { + T_REF_DECLARE() + SRWLatch latch; + TSKEY keyFirst; + TSKEY keyLast; + int64_t numOfRows; + int32_t maxTables; + STableData **tData; + SList * actList; + SList * extraBuffList; + SList * bufBlockList; + int64_t pointsAdd; // TODO + int64_t storageAdd; // TODO +} SMemTable; + +typedef struct { + SMemTable* mem; + SMemTable* imem; + SMemTable mtable; + SMemTable* omem; +} SMemSnapshot; + typedef struct SMemRef { - int32_t ref; - void * mem; - void * imem; + int32_t ref; + SMemSnapshot snapshot; } SMemRef; typedef struct SDataBlockInfo { diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index 54bc72b307..59e04c9cac 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -1840,7 +1840,7 @@ static void doFreeQueryHandle(SQueryRuntimeEnv* pRuntimeEnv) { pRuntimeEnv->pQueryHandle = NULL; SMemRef* pMemRef = &pQuery->memRef; - assert(pMemRef->ref == 0 && pMemRef->imem == NULL && pMemRef->mem == NULL); + assert(pMemRef->ref == 0 && pMemRef->snapshot.imem == NULL && pMemRef->snapshot.mem == NULL); } static void teardownQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv) { diff --git a/src/tsdb/inc/tsdbMemTable.h 
b/src/tsdb/inc/tsdbMemTable.h index bd64ed4a52..6046274af4 100644 --- a/src/tsdb/inc/tsdbMemTable.h +++ b/src/tsdb/inc/tsdbMemTable.h @@ -31,29 +31,14 @@ typedef struct { SSkipListIterator *pIter; } SCommitIter; -typedef struct { +struct STableData { uint64_t uid; TSKEY keyFirst; TSKEY keyLast; int64_t numOfRows; SSkipList* pData; T_REF_DECLARE() -} STableData; - -typedef struct { - T_REF_DECLARE() - SRWLatch latch; - TSKEY keyFirst; - TSKEY keyLast; - int64_t numOfRows; - int32_t maxTables; - STableData** tData; - SList* actList; - SList* extraBuffList; - SList* bufBlockList; - int64_t pointsAdd; // TODO - int64_t storageAdd; // TODO -} SMemTable; +}; enum { TSDB_UPDATE_META, TSDB_DROP_META }; @@ -77,8 +62,8 @@ typedef struct { int tsdbRefMemTable(STsdbRepo* pRepo, SMemTable* pMemTable); int tsdbUnRefMemTable(STsdbRepo* pRepo, SMemTable* pMemTable); -int tsdbTakeMemSnapshot(STsdbRepo* pRepo, SMemTable** pMem, SMemTable** pIMem, SArray* pATable); -void tsdbUnTakeMemSnapShot(STsdbRepo* pRepo, SMemTable* pMem, SMemTable* pIMem); +int tsdbTakeMemSnapshot(STsdbRepo* pRepo, SMemSnapshot* pSnapshot, SArray* pATable); +void tsdbUnTakeMemSnapShot(STsdbRepo* pRepo, SMemSnapshot* pSnapshot); void* tsdbAllocBytes(STsdbRepo* pRepo, int bytes); int tsdbAsyncCommit(STsdbRepo* pRepo); int tsdbLoadDataFromCache(STable* pTable, SSkipListIterator* pIter, TSKEY maxKey, int maxRowsToRead, SDataCols* pCols, diff --git a/src/tsdb/src/tsdbMemTable.c b/src/tsdb/src/tsdbMemTable.c index 6818f2ed14..20ec426018 100644 --- a/src/tsdb/src/tsdbMemTable.c +++ b/src/tsdb/src/tsdbMemTable.c @@ -124,88 +124,80 @@ int tsdbUnRefMemTable(STsdbRepo *pRepo, SMemTable *pMemTable) { return 0; } -int tsdbTakeMemSnapshot(STsdbRepo *pRepo, SMemTable **pMem, SMemTable **pIMem, SArray *pATable) { - SMemTable *tmem; +int tsdbTakeMemSnapshot(STsdbRepo *pRepo, SMemSnapshot *pSnapshot, SArray *pATable) { + memset(pSnapshot, 0, sizeof(*pSnapshot)); - // Get snap object if (tsdbLockRepo(pRepo) < 0) return -1; - tmem = pRepo->mem; - *pIMem = pRepo->imem; - tsdbRefMemTable(pRepo, tmem); - tsdbRefMemTable(pRepo, *pIMem); + pSnapshot->omem = pRepo->mem; + pSnapshot->imem = pRepo->imem; + tsdbRefMemTable(pRepo, pRepo->mem); + tsdbRefMemTable(pRepo, pRepo->imem); if (tsdbUnlockRepo(pRepo) < 0) return -1; - // Copy mem objects and ref needed STableData - if (tmem) { - taosRLockLatch(&(tmem->latch)); + if (pSnapshot->omem) { + taosRLockLatch(&(pSnapshot->omem->latch)); - *pMem = (SMemTable *)calloc(1, sizeof(**pMem)); - if (*pMem == NULL) { + pSnapshot->mem = &(pSnapshot->mtable); + + pSnapshot->mem->tData = (STableData **)calloc(pSnapshot->omem->maxTables, sizeof(STableData *)); + if (pSnapshot->mem->tData == NULL) { terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; - taosRUnLockLatch(&(tmem->latch)); - tsdbUnRefMemTable(pRepo, tmem); - tsdbUnRefMemTable(pRepo, *pIMem); - *pMem = NULL; - *pIMem = NULL; + taosRUnLockLatch(&(pSnapshot->omem->latch)); + tsdbUnRefMemTable(pRepo, pSnapshot->omem); + tsdbUnRefMemTable(pRepo, pSnapshot->imem); + pSnapshot->mem = NULL; + pSnapshot->imem = NULL; + pSnapshot->omem = NULL; return -1; } - (*pMem)->tData = (STableData **)calloc(tmem->maxTables, sizeof(STableData *)); - if ((*pMem)->tData == NULL) { - terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; - taosRUnLockLatch(&(tmem->latch)); - free(*pMem); - tsdbUnRefMemTable(pRepo, tmem); - tsdbUnRefMemTable(pRepo, *pIMem); - *pMem = NULL; - *pIMem = NULL; - return -1; - } - - (*pMem)->keyFirst = tmem->keyFirst; - (*pMem)->keyLast = tmem->keyLast; - (*pMem)->numOfRows = tmem->numOfRows; - 
(*pMem)->maxTables = tmem->maxTables; + pSnapshot->mem->keyFirst = pSnapshot->omem->keyFirst; + pSnapshot->mem->keyLast = pSnapshot->omem->keyLast; + pSnapshot->mem->numOfRows = pSnapshot->omem->numOfRows; + pSnapshot->mem->maxTables = pSnapshot->omem->maxTables; for (size_t i = 0; i < taosArrayGetSize(pATable); i++) { STable * pTable = *(STable **)taosArrayGet(pATable, i); int32_t tid = TABLE_TID(pTable); - STableData *pTableData = (tid < tmem->maxTables) ? tmem->tData[tid] : NULL; + STableData *pTableData = (tid < pSnapshot->omem->maxTables) ? pSnapshot->omem->tData[tid] : NULL; if ((pTableData == NULL) || (TABLE_UID(pTable) != pTableData->uid)) continue; - (*pMem)->tData[tid] = tmem->tData[tid]; - T_REF_INC(tmem->tData[tid]); + pSnapshot->mem->tData[tid] = pTableData; + T_REF_INC(pTableData); } - taosRUnLockLatch(&(tmem->latch)); + taosRUnLockLatch(&(pSnapshot->omem->latch)); } - tsdbUnRefMemTable(pRepo, tmem); - - tsdbDebug("vgId:%d take memory snapshot, pMem %p pIMem %p", REPO_ID(pRepo), *pMem, *pIMem); + tsdbDebug("vgId:%d take memory snapshot, pMem %p pIMem %p", REPO_ID(pRepo), pSnapshot->omem, pSnapshot->imem); return 0; } -void tsdbUnTakeMemSnapShot(STsdbRepo *pRepo, SMemTable *pMem, SMemTable *pIMem) { - tsdbDebug("vgId:%d untake memory snapshot, pMem %p pIMem %p", REPO_ID(pRepo), pMem, pIMem); +void tsdbUnTakeMemSnapShot(STsdbRepo *pRepo, SMemSnapshot *pSnapshot) { + tsdbDebug("vgId:%d untake memory snapshot, pMem %p pIMem %p", REPO_ID(pRepo), pSnapshot->omem, pSnapshot->imem); - if (pMem != NULL) { - for (size_t i = 0; i < pMem->maxTables; i++) { - STableData *pTableData = pMem->tData[i]; + if (pSnapshot->mem) { + ASSERT(pSnapshot->omem != NULL); + + for (size_t i = 0; i < pSnapshot->mem->maxTables; i++) { + STableData *pTableData = pSnapshot->mem->tData[i]; if (pTableData) { tsdbFreeTableData(pTableData); } } - free(pMem->tData); - free(pMem); + tfree(pSnapshot->mem->tData); + + tsdbUnRefMemTable(pRepo, pSnapshot->omem); } - if (pIMem != NULL) { - tsdbUnRefMemTable(pRepo, pIMem); - } + tsdbUnRefMemTable(pRepo, pSnapshot->imem); + + pSnapshot->mem = NULL; + pSnapshot->imem = NULL; + pSnapshot->omem = NULL; } void *tsdbAllocBytes(STsdbRepo *pRepo, int bytes) { diff --git a/src/tsdb/src/tsdbRead.c b/src/tsdb/src/tsdbRead.c index 7b7c244ba8..3426fe86c2 100644 --- a/src/tsdb/src/tsdbRead.c +++ b/src/tsdb/src/tsdbRead.c @@ -194,7 +194,7 @@ static void tsdbMayTakeMemSnapshot(STsdbQueryHandle* pQueryHandle, SArray* psTab SMemRef* pMemRef = pQueryHandle->pMemRef; if (pQueryHandle->pMemRef->ref++ == 0) { - tsdbTakeMemSnapshot(pQueryHandle->pTsdb, (SMemTable**)&(pMemRef->mem), (SMemTable**)&(pMemRef->imem), psTable); + tsdbTakeMemSnapshot(pQueryHandle->pTsdb, &(pMemRef->snapshot), psTable); } taosArrayDestroy(psTable); @@ -208,9 +208,7 @@ static void tsdbMayUnTakeMemSnapshot(STsdbQueryHandle* pQueryHandle) { } if (--pMemRef->ref == 0) { - tsdbUnTakeMemSnapShot(pQueryHandle->pTsdb, pMemRef->mem, pMemRef->imem); - pMemRef->mem = NULL; - pMemRef->imem = NULL; + tsdbUnTakeMemSnapShot(pQueryHandle->pTsdb, &(pMemRef->snapshot)); } pQueryHandle->pMemRef = NULL; @@ -229,10 +227,10 @@ int64_t tsdbGetNumOfRowsInMemTable(TsdbQueryHandleT* pHandle) { if (pMemRef == NULL) { return rows; } STableData* pMem = NULL; - STableData* pIMem = NULL; + STableData* pIMem = NULL; - SMemTable *pMemT = (SMemTable *)(pMemRef->mem); - SMemTable *pIMemT = (SMemTable *)(pMemRef->imem); + SMemTable* pMemT = pMemRef->snapshot.mem; + SMemTable* pIMemT = pMemRef->snapshot.imem; if (pMemT && pCheckInfo->tableId.tid < 
pMemT->maxTables) { pMem = pMemT->tData[pCheckInfo->tableId.tid]; @@ -605,7 +603,7 @@ static bool initTableMemIterator(STsdbQueryHandle* pHandle, STableCheckInfo* pCh int32_t order = pHandle->order; // no data in buffer, abort - if (pHandle->pMemRef->mem == NULL && pHandle->pMemRef->imem == NULL) { + if (pHandle->pMemRef->snapshot.mem == NULL && pHandle->pMemRef->snapshot.imem == NULL) { return false; } @@ -614,8 +612,8 @@ static bool initTableMemIterator(STsdbQueryHandle* pHandle, STableCheckInfo* pCh STableData* pMem = NULL; STableData* pIMem = NULL; - SMemTable* pMemT = pHandle->pMemRef->mem; - SMemTable* pIMemT = pHandle->pMemRef->imem; + SMemTable* pMemT = pHandle->pMemRef->snapshot.mem; + SMemTable* pIMemT = pHandle->pMemRef->snapshot.imem; if (pMemT && pCheckInfo->tableId.tid < pMemT->maxTables) { pMem = pMemT->tData[pCheckInfo->tableId.tid]; From 1f61557cbd79981ff66fab05bd4c967defe919b5 Mon Sep 17 00:00:00 2001 From: Elias Soong Date: Thu, 18 Mar 2021 15:56:26 +0800 Subject: [PATCH 09/17] [TD-2639] : improve expression of some sentences. --- documentation20/cn/03.architecture/docs.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/documentation20/cn/03.architecture/docs.md b/documentation20/cn/03.architecture/docs.md index 26d8bc55c2..87553fa8ad 100644 --- a/documentation20/cn/03.architecture/docs.md +++ b/documentation20/cn/03.architecture/docs.md @@ -145,7 +145,7 @@ TDengine 建议用数据采集点的名字(如上表中的D1001)来做表名。 在TDengine的设计里,**表用来代表一个具体的数据采集点,超级表用来代表一组相同类型的数据采集点集合**。当为某个具体数据采集点创建表时,用户使用超级表的定义做模板,同时指定该具体采集点(表)的标签值。与传统的关系型数据库相比,表(一个数据采集点)是带有静态标签的,而且这些标签可以事后增加、删除、修改。**一张超级表包含有多张表,这些表具有相同的时序数据schema,但带有不同的标签值**。 -当对多个具有相同数据类型的数据采集点进行聚合操作时,TDengine将先把满足标签过滤条件的表从超级表的中查找出来,然后再扫描这些表的时序数据,进行聚合操作,这样能将需要扫描的数据集大幅减少,从而大幅提高聚合计算的性能。 +当对多个具有相同数据类型的数据采集点进行聚合操作时,TDengine会先把满足标签过滤条件的表从超级表中找出来,然后再扫描这些表的时序数据,进行聚合操作,这样需要扫描的数据集会大幅减少,从而显著提高聚合计算的性能。 ## 集群与基本逻辑单元 From 7610e8111d637d8a77babc050ccb4680f0918d89 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Thu, 18 Mar 2021 17:43:09 +0800 Subject: [PATCH 10/17] Feature/sangshuduo/td 3317 taosdemo interlace (#5485) * [TD-3316] : add testcase for taosdemo limit and offset. check offset 0. * [TD-3316] : add testcase for taosdemo limit and offset. fix sample file import bug. * [TD-3316] : add test case for limit and offset. fix sample data issue. * [TD-3327] : fix taosdemo segfault when import data from sample data file. * [TD-3317] : make taosdemo support interlace mode. json parameter rows_per_tbl support. * [TD-3317] : support interlace mode. refactor * [TD-3317] : support interlace mode. refactor * [TD-3317] : support interlace mode insertion. refactor. * [TD-3317] : support interlace mode insertion. change json file. * [TD-3317] : support interlace mode insertion. fix multithread create table regression. * [TD-3317] : support interlace mode insertion. working but not perfect. * [TD-3317] : support interlace mode insertion. rename lowaTest with taosdemoTestWithJson * [TD-3317] : support interlace mode insertion. perfect * [TD-3317] : support interlace mode insertion. cleanup. * [TD-3317] : support interlace mode insertion. adjust algorithm of loop times. 
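For reference, a minimal sketch of the top-level JSON insert configuration that the new interlace code path consumes. The key names below are exactly the ones looked up via cJSON in the diff that follows; the values and the flat file shape are illustrative assumptions only, not taken from the patch:

```json
{
    "confirm_parameter_prompt": "no",
    "insert_interval": 0,
    "max_sql_len": 1024000,
    "num_of_records_per_req": 100,
    "rows_per_tbl": 20
}
```

Per the parsing logic in this diff, `rows_per_tbl` set to 0 keeps the old progressive insertion mode, while any positive value enables interlace mode; the insertion loop also clamps it to `num_of_records_per_req`, so larger values are reduced at run time. The same setting is additionally parsed per super table in this patch.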
Co-authored-by: Shuduo Sang --- src/kit/taosdemo/taosdemo.c | 378 ++++++++++++++++++++---------------- 1 file changed, 211 insertions(+), 167 deletions(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 83195ca15d..355f738885 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -567,12 +567,19 @@ static FILE * g_fpOfInsertResult = NULL; #define debugPrint(fmt, ...) \ do { if (g_args.debug_print || g_args.verbose_print) \ fprintf(stderr, "DEBG: "fmt, __VA_ARGS__); } while(0) + #define verbosePrint(fmt, ...) \ do { if (g_args.verbose_print) \ fprintf(stderr, "VERB: "fmt, __VA_ARGS__); } while(0) +#define errorPrint(fmt, ...) \ + do { fprintf(stderr, "ERROR: "fmt, __VA_ARGS__); } while(0) + + /////////////////////////////////////////////////// +static void ERROR_EXIT(const char *msg) { perror(msg); exit(-1); } + void printHelp() { char indent[10] = " "; printf("%s%s%s%s\n", indent, "-f", indent, @@ -645,7 +652,7 @@ void parse_args(int argc, char *argv[], SArguments *arguments) { } else if (strcmp(argv[i], "-c") == 0) { char *configPath = argv[++i]; if (wordexp(configPath, &full_path, 0) != 0) { - fprintf(stderr, "Invalid path %s\n", configPath); + errorPrint( "Invalid path %s\n", configPath); return; } taos_options(TSDB_OPTION_CONFIGDIR, full_path.we_wordv[0]); @@ -694,8 +701,8 @@ void parse_args(int argc, char *argv[], SArguments *arguments) { && strcasecmp(argv[i], "DOUBLE") && strcasecmp(argv[i], "BINARY") && strcasecmp(argv[i], "NCHAR")) { - fprintf(stderr, "Invalid data_type!\n"); printHelp(); + ERROR_EXIT( "Invalid data_type!\n"); exit(EXIT_FAILURE); } sptr[0] = argv[i]; @@ -715,8 +722,8 @@ void parse_args(int argc, char *argv[], SArguments *arguments) { && strcasecmp(token, "DOUBLE") && strcasecmp(token, "BINARY") && strcasecmp(token, "NCHAR")) { - fprintf(stderr, "Invalid data_type!\n"); printHelp(); + ERROR_EXIT("Invalid data_type!\n"); exit(EXIT_FAILURE); } sptr[index++] = token; @@ -771,8 +778,8 @@ void parse_args(int argc, char *argv[], SArguments *arguments) { printHelp(); exit(0); } else { - fprintf(stderr, "wrong options\n"); printHelp(); + ERROR_EXIT("ERROR: wrong options\n"); exit(EXIT_FAILURE); } } @@ -858,7 +865,7 @@ static int queryDbExec(TAOS *taos, char *command, int type) { if (code != 0) { debugPrint("%s() LN%d - command: %s\n", __func__, __LINE__, command); - fprintf(stderr, "Failed to run %s, reason: %s\n", command, taos_errstr(res)); + errorPrint( "Failed to run %s, reason: %s\n", command, taos_errstr(res)); taos_free_result(res); //taos_close(taos); return -1; @@ -884,13 +891,13 @@ static void getResult(TAOS_RES *res, char* resultFileName) { if (resultFileName[0] != 0) { fp = fopen(resultFileName, "at"); if (fp == NULL) { - fprintf(stderr, "failed to open result file: %s, result will not save to file\n", resultFileName); + errorPrint("%s() LN%d, failed to open result file: %s, result will not save to file\n", __func__, __LINE__, resultFileName); } } char* databuf = (char*) calloc(1, 100*1024*1024); if (databuf == NULL) { - fprintf(stderr, "failed to malloc, warning: save result to file slowly!\n"); + errorPrint("%s() LN%d, failed to malloc, warning: save result to file slowly!\n", __func__, __LINE__); if (fp) fclose(fp); return ; @@ -1484,7 +1491,7 @@ static int xDumpResultToFile(const char* fname, TAOS_RES* tres) { FILE* fp = fopen(fname, "at"); if (fp == NULL) { - fprintf(stderr, "ERROR: failed to open file: %s\n", fname); + errorPrint("%s() LN%d, failed to open file: %s\n", __func__, __LINE__, fname); return 
-1; } @@ -1529,7 +1536,7 @@ static int getDbFromServer(TAOS * taos, SDbInfo** dbInfos) { int32_t code = taos_errno(res); if (code != 0) { - fprintf(stderr, "failed to run , reason: %s\n", taos_errstr(res)); + errorPrint( "failed to run , reason: %s\n", taos_errstr(res)); return -1; } @@ -1541,7 +1548,7 @@ static int getDbFromServer(TAOS * taos, SDbInfo** dbInfos) { dbInfos[count] = (SDbInfo *)calloc(1, sizeof(SDbInfo)); if (dbInfos[count] == NULL) { - fprintf(stderr, "failed to allocate memory for some dbInfo[%d]\n", count); + errorPrint( "failed to allocate memory for some dbInfo[%d]\n", count); return -1; } @@ -1576,7 +1583,7 @@ static int getDbFromServer(TAOS * taos, SDbInfo** dbInfos) { count++; if (count > MAX_DATABASE_COUNT) { - fprintf(stderr, "The database count overflow than %d\n", MAX_DATABASE_COUNT); + errorPrint( "The database count overflow than %d\n", MAX_DATABASE_COUNT); break; } } @@ -1590,7 +1597,7 @@ static void printfDbInfoForQueryToFile(char* filename, SDbInfo* dbInfos, int ind FILE *fp = fopen(filename, "at"); if (fp == NULL) { - fprintf(stderr, "failed to open file: %s\n", filename); + errorPrint( "failed to open file: %s\n", filename); return; } @@ -1646,7 +1653,7 @@ static void printfQuerySystemInfo(TAOS * taos) { res = taos_query(taos, "show databases;"); SDbInfo** dbInfos = (SDbInfo **)calloc(MAX_DATABASE_COUNT, sizeof(SDbInfo *)); if (dbInfos == NULL) { - fprintf(stderr, "failed to allocate memory\n"); + errorPrint("%s() LN%d, failed to allocate memory\n", __func__, __LINE__); return; } int dbCount = getDbFromServer(taos, dbInfos); @@ -1676,8 +1683,6 @@ static void printfQuerySystemInfo(TAOS * taos) { } -static void ERROR_EXIT(const char *msg) { perror(msg); exit(-1); } - static int postProceSql(char* host, uint16_t port, char* sqlstr) { char *req_fmt = "POST %s HTTP/1.1\r\nHost: %s:%d\r\nAccept: */*\r\nAuthorization: Basic %s\r\nContent-Length: %d\r\nContent-Type: application/x-www-form-urlencoded\r\n\r\n%s"; @@ -1725,9 +1730,9 @@ static int postProceSql(char* host, uint16_t port, char* sqlstr) sockfd = socket(AF_INET, SOCK_STREAM, 0); if (sockfd < 0) { #ifdef WINDOWS - fprintf(stderr, "Could not create socket : %d" , WSAGetLastError()); + errorPrint( "Could not create socket : %d" , WSAGetLastError()); #endif - debugPrint("%s() LN%d sockfd=%d\n", __func__, __LINE__, sockfd); + debugPrint("%s() LN%d, sockfd=%d\n", __func__, __LINE__, sockfd); free(request_buf); ERROR_EXIT("ERROR opening socket"); } @@ -1847,7 +1852,7 @@ static int postProceSql(char* host, uint16_t port, char* sqlstr) static char* getTagValueFromTagSample(SSuperTable* stbInfo, int tagUsePos) { char* dataBuf = (char*)calloc(TSDB_MAX_SQL_LEN+1, 1); if (NULL == dataBuf) { - printf("calloc failed! size:%d\n", TSDB_MAX_SQL_LEN+1); + errorPrint("%s() LN%d, calloc failed! 
size:%d\n", __func__, __LINE__, TSDB_MAX_SQL_LEN+1); return NULL; } @@ -2140,7 +2145,7 @@ static int getSuperTableFromServer(TAOS * taos, char* dbName, int childTblCount = 10000; superTbls->childTblName = (char*)calloc(1, childTblCount * TSDB_TABLE_NAME_LEN); if (superTbls->childTblName == NULL) { - fprintf(stderr, "alloc memory failed!"); + errorPrint("%s() LN%d, alloc memory failed!\n", __func__, __LINE__); return -1; } getAllChildNameOfSuperTable(taos, dbName, @@ -2279,7 +2284,7 @@ static int createSuperTable(TAOS * taos, char* dbName, SSuperTable* superTbls, verbosePrint("%s() LN%d: %s\n", __func__, __LINE__, command); if (0 != queryDbExec(taos, command, NO_INSERT_TYPE)) { - fprintf(stderr, "create supertable %s failed!\n\n", + errorPrint( "create supertable %s failed!\n\n", superTbls->sTblName); return -1; } @@ -2293,7 +2298,7 @@ static int createDatabases() { int ret = 0; taos = taos_connect(g_Dbs.host, g_Dbs.user, g_Dbs.password, NULL, g_Dbs.port); if (taos == NULL) { - fprintf(stderr, "Failed to connect to TDengine, reason:%s\n", taos_errstr(NULL)); + errorPrint( "Failed to connect to TDengine, reason:%s\n", taos_errstr(NULL)); return -1; } char command[BUFFER_SIZE] = "\0"; @@ -2378,7 +2383,7 @@ static int createDatabases() { debugPrint("%s() %d command: %s\n", __func__, __LINE__, command); if (0 != queryDbExec(taos, command, NO_INSERT_TYPE)) { taos_close(taos); - fprintf(stderr, "\ncreate database %s failed!\n\n", g_Dbs.db[i].dbName); + errorPrint( "\ncreate database %s failed!\n\n", g_Dbs.db[i].dbName); return -1; } printf("\ncreate database %s success!\n\n", g_Dbs.db[i].dbName); @@ -2427,7 +2432,7 @@ static void* createTable(void *sarg) char *buffer = calloc(buff_len, 1); if (buffer == NULL) { - fprintf(stderr, "Memory allocated failed!"); + errorPrint("%s() LN%d, Memory allocated failed!\n", __func__, __LINE__); exit(-1); } @@ -2485,7 +2490,7 @@ static void* createTable(void *sarg) len = 0; verbosePrint("%s() LN%d %s\n", __func__, __LINE__, buffer); if (0 != queryDbExec(winfo->taos, buffer, NO_INSERT_TYPE)){ - fprintf(stderr, "queryDbExec() failed. buffer:\n%s\n", buffer); + errorPrint( "queryDbExec() failed. buffer:\n%s\n", buffer); free(buffer); return NULL; } @@ -2501,7 +2506,7 @@ static void* createTable(void *sarg) if (0 != len) { verbosePrint("%s() %d buffer: %s\n", __func__, __LINE__, buffer); if (0 != queryDbExec(winfo->taos, buffer, NO_INSERT_TYPE)) { - fprintf(stderr, "queryDbExec() failed. buffer:\n%s\n", buffer); + errorPrint( "queryDbExec() failed. 
buffer:\n%s\n", buffer); } } @@ -2546,7 +2551,7 @@ static int startMultiThreadCreateChildTable( db_name, g_Dbs.port); if (t_info->taos == NULL) { - fprintf(stderr, "Failed to connect to TDengine, reason:%s\n", taos_errstr(NULL)); + errorPrint( "Failed to connect to TDengine, reason:%s\n", taos_errstr(NULL)); free(pids); free(infos); return -1; @@ -2724,7 +2729,7 @@ static int readSampleFromCsvFileToMem( FILE* fp = fopen(superTblInfo->sampleFile, "r"); if (fp == NULL) { - fprintf(stderr, "Failed to open sample file: %s, reason:%s\n", + errorPrint( "Failed to open sample file: %s, reason:%s\n", superTblInfo->sampleFile, strerror(errno)); return -1; } @@ -2736,7 +2741,7 @@ static int readSampleFromCsvFileToMem( readLen = tgetline(&line, &n, fp); if (-1 == readLen) { if(0 != fseek(fp, 0, SEEK_SET)) { - fprintf(stderr, "Failed to fseek file: %s, reason:%s\n", + errorPrint( "Failed to fseek file: %s, reason:%s\n", superTblInfo->sampleFile, strerror(errno)); fclose(fp); return -1; @@ -2805,8 +2810,8 @@ static bool getColumnAndTagTypeFromInsertJsonFile(cJSON* stbInfo, SSuperTable* s int columnSize = cJSON_GetArraySize(columns); if (columnSize > MAX_COLUMN_COUNT) { - printf("ERROR: failed to read json, column size overflow, max column size is %d\n", - MAX_COLUMN_COUNT); + errorPrint("%s() LN%d, failed to read json, column size overflow, max column size is %d\n", + __func__, __LINE__, MAX_COLUMN_COUNT); goto PARSE_OVER; } @@ -2824,7 +2829,7 @@ static bool getColumnAndTagTypeFromInsertJsonFile(cJSON* stbInfo, SSuperTable* s if (countObj && countObj->type == cJSON_Number) { count = countObj->valueint; } else if (countObj && countObj->type != cJSON_Number) { - printf("ERROR: failed to read json, column count not found\n"); + errorPrint("%s() LN%d, failed to read json, column count not found\n", __func__, __LINE__); goto PARSE_OVER; } else { count = 1; @@ -2834,7 +2839,7 @@ static bool getColumnAndTagTypeFromInsertJsonFile(cJSON* stbInfo, SSuperTable* s memset(&columnCase, 0, sizeof(StrColumn)); cJSON *dataType = cJSON_GetObjectItem(column, "type"); if (!dataType || dataType->type != cJSON_String || dataType->valuestring == NULL) { - printf("ERROR: failed to read json, column type not found\n"); + errorPrint("%s() LN%d: failed to read json, column type not found\n", __func__, __LINE__); goto PARSE_OVER; } //tstrncpy(superTbls->columns[k].dataType, dataType->valuestring, MAX_TB_NAME_SIZE); @@ -2844,7 +2849,7 @@ static bool getColumnAndTagTypeFromInsertJsonFile(cJSON* stbInfo, SSuperTable* s if (dataLen && dataLen->type == cJSON_Number) { columnCase.dataLen = dataLen->valueint; } else if (dataLen && dataLen->type != cJSON_Number) { - printf("ERROR: failed to read json, column len not found\n"); + debugPrint("%s() LN%d: failed to read json, column len not found\n", __func__, __LINE__); goto PARSE_OVER; } else { columnCase.dataLen = 8; @@ -2863,13 +2868,13 @@ static bool getColumnAndTagTypeFromInsertJsonFile(cJSON* stbInfo, SSuperTable* s // tags cJSON *tags = cJSON_GetObjectItem(stbInfo, "tags"); if (!tags || tags->type != cJSON_Array) { - printf("ERROR: failed to read json, tags not found\n"); + debugPrint("%s() LN%d, failed to read json, tags not found\n", __func__, __LINE__); goto PARSE_OVER; } int tagSize = cJSON_GetArraySize(tags); if (tagSize > MAX_TAG_COUNT) { - printf("ERROR: failed to read json, tags size overflow, max tag size is %d\n", MAX_TAG_COUNT); + debugPrint("%s() LN%d, failed to read json, tags size overflow, max tag size is %d\n", __func__, __LINE__, MAX_TAG_COUNT); goto PARSE_OVER; } @@ 
-2991,47 +2996,47 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { goto PARSE_OVER; } - cJSON* gInsertInterval = cJSON_GetObjectItem(root, "insert_interval"); - if (gInsertInterval && gInsertInterval->type == cJSON_Number) { - g_args.insert_interval = gInsertInterval->valueint; - } else if (!gInsertInterval) { - g_args.insert_interval = 0; - } else { - fprintf(stderr, "ERROR: failed to read json, insert_interval input mistake\n"); - goto PARSE_OVER; - } - - cJSON* rowsPerTbl = cJSON_GetObjectItem(root, "rows_per_tbl"); - if (rowsPerTbl && rowsPerTbl->type == cJSON_Number) { - g_args.rows_per_tbl = rowsPerTbl->valueint; - } else if (!rowsPerTbl) { - g_args.rows_per_tbl = 0; // 0 means progressive mode, > 0 mean interlace mode. max value is less or equ num_of_records_per_req - } else { - fprintf(stderr, "ERROR: failed to read json, rows_per_tbl input mistake\n"); - goto PARSE_OVER; - } + cJSON* gInsertInterval = cJSON_GetObjectItem(root, "insert_interval"); + if (gInsertInterval && gInsertInterval->type == cJSON_Number) { + g_args.insert_interval = gInsertInterval->valueint; + } else if (!gInsertInterval) { + g_args.insert_interval = 0; + } else { + errorPrint("%s() LN%d, failed to read json, insert_interval input mistake\n", __func__, __LINE__); + goto PARSE_OVER; + } - cJSON* maxSqlLen = cJSON_GetObjectItem(root, "max_sql_len"); - if (maxSqlLen && maxSqlLen->type == cJSON_Number) { - g_args.max_sql_len = maxSqlLen->valueint; - } else if (!maxSqlLen) { - g_args.max_sql_len = TSDB_PAYLOAD_SIZE; - } else { - fprintf(stderr, "ERROR: failed to read json, max_sql_len input mistake\n"); - goto PARSE_OVER; - } + cJSON* rowsPerTbl = cJSON_GetObjectItem(root, "rows_per_tbl"); + if (rowsPerTbl && rowsPerTbl->type == cJSON_Number) { + g_args.rows_per_tbl = rowsPerTbl->valueint; + } else if (!rowsPerTbl) { + g_args.rows_per_tbl = 0; // 0 means progressive mode, > 0 mean interlace mode. 
max value is less or equ num_of_records_per_req + } else { + errorPrint("%s() LN%d, failed to read json, rows_per_tbl input mistake\n", __func__, __LINE__); + goto PARSE_OVER; + } + + cJSON* maxSqlLen = cJSON_GetObjectItem(root, "max_sql_len"); + if (maxSqlLen && maxSqlLen->type == cJSON_Number) { + g_args.max_sql_len = maxSqlLen->valueint; + } else if (!maxSqlLen) { + g_args.max_sql_len = TSDB_PAYLOAD_SIZE; + } else { + errorPrint("%s() LN%d, failed to read json, max_sql_len input mistake\n", __func__, __LINE__); + goto PARSE_OVER; + } - cJSON* numRecPerReq = cJSON_GetObjectItem(root, "num_of_records_per_req"); - if (numRecPerReq && numRecPerReq->type == cJSON_Number) { - g_args.num_of_RPR = numRecPerReq->valueint; - } else if (!numRecPerReq) { - g_args.num_of_RPR = 100; - } else { - printf("ERROR: failed to read json, num_of_records_per_req not found\n"); - goto PARSE_OVER; - } - + cJSON* numRecPerReq = cJSON_GetObjectItem(root, "num_of_records_per_req"); + if (numRecPerReq && numRecPerReq->type == cJSON_Number) { + g_args.num_of_RPR = numRecPerReq->valueint; + } else if (!numRecPerReq) { + g_args.num_of_RPR = 100; + } else { + errorPrint("%s() LN%d, failed to read json, num_of_records_per_req not found\n", __func__, __LINE__); + goto PARSE_OVER; + } + cJSON *answerPrompt = cJSON_GetObjectItem(root, "confirm_parameter_prompt"); // yes, no, if (answerPrompt && answerPrompt->type == cJSON_String @@ -3058,7 +3063,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { int dbSize = cJSON_GetArraySize(dbs); if (dbSize > MAX_DB_COUNT) { - fprintf(stderr, + errorPrint( "ERROR: failed to read json, databases size overflow, max database is %d\n", MAX_DB_COUNT); goto PARSE_OVER; @@ -3257,7 +3262,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { int stbSize = cJSON_GetArraySize(stables); if (stbSize > MAX_SUPER_TABLE_COUNT) { - fprintf(stderr, + errorPrint( "ERROR: failed to read json, databases size overflow, max database is %d\n", MAX_SUPER_TABLE_COUNT); goto PARSE_OVER; @@ -3384,9 +3389,11 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { cJSON *ts = cJSON_GetObjectItem(stbInfo, "start_timestamp"); if (ts && ts->type == cJSON_String && ts->valuestring != NULL) { - tstrncpy(g_Dbs.db[i].superTbls[j].startTimestamp, ts->valuestring, MAX_DB_NAME_SIZE); + tstrncpy(g_Dbs.db[i].superTbls[j].startTimestamp, + ts->valuestring, MAX_DB_NAME_SIZE); } else if (!ts) { - tstrncpy(g_Dbs.db[i].superTbls[j].startTimestamp, "now", MAX_DB_NAME_SIZE); + tstrncpy(g_Dbs.db[i].superTbls[j].startTimestamp, + "now", MAX_DB_NAME_SIZE); } else { printf("ERROR: failed to read json, start_timestamp not found\n"); goto PARSE_OVER; @@ -3493,7 +3500,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { } else if (!rowsPerTbl) { g_Dbs.db[i].superTbls[j].rowsPerTbl = 0; // 0 means progressive mode, > 0 mean interlace mode. 
max value is less or equ num_of_records_per_req } else { - fprintf(stderr, "ERROR: failed to read json, rowsPerTbl input mistake\n"); + errorPrint("%s() LN%d, failed to read json, rowsPerTbl input mistake\n", __func__, __LINE__); goto PARSE_OVER; } @@ -3523,7 +3530,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { } else if (!insertRows) { g_Dbs.db[i].superTbls[j].insertRows = 0x7FFFFFFFFFFFFFFF; } else { - fprintf(stderr, "failed to read json, insert_rows input mistake"); + errorPrint("%s() LN%d, failed to read json, insert_rows input mistake\n", __func__, __LINE__); goto PARSE_OVER; } @@ -3535,7 +3542,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { __func__, __LINE__, g_args.insert_interval); g_Dbs.db[i].superTbls[j].insertInterval = g_args.insert_interval; } else { - fprintf(stderr, "failed to read json, insert_interval input mistake"); + errorPrint("%s() LN%d, failed to read json, insert_interval input mistake\n", __func__, __LINE__); goto PARSE_OVER; } @@ -3942,7 +3949,7 @@ static bool getInfoFromJsonFile(char* file) { } else if (SUBSCRIBE_TEST == g_args.test_mode) { ret = getMetaFromQueryJsonFile(root); } else { - printf("ERROR: input json file type error! please input correct file type: insert or query or subscribe\n"); + errorPrint("%s() LN%d, input json file type error! please input correct file type: insert or query or subscribe\n", __func__, __LINE__); goto PARSE_OVER; } @@ -4024,14 +4031,14 @@ static int generateRowData(char* dataBuf, int maxLen, int64_t timestamp, SSuper if ((0 == strncasecmp(stbInfo->columns[i].dataType, "binary", 6)) || (0 == strncasecmp(stbInfo->columns[i].dataType, "nchar", 5))) { if (stbInfo->columns[i].dataLen > TSDB_MAX_BINARY_LEN) { - printf("binary or nchar length overflow, max size:%u\n", + errorPrint( "binary or nchar length overflow, max size:%u\n", (uint32_t)TSDB_MAX_BINARY_LEN); return (-1); } char* buf = (char*)calloc(stbInfo->columns[i].dataLen+1, 1); if (NULL == buf) { - printf("calloc failed! size:%d\n", stbInfo->columns[i].dataLen); + errorPrint( "calloc failed! 
size:%d\n", stbInfo->columns[i].dataLen); return (-1); } rand_string(buf, stbInfo->columns[i].dataLen); @@ -4063,7 +4070,7 @@ static int generateRowData(char* dataBuf, int maxLen, int64_t timestamp, SSuper } else if (0 == strncasecmp(stbInfo->columns[i].dataType, "timestamp", 9)) { dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, "%"PRId64", ", rand_bigint()); } else { - printf("No support data type: %s\n", stbInfo->columns[i].dataType); + errorPrint( "No support data type: %s\n", stbInfo->columns[i].dataType); return (-1); } } @@ -4138,7 +4145,8 @@ static int prepareSampleDataForSTable(SSuperTable *superTblInfo) { sampleDataBuf = calloc( superTblInfo->lenOfOneRow * MAX_SAMPLES_ONCE_FROM_FILE, 1); if (sampleDataBuf == NULL) { - fprintf(stderr, "Failed to calloc %d Bytes, reason:%s\n", + errorPrint("%s() LN%d, Failed to calloc %d Bytes, reason:%s\n", + __func__, __LINE__, superTblInfo->lenOfOneRow * MAX_SAMPLES_ONCE_FROM_FILE, strerror(errno)); return -1; @@ -4148,7 +4156,7 @@ static int prepareSampleDataForSTable(SSuperTable *superTblInfo) { int ret = readSampleFromCsvFileToMem(superTblInfo); if (0 != ret) { - fprintf(stderr, "read sample from csv file failed.\n"); + errorPrint("%s() LN%d, read sample from csv file failed.\n", __func__, __LINE__); tmfree(sampleDataBuf); superTblInfo->sampleDataBuf = NULL; return -1; @@ -4157,29 +4165,26 @@ static int prepareSampleDataForSTable(SSuperTable *superTblInfo) { return 0; } -static int execInsert(threadInfo *winfo, char *buffer, int k) +static int execInsert(threadInfo *pThreadInfo, char *buffer, int k) { int affectedRows; - SSuperTable* superTblInfo = winfo->superTblInfo; + SSuperTable* superTblInfo = pThreadInfo->superTblInfo; + verbosePrint("[%d] %s() LN%d %s\n", pThreadInfo->threadID, + __func__, __LINE__, buffer); if (superTblInfo) { if (0 == strncasecmp(superTblInfo->insertMode, "taosc", strlen("taosc"))) { - verbosePrint("%s() LN%d %s\n", __func__, __LINE__, buffer); - affectedRows = queryDbExec(winfo->taos, buffer, INSERT_TYPE); + affectedRows = queryDbExec(pThreadInfo->taos, buffer, INSERT_TYPE); } else { - verbosePrint("%s() LN%d %s\n", __func__, __LINE__, buffer); - int retCode = postProceSql(g_Dbs.host, g_Dbs.port, buffer); - - if (0 != retCode) { + if (0 != postProceSql(g_Dbs.host, g_Dbs.port, buffer)) { affectedRows = -1; - printf("========restful return fail, threadID[%d]\n", winfo->threadID); + printf("========restful return fail, threadID[%d]\n", pThreadInfo->threadID); } else { affectedRows = k; } } } else { - verbosePrint("%s() LN%d %s\n", __func__, __LINE__, buffer); - affectedRows = queryDbExec(winfo->taos, buffer, 1); + affectedRows = queryDbExec(pThreadInfo->taos, buffer, 1); } return affectedRows; @@ -4195,8 +4200,9 @@ static void getTableName(char *pTblName, threadInfo* pThreadInfo, int tableSeq) superTblInfo->childTblName + (tableSeq - superTblInfo->childTblOffset) * TSDB_TABLE_NAME_LEN); } else { - verbosePrint("%s() LN%d: from=%d count=%d seq=%d\n", - __func__, __LINE__, pThreadInfo->start_table_from, + verbosePrint("[%d] %s() LN%d: from=%d count=%d seq=%d\n", + pThreadInfo->threadID, __func__, __LINE__, + pThreadInfo->start_table_from, pThreadInfo->ntables, tableSeq); snprintf(pTblName, TSDB_TABLE_NAME_LEN, "%s", superTblInfo->childTblName + tableSeq * TSDB_TABLE_NAME_LEN); @@ -4323,7 +4329,7 @@ static int generateSQLHead(char *tableName, int32_t tableSeq, threadInfo* pThrea tableSeq % superTblInfo->tagSampleCount); } if (NULL == tagsValBuf) { - fprintf(stderr, "tag buf failed to allocate memory\n"); + 
errorPrint("%s() LN%d, tag buf failed to allocate memory\n", __func__, __LINE__); return -1; } @@ -4396,13 +4402,14 @@ static int generateDataBuffer(char *pTblName, } static void* syncWriteInterlace(threadInfo *pThreadInfo) { - printf("### CBD: interlace write\n"); + debugPrint("[%d] %s() LN%d: ### interlace write\n", + pThreadInfo->threadID, __func__, __LINE__); SSuperTable* superTblInfo = pThreadInfo->superTblInfo; char* buffer = calloc(superTblInfo?superTblInfo->maxSqlLen:g_args.max_sql_len, 1); if (NULL == buffer) { - fprintf(stderr, "Failed to alloc %d Bytes, reason:%s\n", + errorPrint( "Failed to alloc %d Bytes, reason:%s\n", superTblInfo?superTblInfo->maxSqlLen:g_args.max_sql_len, strerror(errno)); return NULL; @@ -4437,8 +4444,8 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { int tableSeq = pThreadInfo->start_table_from; - debugPrint("%s() LN%d: start_table_from=%d ntables=%d insertRows=%"PRId64"\n", - __func__, __LINE__, pThreadInfo->start_table_from, + debugPrint("[%d] %s() LN%d: start_table_from=%d ntables=%d insertRows=%"PRId64"\n", + pThreadInfo->threadID, __func__, __LINE__, pThreadInfo->start_table_from, pThreadInfo->ntables, insertRows); int64_t startTime = pThreadInfo->start_time; @@ -4446,80 +4453,111 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { int batchPerTblTimes; int batchPerTbl; + assert(pThreadInfo->ntables > 0); + + if (rowsPerTbl > g_args.num_of_RPR) + rowsPerTbl = g_args.num_of_RPR; + + batchPerTbl = rowsPerTbl; if ((rowsPerTbl > 0) && (pThreadInfo->ntables > 1)) { - batchPerTblTimes = g_args.num_of_RPR / rowsPerTbl; - batchPerTbl = rowsPerTbl; + batchPerTblTimes = + (g_args.num_of_RPR / (rowsPerTbl * pThreadInfo->ntables)) + 1; } else { batchPerTblTimes = 1; - batchPerTbl = g_args.num_of_RPR; } int generatedRecPerTbl = 0; + bool flagSleep = true; + int sleepTimeTotal = 0; while(pThreadInfo->totalInsertRows < pThreadInfo->ntables * insertRows) { - if (insert_interval) { + if ((flagSleep) && (insert_interval)) { st = taosGetTimestampUs(); + flagSleep = false; } // generate data memset(buffer, 0, superTblInfo?superTblInfo->maxSqlLen:g_args.max_sql_len); char *pstr = buffer; - int recGenerated = 0; + int recOfBatch = 0; for (int i = 0; i < batchPerTblTimes; i ++) { getTableName(tableName, pThreadInfo, tableSeq); int headLen; if (i == 0) { - headLen = generateSQLHead(tableName, tableSeq, pThreadInfo, superTblInfo, pstr); + headLen = generateSQLHead(tableName, tableSeq, pThreadInfo, + superTblInfo, pstr); } else { - headLen = snprintf(pstr, TSDB_TABLE_NAME_LEN, "%s.%s values", + headLen = snprintf(pstr, TSDB_TABLE_NAME_LEN, "%s.%s values", pThreadInfo->db_name, tableName); } // generate data buffer - verbosePrint("%s() LN%d i=%d buffer:\n%s\n", - __func__, __LINE__, i, buffer); + verbosePrint("[%d] %s() LN%d i=%d buffer:\n%s\n", + pThreadInfo->threadID, __func__, __LINE__, i, buffer); pstr += headLen; int dataLen = 0; - printf("%s() LN%d i=%d batchPerTblTimes=%d batchPerTbl = %d\n", - __func__, __LINE__, i, batchPerTblTimes, batchPerTbl); - int numOfRecGenerated = generateDataTail( - tableName, tableSeq, pThreadInfo, superTblInfo, - batchPerTbl, pstr, insertRows, 0, - startTime + pThreadInfo->totalInsertRows * superTblInfo->timeStampStep, - &(pThreadInfo->samplePos), &dataLen); - verbosePrint("%s() LN%d numOfRecGenerated= %d\n", - __func__, __LINE__, numOfRecGenerated); + debugPrint("[%d] %s() LN%d i=%d batchPerTblTimes=%d batchPerTbl = %d\n", + pThreadInfo->threadID, __func__, __LINE__, + i, batchPerTblTimes, batchPerTbl); + 
generateDataTail( + tableName, tableSeq, pThreadInfo, superTblInfo, + batchPerTbl, pstr, insertRows, 0, + startTime + sleepTimeTotal + + pThreadInfo->totalInsertRows * superTblInfo->timeStampStep, + &(pThreadInfo->samplePos), &dataLen); pstr += dataLen; - recGenerated += numOfRecGenerated; + recOfBatch += batchPerTbl; + pThreadInfo->totalInsertRows += batchPerTbl; + debugPrint("[%d] %s() LN%d batchPerTbl=%d recOfBatch=%d\n", + pThreadInfo->threadID, __func__, __LINE__, + batchPerTbl, recOfBatch); tableSeq ++; if (insertMode == INTERLACE_INSERT_MODE) { if (tableSeq == pThreadInfo->start_table_from + pThreadInfo->ntables) { // turn to first table tableSeq = pThreadInfo->start_table_from; - generatedRecPerTbl += numOfRecGenerated; + generatedRecPerTbl += batchPerTbl; + flagSleep = true; + if (generatedRecPerTbl >= insertRows) + break; + + if (pThreadInfo->ntables * batchPerTbl < g_args.num_of_RPR) + break; } } int remainRows = insertRows - generatedRecPerTbl; - if (batchPerTbl > remainRows) + if ((remainRows > 0) && (batchPerTbl > remainRows)) batchPerTbl = remainRows; - if ((g_args.num_of_RPR - recGenerated) < batchPerTbl) + debugPrint("[%d] %s() LN%d generatedRecPerTbl=%d insertRows=%"PRId64"\n", + pThreadInfo->threadID, __func__, __LINE__, + generatedRecPerTbl, insertRows); + + if ((g_args.num_of_RPR - recOfBatch) < batchPerTbl) break; } - pThreadInfo->totalInsertRows += recGenerated; - printf("%s() LN%d recGenerated=%d totalInsertRows=%"PRId64" buffer:\n%s\n", - __func__, __LINE__, recGenerated, - pThreadInfo->totalInsertRows, buffer); - int affectedRows = execInsert(pThreadInfo, buffer, recGenerated); - if (affectedRows < 0) + debugPrint("[%d] %s() LN%d recOfBatch=%d totalInsertRows=%"PRId64"\n", + pThreadInfo->threadID, __func__, __LINE__, recOfBatch, + pThreadInfo->totalInsertRows); + verbosePrint("[%d] %s() LN%d, buffer=%s\n", + pThreadInfo->threadID, __func__, __LINE__, buffer); + + int affectedRows = execInsert(pThreadInfo, buffer, recOfBatch); + verbosePrint("[%d] %s() LN%d affectedRows=%d\n", pThreadInfo->threadID, + __func__, __LINE__, affectedRows); + if (affectedRows < 0) { + errorPrint("[%d] %s() LN%d execInsert affected rows: %d\n%s\n", + pThreadInfo->threadID, __func__, __LINE__, + affectedRows, buffer); goto free_and_statistics_interlace; + } pThreadInfo->totalAffectedRows += affectedRows; @@ -4539,15 +4577,16 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { lastPrintTime = currentPrintTime; } - if (insert_interval) { - et = taosGetTimestampUs(); + if ((insert_interval) && flagSleep) { + et = taosGetTimestampUs(); - if (insert_interval > ((et - st)/1000) ) { - int sleep_time = insert_interval - (et -st)/1000; - verbosePrint("%s() LN%d sleep: %d ms for insert interval\n", - __func__, __LINE__, sleep_time); - taosMsleep(sleep_time); // ms - } + if (insert_interval > ((et - st)/1000) ) { + int sleepTime = insert_interval - (et -st)/1000; +// verbosePrint("%s() LN%d sleep: %d ms for insert interval\n", +// __func__, __LINE__, sleepTime); + taosMsleep(sleepTime); // ms + sleepTimeTotal += insert_interval; + } } } @@ -4570,12 +4609,13 @@ free_and_statistics_interlace: 2 taosinsertdata , 1 thread: 10 tables * 20000 rows/s */ static void* syncWriteProgressive(threadInfo *pThreadInfo) { + debugPrint("%s() LN%d: ### progressive write\n", __func__, __LINE__); SSuperTable* superTblInfo = pThreadInfo->superTblInfo; char* buffer = calloc(superTblInfo?superTblInfo->maxSqlLen:g_args.max_sql_len, 1); if (NULL == buffer) { - fprintf(stderr, "Failed to alloc %d Bytes, 
reason:%s\n",
+      errorPrint( "Failed to alloc %d Bytes, reason:%s\n",
             superTblInfo?superTblInfo->maxSqlLen:g_args.max_sql_len,
             strerror(errno));
       return NULL;
@@ -4821,7 +4861,7 @@ static void startMultiThreadInsertData(int threads, char* db_name,
     } else if (0 == strncasecmp(precision, "us", 2)) {
       timePrec = TSDB_TIME_PRECISION_MICRO;
     } else {
-      fprintf(stderr, "No support precision: %s\n", precision);
+      errorPrint( "No support precision: %s\n", precision);
       exit(-1);
     }
   }
@@ -4831,15 +4871,15 @@ static void startMultiThreadInsertData(int threads, char* db_name,
     if (0 == strncasecmp(superTblInfo->startTimestamp, "now", 3)) {
       start_time = taosGetTimestamp(timePrec);
     } else {
-      if (TSDB_CODE_SUCCESS != taosParseTime(
-          superTblInfo->startTimestamp,
-          &start_time,
-          strlen(superTblInfo->startTimestamp),
-          timePrec, 0)) {
-        fprintf(stderr, "ERROR to parse time!\n");
-        exit(-1);
-      }
-    }
+      if (TSDB_CODE_SUCCESS != taosParseTime(
+          superTblInfo->startTimestamp,
+          &start_time,
+          strlen(superTblInfo->startTimestamp),
+          timePrec, 0)) {
+        errorPrint("%s() LN%d, failed to parse time!\n", __func__, __LINE__);
+        exit(-1);
+      }
+    }
   } else {
     start_time = 1500000000000;
   }
@@ -4857,7 +4897,7 @@ static void startMultiThreadInsertData(int threads, char* db_name,
   if ((superTblInfo) && (0 == strncasecmp(superTblInfo->dataSource,
             "sample", strlen("sample")))) {
     if (0 != prepareSampleDataForSTable(superTblInfo)) {
-      fprintf(stderr, "prepare sample data for stable failed!\n");
+      errorPrint("%s() LN%d, prepare sample data for stable failed!\n", __func__, __LINE__);
       exit(-1);
     }
   }
@@ -4869,15 +4909,15 @@ static void startMultiThreadInsertData(int threads, char* db_name,
         g_Dbs.host, g_Dbs.user,
         g_Dbs.password, db_name, g_Dbs.port);
     if (NULL == taos) {
-      fprintf(stderr, "connect to server fail , reason: %s\n",
-          taos_errstr(NULL));
+      errorPrint("%s() LN%d, connect to server fail , reason: %s\n",
+          __func__, __LINE__, taos_errstr(NULL));
       exit(-1);
     }
     superTblInfo->childTblName = (char*)calloc(1,
         superTblInfo->childTblLimit * TSDB_TABLE_NAME_LEN);
     if (superTblInfo->childTblName == NULL) {
-      fprintf(stderr, "alloc memory failed!");
+      errorPrint("%s() LN%d, alloc memory failed!\n", __func__, __LINE__);
       taos_close(taos);
       exit(-1);
     }
@@ -4896,7 +4936,7 @@ static void startMultiThreadInsertData(int threads, char* db_name,
   if ((superTblInfo) && (0 == strncasecmp(superTblInfo->dataSource,
             "sample", strlen("sample")))) {
     if (0 != prepareSampleDataForSTable(superTblInfo)) {
-      fprintf(stderr, "prepare sample data for stable failed!\n");
+      errorPrint("%s() LN%d, prepare sample data for stable failed!\n", __func__, __LINE__);
       exit(-1);
     }
   }
@@ -4906,8 +4946,8 @@ static void startMultiThreadInsertData(int threads, char* db_name,
       g_Dbs.host, g_Dbs.user,
       g_Dbs.password, db_name, g_Dbs.port);
   if (NULL == taos) {
-    fprintf(stderr, "connect to server fail , reason: %s\n",
-        taos_errstr(NULL));
+    errorPrint("%s() LN%d, connect to server fail , reason: %s\n",
+        __func__, __LINE__, taos_errstr(NULL));
    exit(-1);
   }
@@ -4926,7 +4966,7 @@ static void startMultiThreadInsertData(int threads, char* db_name,
     superTblInfo->childTblName = (char*)calloc(1,
         limit * TSDB_TABLE_NAME_LEN);
     if (superTblInfo->childTblName == NULL) {
-      fprintf(stderr, "alloc memory failed!");
+      errorPrint("%s() LN%d, alloc memory failed!\n", __func__, __LINE__);
       taos_close(taos);
       exit(-1);
     }
@@ -4957,7 +4997,7 @@ static void startMultiThreadInsertData(int threads, char* db_name,
         g_Dbs.host, g_Dbs.user,
         g_Dbs.password, db_name, g_Dbs.port);
     if (NULL == t_info->taos) {
-      fprintf(stderr, "connect to server fail from insert sub thread, reason: %s\n",
+      errorPrint( "connect to server fail from insert sub thread, reason: %s\n",
          taos_errstr(NULL));
       exit(-1);
     }
@@ -5001,6 +5041,10 @@ static void startMultiThreadInsertData(int threads, char* db_name,
     tsem_destroy(&(t_info->lock_sem));
     taos_close(t_info->taos);
+    debugPrint("%s() LN%d, [%d] totalInsert=%"PRId64" totalAffected=%"PRId64"\n",
+        __func__, __LINE__,
+        t_info->threadID, t_info->totalInsertRows,
+        t_info->totalAffectedRows);
     if (superTblInfo) {
       superTblInfo->totalAffectedRows += t_info->totalAffectedRows;
       superTblInfo->totalInsertRows += t_info->totalInsertRows;
@@ -5068,7 +5112,7 @@ void *readTable(void *sarg) {
   char *tb_prefix = rinfo->tb_prefix;
   FILE *fp = fopen(rinfo->fp, "a");
   if (NULL == fp) {
-    fprintf(stderr, "fopen %s fail, reason:%s.\n", rinfo->fp, strerror(errno));
+    errorPrint( "fopen %s fail, reason:%s.\n", rinfo->fp, strerror(errno));
     return NULL;
   }
@@ -5102,7 +5146,7 @@ void *readTable(void *sarg) {
       int32_t code = taos_errno(pSql);
       if (code != 0) {
-        fprintf(stderr, "Failed to query:%s\n", taos_errstr(pSql));
+        errorPrint( "Failed to query:%s\n", taos_errstr(pSql));
         taos_free_result(pSql);
         taos_close(taos);
         fclose(fp);
@@ -5178,7 +5222,7 @@ void *readMetric(void *sarg) {
       int32_t code = taos_errno(pSql);
       if (code != 0) {
-        fprintf(stderr, "Failed to query:%s\n", taos_errstr(pSql));
+        errorPrint( "Failed to query:%s\n", taos_errstr(pSql));
         taos_free_result(pSql);
         taos_close(taos);
         fclose(fp);
@@ -5216,7 +5260,7 @@ static int insertTestProcess() {
   debugPrint("%d result file: %s\n", __LINE__, g_Dbs.resultFile);
   g_fpOfInsertResult = fopen(g_Dbs.resultFile, "a");
   if (NULL == g_fpOfInsertResult) {
-    fprintf(stderr, "Failed to open %s for save result\n", g_Dbs.resultFile);
+    errorPrint( "Failed to open %s for save result\n", g_Dbs.resultFile);
     return -1;
   }
@@ -5407,7 +5451,7 @@ static int queryTestProcess() {
       NULL,
       g_queryInfo.port);
   if (taos == NULL) {
-    fprintf(stderr, "Failed to connect to TDengine, reason:%s\n", taos_errstr(NULL));
+    errorPrint( "Failed to connect to TDengine, reason:%s\n", taos_errstr(NULL));
     exit(-1);
   }
@@ -5707,7 +5751,7 @@ static int subscribeTestProcess() {
       g_queryInfo.dbName,
       g_queryInfo.port);
   if (taos == NULL) {
-    fprintf(stderr, "Failed to connect to TDengine, reason:%s\n", taos_errstr(NULL));
+    errorPrint( "Failed to connect to TDengine, reason:%s\n", taos_errstr(NULL));
     exit(-1);
   }
@@ -6065,7 +6109,7 @@ static void queryResult() {
     g_Dbs.db[0].dbName,
     g_Dbs.port);
   if (rInfo->taos == NULL) {
-    fprintf(stderr, "Failed to connect to TDengine, reason:%s\n", taos_errstr(NULL));
+    errorPrint( "Failed to connect to TDengine, reason:%s\n", taos_errstr(NULL));
     free(rInfo);
     exit(-1);
   }

From 9e257230780a2f8406ef0c978e0ef5da6bc21e36 Mon Sep 17 00:00:00 2001
From: Ping Xiao
Date: Thu, 18 Mar 2021 17:48:33 +0800
Subject: [PATCH 11/17] remove last_cache.py from full test
---
 tests/pytest/fulltest.sh | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh
index 46a1abf12c..d6849dea7d 100755
--- a/tests/pytest/fulltest.sh
+++ b/tests/pytest/fulltest.sh
@@ -275,7 +275,7 @@ python3 ./test.py -f functions/function_twa.py -r 1
 python3 ./test.py -f functions/function_twa_test2.py
 python3 ./test.py -f functions/function_stddev_td2555.py
 python3 ./test.py -f insert/metadataUpdate.py
-python3 ./test.py -f query/last_cache.py
+#python3 ./test.py -f query/last_cache.py
 python3 ./test.py -f query/last_row_cache.py
 python3 ./test.py -f account/account_create.py
 python3 ./test.py -f alter/alter_table.py

From 9122ca3b1d4b91ec939572b8cb9d75423ea378ca Mon Sep 17 00:00:00 2001
From: Shengliang Guan
Date: Thu, 18 Mar 2021 09:56:04 +0000
Subject: [PATCH 12/17] TD-3367
---
 src/vnode/src/vnodeMain.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/vnode/src/vnodeMain.c b/src/vnode/src/vnodeMain.c
index d25cf10774..ded39e67cc 100644
--- a/src/vnode/src/vnodeMain.c
+++ b/src/vnode/src/vnodeMain.c
@@ -147,7 +147,7 @@ static int32_t vnodeAlterImp(SVnodeObj *pVnode, SCreateVnodeMsg *pVnodeCfg) {
   vDebug("vgId:%d, tsdbchanged:%d syncchanged:%d while alter vnode", pVnode->vgId,
          tsdbCfgChanged, syncCfgChanged);
-  if (tsdbCfgChanged || syncCfgChanged) {
+  if (/*tsdbCfgChanged || */syncCfgChanged) {
     // vnode in non-ready state and still needs to return success instead of TSDB_CODE_VND_INVALID_STATUS
     // dbCfgVersion can be corrected by status msg
     if (!vnodeSetUpdatingStatus(pVnode)) {

From a9ea81aaee54415406307b398c643c29533d333c Mon Sep 17 00:00:00 2001
From: Ping Xiao
Date: Thu, 18 Mar 2021 18:13:44 +0800
Subject: [PATCH 13/17] update docker cluster script
---
 tests/pytest/cluster/clusterEnvSetup/basic.py | 39 +++++++++++-----
 .../clusterEnvSetup/buildClusterEnv.sh        | 45 ++++++++++---------
 .../clusterEnvSetup/cleanClusterEnv.sh        | 39 ++++++++++++++++
 3 files changed, 92 insertions(+), 31 deletions(-)
 create mode 100755 tests/pytest/cluster/clusterEnvSetup/cleanClusterEnv.sh

diff --git a/tests/pytest/cluster/clusterEnvSetup/basic.py b/tests/pytest/cluster/clusterEnvSetup/basic.py
index d9b8e9ce4a..eb39051898 100644
--- a/tests/pytest/cluster/clusterEnvSetup/basic.py
+++ b/tests/pytest/cluster/clusterEnvSetup/basic.py
@@ -18,13 +18,15 @@ import argparse
 class BuildDockerCluser:
-    def __init__(self, hostName, user, password, configDir, numOfNodes, clusterVersion):
+    def __init__(self, hostName, user, password, configDir, numOfNodes, clusterVersion, dockerDir, removeFlag):
         self.hostName = hostName
         self.user = user
         self.password = password
         self.configDir = configDir
         self.numOfNodes = numOfNodes
-        self.clusterVersion = clusterVersion
+        self.clusterVersion = clusterVersion
+        self.dockerDir = dockerDir
+        self.removeFlag = removeFlag
     def getConnection(self):
         self.conn = taos.connect(
@@ -42,14 +44,17 @@ class BuildDockerCluser:
         print("start arbitrator")
         os.system("docker exec -d $(docker ps|grep tdnode1|awk '{print $1}') tarbitrator")
-    def run(self):
+    def run(self):
         if self.numOfNodes < 2 or self.numOfNodes > 5:
             print("the number of nodes must be between 2 and 5")
-            exit(0)
-        os.system("./buildClusterEnv.sh -n %d -v %s" % (self.numOfNodes, self.clusterVersion))
+            exit(0)
+        print("remove Flag value %s" % self.removeFlag)
+        if self.removeFlag == False:
+            os.system("./cleanClusterEnv.sh -d %s" % self.dockerDir)
+        os.system("./buildClusterEnv.sh -n %d -v %s -d %s" % (self.numOfNodes, self.clusterVersion, self.dockerDir))
         self.getConnection()
         self.createDondes()
-        self.startArbitrator()
+        self.startArbitrator()
 parser = argparse.ArgumentParser()
 parser.add_argument(
     '-H',
     '--host',
     action='store',
     default='127.0.0.1',
     type=str,
     help='host name to be connected (default: 127.0.0.1)')
 parser.add_argument(
     '-u',
     '--user',
     action='store',
     default='root',
     type=str,
     help='user (default: root)')
 parser.add_argument(
     '-p',
     '--password',
     action='store',
     default='taosdata',
     type=str,
     help='password (default: taosdata)')
 parser.add_argument(
     '-c',
     '--config-dir',
     action='store',
     default='/etc/taos',
     type=str,
     help='configuration directory (default: /etc/taos)')
 parser.add_argument(
     '-n',
     '--num-of-nodes',
     action='store',
     default=2,
     type=int,
     help='number of nodes in the cluster (default: 2, min: 2, max: 5)')
 parser.add_argument(
     '-v',
     '--version',
     action='store',
-    default='2.0.14.1',
+    default='2.0.17.1',
     type=str,
-    help='the version of the cluster to be build, Default is 2.0.14.1')
+    help='the version of the cluster to be build, Default is 2.0.17.1')
+parser.add_argument(
+    '-d',
+    '--docker-dir',
+    action='store',
+    default='/data',
+    type=str,
+    help='the data dir for docker, default is /data')
+parser.add_argument(
+    '--flag',
+    action='store_true',
+    help='remove docker containers flag, default: True')
 args = parser.parse_args()
-cluster = BuildDockerCluser(args.host, args.user, args.password, args.config_dir, args.num_of_nodes, args.version)
-cluster.run()
\ No newline at end of file
+cluster = BuildDockerCluser(args.host, args.user, args.password, args.config_dir, args.num_of_nodes, args.version, args.docker_dir, args.flag)
+cluster.run()
+
+# usage 1: python3 basic.py -n 2 --flag (flag is True)
+# usage 2: python3 basic.py -n 2 (flag should be False when it is not specified)
\ No newline at end of file

diff --git a/tests/pytest/cluster/clusterEnvSetup/buildClusterEnv.sh b/tests/pytest/cluster/clusterEnvSetup/buildClusterEnv.sh
index 968cdd1c1c..992ed8b8fb 100755
--- a/tests/pytest/cluster/clusterEnvSetup/buildClusterEnv.sh
+++ b/tests/pytest/cluster/clusterEnvSetup/buildClusterEnv.sh
@@ -1,18 +1,19 @@
 #!/bin/bash
 echo "Executing buildClusterEnv.sh"
-DOCKER_DIR=/data
 CURR_DIR=`pwd`
-if [ $# != 4 ]; then
+if [ $# != 6 ]; then
   echo "argument list need input : "
   echo "  -n numOfNodes"
-  echo "  -v version"
+  echo "  -v version"
+  echo "  -d docker dir"
   exit 1
 fi
 NUM_OF_NODES=
 VERSION=
-while getopts "n:v:" arg
+DOCKER_DIR=
+while getopts "n:v:d:" arg
 do
   case $arg in
     n)
      NUM_OF_NODES=$OPTARG
      ;;
     v)
       VERSION=$OPTARG
+      ;;
+    d)
+      DOCKER_DIR=$OPTARG
       ;;
     ?)
       echo "unkonwn argument"
       ;;
  esac
done
 function addTaoscfg {
   for i in {1..5}
   do
     touch /data/node$i/cfg/taos.cfg
-    echo 'firstEp tdnode1:6030' > /data/node$i/cfg/taos.cfg
-    echo 'fqdn tdnode'$i >> /data/node$i/cfg/taos.cfg
-    echo 'arbitrator tdnode1:6042' >> /data/node$i/cfg/taos.cfg
+    echo 'firstEp tdnode1:6030' > $DOCKER_DIR/node$i/cfg/taos.cfg
+    echo 'fqdn tdnode'$i >> $DOCKER_DIR/node$i/cfg/taos.cfg
+    echo 'arbitrator tdnode1:6042' >> $DOCKER_DIR/node$i/cfg/taos.cfg
   done
 }
 function createDIR {
   for i in {1..5}
   do
-    mkdir -p /data/node$i/data
-    mkdir -p /data/node$i/log
-    mkdir -p /data/node$i/cfg
-    mkdir -p /data/node$i/core
+    mkdir -p $DOCKER_DIR/node$i/data
+    mkdir -p $DOCKER_DIR/node$i/log
+    mkdir -p $DOCKER_DIR/node$i/cfg
+    mkdir -p $DOCKER_DIR/node$i/core
   done
 }
-function cleanEnv {
+function cleanEnv {
+  echo "Clean up docker environment"
   for i in {1..5}
-  do
-    echo /data/node$i/data/*
-    rm -rf /data/node$i/data/*
-    echo /data/node$i/log/*
-    rm -rf /data/node$i/log/*
+  do
+    rm -rf $DOCKER_DIR/node$i/data/*
+    rm -rf $DOCKER_DIR/node$i/log/*
   done
 }
@@ -98,19 +101,19 @@ function clusterUp {
   if [ $NUM_OF_NODES -eq 2 ]; then
     echo "create 2 dnodes"
-    PACKAGE=TDengine-server-$VERSION-Linux-x64.tar.gz DIR=TDengine-server-$VERSION DIR2=TDengine-arbitrator-$VERSION VERSION=$VERSION docker-compose up -d
+    PACKAGE=TDengine-server-$VERSION-Linux-x64.tar.gz TARBITRATORPKG=TDengine-arbitrator-$VERSION-Linux-x64.tar.gz DIR=TDengine-server-$VERSION DIR2=TDengine-arbitrator-$VERSION VERSION=$VERSION docker-compose up -d
   fi
   if [ $NUM_OF_NODES -eq 3 ]; then
-    PACKAGE=TDengine-server-$VERSION-Linux-x64.tar.gz DIR=TDengine-server-$VERSION DIR2=TDengine-arbitrator-$VERSION VERSION=$VERSION docker-compose -f docker-compose.yml -f node3.yml up -d
+    PACKAGE=TDengine-server-$VERSION-Linux-x64.tar.gz TARBITRATORPKG=TDengine-arbitrator-$VERSION-Linux-x64.tar.gz DIR=TDengine-server-$VERSION DIR2=TDengine-arbitrator-$VERSION VERSION=$VERSION docker-compose -f docker-compose.yml -f node3.yml up -d
   fi
   if [ $NUM_OF_NODES -eq 4 ]; then
-    PACKAGE=TDengine-server-$VERSION-Linux-x64.tar.gz DIR=TDengine-server-$VERSION DIR2=TDengine-arbitrator-$VERSION VERSION=$VERSION docker-compose -f docker-compose.yml -f node3.yml -f node4.yml up -d
+    PACKAGE=TDengine-server-$VERSION-Linux-x64.tar.gz TARBITRATORPKG=TDengine-arbitrator-$VERSION-Linux-x64.tar.gz DIR=TDengine-server-$VERSION DIR2=TDengine-arbitrator-$VERSION VERSION=$VERSION docker-compose -f docker-compose.yml -f node3.yml -f node4.yml up -d
   fi
   if [ $NUM_OF_NODES -eq 5 ]; then
-    PACKAGE=TDengine-server-$VERSION-Linux-x64.tar.gz DIR=TDengine-server-$VERSION DIR2=TDengine-arbitrator-$VERSION VERSION=$VERSION docker-compose -f docker-compose.yml -f node3.yml -f node4.yml -f node5.yml up -d
+    PACKAGE=TDengine-server-$VERSION-Linux-x64.tar.gz TARBITRATORPKG=TDengine-arbitrator-$VERSION-Linux-x64.tar.gz DIR=TDengine-server-$VERSION DIR2=TDengine-arbitrator-$VERSION VERSION=$VERSION docker-compose -f docker-compose.yml -f node3.yml -f node4.yml -f node5.yml up -d
   fi
   echo "docker compose finish"

diff --git a/tests/pytest/cluster/clusterEnvSetup/cleanClusterEnv.sh b/tests/pytest/cluster/clusterEnvSetup/cleanClusterEnv.sh
new file mode 100755
index 0000000000..006db3f4eb
--- /dev/null
+++ b/tests/pytest/cluster/clusterEnvSetup/cleanClusterEnv.sh
@@ -0,0 +1,39 @@
+#!/bin/bash
+echo "Executing cleanClusterEnv.sh"
+CURR_DIR=`pwd`
+
+if [ $# != 2 ]; then
+  echo "argument list need input : "
+  echo "  -d docker dir"
+  exit 1
+fi
+
+DOCKER_DIR=
+while getopts "d:" arg
+do
+  case $arg in
+    d)
+      DOCKER_DIR=$OPTARG
+      ;;
+    ?)
+      echo "unknown argument"
+      ;;
+  esac
+done
+
+function removeDockerContainers {
+  cd $DOCKER_DIR
+  docker-compose down --remove-orphans
+}
+
+function cleanEnv {
+  echo "Clean up docker environment"
+  for i in {1..5}
+  do
+    rm -rf $DOCKER_DIR/node$i/data/*
+    rm -rf $DOCKER_DIR/node$i/log/*
+  done
+}
+
+removeDockerContainers
+cleanEnv
\ No newline at end of file

From 388887efaa6245f8435cfbc03f3cb24bc976b9d7 Mon Sep 17 00:00:00 2001
From: Elias Soong
Date: Thu, 18 Mar 2021 18:20:59 +0800
Subject: [PATCH 14/17] [TD-2354] : need reboot to take effect when alter CACHELAST option.
---
 documentation20/cn/12.taos-sql/docs.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/documentation20/cn/12.taos-sql/docs.md b/documentation20/cn/12.taos-sql/docs.md
index 02352357e1..2248b11987 100644
--- a/documentation20/cn/12.taos-sql/docs.md
+++ b/documentation20/cn/12.taos-sql/docs.md
@@ -125,7 +125,7 @@ TDengine's default timestamp precision is millisecond; it can be changed via the config parameter enableMic
 ```mysql
 ALTER DATABASE db_name CACHELAST 0;
 ```
-  The CACHELAST parameter controls whether the last_row of data sub tables is cached in memory. The default value is 0; the valid range is [0, 1], where 0 means disabled and 1 means enabled. (Supported since version 2.0.11.)
+  The CACHELAST parameter controls whether the last_row of data sub tables is cached in memory. The default value is 0; the valid range is [0, 1], where 0 means disabled and 1 means enabled. (Supported since version 2.0.11. After changing it, the server must be restarted for the change to take effect.)

 **Tips**: After any of the above parameters is changed, show databases can be used to confirm whether the change succeeded.

From e9ff56e37fd509b05181a441c71ac4adfebe7928 Mon Sep 17 00:00:00 2001
From: tomchon
Date: Thu, 18 Mar 2021 18:54:36 +0800
Subject: [PATCH 15/17] [TD-2943]
---
 packaging/tools/remove.sh | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/packaging/tools/remove.sh b/packaging/tools/remove.sh
index 2f2660d446..e63889aff1 100755
--- a/packaging/tools/remove.sh
+++ b/packaging/tools/remove.sh
@@ -213,10 +213,10 @@ fi
 if echo $osinfo | grep -qwi "ubuntu" ; then
   #  echo "this is ubuntu system"
-  ${csudo} rm -f /var/lib/dpkg/info/tdengine* || :
+  ${csudo} dpkg --force-all -P tdengine || :
 elif echo $osinfo | grep -qwi "debian" ; then
   #  echo "this is debian system"
-  ${csudo} rm -f /var/lib/dpkg/info/tdengine* || :
+  ${csudo} dpkg --force-all -P tdengine || :
 elif echo $osinfo | grep -qwi "centos" ; then
   #  echo "this is centos system"
   ${csudo} rpm -e --noscripts tdengine || :

From 624a9191ceeb49afe68eb7bf90d522d4d6d5027b Mon Sep 17 00:00:00 2001
From: Shuduo Sang
Date: Thu, 18 Mar 2021 19:17:19 +0800
Subject: [PATCH 16/17] [TD-3357] : fix child table count if exists. (#5488)

Co-authored-by: Shuduo Sang
---
 src/kit/taosdemo/taosdemo.c | 81 ++++++++++++++++++++-----------------
 1 file changed, 45 insertions(+), 36 deletions(-)

diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c
index 355f738885..46f9a52afe 100644
--- a/src/kit/taosdemo/taosdemo.c
+++ b/src/kit/taosdemo/taosdemo.c
@@ -101,8 +101,8 @@ typedef enum CREATE_SUB_TALBE_MOD_EN {
 } CREATE_SUB_TALBE_MOD_EN;
 typedef enum TALBE_EXISTS_EN {
-  TBL_ALREADY_EXISTS,
   TBL_NO_EXISTS,
+  TBL_ALREADY_EXISTS,
   TBL_EXISTS_BUTT
 } TALBE_EXISTS_EN;
@@ -2401,8 +2401,11 @@ static int createDatabases() {
             &g_Dbs.db[i].superTbls[j], g_Dbs.use_metric);
       } else {
         g_Dbs.db[i].superTbls[j].superTblExists = TBL_ALREADY_EXISTS;
-        ret = getSuperTableFromServer(taos, g_Dbs.db[i].dbName,
+
+        if (g_Dbs.db[i].superTbls[j].childTblExists != TBL_ALREADY_EXISTS) {
+          ret = getSuperTableFromServer(taos, g_Dbs.db[i].dbName,
                 &g_Dbs.db[i].superTbls[j]);
+        }
       }
       if (0 != ret) {
@@ -2794,9 +2797,10 @@ void readSampleFromFileToMem(SSuperTable  * supterTblInfo) {
   }
 }
 */
-static bool getColumnAndTagTypeFromInsertJsonFile(cJSON* stbInfo, SSuperTable* superTbls) {
+static bool getColumnAndTagTypeFromInsertJsonFile(
+        cJSON* stbInfo, SSuperTable* superTbls) {
   bool  ret = false;
- 
+
   // columns
   cJSON *columns = cJSON_GetObjectItem(stbInfo, "columns");
   if (columns && columns->type != cJSON_Array) {
@@ -2807,7 +2811,7 @@ static bool getColumnAndTagTypeFromInsertJsonFile(cJSON* stbInfo, SSuperTable* s
     superTbls->tagCount = 0;
     return true;
   }
- 
+
   int columnSize = cJSON_GetArraySize(columns);
   if (columnSize > MAX_COLUMN_COUNT) {
     errorPrint("%s() LN%d, failed to read json, column size overflow, max column size is %d\n",
@@ -2819,7 +2823,7 @@ static bool getColumnAndTagTypeFromInsertJsonFile(cJSON* stbInfo, SSuperTable* s
   int count = 1;
   int index = 0;
   StrColumn    columnCase;
-  //superTbls->columnCount = columnSize; 
+  //superTbls->columnCount = columnSize;
   for (int k = 0; k < columnSize; ++k) {
     cJSON* column = cJSON_GetArrayItem(columns, k);
     if (column == NULL) continue;
     count = 1;
     cJSON* countObj = cJSON_GetObjectItem(column, "count");
     if (countObj && countObj->type == cJSON_Number) {
-      count = countObj->valueint; 
+      count = countObj->valueint;
     } else if (countObj && countObj->type != cJSON_Number) {
       errorPrint("%s() LN%d, failed to read json, column count not found\n", __func__, __LINE__);
       goto PARSE_OVER;
@@ -2844,25 +2848,26 @@ static bool getColumnAndTagTypeFromInsertJsonFile(cJSON* stbInfo, SSuperTable* s
     }
     //tstrncpy(superTbls->columns[k].dataType, dataType->valuestring, MAX_TB_NAME_SIZE);
     tstrncpy(columnCase.dataType, dataType->valuestring, MAX_TB_NAME_SIZE);
- 
+
     cJSON* dataLen = cJSON_GetObjectItem(column, "len");
     if (dataLen && dataLen->type == cJSON_Number) {
-      columnCase.dataLen = dataLen->valueint; 
+      columnCase.dataLen = dataLen->valueint;
     } else if (dataLen && dataLen->type != cJSON_Number) {
       debugPrint("%s() LN%d: failed to read json, column len not found\n", __func__, __LINE__);
       goto PARSE_OVER;
     } else {
       columnCase.dataLen = 8;
     }
- 
+
     for (int n = 0; n < count; ++n) {
-      tstrncpy(superTbls->columns[index].dataType, columnCase.dataType, MAX_TB_NAME_SIZE);
-      superTbls->columns[index].dataLen = columnCase.dataLen; 
+      tstrncpy(superTbls->columns[index].dataType,
+              columnCase.dataType, MAX_TB_NAME_SIZE);
+      superTbls->columns[index].dataLen = columnCase.dataLen;
       index++;
     }
-  } 
-  superTbls->columnCount = index; 
- 
+  }
+  superTbls->columnCount = index;
+
   count = 1;
   index = 0;
   // tags
@@ -2877,16 +2882,16 @@ static bool getColumnAndTagTypeFromInsertJsonFile(cJSON* stbInfo, SSuperTable* s
     debugPrint("%s() LN%d, failed to read json, tags size overflow, max tag size is %d\n", __func__, __LINE__, MAX_TAG_COUNT);
     goto PARSE_OVER;
   }
- 
-  //superTbls->tagCount = tagSize; 
+
+  //superTbls->tagCount = tagSize;
   for (int k = 0; k < tagSize; ++k) {
     cJSON* tag = cJSON_GetArrayItem(tags, k);
     if (tag == NULL) continue;
- 
+
     count = 1;
     cJSON* countObj = cJSON_GetObjectItem(tag, "count");
     if (countObj && countObj->type == cJSON_Number) {
-      count = countObj->valueint; 
+      count = countObj->valueint;
     } else if (countObj && countObj->type != cJSON_Number) {
       printf("ERROR: failed to read json, column count not found\n");
       goto PARSE_OVER;
@@ -2902,23 +2907,23 @@ static bool getColumnAndTagTypeFromInsertJsonFile(cJSON* stbInfo, SSuperTable* s
       goto PARSE_OVER;
     }
     tstrncpy(columnCase.dataType, dataType->valuestring, MAX_TB_NAME_SIZE);
- 
+
     cJSON* dataLen = cJSON_GetObjectItem(tag, "len");
     if (dataLen && dataLen->type == cJSON_Number) {
-      columnCase.dataLen = dataLen->valueint; 
+      columnCase.dataLen = dataLen->valueint;
     } else if (dataLen && dataLen->type != cJSON_Number) {
       printf("ERROR: failed to read json, column len not found\n");
      goto PARSE_OVER;
    } else {
      columnCase.dataLen = 0;
-    } 
- 
+    }
+
    for (int n = 0; n < count; ++n) {
      tstrncpy(superTbls->tags[index].dataType, columnCase.dataType, MAX_TB_NAME_SIZE);
-      superTbls->tags[index].dataLen = columnCase.dataLen; 
+      superTbls->tags[index].dataLen = columnCase.dataLen;
      index++;
    }
-  } 
+  }
   superTbls->tagCount = index;
   ret = true;
@@ -3103,8 +3108,10 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
     }
     cJSON *precision = cJSON_GetObjectItem(dbinfo, "precision");
-    if (precision && precision->type == cJSON_String && precision->valuestring != NULL) {
-      tstrncpy(g_Dbs.db[i].dbCfg.precision, precision->valuestring, MAX_DB_NAME_SIZE);
+    if (precision && precision->type == cJSON_String
+            && precision->valuestring != NULL) {
+      tstrncpy(g_Dbs.db[i].dbCfg.precision, precision->valuestring,
+              MAX_DB_NAME_SIZE);
     } else if (!precision) {
      //tstrncpy(g_Dbs.db[i].dbCfg.precision, "ms", MAX_DB_NAME_SIZE);
      memset(g_Dbs.db[i].dbCfg.precision, 0, MAX_DB_NAME_SIZE);
@@ -3330,13 +3337,13 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
      } else if (!childTblExists) {
        g_Dbs.db[i].superTbls[j].childTblExists = TBL_NO_EXISTS;
      } else {
-        printf("ERROR: failed to read json, child_table_exists not found\n");
+        errorPrint("%s() LN%d, failed to read json, child_table_exists not found\n", __func__, __LINE__);
        goto PARSE_OVER;
      }
      cJSON* count = cJSON_GetObjectItem(stbInfo, "childtable_count");
      if (!count || count->type != cJSON_Number || 0 >= count->valueint) {
-        printf("ERROR: failed to read json, childtable_count not found\n");
+        errorPrint("%s() LN%d, failed to read json, childtable_count not found\n", __func__, __LINE__);
        goto PARSE_OVER;
      }
      g_Dbs.db[i].superTbls[j].childTblCount = count->valueint;
@@ -3349,7 +3356,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
      } else if (!dataSource) {
        tstrncpy(g_Dbs.db[i].superTbls[j].dataSource, "rand", MAX_DB_NAME_SIZE);
      } else {
-        printf("ERROR: failed to read json, data_source not found\n");
+        errorPrint("%s() LN%d, failed to read json, data_source not found\n", __func__, __LINE__);
        goto PARSE_OVER;
      }
@@ -3546,12 +3553,14 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
        goto PARSE_OVER;
      }
-      if (NO_CREATE_SUBTBL == g_Dbs.db[i].superTbls[j].autoCreateTable
+/* CBD  if (NO_CREATE_SUBTBL == g_Dbs.db[i].superTbls[j].autoCreateTable
              || (TBL_ALREADY_EXISTS == g_Dbs.db[i].superTbls[j].childTblExists)) {
        continue;
      }
+      */
-      int retVal = getColumnAndTagTypeFromInsertJsonFile(stbInfo, &g_Dbs.db[i].superTbls[j]);
+      int retVal = getColumnAndTagTypeFromInsertJsonFile(
+              stbInfo, &g_Dbs.db[i].superTbls[j]);
      if (false == retVal) {
        goto PARSE_OVER;
      }
@@ -4500,7 +4509,7 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
    pstr += headLen;
    int dataLen = 0;
-    debugPrint("[%d] %s() LN%d i=%d batchPerTblTimes=%d batchPerTbl = %d\n",
+    verbosePrint("[%d] %s() LN%d i=%d batchPerTblTimes=%d batchPerTbl = %d\n",
              pThreadInfo->threadID, __func__, __LINE__,
              i, batchPerTblTimes, batchPerTbl);
    generateDataTail(
@@ -4512,7 +4521,7 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
    pstr += dataLen;
    recOfBatch += batchPerTbl;
    pThreadInfo->totalInsertRows += batchPerTbl;
-    debugPrint("[%d] %s() LN%d batchPerTbl=%d recOfBatch=%d\n",
+    verbosePrint("[%d] %s() LN%d batchPerTbl=%d recOfBatch=%d\n",
              pThreadInfo->threadID, __func__, __LINE__,
              batchPerTbl, recOfBatch);
@@ -4535,7 +4544,7 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
      if ((remainRows > 0) && (batchPerTbl > remainRows))
        batchPerTbl = remainRows;
-      debugPrint("[%d] %s() LN%d generatedRecPerTbl=%d insertRows=%"PRId64"\n",
+      verbosePrint("[%d] %s() LN%d generatedRecPerTbl=%d insertRows=%"PRId64"\n",
                pThreadInfo->threadID, __func__, __LINE__,
                generatedRecPerTbl, insertRows);
      if ((g_args.num_of_RPR - recOfBatch) < batchPerTbl)
        break;
    }
-    debugPrint("[%d] %s() LN%d recOfBatch=%d totalInsertRows=%"PRId64"\n",
+    verbosePrint("[%d] %s() LN%d recOfBatch=%d totalInsertRows=%"PRId64"\n",
              pThreadInfo->threadID, __func__, __LINE__, recOfBatch,
              pThreadInfo->totalInsertRows);
    verbosePrint("[%d] %s() LN%d, buffer=%s\n",

From d8f1b34f5664fcfb4bec08b2ac18a46ea7057e02 Mon Sep 17 00:00:00 2001
From: Shuduo Sang
Date: Thu, 18 Mar 2021 21:33:52 +0800
Subject: [PATCH 17/17] [TD-3356] : fix bug regarding limit offset with exist child table. (#5495)

Co-authored-by: Shuduo Sang
---
 src/kit/taosdemo/taosdemo.c | 54 +++++++++----------------------
 1 file changed, 13 insertions(+), 41 deletions(-)

diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c
index 46f9a52afe..2647d32ef5 100644
--- a/src/kit/taosdemo/taosdemo.c
+++ b/src/kit/taosdemo/taosdemo.c
@@ -2044,7 +2044,8 @@ static int getChildNameOfSuperTableWithLimitAndOffset(TAOS * taos,
    //printf("==== sub table name: %s\n", pTblName);
    count++;
    if (count >= childTblCount - 1) {
-      char *tmp = realloc(childTblName, (size_t)childTblCount*1.5*TSDB_TABLE_NAME_LEN+1);
+      char *tmp = realloc(childTblName,
+              (size_t)childTblCount*1.5*TSDB_TABLE_NAME_LEN+1);
      if (tmp != NULL) {
        childTblName = tmp;
        childTblCount = (int)(childTblCount*1.5);
@@ -2052,14 +2053,15 @@ static int getChildNameOfSuperTableWithLimitAndOffset(TAOS * taos,
                (size_t)((childTblCount-count)*TSDB_TABLE_NAME_LEN));
      } else {
        // exit, if allocate more memory failed
-        printf("realloc fail for save child table name of %s.%s\n", dbName, sTblName);
+        errorPrint("%s() LN%d, realloc fail for save child table name of %s.%s\n",
+                __func__, __LINE__, dbName, sTblName);
        tmfree(childTblName);
        taos_free_result(res);
        taos_close(taos);
        exit(-1);
      }
    }
-    pTblName = childTblName + count * TSDB_TABLE_NAME_LEN; 
+    pTblName = childTblName + count * TSDB_TABLE_NAME_LEN;
  }
  *childTblCountOfSuperTbl = count;
@@ -2140,6 +2142,7 @@ static int getSuperTableFromServer(TAOS * taos, char* dbName,
  calcRowLen(superTbls);
+/*
  if (TBL_ALREADY_EXISTS == superTbls->childTblExists) {
    //get all child table name use cmd: select tbname from superTblName;
    int childTblCount = 10000;
@@ -2153,6 +2156,7 @@ static int getSuperTableFromServer(TAOS * taos, char* dbName,
            &superTbls->childTblName,
            &superTbls->childTblCount);
  }
+  */
  return 0;
}
@@ -4356,7 +4360,7 @@ static int generateSQLHead(char *tableName, int32_t tableSeq, threadInfo* pThrea
                  superTblInfo->maxSqlLen,
                  "insert into %s.%s values",
                  pThreadInfo->db_name,
-                    superTblInfo->childTblName + tableSeq * TSDB_TABLE_NAME_LEN);
+                    tableName);
    } else {
      len = snprintf(buffer,
                  (superTblInfo?superTblInfo->maxSqlLen:g_args.max_sql_len),
@@ -4561,10 +4565,10 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) {
    int affectedRows = execInsert(pThreadInfo, buffer, recOfBatch);
    verbosePrint("[%d] %s() LN%d affectedRows=%d\n", pThreadInfo->threadID,
            __func__, __LINE__, affectedRows);
-    if (affectedRows < 0) {
-      errorPrint("[%d] %s() LN%d execInsert affected rows: %d\n%s\n",
+    if ((affectedRows < 0) || (recOfBatch != affectedRows)) {
+      errorPrint("[%d] %s() LN%d execInsert insert %d, affected rows: %d\n%s\n",
              pThreadInfo->threadID, __func__, __LINE__,
-              affectedRows, buffer);
+              recOfBatch, affectedRows, buffer);
      goto free_and_statistics_interlace;
    }
@@ -4885,10 +4889,9 @@ static void startMultiThreadInsertData(int threads, char* db_name,
              &start_time,
              strlen(superTblInfo->startTimestamp),
              timePrec, 0)) {
-        errorPrint("%s() LN%d, failed to parse time!\n", __func__, __LINE__);
-        exit(-1);
+          ERROR_EXIT("failed to parse time!\n");
      }
-    }
+    }
  } else {
     start_time = 1500000000000;
  }
@@ -4911,36 +4914,6 @@ static void startMultiThreadInsertData(int threads, char* db_name,
    }
  }
-  if (superTblInfo && (superTblInfo->childTblOffset >= 0)
-          && (superTblInfo->childTblLimit > 0)) {
-
-    TAOS* taos = taos_connect(
-            g_Dbs.host, g_Dbs.user,
-            g_Dbs.password, db_name, g_Dbs.port);
-    if (NULL == taos) {
-      errorPrint("%s() LN%d, connect to server fail , reason: %s\n",
-              __func__, __LINE__, taos_errstr(NULL));
-      exit(-1);
-    }
-
-    superTblInfo->childTblName = (char*)calloc(1,
-        superTblInfo->childTblLimit * TSDB_TABLE_NAME_LEN);
-    if (superTblInfo->childTblName == NULL) {
-      errorPrint("%s() LN%d, alloc memory failed!\n", __func__, __LINE__);
-      taos_close(taos);
-      exit(-1);
-    }
-    int childTblCount;
-
-    getChildNameOfSuperTableWithLimitAndOffset(
-        taos,
-        db_name, superTblInfo->sTblName,
-        &superTblInfo->childTblName, &childTblCount,
-        superTblInfo->childTblLimit,
-        superTblInfo->childTblOffset);
-    taos_close(taos);
-  }
-
  // read sample data from file first
  if ((superTblInfo) && (0 == strncasecmp(superTblInfo->dataSource,
              "sample", strlen("sample")))) {
    if (0 != prepareSampleDataForSTable(superTblInfo)) {
      errorPrint("%s() LN%d, prepare sample data for stable failed!\n", __func__, __LINE__);
      exit(-1);
    }
  }
-
  TAOS* taos = taos_connect(
          g_Dbs.host, g_Dbs.user,
          g_Dbs.password, db_name, g_Dbs.port);
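The errorPrint()/debugPrint()/verbosePrint() helpers that the taosdemo patches above converge on are never defined in any hunk shown here. A minimal sketch of how such level-gated logging macros can be structured — the macro names come from the patches, while the gating flags and message prefixes are assumptions:

```c
#include <stdio.h>

/* Sketch only: the real definitions live elsewhere in taosdemo.c.
 * errorPrint always reaches stderr; the other two are gated by
 * runtime switches. The g_print* flags are assumptions. */
extern int g_printDebug;    /* hypothetical: set by a debug option   */
extern int g_printVerbose;  /* hypothetical: set by a verbose option */

#define errorPrint(fmt, ...) \
    do { fprintf(stderr, "ERROR: " fmt, ##__VA_ARGS__); } while (0)

#define debugPrint(fmt, ...) \
    do { if (g_printDebug) printf("DEBG: " fmt, ##__VA_ARGS__); } while (0)

#define verbosePrint(fmt, ...) \
    do { if (g_printVerbose) printf("VERB: " fmt, ##__VA_ARGS__); } while (0)
```

Keeping errorPrint() unconditional while gating the other two is what lets PATCH 16 demote noisy per-batch messages from debugPrint() to verbosePrint() instead of deleting them.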
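The readTable() and readMetric() hunks repeat one TDengine client idiom: issue the statement, check taos_errno() on the returned handle, and free the result even on failure. The same pattern reduced to a self-contained helper — taos_query(), taos_errno(), taos_errstr(), taos_fetch_row() and taos_free_result() are the public C API, while the demoQuery() wrapper itself is illustrative:

```c
#include <taos.h>
#include <stdio.h>

/* Returns the number of fetched rows, or -1 on error. */
static int demoQuery(TAOS *taos, const char *sql) {
  TAOS_RES *res = taos_query(taos, sql);
  int32_t code = taos_errno(res);
  if (code != 0) {
    fprintf(stderr, "Failed to query:%s\n", taos_errstr(res));
    taos_free_result(res);   /* the handle must be freed even on error */
    return -1;
  }

  int rows = 0;
  TAOS_ROW row;
  while ((row = taos_fetch_row(res)) != NULL) {
    rows++;                  /* drain the result set */
  }
  taos_free_result(res);
  return rows;
}
```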
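PATCH 16 reflows getColumnAndTagTypeFromInsertJsonFile() without changing its logic: every entry of the "columns" and "tags" arrays carries a mandatory "type" string plus optional "count" (default 1) and "len" (default 8 for columns) numbers. A condensed sketch of that per-entry parse using the same cJSON calls — demoParseColumn() and its error convention are assumptions:

```c
#include <cJSON.h>   /* the cJSON library bundled with TDengine's deps */
#include <stdio.h>

static int demoParseColumn(cJSON *column, char *typeOut, size_t typeOutLen,
                           int *lenOut, int *countOut) {
  cJSON *countObj = cJSON_GetObjectItem(column, "count");
  *countOut = (countObj && countObj->type == cJSON_Number)
                  ? (int)countObj->valueint
                  : 1;                      /* "count" is optional */

  cJSON *dataType = cJSON_GetObjectItem(column, "type");
  if (!dataType || dataType->type != cJSON_String
      || dataType->valuestring == NULL) {
    return -1;                              /* "type" is mandatory */
  }
  snprintf(typeOut, typeOutLen, "%s", dataType->valuestring);

  cJSON *dataLen = cJSON_GetObjectItem(column, "len");
  *lenOut = (dataLen && dataLen->type == cJSON_Number)
                ? (int)dataLen->valueint
                : 8;                        /* "len" defaults to 8 */
  return 0;
}
```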
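PATCH 16 also swaps the first two enumerators of TALBE_EXISTS_EN so that TBL_NO_EXISTS becomes value 0. The diff does not say why; one plausible effect — an inference, not something the patch states — is that zero-initialized configuration structs then default to "table does not exist" instead of "table already exists":

```c
#include <assert.h>
#include <string.h>

typedef enum TALBE_EXISTS_EN {   /* enumerator order after PATCH 16 */
  TBL_NO_EXISTS,                 /* == 0, the value zeroed memory yields */
  TBL_ALREADY_EXISTS,
  TBL_EXISTS_BUTT
} TALBE_EXISTS_EN;

struct DemoSTableCfg {           /* illustrative stand-in for SSuperTable */
  TALBE_EXISTS_EN childTblExists;
};

int main(void) {
  struct DemoSTableCfg cfg;
  memset(&cfg, 0, sizeof(cfg));  /* what calloc() would produce */
  assert(cfg.childTblExists == TBL_NO_EXISTS);
  return 0;
}
```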
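Finally, the getChildNameOfSuperTableWithLimitAndOffset() hunk in PATCH 17 preserves a leak-safe realloc() discipline: the grown pointer lands in a temporary so the original block can still be freed if the allocation fails, and only the newly added tail is zeroed. The same pattern as a standalone helper — the names and the 193-byte slot width standing in for TSDB_TABLE_NAME_LEN are assumptions:

```c
#include <stdlib.h>
#include <string.h>

#define DEMO_NAME_LEN 193  /* stand-in for TSDB_TABLE_NAME_LEN */

/* Grows a flat array of fixed-width table names by 1.5x.
 * On failure the old buffer is freed exactly once and NULL returned. */
static char *demoGrowNames(char *buf, int *cap) {
  int newCap = (int)(*cap * 1.5);
  char *tmp = realloc(buf, (size_t)newCap * DEMO_NAME_LEN + 1);
  if (tmp == NULL) {
    free(buf);               /* old block is untouched by a failed realloc */
    return NULL;
  }
  /* zero only the tail that realloc() left uninitialized */
  memset(tmp + (size_t)*cap * DEMO_NAME_LEN, 0,
         (size_t)(newCap - *cap) * DEMO_NAME_LEN + 1);
  *cap = newCap;
  return tmp;
}
```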