From ba5a25573701cc10712253e220b6011c070aaef8 Mon Sep 17 00:00:00 2001 From: Hongze Cheng Date: Fri, 18 Jun 2021 17:03:11 +0800 Subject: [PATCH 01/38] [feature]: implement feature TD-4700 --- src/util/src/tlog.c | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/src/util/src/tlog.c b/src/util/src/tlog.c index 7f127fc396..fb7d26601a 100644 --- a/src/util/src/tlog.c +++ b/src/util/src/tlog.c @@ -94,6 +94,7 @@ static SLogBuff *taosLogBuffNew(int32_t bufSize); static void taosCloseLogByFd(int32_t oldFd); static int32_t taosOpenLogFile(char *fn, int32_t maxLines, int32_t maxFileNum); extern void taosPrintGlobalCfg(); +static volatile int8_t tsNoDisk = 0; static int32_t taosStartLog() { pthread_attr_t threadAttr; @@ -363,9 +364,17 @@ static int32_t taosOpenLogFile(char *fn, int32_t maxLines, int32_t maxFileNum) { void taosPrintLog(const char *flags, int32_t dflag, const char *format, ...) { if (tsTotalLogDirGB != 0 && tsAvailLogDirGB < tsMinimalLogDirGB) { - printf("server disk:%s space remain %.3f GB, total %.1f GB, stop print log.\n", tsLogDir, tsAvailLogDirGB, tsTotalLogDirGB); + char buf[256] = "\0"; + sprintf(buf, "server disk:%s space remain %.3f GB, total %.1f GB, stop print log.\n", tsLogDir, tsAvailLogDirGB, + tsTotalLogDirGB); + if (atomic_val_compare_exchange_8(&tsNoDisk, 0, 1) == 1) { + taosWrite(tsLogObj.logHandle->fd, buf, (uint32_t)strlen(buf)); + } + puts(buf); fflush(stdout); return; + } else { + atomic_store_8(&tsNoDisk, 0); } va_list argpointer; From ebd2fdc1564bc9c65241239f4c6df71879b2b935 Mon Sep 17 00:00:00 2001 From: Huo Linhe Date: Wed, 23 Jun 2021 14:44:11 +0800 Subject: [PATCH 02/38] [TD-4872]: fix buffer overflow in -O3 build --- src/tfs/src/tfs.c | 4 +++- src/util/src/tconfig.c | 2 +- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/src/tfs/src/tfs.c b/src/tfs/src/tfs.c index f78535b8ed..9dc68dcdfd 100644 --- a/src/tfs/src/tfs.c +++ b/src/tfs/src/tfs.c @@ -480,11 +480,13 @@ static int tfsFormatDir(char *idir, char *odir) { return -1; } - if (realpath(wep.we_wordv[0], odir) == NULL) { + char tmp[PATH_MAX] = {0}; + if (realpath(wep.we_wordv[0], tmp) == NULL) { terrno = TAOS_SYSTEM_ERROR(errno); wordfree(&wep); return -1; } + strcpy(odir, tmp); wordfree(&wep); return 0; diff --git a/src/util/src/tconfig.c b/src/util/src/tconfig.c index c4bd577602..442e83bb4f 100644 --- a/src/util/src/tconfig.c +++ b/src/util/src/tconfig.c @@ -151,7 +151,7 @@ static bool taosReadDirectoryConfig(SGlobalCfg *cfg, char *input_value) { wordfree(&full_path); - char tmp[1025] = {0}; + char tmp[PATH_MAX] = {0}; if (realpath(option, tmp) != NULL) { strcpy(option, tmp); } From ff492acbbfb620d2199aaa44f5646fc90d715e92 Mon Sep 17 00:00:00 2001 From: Huo Linhe Date: Thu, 24 Jun 2021 11:36:41 +0800 Subject: [PATCH 03/38] [TD-4872]: fix tasodemo buffer overflow with -O3 --- src/kit/taosdemo/taosdemo.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 33ee2a9bc2..1bb057aa67 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -5103,7 +5103,7 @@ static int32_t generateStbDataTail( } else { retLen = getRowDataFromSample( data, - remainderBufLen, + remainderBufLen < MAX_DATA_SIZE ? 
remainderBufLen : MAX_DATA_SIZE, startTime + superTblInfo->timeStampStep * k, superTblInfo, pSamplePos); From a9c1f6bc73a76fa32d865ad22984198526f57223 Mon Sep 17 00:00:00 2001 From: Huo Linhe Date: Thu, 24 Jun 2021 12:40:40 +0800 Subject: [PATCH 04/38] [TD-4872]: fix tasodump buffer overflow with -O3 --- src/kit/taosdump/taosdump.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/src/kit/taosdump/taosdump.c b/src/kit/taosdump/taosdump.c index 165bbdf990..05c6b1efbb 100644 --- a/src/kit/taosdump/taosdump.c +++ b/src/kit/taosdump/taosdump.c @@ -29,6 +29,9 @@ #define COMMAND_SIZE 65536 //#define DEFAULT_DUMP_FILE "taosdump.sql" +// for strncpy buffer overflow +#define min(a, b) (((a) < (b)) ? (a) : (b)) + int converStringToReadable(char *str, int size, char *buf, int bufsize); int convertNCharToReadable(char *str, int size, char *buf, int bufsize); void taosDumpCharset(FILE *fp); @@ -1119,12 +1122,11 @@ int taosGetTableDes( TAOS_FIELD *fields = taos_fetch_fields(res); tstrncpy(tableDes->name, table, TSDB_TABLE_NAME_LEN); - while ((row = taos_fetch_row(res)) != NULL) { strncpy(tableDes->cols[count].field, (char *)row[TSDB_DESCRIBE_METRIC_FIELD_INDEX], fields[TSDB_DESCRIBE_METRIC_FIELD_INDEX].bytes); strncpy(tableDes->cols[count].type, (char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes); + min(16, fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes)); tableDes->cols[count].length = *((int *)row[TSDB_DESCRIBE_METRIC_LENGTH_INDEX]); strncpy(tableDes->cols[count].note, (char *)row[TSDB_DESCRIBE_METRIC_NOTE_INDEX], fields[TSDB_DESCRIBE_METRIC_NOTE_INDEX].bytes); @@ -1575,7 +1577,7 @@ int taosDumpDb(SDbInfo *dbInfo, struct arguments *arguments, FILE *fp, TAOS *tao tstrncpy(tableRecord.name, (char *)row[TSDB_SHOW_TABLES_NAME_INDEX], fields[TSDB_SHOW_TABLES_NAME_INDEX].bytes); tstrncpy(tableRecord.metric, (char *)row[TSDB_SHOW_TABLES_METRIC_INDEX], - fields[TSDB_SHOW_TABLES_METRIC_INDEX].bytes); + min(TSDB_TABLE_NAME_LEN, fields[TSDB_SHOW_TABLES_METRIC_INDEX].bytes)); taosWrite(fd, &tableRecord, sizeof(STableRecord)); From 8932d665299db36ba7ddcbf73a90f7cdca7a1640 Mon Sep 17 00:00:00 2001 From: tickduan <417921451@qq.com> Date: Thu, 24 Jun 2021 15:52:27 +0800 Subject: [PATCH 05/38] fix cqCfg.db length is small bug --- src/inc/tcq.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/inc/tcq.h b/src/inc/tcq.h index 7549c3d498..c7eeaca2cc 100644 --- a/src/inc/tcq.h +++ b/src/inc/tcq.h @@ -27,7 +27,7 @@ typedef struct { int32_t vgId; char user[TSDB_USER_LEN]; char pass[TSDB_KEY_LEN]; - char db[TSDB_DB_NAME_LEN]; + char db[TSDB_ACCT_ID_LEN + TSDB_DB_NAME_LEN]; FCqWrite cqWrite; } SCqCfg; @@ -38,7 +38,7 @@ typedef struct { int32_t num; // number of continuous streams char user[TSDB_USER_LEN]; char pass[TSDB_KEY_LEN]; - char db[TSDB_DB_NAME_LEN]; + char db[TSDB_ACCT_ID_LEN + TSDB_DB_NAME_LEN]; FCqWrite cqWrite; struct SCqObj *pHead; void *dbConn; From 943d69ea80fab197e33224ddef94b93bf5d00ef5 Mon Sep 17 00:00:00 2001 From: tickduan <417921451@qq.com> Date: Fri, 25 Jun 2021 15:11:29 +0800 Subject: [PATCH 06/38] restore CqContext db name length --- src/inc/tcq.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/inc/tcq.h b/src/inc/tcq.h index c7eeaca2cc..27c043f960 100644 --- a/src/inc/tcq.h +++ b/src/inc/tcq.h @@ -38,7 +38,7 @@ typedef struct { int32_t num; // number of continuous streams char user[TSDB_USER_LEN]; char pass[TSDB_KEY_LEN]; - char db[TSDB_ACCT_ID_LEN + TSDB_DB_NAME_LEN]; + char 
db[TSDB_DB_NAME_LEN]; FCqWrite cqWrite; struct SCqObj *pHead; void *dbConn; From 18b748576e3e91ba863b1ad8ed932426d143737b Mon Sep 17 00:00:00 2001 From: Jun Li Date: Sun, 27 Jun 2021 20:23:21 -0700 Subject: [PATCH 07/38] Fix message memory allocation size bug --- src/dnode/src/dnodeVWrite.c | 8 ++++---- src/inc/vnode.h | 2 +- src/vnode/src/vnodeWrite.c | 10 +++++----- 3 files changed, 10 insertions(+), 10 deletions(-) diff --git a/src/dnode/src/dnodeVWrite.c b/src/dnode/src/dnodeVWrite.c index 26084a52eb..c751e5ff70 100644 --- a/src/dnode/src/dnodeVWrite.c +++ b/src/dnode/src/dnodeVWrite.c @@ -202,12 +202,12 @@ static void *dnodeProcessVWriteQueue(void *wparam) { for (int32_t i = 0; i < numOfMsgs; ++i) { taosGetQitem(pWorker->qall, &qtype, (void **)&pWrite); dTrace("msg:%p, app:%p type:%s will be processed in vwrite queue, qtype:%s hver:%" PRIu64, pWrite, - pWrite->rpcMsg.ahandle, taosMsg[pWrite->pHead.msgType], qtypeStr[qtype], pWrite->pHead.version); + pWrite->rpcMsg.ahandle, taosMsg[pWrite->walHead.msgType], qtypeStr[qtype], pWrite->walHead.version); - pWrite->code = vnodeProcessWrite(pVnode, &pWrite->pHead, qtype, pWrite); + pWrite->code = vnodeProcessWrite(pVnode, &pWrite->walHead, qtype, pWrite); if (pWrite->code <= 0) atomic_add_fetch_32(&pWrite->processedCount, 1); if (pWrite->code > 0) pWrite->code = 0; - if (pWrite->code == 0 && pWrite->pHead.msgType != TSDB_MSG_TYPE_SUBMIT) forceFsync = true; + if (pWrite->code == 0 && pWrite->walHead.msgType != TSDB_MSG_TYPE_SUBMIT) forceFsync = true; dTrace("msg:%p is processed in vwrite queue, code:0x%x", pWrite, pWrite->code); } @@ -222,7 +222,7 @@ static void *dnodeProcessVWriteQueue(void *wparam) { dnodeSendRpcVWriteRsp(pVnode, pWrite, pWrite->code); } else { if (qtype == TAOS_QTYPE_FWD) { - vnodeConfirmForward(pVnode, pWrite->pHead.version, pWrite->code, pWrite->pHead.msgType != TSDB_MSG_TYPE_SUBMIT); + vnodeConfirmForward(pVnode, pWrite->walHead.version, pWrite->code, pWrite->walHead.msgType != TSDB_MSG_TYPE_SUBMIT); } if (pWrite->rspRet.rsp) { rpcFreeCont(pWrite->rspRet.rsp); diff --git a/src/inc/vnode.h b/src/inc/vnode.h index 9dae862344..e6f94fbead 100644 --- a/src/inc/vnode.h +++ b/src/inc/vnode.h @@ -49,7 +49,7 @@ typedef struct { SRpcMsg rpcMsg; SRspRet rspRet; char reserveForSync[24]; - SWalHead pHead; + SWalHead walHead; } SVWriteMsg; // vnodeStatus diff --git a/src/vnode/src/vnodeWrite.c b/src/vnode/src/vnodeWrite.c index 555eda6d13..ac483cc265 100644 --- a/src/vnode/src/vnodeWrite.c +++ b/src/vnode/src/vnodeWrite.c @@ -90,7 +90,7 @@ int32_t vnodeProcessWrite(void *vparam, void *wparam, int32_t qtype, void *rpara // forward to peers, even it is WAL/FWD, it shall be called to update version in sync int32_t syncCode = 0; - bool force = (pWrite == NULL ? false : pWrite->pHead.msgType != TSDB_MSG_TYPE_SUBMIT); + bool force = (pWrite == NULL ? 
false : pWrite->walHead.msgType != TSDB_MSG_TYPE_SUBMIT); syncCode = syncForwardToPeer(pVnode->sync, pHead, pWrite, qtype, force); if (syncCode < 0) { pHead->version = 0; @@ -237,7 +237,7 @@ static SVWriteMsg *vnodeBuildVWriteMsg(SVnodeObj *pVnode, SWalHead *pHead, int32 return NULL; } - int32_t size = sizeof(SVWriteMsg) + sizeof(SWalHead) + pHead->len; + int32_t size = sizeof(SVWriteMsg) + pHead->len; SVWriteMsg *pWrite = taosAllocateQitem(size); if (pWrite == NULL) { terrno = TSDB_CODE_VND_OUT_OF_MEMORY; @@ -248,7 +248,7 @@ static SVWriteMsg *vnodeBuildVWriteMsg(SVnodeObj *pVnode, SWalHead *pHead, int32 pWrite->rpcMsg = *pRpcMsg; } - memcpy(&pWrite->pHead, pHead, sizeof(SWalHead) + pHead->len); + memcpy(&pWrite->walHead, pHead, sizeof(SWalHead) + pHead->len); pWrite->pVnode = pVnode; pWrite->qtype = qtype; @@ -286,7 +286,7 @@ static int32_t vnodeWriteToWQueueImp(SVWriteMsg *pWrite) { } int32_t queued = atomic_add_fetch_32(&pVnode->queuedWMsg, 1); - int64_t queuedSize = atomic_add_fetch_64(&pVnode->queuedWMsgSize, pWrite->pHead.len); + int64_t queuedSize = atomic_add_fetch_64(&pVnode->queuedWMsgSize, pWrite->walHead.len); if (queued > MAX_QUEUED_MSG_NUM || queuedSize > MAX_QUEUED_MSG_SIZE) { int32_t ms = (queued / MAX_QUEUED_MSG_NUM) * 10 + 3; @@ -330,7 +330,7 @@ void vnodeFreeFromWQueue(void *vparam, SVWriteMsg *pWrite) { SVnodeObj *pVnode = vparam; if (pVnode) { int32_t queued = atomic_sub_fetch_32(&pVnode->queuedWMsg, 1); - int64_t queuedSize = atomic_sub_fetch_64(&pVnode->queuedWMsgSize, pWrite->pHead.len); + int64_t queuedSize = atomic_sub_fetch_64(&pVnode->queuedWMsgSize, pWrite->walHead.len); vTrace("vgId:%d, msg:%p, app:%p, free from vwqueue, queued:%d size:%" PRId64, pVnode->vgId, pWrite, pWrite->rpcMsg.ahandle, queued, queuedSize); From 84772d1e54d24a17c21931fcb45cb8d22a4c0d1c Mon Sep 17 00:00:00 2001 From: Jun Li Date: Thu, 1 Jul 2021 00:39:26 -0700 Subject: [PATCH 08/38] Add mutext protection for stop flag --- src/wal/src/walMgmt.c | 20 ++++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) diff --git a/src/wal/src/walMgmt.c b/src/wal/src/walMgmt.c index 55ab9b031b..9bd5cdf175 100644 --- a/src/wal/src/walMgmt.c +++ b/src/wal/src/walMgmt.c @@ -36,9 +36,16 @@ static int32_t walInitObj(SWal *pWal); static void walFreeObj(void *pWal); int32_t walInit() { + int32_t code = 0; tsWal.refId = taosOpenRef(TSDB_MIN_VNODES, walFreeObj); - int32_t code = walCreateThread(); + code = pthread_mutex_init(&tsWal.mutex, NULL); + if (code) { + wError("failed to init wal mutex since %s", tstrerror(code)); + return code; + } + + code = walCreateThread(); if (code != TSDB_CODE_SUCCESS) { wError("failed to init wal module since %s", tstrerror(code)); return code; @@ -51,6 +58,7 @@ int32_t walInit() { void walCleanUp() { walStopThread(); taosCloseRef(tsWal.refId); + pthread_mutex_destroy(&tsWal.mutex); wInfo("wal module is cleaned up"); } @@ -183,10 +191,15 @@ static void walFsyncAll() { } static void *walThreadFunc(void *param) { + int stop = 0; while (1) { walUpdateSeq(); walFsyncAll(); - if (tsWal.stop) break; + + pthread_mutex_lock(&tsWal.mutex); + stop = tsWal.stop; + pthread_mutex_unlock(&tsWal.mutex); + if (stop) break; } return NULL; @@ -209,7 +222,10 @@ static int32_t walCreateThread() { } static void walStopThread() { + pthread_mutex_lock(&tsWal.mutex); tsWal.stop = 1; + pthread_mutex_unlock(&tsWal.mutex); + if (taosCheckPthreadValid(tsWal.thread)) { pthread_join(tsWal.thread, NULL); } From 3875ac107c4497003960b3f498a9d802bd710809 Mon Sep 17 00:00:00 2001 From: jtcheng Date: 
Mon, 5 Jul 2021 13:53:10 +0800 Subject: [PATCH 09/38] [TD-6722]: Fix 'table', 'super table', 'stream table' show pattern issue --- src/mnode/src/mnodeTable.c | 42 ++++++++++++++++++++++++++++---------- 1 file changed, 31 insertions(+), 11 deletions(-) diff --git a/src/mnode/src/mnodeTable.c b/src/mnode/src/mnodeTable.c index b36fb3155c..1287ff6212 100644 --- a/src/mnode/src/mnodeTable.c +++ b/src/mnode/src/mnodeTable.c @@ -104,6 +104,20 @@ static void mnodeDestroyChildTable(SCTableObj *pTable) { tfree(pTable); } +static char* mnodeGetTableShowPattern(SShowObj *pShow) { + char* pattern = NULL; + if (pShow != NULL && pShow->payloadLen > 0) { + pattern = (char*)malloc(pShow->payloadLen + 1); + if (pattern == NULL) { + terrno = TSDB_CODE_QRY_OUT_OF_MEMORY; + return NULL; + } + memcpy(pattern, pShow->payload, pShow->payloadLen); + pattern[pShow->payloadLen] = 0; + } + return pattern; +} + static int32_t mnodeChildTableActionDestroy(SSdbRow *pRow) { mnodeDestroyChildTable(pRow->pObj); return TSDB_CODE_SUCCESS; @@ -1611,6 +1625,11 @@ int32_t mnodeRetrieveShowSuperTables(SShowObj *pShow, char *data, int32_t rows, SPatternCompareInfo info = PATTERN_COMPARE_INFO_INITIALIZER; char stableName[TSDB_TABLE_NAME_LEN] = {0}; + char* pattern = mnodeGetTableShowPattern(pShow); + if (pShow->payloadLen > 0 && pattern == NULL) { + return 0; + } + while (numOfRows < rows) { pShow->pIter = mnodeGetNextSuperTable(pShow->pIter, &pTable); if (pTable == NULL) break; @@ -1622,7 +1641,7 @@ int32_t mnodeRetrieveShowSuperTables(SShowObj *pShow, char *data, int32_t rows, memset(stableName, 0, tListLen(stableName)); mnodeExtractTableName(pTable->info.tableId, stableName); - if (pShow->payloadLen > 0 && patternMatch(pShow->payload, stableName, sizeof(stableName) - 1, &info) != TSDB_PATTERN_MATCH) { + if (pShow->payloadLen > 0 && patternMatch(pattern, stableName, sizeof(stableName) - 1, &info) != TSDB_PATTERN_MATCH) { mnodeDecTableRef(pTable); continue; } @@ -1662,6 +1681,7 @@ int32_t mnodeRetrieveShowSuperTables(SShowObj *pShow, char *data, int32_t rows, mnodeVacuumResult(data, pShow->numOfColumns, numOfRows, rows, pShow); mnodeDecDbRef(pDb); + free(pattern); return numOfRows; } @@ -3130,15 +3150,9 @@ static int32_t mnodeRetrieveShowTables(SShowObj *pShow, char *data, int32_t rows char prefix[64] = {0}; int32_t prefixLen = (int32_t)tableIdPrefix(pDb->name, prefix, 64); - char* pattern = NULL; - if (pShow->payloadLen > 0) { - pattern = (char*)malloc(pShow->payloadLen + 1); - if (pattern == NULL) { - terrno = TSDB_CODE_QRY_OUT_OF_MEMORY; - return 0; - } - memcpy(pattern, pShow->payload, pShow->payloadLen); - pattern[pShow->payloadLen] = 0; + char* pattern = mnodeGetTableShowPattern(pShow); + if (pShow->payloadLen > 0 && pattern == NULL) { + return 0; } while (numOfRows < rows) { @@ -3370,6 +3384,11 @@ static int32_t mnodeRetrieveStreamTables(SShowObj *pShow, char *data, int32_t ro strcat(prefix, TS_PATH_DELIMITER); int32_t prefixLen = (int32_t)strlen(prefix); + char* pattern = mnodeGetTableShowPattern(pShow); + if (pShow->payloadLen > 0 && pattern == NULL) { + return 0; + } + while (numOfRows < rows) { pShow->pIter = mnodeGetNextChildTable(pShow->pIter, &pTable); if (pTable == NULL) break; @@ -3385,7 +3404,7 @@ static int32_t mnodeRetrieveStreamTables(SShowObj *pShow, char *data, int32_t ro // pattern compare for table name mnodeExtractTableName(pTable->info.tableId, tableName); - if (pShow->payloadLen > 0 && patternMatch(pShow->payload, tableName, sizeof(tableName) - 1, &info) != TSDB_PATTERN_MATCH) { + if (pShow->payloadLen 
> 0 && patternMatch(pattern, tableName, sizeof(tableName) - 1, &info) != TSDB_PATTERN_MATCH) { mnodeDecTableRef(pTable); continue; } @@ -3417,6 +3436,7 @@ static int32_t mnodeRetrieveStreamTables(SShowObj *pShow, char *data, int32_t ro mnodeVacuumResult(data, pShow->numOfColumns, numOfRows, rows, pShow); mnodeDecDbRef(pDb); + free(pattern); return numOfRows; } From c2f32ad9c1cc67f5da01ed86689a13e645b43ac8 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Thu, 8 Jul 2021 23:33:12 +0800 Subject: [PATCH 10/38] [TD-5134] fix runtime error --- src/client/src/tscSQLParser.c | 6 +++--- src/client/src/tscSubquery.c | 7 ++++--- src/client/src/tscUtil.c | 6 ++++-- src/query/src/qExecutor.c | 13 +++++++++---- 4 files changed, 20 insertions(+), 12 deletions(-) diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index c2542fb8c6..9e8cb325a8 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -2766,8 +2766,7 @@ static bool isTablenameToken(SStrToken* token) { SStrToken tableToken = {0}; extractTableNameFromToken(&tmpToken, &tableToken); - - return (strncasecmp(TSQL_TBNAME_L, tmpToken.z, tmpToken.n) == 0 && tmpToken.n == strlen(TSQL_TBNAME_L)); + return (tmpToken.n == strlen(TSQL_TBNAME_L) && strncasecmp(TSQL_TBNAME_L, tmpToken.z, tmpToken.n) == 0); } static int16_t doGetColumnIndex(SQueryInfo* pQueryInfo, int32_t index, SStrToken* pToken) { @@ -2798,7 +2797,8 @@ int32_t doGetColumnIndexByName(SStrToken* pToken, SQueryInfo* pQueryInfo, SColum if (isTablenameToken(pToken)) { pIndex->columnIndex = TSDB_TBNAME_COLUMN_INDEX; - } else if (strncasecmp(pToken->z, DEFAULT_PRIMARY_TIMESTAMP_COL_NAME, pToken->n) == 0) { + } else if (strlen(DEFAULT_PRIMARY_TIMESTAMP_COL_NAME) == pToken->n && + strncasecmp(pToken->z, DEFAULT_PRIMARY_TIMESTAMP_COL_NAME, pToken->n) == 0) { pIndex->columnIndex = PRIMARYKEY_TIMESTAMP_COL_INDEX; } else { // not specify the table name, try to locate the table index by column name diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c index 8ab3512cba..804878fa45 100644 --- a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -3047,9 +3047,10 @@ static void multiVnodeInsertFinalize(void* param, TAOS_RES* tres, int numOfRows) pParentObj->cmd.insertParam.schemaAttached = 1; } } - - if (!subAndCheckDone(tres, pParentObj, pSupporter->index)) { - tscDebug("0x%"PRIx64" insert:%p,%d completed, total:%d", pParentObj->self, tres, pSupporter->index, pParentObj->subState.numOfSub); + + int32_t suppIdx = pSupporter->index; + if (!subAndCheckDone(tres, pParentObj, suppIdx)) { + tscDebug("0x%"PRIx64" insert:%p,%d completed, total:%d", pParentObj->self, tres, suppIdx, pParentObj->subState.numOfSub); return; } diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index b42724a2ec..3195e74f02 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -297,7 +297,7 @@ bool tscHasColumnFilter(SQueryInfo* pQueryInfo) { size_t size = taosArrayGetSize(pQueryInfo->colList); for (int32_t i = 0; i < size; ++i) { - SColumn* pCol = taosArrayGet(pQueryInfo->colList, i); + SColumn* pCol = taosArrayGetP(pQueryInfo->colList, i); if (pCol->info.flist.numOfFilters > 0) { return true; } @@ -4382,7 +4382,9 @@ int32_t tscCreateQueryFromQueryInfo(SQueryInfo* pQueryInfo, SQueryAttr* pQueryAt if (pQueryAttr->fillType != TSDB_FILL_NONE) { pQueryAttr->fillVal = calloc(pQueryAttr->numOfOutput, sizeof(int64_t)); - memcpy(pQueryAttr->fillVal, pQueryInfo->fillVal, pQueryAttr->numOfOutput * sizeof(int64_t)); + int32_t fields = 
tscNumOfFields(pQueryInfo); + int32_t cpySize = fields < pQueryAttr->numOfOutput ? fields : pQueryAttr->numOfOutput; + memcpy(pQueryAttr->fillVal, pQueryInfo->fillVal, cpySize * sizeof(int64_t)); } pQueryAttr->srcRowSize = 0; diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index ef88f8bc06..ecdbc3be4f 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -5956,8 +5956,13 @@ SColumnInfo* extractColumnFilterInfo(SExprInfo* pExpr, int32_t numOfOutput, int3 pCols[i].colId = pExpr[i].base.resColId; pCols[i].flist.numOfFilters = pExpr[i].base.flist.numOfFilters; - pCols[i].flist.filterInfo = calloc(pCols[i].flist.numOfFilters, sizeof(SColumnFilterInfo)); - memcpy(pCols[i].flist.filterInfo, pExpr[i].base.flist.filterInfo, pCols[i].flist.numOfFilters * sizeof(SColumnFilterInfo)); + if (pCols[i].flist.numOfFilters != 0) { + pCols[i].flist.filterInfo = calloc(pCols[i].flist.numOfFilters, sizeof(SColumnFilterInfo)); + memcpy(pCols[i].flist.filterInfo, pExpr[i].base.flist.filterInfo, pCols[i].flist.numOfFilters * sizeof(SColumnFilterInfo)); + } else { + // avoid runtime error + pCols[i].flist.filterInfo = NULL; + } } assert(numOfFilter > 0); @@ -6416,10 +6421,10 @@ static SSDataBlock* hashDistinct(void* param, bool* newgroup) { if (isNull(val, type)) { continue; } - + int dummy; void* res = taosHashGet(pInfo->pSet, val, bytes); if (res == NULL) { - taosHashPut(pInfo->pSet, val, bytes, NULL, 0); + taosHashPut(pInfo->pSet, val, bytes, &dummy, sizeof(dummy)); char* start = pResultColInfoData->pData + bytes * pInfo->pRes->info.rows; memcpy(start, val, bytes); pRes->info.rows += 1; From 2b963dc3869acf962db01b28ff560e62d7cb2e7e Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Fri, 9 Jul 2021 08:49:40 +0800 Subject: [PATCH 11/38] [TD-5134] fix runtime error --- src/client/src/tscSQLParser.c | 4 +- src/client/src/tscSubquery.c | 13 ++-- src/client/src/tscUtil.c | 14 ++-- src/common/src/tarithoperator.c | 71 ++++++++++++++++++- src/query/inc/qTableMeta.h | 5 +- src/query/src/qExecutor.c | 1 + .../general/parser/col_arithmetic_query.sim | 4 +- 7 files changed, 97 insertions(+), 15 deletions(-) diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index 9e8cb325a8..db97af616b 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -5014,7 +5014,8 @@ int32_t validateFillNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSqlNo size_t numOfFields = tscNumOfFields(pQueryInfo); if (pQueryInfo->fillVal == NULL) { - pQueryInfo->fillVal = calloc(numOfFields, sizeof(int64_t)); + pQueryInfo->fillVal = calloc(numOfFields, sizeof(int64_t)); + pQueryInfo->numOfFillVal = numOfFields; if (pQueryInfo->fillVal == NULL) { return TSDB_CODE_TSC_OUT_OF_MEMORY; } @@ -8191,6 +8192,7 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf } taosArrayAddBatch(pQueryInfo->exprList1, (void*) p, numOfExpr); + tfree(p); } #if 0 diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c index 804878fa45..55f4251660 100644 --- a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -107,6 +107,9 @@ bool subAndCheckDone(SSqlObj *pSql, SSqlObj *pParentSql, int idx) { subState->states[idx] = 1; bool done = allSubqueryDone(pParentSql); + if (!done) { + tscDebug("0x%"PRIx64" sub:%p,%d completed, total:%d", pParentSql->self, pSql, idx, pParentSql->subState.numOfSub); + } pthread_mutex_unlock(&subState->mutex); return done; } @@ -1173,7 +1176,7 @@ static void tidTagRetrieveCallback(void* param, TAOS_RES* 
tres, int32_t numOfRow // no data exists in next vnode, mark the query completed // only when there is no subquery exits any more, proceeds to get the intersect of the tuple sets. if (!subAndCheckDone(pSql, pParentSql, pSupporter->subqueryIndex)) { - tscDebug("0x%"PRIx64" tagRetrieve:%p,%d completed, total:%d", pParentSql->self, tres, pSupporter->subqueryIndex, pParentSql->subState.numOfSub); + //tscDebug("0x%"PRIx64" tagRetrieve:%p,%d completed, total:%d", pParentSql->self, tres, pSupporter->subqueryIndex, pParentSql->subState.numOfSub); return; } @@ -1441,7 +1444,7 @@ static void joinRetrieveFinalResCallback(void* param, TAOS_RES* tres, int numOfR } if (!subAndCheckDone(pSql, pParentSql, pSupporter->subqueryIndex)) { - tscDebug("0x%"PRIx64" sub:0x%"PRIx64",%d completed, total:%d", pParentSql->self, pSql->self, pSupporter->subqueryIndex, pState->numOfSub); + //tscDebug("0x%"PRIx64" sub:0x%"PRIx64",%d completed, total:%d", pParentSql->self, pSql->self, pSupporter->subqueryIndex, pState->numOfSub); return; } @@ -3048,9 +3051,9 @@ static void multiVnodeInsertFinalize(void* param, TAOS_RES* tres, int numOfRows) } } - int32_t suppIdx = pSupporter->index; - if (!subAndCheckDone(tres, pParentObj, suppIdx)) { - tscDebug("0x%"PRIx64" insert:%p,%d completed, total:%d", pParentObj->self, tres, suppIdx, pParentObj->subState.numOfSub); + if (!subAndCheckDone(tres, pParentObj, pSupporter->index)) { + // concurrency problem, other thread already release pParentObj + //tscDebug("0x%"PRIx64" insert:%p,%d completed, total:%d", pParentObj->self, tres, suppIdx, pParentObj->subState.numOfSub); return; } diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index 3195e74f02..62b57b484d 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -2949,6 +2949,7 @@ int32_t tscQueryInfoCopy(SQueryInfo* pQueryInfo, const SQueryInfo* pSrc) { pQueryInfo->tsBuf = NULL; pQueryInfo->fillType = pSrc->fillType; pQueryInfo->fillVal = NULL; + pQueryInfo->numOfFillVal = 0;; pQueryInfo->clauseLimit = pSrc->clauseLimit; pQueryInfo->prjOffset = pSrc->prjOffset; pQueryInfo->numOfTables = 0; @@ -2984,11 +2985,12 @@ int32_t tscQueryInfoCopy(SQueryInfo* pQueryInfo, const SQueryInfo* pSrc) { } if (pSrc->fillType != TSDB_FILL_NONE) { - pQueryInfo->fillVal = malloc(pSrc->fieldsInfo.numOfOutput * sizeof(int64_t)); + pQueryInfo->fillVal = calloc(1, pSrc->fieldsInfo.numOfOutput * sizeof(int64_t)); if (pQueryInfo->fillVal == NULL) { code = TSDB_CODE_TSC_OUT_OF_MEMORY; goto _error; } + pQueryInfo->numOfFillVal = pSrc->fieldsInfo.numOfOutput; memcpy(pQueryInfo->fillVal, pSrc->fillVal, pSrc->fieldsInfo.numOfOutput * sizeof(int64_t)); } @@ -3329,6 +3331,7 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t pNewQueryInfo->tsBuf = NULL; pNewQueryInfo->fillType = pQueryInfo->fillType; pNewQueryInfo->fillVal = NULL; + pNewQueryInfo->numOfFillVal = 0; pNewQueryInfo->clauseLimit = pQueryInfo->clauseLimit; pNewQueryInfo->prjOffset = pQueryInfo->prjOffset; pNewQueryInfo->numOfTables = 0; @@ -3359,11 +3362,14 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t } if (pQueryInfo->fillType != TSDB_FILL_NONE) { - pNewQueryInfo->fillVal = malloc(pQueryInfo->fieldsInfo.numOfOutput * sizeof(int64_t)); + //just make memory memory sanitizer happy + //refator later + pNewQueryInfo->fillVal = calloc(1, pQueryInfo->fieldsInfo.numOfOutput * sizeof(int64_t)); if (pNewQueryInfo->fillVal == NULL) { terrno = TSDB_CODE_TSC_OUT_OF_MEMORY; goto _error; } + pNewQueryInfo->numOfFillVal = 
pQueryInfo->fieldsInfo.numOfOutput; memcpy(pNewQueryInfo->fillVal, pQueryInfo->fillVal, pQueryInfo->fieldsInfo.numOfOutput * sizeof(int64_t)); } @@ -4382,9 +4388,7 @@ int32_t tscCreateQueryFromQueryInfo(SQueryInfo* pQueryInfo, SQueryAttr* pQueryAt if (pQueryAttr->fillType != TSDB_FILL_NONE) { pQueryAttr->fillVal = calloc(pQueryAttr->numOfOutput, sizeof(int64_t)); - int32_t fields = tscNumOfFields(pQueryInfo); - int32_t cpySize = fields < pQueryAttr->numOfOutput ? fields : pQueryAttr->numOfOutput; - memcpy(pQueryAttr->fillVal, pQueryInfo->fillVal, cpySize * sizeof(int64_t)); + memcpy(pQueryAttr->fillVal, pQueryInfo->fillVal, pQueryInfo->numOfFillVal * sizeof(int64_t)); } pQueryAttr->srcRowSize = 0; diff --git a/src/common/src/tarithoperator.c b/src/common/src/tarithoperator.c index b37e358b9c..3779303e1a 100644 --- a/src/common/src/tarithoperator.c +++ b/src/common/src/tarithoperator.c @@ -18,7 +18,58 @@ #include "ttype.h" #include "tutil.h" #include "tarithoperator.h" +#include "tcompare.h" +//GET_TYPED_DATA(v, double, _right_type, (char *)&((right)[i])); +#define ARRAY_LIST_OP_DIV(left, right, _left_type, _right_type, len1, len2, out, op, _res_type, _ord) \ + { \ + int32_t i = ((_ord) == TSDB_ORDER_ASC) ? 0 : MAX(len1, len2) - 1; \ + int32_t step = ((_ord) == TSDB_ORDER_ASC) ? 1 : -1; \ + \ + if ((len1) == (len2)) { \ + for (; i < (len2) && i >= 0; i += step, (out) += 1) { \ + if (isNull((char *)&((left)[i]), _left_type) || isNull((char *)&((right)[i]), _right_type)) { \ + SET_DOUBLE_NULL(out); \ + continue; \ + } \ + double v, z = 0.0; \ + GET_TYPED_DATA(v, double, _right_type, (char *)&((right)[i])); \ + if (getComparFunc(TSDB_DATA_TYPE_DOUBLE, 0)(&v, &z) == 0) { \ + SET_DOUBLE_NULL(out); \ + continue; \ + } \ + *(out) = (double)(left)[i] op(right)[i]; \ + } \ + } else if ((len1) == 1) { \ + for (; i >= 0 && i < (len2); i += step, (out) += 1) { \ + if (isNull((char *)(left), _left_type) || isNull((char *)&(right)[i], _right_type)) { \ + SET_DOUBLE_NULL(out); \ + continue; \ + } \ + double v, z = 0.0; \ + GET_TYPED_DATA(v, double, _right_type, (char *)&((right)[i])); \ + if (getComparFunc(TSDB_DATA_TYPE_DOUBLE, 0)(&v, &z) == 0) { \ + SET_DOUBLE_NULL(out); \ + continue; \ + } \ + *(out) = (double)(left)[0] op(right)[i]; \ + } \ + } else if ((len2) == 1) { \ + for (; i >= 0 && i < (len1); i += step, (out) += 1) { \ + if (isNull((char *)&(left)[i], _left_type) || isNull((char *)(right), _right_type)) { \ + SET_DOUBLE_NULL(out); \ + continue; \ + } \ + double v, z = 0.0; \ + GET_TYPED_DATA(v, double, _right_type, (char *)&((right)[0])); \ + if (getComparFunc(TSDB_DATA_TYPE_DOUBLE, 0)(&v, &z) == 0) { \ + SET_DOUBLE_NULL(out); \ + continue; \ + } \ + *(out) = (double)(left)[i] op(right)[0]; \ + } \ + } \ + } #define ARRAY_LIST_OP(left, right, _left_type, _right_type, len1, len2, out, op, _res_type, _ord) \ { \ int32_t i = ((_ord) == TSDB_ORDER_ASC) ? 
0 : MAX(len1, len2) - 1; \ @@ -62,6 +113,12 @@ SET_DOUBLE_NULL(out); \ continue; \ } \ + double v, z = 0.0; \ + GET_TYPED_DATA(v, double, _right_type, (char *)&((right)[i])); \ + if (getComparFunc(TSDB_DATA_TYPE_DOUBLE, 0)(&v, &z) == 0) { \ + SET_DOUBLE_NULL(out); \ + continue; \ + } \ *(out) = (double)(left)[i] - ((int64_t)(((double)(left)[i]) / (right)[i])) * (right)[i]; \ } \ } else if (len1 == 1) { \ @@ -70,6 +127,12 @@ SET_DOUBLE_NULL(out); \ continue; \ } \ + double v, z = 0.0; \ + GET_TYPED_DATA(v, double, _right_type, (char *)&((right)[i])); \ + if (getComparFunc(TSDB_DATA_TYPE_DOUBLE, 0)(&v, &z) == 0) { \ + SET_DOUBLE_NULL(out); \ + continue; \ + } \ *(out) = (double)(left)[0] - ((int64_t)(((double)(left)[0]) / (right)[i])) * (right)[i]; \ } \ } else if ((len2) == 1) { \ @@ -78,6 +141,12 @@ SET_DOUBLE_NULL(out); \ continue; \ } \ + double v, z = 0.0; \ + GET_TYPED_DATA(v, double, _right_type, (char *)&((right)[0])); \ + if (getComparFunc(TSDB_DATA_TYPE_DOUBLE, 0)(&v, &z) == 0) { \ + SET_DOUBLE_NULL(out); \ + continue; \ + } \ *(out) = (double)(left)[i] - ((int64_t)(((double)(left)[i]) / (right)[0])) * (right)[0]; \ } \ } \ @@ -90,7 +159,7 @@ #define ARRAY_LIST_MULTI(left, right, _left_type, _right_type, len1, len2, out, _ord) \ ARRAY_LIST_OP(left, right, _left_type, _right_type, len1, len2, out, *, TSDB_DATA_TYPE_DOUBLE, _ord) #define ARRAY_LIST_DIV(left, right, _left_type, _right_type, len1, len2, out, _ord) \ - ARRAY_LIST_OP(left, right, _left_type, _right_type, len1, len2, out, /, TSDB_DATA_TYPE_DOUBLE, _ord) + ARRAY_LIST_OP_DIV(left, right, _left_type, _right_type, len1, len2, out, /, TSDB_DATA_TYPE_DOUBLE, _ord) #define ARRAY_LIST_REM(left, right, _left_type, _right_type, len1, len2, out, _ord) \ ARRAY_LIST_OP_REM(left, right, _left_type, _right_type, len1, len2, out, %, TSDB_DATA_TYPE_DOUBLE, _ord) diff --git a/src/query/inc/qTableMeta.h b/src/query/inc/qTableMeta.h index 7ec6dfbcf9..3f17e62f1e 100644 --- a/src/query/inc/qTableMeta.h +++ b/src/query/inc/qTableMeta.h @@ -106,11 +106,14 @@ typedef struct SQueryInfo { STagCond tagCond; SOrderVal order; - int16_t fillType; // final result fill type int16_t numOfTables; STableMetaInfo **pTableMetaInfo; struct STSBuf *tsBuf; + + int16_t fillType; // final result fill type int64_t * fillVal; // default value for fill + int32_t numOfFillVal; // fill value size + char * msg; // pointer to the pCmd->payload to keep error message temporarily int64_t clauseLimit; // limit for current sub clause diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index ecdbc3be4f..0c7fbf4dc8 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -992,6 +992,7 @@ static void doSetInputDataBlock(SOperatorInfo* pOperator, SQLFunctionCtx* pCtx, setBlockStatisInfo(&pCtx[i], pBlock, &pOperator->pExpr[i].base.colInfo); if (pCtx[i].functionId == TSDB_FUNC_ARITHM) { + pCtx[i].param[1].pz = (char*) &Operator->pRuntimeEnv->sasArray[i]; setArithParams((SArithmeticSupport*)pCtx[i].param[1].pz, &pOperator->pExpr[i], pBlock); } else { SColIndex* pCol = &pOperator->pExpr[i].base.colInfo; diff --git a/tests/script/general/parser/col_arithmetic_query.sim b/tests/script/general/parser/col_arithmetic_query.sim index 191f56fcfb..17ae6cfd6b 100644 --- a/tests/script/general/parser/col_arithmetic_query.sim +++ b/tests/script/general/parser/col_arithmetic_query.sim @@ -193,7 +193,7 @@ if $data02 != 0.000000000 then return -1 endi -if $data03 != 0.000000000 then +if $data03 != NULL then return -1 endi @@ -444,7 +444,7 @@ if $data02 != 
8.077777778 then return -1 endi -if $data03 != inf then +if $data03 != NULL then return -1 endi From f8df9d40ce90038dd31b4609b7a9e22fdacc519d Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Fri, 9 Jul 2021 09:25:32 +0800 Subject: [PATCH 12/38] [TD-5134] fix runtime error --- src/query/src/qExecutor.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index 0c7fbf4dc8..4ed7e19fb3 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -939,7 +939,9 @@ static TSKEY getStartTsKey(SQueryAttr* pQueryAttr, STimeWindow* win, const TSKEY static void setArithParams(SArithmeticSupport* sas, SExprInfo *pExprInfo, SSDataBlock* pSDataBlock) { sas->numOfCols = (int32_t) pSDataBlock->info.numOfCols; sas->pExprInfo = pExprInfo; - + if (sas->colList != NULL) { + return; + } sas->colList = calloc(1, pSDataBlock->info.numOfCols*sizeof(SColumnInfo)); for(int32_t i = 0; i < sas->numOfCols; ++i) { SColumnInfoData* pColData = taosArrayGet(pSDataBlock->pDataBlock, i); @@ -992,7 +994,6 @@ static void doSetInputDataBlock(SOperatorInfo* pOperator, SQLFunctionCtx* pCtx, setBlockStatisInfo(&pCtx[i], pBlock, &pOperator->pExpr[i].base.colInfo); if (pCtx[i].functionId == TSDB_FUNC_ARITHM) { - pCtx[i].param[1].pz = (char*) &Operator->pRuntimeEnv->sasArray[i]; setArithParams((SArithmeticSupport*)pCtx[i].param[1].pz, &pOperator->pExpr[i], pBlock); } else { SColIndex* pCol = &pOperator->pExpr[i].base.colInfo; From b36050cb95199bf9cec235f66fcf1e0050d727a5 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Fri, 9 Jul 2021 11:03:09 +0800 Subject: [PATCH 13/38] [TD-5134] fix runtime error --- src/client/src/tscSQLParser.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index db97af616b..fdecb6eda5 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -5015,7 +5015,7 @@ int32_t validateFillNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSqlNo if (pQueryInfo->fillVal == NULL) { pQueryInfo->fillVal = calloc(numOfFields, sizeof(int64_t)); - pQueryInfo->numOfFillVal = numOfFields; + pQueryInfo->numOfFillVal = (int32_t)numOfFields; if (pQueryInfo->fillVal == NULL) { return TSDB_CODE_TSC_OUT_OF_MEMORY; } From 5a7917e69b861aed9ee4d75960c4c92e2a0c0850 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Sat, 10 Jul 2021 11:04:42 +0800 Subject: [PATCH 14/38] [TD-5134] fix runtime error --- src/client/src/tscSQLParser.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index 9af6e0b8e6..5ecddb7910 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -3364,7 +3364,14 @@ static int32_t doExtractColumnFilterInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, if (IS_NUMERIC_TYPE(pRight->value.nType)) { bufLen = 60; } else { - bufLen = pRight->value.nLen + 1; + /* + * make memory sanitizer happy; + */ + if (pRight->value.nLen == 0) { + bufLen = pRight->value.nLen + 2; + } else { + bufLen = pRight->value.nLen + 1; + } } if (pExpr->tokenId == TK_LE || pExpr->tokenId == TK_LT) { From 3050211e5ce73042a84eb2d4409c65269f0028e2 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Sat, 10 Jul 2021 13:58:11 +0800 Subject: [PATCH 15/38] merge develop --- src/client/src/tscSQLParser.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index 5ecddb7910..ed76f8fd84 100644 --- 
a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -2799,7 +2799,9 @@ int32_t doGetColumnIndexByName(SStrToken* pToken, SQueryInfo* pQueryInfo, SColum pIndex->columnIndex = TSDB_TBNAME_COLUMN_INDEX; } else if (strlen(DEFAULT_PRIMARY_TIMESTAMP_COL_NAME) == pToken->n && strncasecmp(pToken->z, DEFAULT_PRIMARY_TIMESTAMP_COL_NAME, pToken->n) == 0) { - pIndex->columnIndex = PRIMARYKEY_TIMESTAMP_COL_INDEX; + pIndex->columnIndex = PRIMARYKEY_TIMESTAMP_COL_INDEX; // just make runtime happy, need fix java test case InsertSpecialCharacterJniTest + } else if (pToken->n == 0) { + pIndex->columnIndex = PRIMARYKEY_TIMESTAMP_COL_INDEX; // just make runtime happy, need fix java test case InsertSpecialCharacterJniTest } else { // not specify the table name, try to locate the table index by column name if (pIndex->tableIndex == COLUMN_INDEX_INITIAL_VAL) { From 2a107511f784b01c224e6b51c3175da719c9340c Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Wed, 14 Jul 2021 10:20:46 +0800 Subject: [PATCH 16/38] [TD-5134] fix runtime error --- src/client/src/tscSQLParser.c | 23 +++++++++++++++-------- src/client/src/tscServer.c | 5 +++++ src/client/src/tscSubquery.c | 11 ++++++++--- 3 files changed, 28 insertions(+), 11 deletions(-) diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index ed76f8fd84..dcd2d1e509 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -4812,7 +4812,7 @@ int32_t validateWhereNode(SQueryInfo* pQueryInfo, tSqlExpr** pExpr, SSqlObj* pSq int32_t type = 0; if ((ret = getQueryCondExpr(&pSql->cmd, pQueryInfo, pExpr, &condExpr, &type, (*pExpr)->tokenId)) != TSDB_CODE_SUCCESS) { - return ret; + goto PARSE_WHERE_EXIT; } tSqlExprCompact(pExpr); @@ -4822,17 +4822,17 @@ int32_t validateWhereNode(SQueryInfo* pQueryInfo, tSqlExpr** pExpr, SSqlObj* pSq // 1. check if it is a join query if ((ret = validateJoinExpr(&pSql->cmd, pQueryInfo, &condExpr)) != TSDB_CODE_SUCCESS) { - return ret; + goto PARSE_WHERE_EXIT; } // 2. get the query time range if ((ret = getTimeRangeFromExpr(&pSql->cmd, pQueryInfo, condExpr.pTimewindow)) != TSDB_CODE_SUCCESS) { - return ret; + goto PARSE_WHERE_EXIT; } // 3. get the tag query condition if ((ret = getTagQueryCondExpr(&pSql->cmd, pQueryInfo, &condExpr, pExpr)) != TSDB_CODE_SUCCESS) { - return ret; + goto PARSE_WHERE_EXIT; } // 4. get the table name query condition @@ -7707,11 +7707,18 @@ int32_t loadAllTableMeta(SSqlObj* pSql, struct SSqlInfo* pInfo) { taosArrayPush(pVgroupList, &t); } - STableMeta* pMeta = tscTableMetaDup(pTableMeta); - STableMetaVgroupInfo p = { .pTableMeta = pMeta }; + //STableMeta* pMeta = tscTableMetaDup(pTableMeta); + //STableMetaVgroupInfo p = { .pTableMeta = pMeta }; + //const char* px = tNameGetTableName(pname); + //taosHashPut(pCmd->pTableMetaMap, px, strlen(px), &p, sizeof(STableMetaVgroupInfo)); + // avoid mem leak, may should update pTableMeta const char* px = tNameGetTableName(pname); - taosHashPut(pCmd->pTableMetaMap, px, strlen(px), &p, sizeof(STableMetaVgroupInfo)); + if (taosHashGet(pCmd->pTableMetaMap, px, strlen(px)) == NULL) { + STableMeta* pMeta = tscTableMetaDup(pTableMeta); + STableMetaVgroupInfo p = { .pTableMeta = pMeta, .pVgroupInfo = NULL}; + taosHashPut(pCmd->pTableMetaMap, px, strlen(px), &p, sizeof(STableMetaVgroupInfo)); + } } else { // add to the retrieve table meta array list. 
char* t = strdup(name); taosArrayPush(plist, &t); @@ -8161,7 +8168,7 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf // in case of join query, time range is required. if (QUERY_IS_JOIN_QUERY(pQueryInfo->type)) { - int64_t timeRange = ABS(pQueryInfo->window.skey - pQueryInfo->window.ekey); + uint64_t timeRange = (uint64_t)pQueryInfo->window.ekey - pQueryInfo->window.skey; if (timeRange == 0 && pQueryInfo->window.skey == 0) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3); } diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c index d30ee32d67..953dfb186a 100644 --- a/src/client/src/tscServer.c +++ b/src/client/src/tscServer.c @@ -2064,6 +2064,7 @@ int tscProcessMultiTableMetaRsp(SSqlObj *pSql) { STableMeta* pTableMeta = tscCreateTableMetaFromMsg(pMetaMsg); if (!tIsValidSchema(pTableMeta->schema, pTableMeta->tableInfo.numOfColumns, pTableMeta->tableInfo.numOfTags)) { tscError("0x%"PRIx64" invalid table meta from mnode, name:%s", pSql->self, pMetaMsg->tableFname); + tfree(pTableMeta); taosHashCleanup(pSet); taosReleaseRef(tscObjRef, pParentSql->self); return TSDB_CODE_TSC_INVALID_VALUE; @@ -2105,6 +2106,10 @@ int tscProcessMultiTableMetaRsp(SSqlObj *pSql) { assert(p != NULL); int32_t size = 0; + if (p->pVgroupInfo!= NULL) { + tscVgroupInfoClear(p->pVgroupInfo); + //tfree(p->pTableMeta); + } p->pVgroupInfo = createVgroupInfoFromMsg(pMsg, &size, pSql->self); pMsg += size; } diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c index 55f4251660..2fa8767eec 100644 --- a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -419,7 +419,9 @@ static void tscDestroyJoinSupporter(SJoinSupporter* pSupporter) { } // tscFieldInfoClear(&pSupporter->fieldsInfo); - + if (pSupporter->fieldsInfo.internalField != NULL) { + taosArrayDestroy(pSupporter->fieldsInfo.internalField); + } if (pSupporter->pTSBuf != NULL) { tsBufDestroy(pSupporter->pTSBuf); pSupporter->pTSBuf = NULL; @@ -433,7 +435,8 @@ static void tscDestroyJoinSupporter(SJoinSupporter* pSupporter) { } if (pSupporter->pVgroupTables != NULL) { - taosArrayDestroy(pSupporter->pVgroupTables); + //taosArrayDestroy(pSupporter->pVgroupTables); + tscFreeVgroupTableInfo(pSupporter->pVgroupTables); pSupporter->pVgroupTables = NULL; } @@ -892,7 +895,9 @@ static int32_t getIntersectionOfTableTuple(SQueryInfo* pQueryInfo, SSqlObj* pPar tscDebug("Join %d - num:%d", i, p->num); // sort according to the tag valu - qsort(p->pIdTagList, p->num, p->tagSize, tagValCompar); + if (p->pIdTagList != NULL) { + qsort(p->pIdTagList, p->num, p->tagSize, tagValCompar); + } if (!checkForDuplicateTagVal(pColSchema, p, pParentSql)) { for (int32_t j = 0; j <= i; j++) { From e17466980806a73306ae16204f1aea5121db6e62 Mon Sep 17 00:00:00 2001 From: xywang Date: Wed, 14 Jul 2021 13:54:45 +0800 Subject: [PATCH 17/38] [TD-5169]: fixed a potential crash bug --- src/plugins/http/inc/httpSql.h | 3 ++ src/plugins/http/src/httpGcHandle.c | 37 +++++++++++++++-- src/plugins/http/src/httpSql.c | 62 +++++++++++++++++++++++++++++ src/plugins/http/src/httpTgHandle.c | 17 +++++++- 4 files changed, 115 insertions(+), 4 deletions(-) diff --git a/src/plugins/http/inc/httpSql.h b/src/plugins/http/inc/httpSql.h index db3e3a3b16..325545af47 100644 --- a/src/plugins/http/inc/httpSql.h +++ b/src/plugins/http/inc/httpSql.h @@ -35,4 +35,7 @@ void httpTrimTableName(char *name); int32_t httpShrinkTableName(HttpContext *pContext, int32_t pos, char *name); char * httpGetCmdsString(HttpContext *pContext, int32_t pos); 
+int32_t httpCheckAllocEscapeSql(char *oldSql, char **newSql); +void httpCheckFreeEscapedSql(char *oldSql, char *newSql); + #endif diff --git a/src/plugins/http/src/httpGcHandle.c b/src/plugins/http/src/httpGcHandle.c index 925c74e7cd..ed3a28567e 100644 --- a/src/plugins/http/src/httpGcHandle.c +++ b/src/plugins/http/src/httpGcHandle.c @@ -176,6 +176,20 @@ bool gcProcessQueryRequest(HttpContext* pContext) { return false; } +#define ESCAPE_ERROR_PROC(code, context, root) \ + do { \ + if (code != 0) { \ + if (code == 1) { \ + httpSendErrorResp(context, TSDB_CODE_HTTP_GC_REQ_PARSE_ERROR); \ + } else { \ + httpSendErrorResp(context, TSDB_CODE_HTTP_NO_ENOUGH_MEMORY); \ + } \ + \ + cJSON_Delete(root); \ + return false; \ + } \ + } while (0) + for (int32_t i = 0; i < size; ++i) { cJSON* query = cJSON_GetArrayItem(root, i); if (query == NULL) continue; @@ -186,7 +200,14 @@ bool gcProcessQueryRequest(HttpContext* pContext) { continue; } - int32_t refIdBuffer = httpAddToSqlCmdBuffer(pContext, refId->valuestring); + char *newStr = NULL; + int32_t retCode = 0; + + retCode = httpCheckAllocEscapeSql(refId->valuestring, &newStr); + ESCAPE_ERROR_PROC(retCode, pContext, root); + + int32_t refIdBuffer = httpAddToSqlCmdBuffer(pContext, newStr); + httpCheckFreeEscapedSql(refId->valuestring, newStr); if (refIdBuffer == -1) { httpWarn("context:%p, fd:%d, user:%s, refId buffer is full", pContext, pContext->fd, pContext->user); break; @@ -195,7 +216,11 @@ bool gcProcessQueryRequest(HttpContext* pContext) { cJSON* alias = cJSON_GetObjectItem(query, "alias"); int32_t aliasBuffer = -1; if (!(alias == NULL || alias->valuestring == NULL || strlen(alias->valuestring) == 0)) { - aliasBuffer = httpAddToSqlCmdBuffer(pContext, alias->valuestring); + retCode = httpCheckAllocEscapeSql(alias->valuestring, &newStr); + ESCAPE_ERROR_PROC(retCode, pContext, root); + + aliasBuffer = httpAddToSqlCmdBuffer(pContext, newStr); + httpCheckFreeEscapedSql(alias->valuestring, newStr); if (aliasBuffer == -1) { httpWarn("context:%p, fd:%d, user:%s, alias buffer is full", pContext, pContext->fd, pContext->user); break; @@ -211,7 +236,11 @@ bool gcProcessQueryRequest(HttpContext* pContext) { continue; } - int32_t sqlBuffer = httpAddToSqlCmdBuffer(pContext, sql->valuestring); + retCode = httpCheckAllocEscapeSql(sql->valuestring, &newStr); + ESCAPE_ERROR_PROC(retCode, pContext, root); + + int32_t sqlBuffer = httpAddToSqlCmdBuffer(pContext, newStr); + httpCheckFreeEscapedSql(sql->valuestring, newStr); if (sqlBuffer == -1) { httpWarn("context:%p, fd:%d, user:%s, sql buffer is full", pContext, pContext->fd, pContext->user); break; @@ -237,6 +266,8 @@ bool gcProcessQueryRequest(HttpContext* pContext) { } } +#undef ESCAPE_ERROR_PROC + pContext->reqType = HTTP_REQTYPE_MULTI_SQL; pContext->encodeMethod = &gcQueryMethod; pContext->multiCmds->pos = 0; diff --git a/src/plugins/http/src/httpSql.c b/src/plugins/http/src/httpSql.c index 5a0480b694..20d1e159b3 100644 --- a/src/plugins/http/src/httpSql.c +++ b/src/plugins/http/src/httpSql.c @@ -423,3 +423,65 @@ void httpProcessRequest(HttpContext *pContext) { httpExecCmd(pContext); } } + +int32_t httpCheckAllocEscapeSql(char *oldSql, char **newSql) +{ + char *pos; + + if (oldSql == NULL || newSql == NULL) { + return 0; + } + + /* bad sql clause */ + pos = strstr(oldSql, "%%"); + if (pos) { + httpError("bad sql:%s", oldSql); + return 1; + } + + pos = strchr(oldSql, '%'); + if (pos == NULL) { + httpDebug("sql:%s", oldSql); + *newSql = oldSql; + return 0; + } + + *newSql = (char *) calloc(1, (strlen(oldSql) << 
1) + 1); + if (newSql == NULL) { + httpError("failed to allocate for new sql, old sql:%s", oldSql); + return -1; + } + + char *src = oldSql; + char *dst = *newSql; + size_t sqlLen = strlen(src); + + while (1) { + memcpy(dst, src, pos - src + 1); + dst += pos - src + 1; + *dst++ = '%'; + + if (pos + 1 >= oldSql + sqlLen) { + break; + } + + src = ++pos; + pos = strchr(pos, '%'); + if (pos == NULL) { + memcpy(dst, src, sqlLen - strlen(src)); + break; + } + } + + return 0; +} + +void httpCheckFreeEscapedSql(char *oldSql, char *newSql) +{ + if (oldSql && newSql) { + if (oldSql != newSql) { + free(newSql); + } + } +} + diff --git a/src/plugins/http/src/httpTgHandle.c b/src/plugins/http/src/httpTgHandle.c index 69ac3e19c5..8aa156b84a 100644 --- a/src/plugins/http/src/httpTgHandle.c +++ b/src/plugins/http/src/httpTgHandle.c @@ -610,7 +610,22 @@ bool tgProcessSingleMetric(HttpContext *pContext, cJSON *metric, char *db) { // stable tag for detail for (int32_t i = 0; i < orderTagsLen; ++i) { cJSON *tag = orderedTags[i]; - stable_cmd->tagNames[i] = table_cmd->tagNames[i] = httpAddToSqlCmdBuffer(pContext, tag->string); + + char *tagStr = NULL; + int32_t retCode = httpCheckAllocEscapeSql(tag->string, &tagStr); + if (retCode != 0) { + if (retCode == 1) { + httpSendErrorResp(pContext, TSDB_CODE_HTTP_TG_INVALID_JSON); + } else { + httpSendErrorResp(pContext, TSDB_CODE_HTTP_NO_ENOUGH_MEMORY); + } + + return false; + } + + stable_cmd->tagNames[i] = table_cmd->tagNames[i] = httpAddToSqlCmdBuffer(pContext, tagStr); + + httpCheckFreeEscapedSql(tag->string, tagStr); if (tag->type == cJSON_String) stable_cmd->tagValues[i] = table_cmd->tagValues[i] = httpAddToSqlCmdBuffer(pContext, "'%s'", tag->valuestring); From ff9341e2e3838f9c108857a45c5b8e9a16a95cbc Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Wed, 14 Jul 2021 16:12:58 +0800 Subject: [PATCH 18/38] [td-225]code refactor. 
--- src/client/src/tscSql.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/client/src/tscSql.c b/src/client/src/tscSql.c index 1e93892876..60261368f4 100644 --- a/src/client/src/tscSql.c +++ b/src/client/src/tscSql.c @@ -948,8 +948,6 @@ int taos_load_table_info(TAOS *taos, const char *tableNameList) { SSqlObj* pSql = calloc(1, sizeof(SSqlObj)); pSql->pTscObj = taos; pSql->signature = pSql; - - pSql->fp = NULL; // todo set the correct callback function pointer pSql->cmd.pTableMetaMap = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK); int32_t length = (int32_t)strlen(tableNameList); From cf76ed44fe396003a970f79c6c9ca882a85efe0f Mon Sep 17 00:00:00 2001 From: xywang Date: Wed, 14 Jul 2021 16:45:32 +0800 Subject: [PATCH 19/38] [TD-5169]: fixed a parsing bug --- src/plugins/http/src/httpSql.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/plugins/http/src/httpSql.c b/src/plugins/http/src/httpSql.c index 20d1e159b3..b2480dcad8 100644 --- a/src/plugins/http/src/httpSql.c +++ b/src/plugins/http/src/httpSql.c @@ -468,7 +468,7 @@ int32_t httpCheckAllocEscapeSql(char *oldSql, char **newSql) src = ++pos; pos = strchr(pos, '%'); if (pos == NULL) { - memcpy(dst, src, sqlLen - strlen(src)); + memcpy(dst, src, strlen(src)); break; } } From cc13cd44c37d65e32f372be04299164ecd4278f4 Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Wed, 14 Jul 2021 17:25:49 +0800 Subject: [PATCH 20/38] [TD-5260]add case for a potential crash --- tests/script/general/http/grafana_bug.sim | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/tests/script/general/http/grafana_bug.sim b/tests/script/general/http/grafana_bug.sim index 0816e88f3f..ed184e17c6 100644 --- a/tests/script/general/http/grafana_bug.sim +++ b/tests/script/general/http/grafana_bug.sim @@ -247,4 +247,25 @@ if $system_content != @[{"refId":"A","target":"{val1:nil, val2:nil}","datapoints return -1 endi +sql create table tt (ts timestamp ,i int) tags(j binary(20),k binary(20)); +sql insert into t1 using tt tags('jnetworki','t1') values('2020-01-01 00:00:00.000',1)('2020-01-01 00:01:00.000',2)('2020-01-01 00:02:00.000',3)('2020-01-01 00:03:00.000',4)('2020-01-01 00:04:00.000',5); + +system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"A","alias":"","sql":"select max(i) from db.tt where j like \u0027%network%\u0027 and ts >= \u00272020-01-01 00:00:00.000\u0027 and ts < \u00272020-01-01 00:05:00.000\u0027 interval(5m) group by k "} ]' 127.0.0.1:7111/grafana/query +print step1-> $system_content +if $system_content != @[{"refId":"A","target":"{k:t1}","datapoints":[[5,1577808000000]]}]@ then + return -1 +endi + +system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"A","alias":"","sql":"select max(i) from db.tt where j like \u0027jnetwo%\u0027 and ts >= \u00272020-01-01 00:00:00.000\u0027 and ts < \u00272020-01-01 00:05:00.000\u0027 interval(5m) group by k "} ]' 127.0.0.1:7111/grafana/query +print step1-> $system_content +if $system_content != @[{"refId":"A","target":"{k:t1}","datapoints":[[5,1577808000000]]}]@ then + return -1 +endi + +system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"A","alias":"","sql":"select max(i) from db.tt where j like \u0027%networki\u0027 and ts >= \u00272020-01-01 00:00:00.000\u0027 and ts < \u00272020-01-01 00:05:00.000\u0027 interval(5m) group by k "} ]' 127.0.0.1:7111/grafana/query +print step1-> $system_content +if $system_content != 
@[{"refId":"A","target":"{k:t1}","datapoints":[[5,1577808000000]]}]@ then + return -1 +endi + system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file From 361f81e403a39d57ea45cce5644a764a4f7dd4a7 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Wed, 14 Jul 2021 18:00:00 +0800 Subject: [PATCH 21/38] fix runtime bug --- src/client/src/tscSQLParser.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index dcd2d1e509..c3449a5bfb 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -4827,12 +4827,12 @@ int32_t validateWhereNode(SQueryInfo* pQueryInfo, tSqlExpr** pExpr, SSqlObj* pSq // 2. get the query time range if ((ret = getTimeRangeFromExpr(&pSql->cmd, pQueryInfo, condExpr.pTimewindow)) != TSDB_CODE_SUCCESS) { - goto PARSE_WHERE_EXIT; + return ret; } // 3. get the tag query condition if ((ret = getTagQueryCondExpr(&pSql->cmd, pQueryInfo, &condExpr, pExpr)) != TSDB_CODE_SUCCESS) { - goto PARSE_WHERE_EXIT; + return ret; } // 4. get the table name query condition From 954393e7706a37ace31e2fb55670ba23e454b672 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Wed, 14 Jul 2021 22:25:03 +0800 Subject: [PATCH 22/38] [td-5271]: fix a bug of max query in outer query. --- src/query/src/qAggMain.c | 13 ++++++------ tests/script/general/parser/nestquery.sim | 26 +++++++++++++++++++---- 2 files changed, 29 insertions(+), 10 deletions(-) diff --git a/src/query/src/qAggMain.c b/src/query/src/qAggMain.c index 80285f498b..98ffebe616 100644 --- a/src/query/src/qAggMain.c +++ b/src/query/src/qAggMain.c @@ -58,7 +58,7 @@ for (int32_t _i = 0; _i < (ctx)->tagInfo.numOfTagCols; ++_i) { \ SQLFunctionCtx *__ctx = (ctx)->tagInfo.pTagCtxList[_i]; \ if (__ctx->functionId == TSDB_FUNC_TS_DUMMY) { \ - __ctx->tag.i64 = (ts); \ + __ctx->tag.i64 = (ts); \ __ctx->tag.nType = TSDB_DATA_TYPE_BIGINT; \ } \ aAggs[TSDB_FUNC_TAG].xFunction(__ctx); \ @@ -520,7 +520,7 @@ int32_t noDataRequired(SQLFunctionCtx *pCtx, STimeWindow* w, int32_t colId) { if ((ctx)->hasNull && isNull((char *)&(list)[i], tsdbType)) { \ continue; \ } \ - TSKEY key = GET_TS_DATA(ctx, i); \ + TSKEY key = (ctx)->ptsList != NULL? 
GET_TS_DATA(ctx, i):0; \ UPDATE_DATA(ctx, val, (list)[i], num, sign, key); \ } @@ -1463,10 +1463,11 @@ static void first_function(SQLFunctionCtx *pCtx) { } memcpy(pCtx->pOutput, data, pCtx->inputBytes); - - TSKEY k = GET_TS_DATA(pCtx, i); - DO_UPDATE_TAG_COLUMNS(pCtx, k); - + if (pCtx->ptsList != NULL) { + TSKEY k = GET_TS_DATA(pCtx, i); + DO_UPDATE_TAG_COLUMNS(pCtx, k); + } + SResultRowCellInfo *pInfo = GET_RES_INFO(pCtx); pInfo->hasResult = DATA_SET_FLAG; pInfo->complete = true; diff --git a/tests/script/general/parser/nestquery.sim b/tests/script/general/parser/nestquery.sim index 6035992d30..3c1ba03369 100644 --- a/tests/script/general/parser/nestquery.sim +++ b/tests/script/general/parser/nestquery.sim @@ -334,10 +334,6 @@ sql select top(x, 20) from (select c1 x from nest_tb0); sql select bottom(x, 20) from (select c1 x from nest_tb0) -print ===================> complex query - - - print ===================> group by + having @@ -464,6 +460,28 @@ if $data01 != 0.000083333 then return -1 endi +print ======================>TD-5271 +sql select min(val),max(val),first(val),last(val),count(val),sum(val),avg(val) from (select count(*) val from nest_mt0 group by tbname) +if $rows != 1 then + return -1 +endi + +if $data00 != 10000 then + return -1 +endi + +if $data01 != 10000 then + return -1 +endi + +if $data04 != 10 then + return -1 +endi + +if $data05 != 100000 then + return -1 +endi + print =================>us database interval query, TD-5039 sql create database test precision 'us'; sql use test; From cc8744f1db91fc86dbdb6dfdb7599374c531c6b4 Mon Sep 17 00:00:00 2001 From: Jun Li Date: Wed, 14 Jul 2021 22:03:25 -0700 Subject: [PATCH 23/38] Add some guardrails --- src/util/src/tqueue.c | 32 ++++++++++++++++++++++++++------ 1 file changed, 26 insertions(+), 6 deletions(-) diff --git a/src/util/src/tqueue.c b/src/util/src/tqueue.c index 7caa1a6c37..6a37f11ece 100644 --- a/src/util/src/tqueue.c +++ b/src/util/src/tqueue.c @@ -173,10 +173,12 @@ int taosReadAllQitems(taos_queue param, taos_qall p2) { STaosQueue *queue = (STaosQueue *)param; STaosQall *qall = (STaosQall *)p2; int code = 0; + bool empty; pthread_mutex_lock(&queue->mutex); - if (queue->head) { + empty = queue->head == NULL; + if (!empty) { memset(qall, 0, sizeof(STaosQall)); qall->current = queue->head; qall->start = queue->head; @@ -188,11 +190,17 @@ int taosReadAllQitems(taos_queue param, taos_qall p2) { queue->tail = NULL; queue->numOfItems = 0; if (queue->qset) atomic_sub_fetch_32(&queue->qset->numOfItems, qall->numOfItems); - } + } pthread_mutex_unlock(&queue->mutex); - - return code; + + // if source queue is empty, we set destination qall to empty too. 
+ if (empty) { + qall->current = NULL; + qall->start = NULL; + qall->numOfItems = 0; + } + return code; } int taosGetQitem(taos_qall param, int *type, void **pitem) { @@ -423,10 +431,22 @@ int taosReadAllQitemsFromQset(taos_qset param, taos_qall p2, void **phandle) { int taosGetQueueItemsNumber(taos_queue param) { STaosQueue *queue = (STaosQueue *)param; - return queue->numOfItems; + if (!queue) return 0; + + int num; + pthread_mutex_lock(&queue->mutex); + num = queue->numOfItems; + pthread_mutex_unlock(&queue->mutex); + return num; } int taosGetQsetItemsNumber(taos_qset param) { STaosQset *qset = (STaosQset *)param; - return qset->numOfItems; + if (!qset) return 0; + + int num = 0; + pthread_mutex_lock(&qset->mutex); + num = qset->numOfItems; + pthread_mutex_unlock(&qset->mutex); + return num; } From d2941c06088c20e150f9faf459cf000e054ebf8e Mon Sep 17 00:00:00 2001 From: xywang Date: Thu, 15 Jul 2021 13:17:10 +0800 Subject: [PATCH 24/38] [TD-5169]: simplified function implementation --- src/inc/taoserror.h | 2 ++ src/plugins/http/src/httpGcHandle.c | 8 ++------ src/plugins/http/src/httpSql.c | 10 +++++----- src/plugins/http/src/httpTgHandle.c | 8 ++------ src/util/src/terror.c | 2 ++ 5 files changed, 13 insertions(+), 17 deletions(-) diff --git a/src/inc/taoserror.h b/src/inc/taoserror.h index 1e996be889..b18aa2c2d9 100644 --- a/src/inc/taoserror.h +++ b/src/inc/taoserror.h @@ -395,6 +395,8 @@ int32_t* taosGetErrno(); #define TSDB_CODE_HTTP_OP_VALUE_NULL TAOS_DEF_ERROR_CODE(0, 0x11A5) //"value not find") #define TSDB_CODE_HTTP_OP_VALUE_TYPE TAOS_DEF_ERROR_CODE(0, 0x11A6) //"value type should be boolean number or string") +#define TSDB_CODE_HTTP_REQUEST_JSON_ERROR TAOS_DEF_ERROR_CODE(0, 0x1F00) //"http request json error") + // odbc #define TSDB_CODE_ODBC_OOM TAOS_DEF_ERROR_CODE(0, 0x2100) //"out of memory") #define TSDB_CODE_ODBC_CONV_CHAR_NOT_NUM TAOS_DEF_ERROR_CODE(0, 0x2101) //"convertion not a valid literal input") diff --git a/src/plugins/http/src/httpGcHandle.c b/src/plugins/http/src/httpGcHandle.c index ed3a28567e..883afcc4ec 100644 --- a/src/plugins/http/src/httpGcHandle.c +++ b/src/plugins/http/src/httpGcHandle.c @@ -178,12 +178,8 @@ bool gcProcessQueryRequest(HttpContext* pContext) { #define ESCAPE_ERROR_PROC(code, context, root) \ do { \ - if (code != 0) { \ - if (code == 1) { \ - httpSendErrorResp(context, TSDB_CODE_HTTP_GC_REQ_PARSE_ERROR); \ - } else { \ - httpSendErrorResp(context, TSDB_CODE_HTTP_NO_ENOUGH_MEMORY); \ - } \ + if (code != TSDB_CODE_SUCCESS) { \ + httpSendErrorResp(context, code); \ \ cJSON_Delete(root); \ return false; \ diff --git a/src/plugins/http/src/httpSql.c b/src/plugins/http/src/httpSql.c index b2480dcad8..c2e723732a 100644 --- a/src/plugins/http/src/httpSql.c +++ b/src/plugins/http/src/httpSql.c @@ -429,27 +429,27 @@ int32_t httpCheckAllocEscapeSql(char *oldSql, char **newSql) char *pos; if (oldSql == NULL || newSql == NULL) { - return 0; + return TSDB_CODE_SUCCESS; } /* bad sql clause */ pos = strstr(oldSql, "%%"); if (pos) { httpError("bad sql:%s", oldSql); - return 1; + return TSDB_CODE_HTTP_REQUEST_JSON_ERROR; } pos = strchr(oldSql, '%'); if (pos == NULL) { httpDebug("sql:%s", oldSql); *newSql = oldSql; - return 0; + return TSDB_CODE_SUCCESS; } *newSql = (char *) calloc(1, (strlen(oldSql) << 1) + 1); if (newSql == NULL) { httpError("failed to allocate for new sql, old sql:%s", oldSql); - return -1; + return TSDB_CODE_HTTP_NO_ENOUGH_MEMORY; } char *src = oldSql; @@ -473,7 +473,7 @@ int32_t httpCheckAllocEscapeSql(char *oldSql, char **newSql) } } - 
return 0; + return TSDB_CODE_SUCCESS; } void httpCheckFreeEscapedSql(char *oldSql, char *newSql) diff --git a/src/plugins/http/src/httpTgHandle.c b/src/plugins/http/src/httpTgHandle.c index 8aa156b84a..32516b9fd1 100644 --- a/src/plugins/http/src/httpTgHandle.c +++ b/src/plugins/http/src/httpTgHandle.c @@ -613,12 +613,8 @@ bool tgProcessSingleMetric(HttpContext *pContext, cJSON *metric, char *db) { char *tagStr = NULL; int32_t retCode = httpCheckAllocEscapeSql(tag->string, &tagStr); - if (retCode != 0) { - if (retCode == 1) { - httpSendErrorResp(pContext, TSDB_CODE_HTTP_TG_INVALID_JSON); - } else { - httpSendErrorResp(pContext, TSDB_CODE_HTTP_NO_ENOUGH_MEMORY); - } + if (retCode != TSDB_CODE_SUCCESS) { + httpSendErrorResp(pContext, retCode); return false; } diff --git a/src/util/src/terror.c b/src/util/src/terror.c index 27a08d8e9e..6cb508ebae 100644 --- a/src/util/src/terror.c +++ b/src/util/src/terror.c @@ -403,6 +403,8 @@ TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_OP_TAG_VALUE_TOO_LONG, "tag value can not mor TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_OP_VALUE_NULL, "value not find") TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_OP_VALUE_TYPE, "value type should be boolean, number or string") +TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_REQUEST_JSON_ERROR, "http request json error") + // odbc TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_OOM, "out of memory") TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_CONV_CHAR_NOT_NUM, "convertion not a valid literal input") From 0adfdc60d6e145dffd1f172d1621fe175ac610cc Mon Sep 17 00:00:00 2001 From: ubuntu Date: Thu, 15 Jul 2021 15:04:58 +0800 Subject: [PATCH 25/38] test push --- tests/pytest/query/queryCnameDisplay.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 tests/pytest/query/queryCnameDisplay.py diff --git a/tests/pytest/query/queryCnameDisplay.py b/tests/pytest/query/queryCnameDisplay.py new file mode 100644 index 0000000000..e69de29bb2 From b68bb2a84a9444166abca2e1d06424a7c5a78396 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 15 Jul 2021 15:28:55 +0800 Subject: [PATCH 26/38] [td-5037]: fix the last query performance worse then previous problem. 
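
A minimal sketch (illustration only, not part of the applied diff) of the idea behind this change: only-first/only-last style queries mark needReverseScan as false, so the optimized block-scan operator is created with zero reverse passes instead of one. The struct and helper below are hypothetical stand-ins, not TDengine APIs.

```c
/* Illustration only: how a per-query needReverseScan flag can decide how many
 * reverse passes the data-block scan operator is built with. */
#include <stdbool.h>
#include <stdio.h>

typedef struct {
    bool onlyLastQuery;    /* e.g. SELECT LAST(col) ... */
    bool needReverseScan;  /* false when a single forward pass is enough */
} QueryAttr;

static int reverseScanTimes(const QueryAttr *attr) {
    /* only-first/only-last queries skip the extra reverse scan */
    return attr->needReverseScan ? 1 : 0;
}

int main(void) {
    QueryAttr lastOnly = {.onlyLastQuery = true,  .needReverseScan = false};
    QueryAttr generic  = {.onlyLastQuery = false, .needReverseScan = true};
    printf("last-only query reverse scans: %d\n", reverseScanTimes(&lastOnly));
    printf("generic query reverse scans:   %d\n", reverseScanTimes(&generic));
    return 0;
}
```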
--- src/query/inc/qExecutor.h | 2 +- src/query/src/qExecutor.c | 40 +++++++++++++++++++++------------------ src/query/src/queryMain.c | 2 +- src/tsdb/src/tsdbRead.c | 16 +++++++++++++--- 4 files changed, 37 insertions(+), 23 deletions(-) diff --git a/src/query/inc/qExecutor.h b/src/query/inc/qExecutor.h index 9348606d0c..c4276bfe37 100644 --- a/src/query/inc/qExecutor.h +++ b/src/query/inc/qExecutor.h @@ -589,7 +589,7 @@ int32_t createIndirectQueryFuncExprFromMsg(SQueryTableMsg *pQueryMsg, int32_t nu SGroupbyExpr *createGroupbyExprFromMsg(SQueryTableMsg *pQueryMsg, SColIndex *pColIndex, int32_t *code); SQInfo *createQInfoImpl(SQueryTableMsg *pQueryMsg, SGroupbyExpr *pGroupbyExpr, SExprInfo *pExprs, - SExprInfo *pSecExprs, STableGroupInfo *pTableGroupInfo, SColumnInfo* pTagCols, int32_t vgId, char* sql, uint64_t *qId); + SExprInfo *pSecExprs, STableGroupInfo *pTableGroupInfo, SColumnInfo* pTagCols, int32_t vgId, char* sql, uint64_t qId); int32_t initQInfo(STsBufInfo* pTsBufInfo, void* tsdb, void* sourceOptr, SQInfo* pQInfo, SQueryParam* param, char* start, int32_t prevResultLen, void* merger); diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index fa2ddb05b8..dbcea4e90c 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -2268,10 +2268,11 @@ static int32_t updateBlockLoadStatus(SQueryAttr *pQuery, int32_t status) { return status; } -static void doExchangeTimeWindow(SQInfo* pQInfo, STimeWindow* win) { - SQueryAttr* pQueryAttr = &pQInfo->query; - size_t t = taosArrayGetSize(pQueryAttr->tableGroupInfo.pGroupList); - for(int32_t i = 0; i < t; ++i) { +static void doUpdateLastKey(SQueryAttr* pQueryAttr) { + STimeWindow* win = &pQueryAttr->window; + + size_t num = taosArrayGetSize(pQueryAttr->tableGroupInfo.pGroupList); + for(int32_t i = 0; i < num; ++i) { SArray* p1 = taosArrayGetP(pQueryAttr->tableGroupInfo.pGroupList, i); size_t len = taosArrayGetSize(p1); @@ -2286,7 +2287,7 @@ static void doExchangeTimeWindow(SQInfo* pQInfo, STimeWindow* win) { } } -static void changeExecuteScanOrder(SQInfo *pQInfo, SQueryTableMsg* pQueryMsg, bool stableQuery) { +static void updateDataCheckOrder(SQInfo *pQInfo, SQueryTableMsg* pQueryMsg, bool stableQuery) { SQueryAttr* pQueryAttr = pQInfo->runtimeEnv.pQueryAttr; // in case of point-interpolation query, use asc order scan @@ -2303,6 +2304,7 @@ static void changeExecuteScanOrder(SQInfo *pQInfo, SQueryTableMsg* pQueryMsg, bo SWAP(pQueryAttr->window.skey, pQueryAttr->window.ekey, TSKEY); } + pQueryAttr->needReverseScan = false; return; } @@ -2312,7 +2314,8 @@ static void changeExecuteScanOrder(SQInfo *pQInfo, SQueryTableMsg* pQueryMsg, bo SWAP(pQueryAttr->window.skey, pQueryAttr->window.ekey, TSKEY); } - doExchangeTimeWindow(pQInfo, &pQueryAttr->window); + pQueryAttr->needReverseScan = false; + doUpdateLastKey(pQueryAttr); return; } @@ -2333,7 +2336,7 @@ static void changeExecuteScanOrder(SQInfo *pQInfo, SQueryTableMsg* pQueryMsg, bo pQueryAttr->window.ekey, pQueryAttr->window.ekey, pQueryAttr->window.skey); SWAP(pQueryAttr->window.skey, pQueryAttr->window.ekey, TSKEY); - doExchangeTimeWindow(pQInfo, &pQueryAttr->window); + doUpdateLastKey(pQueryAttr); } pQueryAttr->order.order = TSDB_ORDER_ASC; @@ -2343,12 +2346,13 @@ static void changeExecuteScanOrder(SQInfo *pQInfo, SQueryTableMsg* pQueryMsg, bo pQueryAttr->window.ekey, pQueryAttr->window.ekey, pQueryAttr->window.skey); SWAP(pQueryAttr->window.skey, pQueryAttr->window.ekey, TSKEY); - doExchangeTimeWindow(pQInfo, &pQueryAttr->window); + doUpdateLastKey(pQueryAttr); } 
pQueryAttr->order.order = TSDB_ORDER_DESC; } + pQueryAttr->needReverseScan = false; } else { // interval query if (stableQuery) { if (onlyFirstQuery(pQueryAttr)) { @@ -2357,20 +2361,22 @@ static void changeExecuteScanOrder(SQInfo *pQInfo, SQueryTableMsg* pQueryMsg, bo pQueryAttr->window.skey, pQueryAttr->window.ekey, pQueryAttr->window.ekey, pQueryAttr->window.skey); SWAP(pQueryAttr->window.skey, pQueryAttr->window.ekey, TSKEY); - doExchangeTimeWindow(pQInfo, &pQueryAttr->window); + doUpdateLastKey(pQueryAttr); } pQueryAttr->order.order = TSDB_ORDER_ASC; + pQueryAttr->needReverseScan = false; } else if (onlyLastQuery(pQueryAttr)) { if (QUERY_IS_ASC_QUERY(pQueryAttr)) { qDebug(msg, pQInfo, "only-last stable", pQueryAttr->order.order, TSDB_ORDER_DESC, pQueryAttr->window.skey, pQueryAttr->window.ekey, pQueryAttr->window.ekey, pQueryAttr->window.skey); SWAP(pQueryAttr->window.skey, pQueryAttr->window.ekey, TSKEY); - doExchangeTimeWindow(pQInfo, &pQueryAttr->window); + doUpdateLastKey(pQueryAttr); } pQueryAttr->order.order = TSDB_ORDER_DESC; + pQueryAttr->needReverseScan = false; } } } @@ -2388,9 +2394,6 @@ static void getIntermediateBufInfo(SQueryRuntimeEnv* pRuntimeEnv, int32_t* ps, i while(((*rowsize) * MIN_ROWS_PER_PAGE) > (*ps) - overhead) { *ps = ((*ps) << 1u); } - -// pRuntimeEnv->numOfRowsPerPage = ((*ps) - sizeof(tFilePage)) / (*rowsize); -// assert(pRuntimeEnv->numOfRowsPerPage <= MAX_ROWS_PER_RESBUF_PAGE); } #define IS_PREFILTER_TYPE(_t) ((_t) != TSDB_DATA_TYPE_BINARY && (_t) != TSDB_DATA_TYPE_NCHAR) @@ -4382,7 +4385,7 @@ int32_t doInitQInfo(SQInfo* pQInfo, STSBuf* pTsBuf, void* tsdb, void* sourceOptr break; } case OP_DataBlocksOptScan: { - pRuntimeEnv->proot = createDataBlocksOptScanInfo(pRuntimeEnv->pQueryHandle, pRuntimeEnv, getNumOfScanTimes(pQueryAttr), 1); + pRuntimeEnv->proot = createDataBlocksOptScanInfo(pRuntimeEnv->pQueryHandle, pRuntimeEnv, getNumOfScanTimes(pQueryAttr), pQueryAttr->needReverseScan? 
1:0); break; } case OP_TableScan: { @@ -4420,8 +4423,10 @@ int32_t doInitQInfo(SQInfo* pQInfo, STSBuf* pTsBuf, void* tsdb, void* sourceOptr if (pQInfo->summary.queryProfEvents == NULL) { qDebug("QInfo:0x%"PRIx64" failed to allocate query prof events array", pQInfo->qId); } + pQInfo->summary.operatorProfResults = taosHashInit(8, taosGetDefaultHashFunction(TSDB_DATA_TYPE_TINYINT), true, HASH_NO_LOCK); + if (pQInfo->summary.operatorProfResults == NULL) { qDebug("QInfo:0x%"PRIx64" failed to allocate operator prof results hash", pQInfo->qId); } @@ -4814,7 +4819,6 @@ SOperatorInfo* createDataBlocksOptScanInfo(void* pTsdbQueryHandle, SQueryRuntime pInfo->reverseTimes = reverseTime; pInfo->current = 0; pInfo->order = pRuntimeEnv->pQueryAttr->order.order; -// pInfo->prevGroupId = -1; SOperatorInfo* pOptr = calloc(1, sizeof(SOperatorInfo)); pOptr->name = "DataBlocksOptimizedScanOperator"; @@ -7366,7 +7370,7 @@ FORCE_INLINE bool checkQIdEqual(void *qHandle, uint64_t qId) { SQInfo* createQInfoImpl(SQueryTableMsg* pQueryMsg, SGroupbyExpr* pGroupbyExpr, SExprInfo* pExprs, SExprInfo* pSecExprs, STableGroupInfo* pTableGroupInfo, SColumnInfo* pTagCols, int32_t vgId, - char* sql, uint64_t *qId) { + char* sql, uint64_t qId) { int16_t numOfCols = pQueryMsg->numOfCols; int16_t numOfOutput = pQueryMsg->numOfOutput; @@ -7375,7 +7379,7 @@ SQInfo* createQInfoImpl(SQueryTableMsg* pQueryMsg, SGroupbyExpr* pGroupbyExpr, S goto _cleanup_qinfo; } - pQInfo->qId = *qId; + pQInfo->qId = qId; // to make sure third party won't overwrite this structure pQInfo->signature = pQInfo; @@ -7485,7 +7489,7 @@ SQInfo* createQInfoImpl(SQueryTableMsg* pQueryMsg, SGroupbyExpr* pGroupbyExpr, S tsem_init(&pQInfo->ready, 0, 0); pQueryAttr->window = pQueryMsg->window; - changeExecuteScanOrder(pQInfo, pQueryMsg, pQueryAttr->stableQuery); + updateDataCheckOrder(pQInfo, pQueryMsg, pQueryAttr->stableQuery); SQueryRuntimeEnv* pRuntimeEnv = &pQInfo->runtimeEnv; STimeWindow window = pQueryAttr->window; diff --git a/src/query/src/queryMain.c b/src/query/src/queryMain.c index 787cb2f7d1..d4aa523bf8 100644 --- a/src/query/src/queryMain.c +++ b/src/query/src/queryMain.c @@ -162,7 +162,7 @@ int32_t qCreateQueryInfo(void* tsdb, int32_t vgId, SQueryTableMsg* pQueryMsg, qi assert(pQueryMsg->stableQuery == isSTableQuery); (*pQInfo) = createQInfoImpl(pQueryMsg, param.pGroupbyExpr, param.pExprs, param.pSecExprs, &tableGroupInfo, - param.pTagColumnInfo, vgId, param.sql, qId); + param.pTagColumnInfo, vgId, param.sql, *qId); param.sql = NULL; param.pExprs = NULL; diff --git a/src/tsdb/src/tsdbRead.c b/src/tsdb/src/tsdbRead.c index b17aa755a5..1eafb5e233 100644 --- a/src/tsdb/src/tsdbRead.c +++ b/src/tsdb/src/tsdbRead.c @@ -98,6 +98,8 @@ typedef struct SIOCostSummary { int64_t blockLoadTime; int64_t statisInfoLoadTime; int64_t checkForNextTime; + int64_t headFileLoad; + int64_t headFileLoadTime; } SIOCostSummary; typedef struct STsdbQueryHandle { @@ -1045,15 +1047,21 @@ static int32_t getFileCompInfo(STsdbQueryHandle* pQueryHandle, int32_t* numOfBlo int32_t code = TSDB_CODE_SUCCESS; *numOfBlocks = 0; + pQueryHandle->cost.headFileLoad += 1; + int64_t s = taosGetTimestampUs(); + size_t numOfTables = 0; if (pQueryHandle->loadType == BLOCK_LOAD_TABLE_SEQ_ORDER) { - code = loadBlockInfo(pQueryHandle, pQueryHandle->activeIndex, numOfBlocks); + code = loadBlockInfo(pQueryHandle, pQueryHandle->activeIndex, numOfBlocks); } else if (pQueryHandle->loadType == BLOCK_LOAD_OFFSET_SEQ_ORDER) { numOfTables = taosArrayGetSize(pQueryHandle->pTableCheckInfo); for (int32_t i = 0; i 
< numOfTables; ++i) { code = loadBlockInfo(pQueryHandle, i, numOfBlocks); if (code != TSDB_CODE_SUCCESS) { + int64_t e = taosGetTimestampUs(); + + pQueryHandle->cost.headFileLoadTime += (e - s); return code; } } @@ -1061,6 +1069,8 @@ static int32_t getFileCompInfo(STsdbQueryHandle* pQueryHandle, int32_t* numOfBlo assert(0); } + int64_t e = taosGetTimestampUs(); + pQueryHandle->cost.headFileLoadTime += (e - s); return code; } @@ -3731,8 +3741,8 @@ void tsdbCleanupQueryHandle(TsdbQueryHandleT queryHandle) { pQueryHandle->next = doFreeColumnInfoData(pQueryHandle->next); SIOCostSummary* pCost = &pQueryHandle->cost; - tsdbDebug("%p :io-cost summary: statis-info:%"PRId64" us, datablock:%" PRId64" us, check data:%"PRId64" us, 0x%"PRIx64, - pQueryHandle, pCost->statisInfoLoadTime, pCost->blockLoadTime, pCost->checkForNextTime, pQueryHandle->qId); + tsdbDebug("%p :io-cost summary: head-file read cnt:%"PRIu64", head-file time:%"PRIu64" us, statis-info:%"PRId64" us, datablock:%" PRId64" us, check data:%"PRId64" us, 0x%"PRIx64, + pQueryHandle, pCost->headFileLoad, pCost->headFileLoadTime, pCost->statisInfoLoadTime, pCost->blockLoadTime, pCost->checkForNextTime, pQueryHandle->qId); tfree(pQueryHandle); } From 115c00216f3ebefff05e7e0990a77d3812f71f0c Mon Sep 17 00:00:00 2001 From: zhaoyanggh Date: Thu, 15 Jul 2021 09:12:49 +0000 Subject: [PATCH 27/38] script to control cluster used for test robust --- tests/cluster/cluster.sh | 324 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 324 insertions(+) create mode 100644 tests/cluster/cluster.sh diff --git a/tests/cluster/cluster.sh b/tests/cluster/cluster.sh new file mode 100644 index 0000000000..166014fefb --- /dev/null +++ b/tests/cluster/cluster.sh @@ -0,0 +1,324 @@ +#!/bin/bash +stty erase '^H' +stty erase '^?' + +# 运行前需要安装expect; apt install expect +# 运行方式: +# ./cluster.sh -c xxx.cfg +# cfg文件内格式: 每行代表一个节点 第一列为external ip、第二列为密码、第三列为用户名、第四列为hostname、第五列为interal ip +# 注意:列与列直接用空格隔开 +# 例子: +# 51.143.97.155 tbase125! root node5 10.2.0.10 +# 20.94.253.116 tbase125! root node2 10.2.0.12 +# 20.94.250.236 tbase125! root node3 10.2.0.13 +# 20.98.72.51 tbase125! 
root node4 10.2.0.14 + +menu(){ + echo "==============================" + echo "-------------Target-----------" + echo "==============================" + echo "1 cluster" + echo "==============================" + echo "2 dnode" + echo "==============================" + echo "3 arbitrator" + echo "==============================" + echo "4 exit" + echo "==============================" +} + +cluster_menu(){ + echo "==============================" + echo "----------Operation-----------" + echo "==============================" + echo "1 start cluster" + echo "==============================" + echo "2 stop cluster" + echo "==============================" + echo "3 exit" + echo "==============================" +} + +dnode_menu(){ + echo "==============================" + echo "----------Operation-----------" + echo "==============================" + echo "1 start dnode" + echo "==============================" + echo "2 stop dnode" + echo "==============================" + echo "3 add dnode" + echo "==============================" + echo "4 drop dnode" + echo "==============================" + echo "5 exit" + echo "==============================" +} + +arbitrator_menu(){ + echo "==============================" + echo "----------Operation-----------" + echo "==============================" + echo "1 start arbitrator" + echo "==============================" + echo "2 stop arbitrator" + echo "==============================" + echo "3 exit" + echo "==============================" +} + +print_cfg() { + echo "==============================" + echo "-------Configure file---------" + echo "==============================" + echo "Id | IP address | hostname" + i=1 + while read line || [[ -n ${line} ]] + do + arr=($line) + echo " $i | ${arr[0]} | ${arr[3]}" + i=`expr $i + 1`; + done < $1 + echo "==============================" +} + +update(){ + expect -c " + set timeout -1; + spawn ssh $3@$1; + expect { + *yes/no* { send \"yes\r\"; exp_continue } + *assword:* { send \"$2\r\" } + } + expect { + *#* { send \"systemctl $4 taosd\r\" } + } + expect { + *#* { send \"exit\r\" } + } + expect eof; + " + echo -e "\033[32mdnode successfully $4 \033[0m" +} + +update_dnode(){ + i=1 + while read line || [[ -n ${line} ]] + do + if [[ $1 -eq $i ]]; then + arr=($line) + update ${arr[0]} ${arr[1]} ${arr[2]} $2 + break; + fi + i=`expr $i + 1`; + done < $3 +} + +add_hosts() { + expect -c " + set timeout -1; + spawn ssh $3@$1; + expect { + *yes/no* { send \"yes\r\"; exp_continue } + *assword:* { send \"$2\r\" } + } + expect { + *#* { send \"echo $4 $5 >> /etc/hosts\r\" } + } + expect { + *#* { send \"exit\r\" } + } + expect eof; + " + echo -e "\033[32mSuccessfully add to /etc/hosts in $1\033[0m" +} + +remove_hosts() { + expect -c " + set timeout -1; + spawn ssh $3@$1; + expect { + *yes/no* { send \"yes\r\"; exp_continue } + *assword:* { send \"$2\r\" } + } + expect { + *#* { send \"sed -i '/$4/d\' /etc/hosts\r\" } + } + expect { + *#* { send \"exit\r\" } + } + expect eof; + " + echo -e "\033[32mSuccessfully remove from /etc/hosts in $1\033[0m" +} + +remove_varlibtaos() { + expect -c " + set timeout -1; + spawn ssh $3@$1; + expect { + *yes/no* { send \"yes\r\"; exp_continue } + *assword:* { send \"$2\r\" } + } + expect { + *#* { send \"rm -rf /var/lib/taos/*\r\" } + } + expect { + *#* { send \"exit\r\" } + } + expect eof; + " + echo -e "\033[32mSuccessfully remove /var/lib/taos/* in $1\033[0m" +} + +scp_cfg() { + expect -c " + set timeout -1; + spawn scp /etc/taos/taos.cfg $3@$1:/etc/taos; + expect { + *yes/no* { send \"yes\r\"; 
exp_continue } + *assword:* { send \"$2\r\" } + } + expect eof; + " + echo -e "\033[32mSuccessfully scp /etc/taos/taos.cfg to $1\033[0m" +} + +manage_dnode(){ + i=1 + while read line || [[ -n ${line} ]] + do + if [[ $1 -eq $i ]]; then + arr=($line) + scp_cfg ${arr[0]} ${arr[1]} ${arr[2]} + ip=${arr[0]} + pd=${arr[1]} + user=${arr[2]} + j=1 + while read line2 || [[ -n ${line2} ]] + do + arr2=($line2) + if [[ $1 -ne $j ]]; then + if [ $3 == "create" ];then + echo "$3" + add_hosts $ip $pd $user ${arr2[4]} ${arr2[3]} + else + remove_hosts $ip $pd $user ${arr2[4]} ${arr2[3]} + fi + fi + j=`expr $j + 1`; + done < $2 + remove_varlibtaos $ip $pd $user + if [ $3 == "create" ];then + update $ip $pd $user "start" + else + update $ip $pd $user "stop" + fi + taos -s "$3 dnode \"${arr[3]}:6030\"" + break; + fi + i=`expr $i + 1`; + done < $2 + echo -e "\033[32mSuccessfully $3 dnode id $1\033[0m" +} + +update_cluster() { + while read line || [[ -n ${line} ]] + do + arr=($line) + if [ $1 == "start" ]; then + scp_cfg ${arr[0]} ${arr[1]} ${arr[2]} + fi + update ${arr[0]} ${arr[1]} ${arr[2]} $1 + done < $2 +} + +while : +do + clear + menu + read -p "select mode: " n + case $n in + 1) + clear + print_cfg $2 + cluster_menu + read -p "select operation: " c + case $c in + 1) + update_cluster "start" $2 + break + ;; + 2) + update_cluster "stop" $2 + break + ;; + 3) + break + ;; + esac + ;; + 2) + clear + print_cfg $2 + dnode_menu + read -p "select operation: " d + case $d in + 1) + clear + print_cfg $2 + read -p "select dnode: " id + update_dnode $id "start" $2 + break + ;; + 2) + clear + print_cfg $2 + read -p "select dnode: " id + update_dnode $id "stop" $2 + break + ;; + 3) + clear + print_cfg $2 + read -p "select dnode: " id + manage_dnode $id $2 "create" + break + ;; + 4) + clear + print_cfg $2 + read -p "select dnode: " id + manage_dnode $id $2 "drop" + break + ;; + 5) + break + ;; + esac + ;; + 3) + clear + arbitrator_menu + read -p "select operation: " m + case $m in + 1) + nohup /usr/local/taos/bin/tarbitrator >/dev/null 2>&1 & + echo -e "\033[32mSuccessfully start arbitrator $3 \033[0m" + break + ;; + 2) + var=`ps -ef | grep tarbitrator | awk '{print $2}' | head -n 1` + kill -9 $var + break + ;; + 3) + break + ;; + esac + ;; + 4) + break + ;; + esac +done From 84da4d656f12921af36be6f001e53fa3c687aa02 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 15 Jul 2021 17:34:22 +0800 Subject: [PATCH 28/38] [td-5250]: compress the tableMeta info msg. --- src/client/src/tscServer.c | 22 +++++++++++++++++++--- src/inc/taosmsg.h | 10 ++++++---- src/mnode/src/mnodeTable.c | 36 ++++++++++++++++++++++++++++-------- 3 files changed, 53 insertions(+), 15 deletions(-) diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c index d30ee32d67..657d8b7848 100644 --- a/src/client/src/tscServer.c +++ b/src/client/src/tscServer.c @@ -13,7 +13,10 @@ * along with this program. If not, see . 
*/ +#include #include "os.h" +#include "qPlan.h" +#include "qTableMeta.h" #include "tcmdtype.h" #include "tlockfree.h" #include "trpc.h" @@ -21,10 +24,8 @@ #include "tscLog.h" #include "tscProfile.h" #include "tscUtil.h" -#include "qTableMeta.h" #include "tsclient.h" #include "ttimer.h" -#include "qPlan.h" int (*tscBuildMsg[TSDB_SQL_MAX])(SSqlObj *pSql, SSqlInfo *pInfo) = {0}; @@ -2048,16 +2049,27 @@ int tscProcessMultiTableMetaRsp(SSqlObj *pSql) { } SSqlCmd *pParentCmd = &pParentSql->cmd; - SHashObj *pSet = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK); char* pMsg = pMultiMeta->meta; + char* buf = NULL; + if (pMultiMeta->compressed) { + buf = malloc(pMultiMeta->rawLen - sizeof(SMultiTableMeta)); + int32_t len = tsDecompressString(pMultiMeta->meta, pMultiMeta->contLen - sizeof(SMultiTableMeta), 1, + buf, pMultiMeta->rawLen - sizeof(SMultiTableMeta), ONE_STAGE_COMP, NULL, 0); + assert(len == pMultiMeta->rawLen - sizeof(SMultiTableMeta)); + + pMsg = buf; + } + for (int32_t i = 0; i < pMultiMeta->numOfTables; i++) { STableMetaMsg *pMetaMsg = (STableMetaMsg *)pMsg; int32_t code = tableMetaMsgConvert(pMetaMsg); if (code != TSDB_CODE_SUCCESS) { taosHashCleanup(pSet); taosReleaseRef(tscObjRef, pParentSql->self); + + tfree(buf); return code; } @@ -2066,6 +2078,8 @@ int tscProcessMultiTableMetaRsp(SSqlObj *pSql) { tscError("0x%"PRIx64" invalid table meta from mnode, name:%s", pSql->self, pMetaMsg->tableFname); taosHashCleanup(pSet); taosReleaseRef(tscObjRef, pParentSql->self); + + tfree(buf); return TSDB_CODE_TSC_INVALID_VALUE; } @@ -2115,6 +2129,8 @@ int tscProcessMultiTableMetaRsp(SSqlObj *pSql) { taosHashCleanup(pSet); taosReleaseRef(tscObjRef, pParentSql->self); + + tfree(buf); return TSDB_CODE_SUCCESS; } diff --git a/src/inc/taosmsg.h b/src/inc/taosmsg.h index f20e1535ba..4e76b6dcc1 100644 --- a/src/inc/taosmsg.h +++ b/src/inc/taosmsg.h @@ -760,10 +760,12 @@ typedef struct STableMetaMsg { } STableMetaMsg; typedef struct SMultiTableMeta { - int32_t numOfTables; - int32_t numOfVgroup; - int32_t contLen; - char meta[]; + int32_t numOfTables; + int32_t numOfVgroup; + uint32_t contLen:31; + uint8_t compressed:1; // denote if compressed or not + uint32_t rawLen; // size before compress + char meta[]; } SMultiTableMeta; typedef struct { diff --git a/src/mnode/src/mnodeTable.c b/src/mnode/src/mnodeTable.c index beeff372aa..ea5611e683 100644 --- a/src/mnode/src/mnodeTable.c +++ b/src/mnode/src/mnodeTable.c @@ -2892,7 +2892,7 @@ static SMultiTableMeta* ensureMsgBufferSpace(SMultiTableMeta *pMultiMeta, SArray (*totalMallocLen) *= 2; } - pMultiMeta = rpcReallocCont(pMultiMeta, *totalMallocLen); + pMultiMeta = realloc(pMultiMeta, *totalMallocLen); if (pMultiMeta == NULL) { return NULL; } @@ -2923,8 +2923,8 @@ static int32_t mnodeProcessMultiTableMetaMsg(SMnodeMsg *pMsg) { } // first malloc 80KB, subsequent reallocation will expand the size as twice of the original size - int32_t totalMallocLen = sizeof(STableMetaMsg) + sizeof(SSchema) * (TSDB_MAX_TAGS + TSDB_MAX_COLUMNS + 16); - pMultiMeta = rpcMallocCont(totalMallocLen); + int32_t totalMallocLen = sizeof(SMultiTableMeta) + sizeof(STableMetaMsg) + sizeof(SSchema) * (TSDB_MAX_TAGS + TSDB_MAX_COLUMNS + 16); + pMultiMeta = calloc(1, totalMallocLen); if (pMultiMeta == NULL) { code = TSDB_CODE_MND_OUT_OF_MEMORY; goto _end; @@ -2957,7 +2957,7 @@ static int32_t mnodeProcessMultiTableMetaMsg(SMnodeMsg *pMsg) { int remain = totalMallocLen - pMultiMeta->contLen; if (remain <= sizeof(STableMetaMsg) + sizeof(SSchema) * 
(TSDB_MAX_TAGS + TSDB_MAX_COLUMNS + 16)) { totalMallocLen *= 2; - pMultiMeta = rpcReallocCont(pMultiMeta, totalMallocLen); + pMultiMeta = realloc(pMultiMeta, totalMallocLen); if (pMultiMeta == NULL) { mnodeDecTableRef(pMsg->pTable); code = TSDB_CODE_MND_OUT_OF_MEMORY; @@ -3027,16 +3027,36 @@ static int32_t mnodeProcessMultiTableMetaMsg(SMnodeMsg *pMsg) { pMsg->rpcRsp.len = pMultiMeta->contLen; code = TSDB_CODE_SUCCESS; + char* tmp = rpcMallocCont(pMultiMeta->contLen + 2); + int32_t len = tsCompressString(pMultiMeta->meta, (int32_t)pMultiMeta->contLen - sizeof(SMultiTableMeta), 1, + tmp + sizeof(SMultiTableMeta), (int32_t)pMultiMeta->contLen - sizeof(SMultiTableMeta) + 2, ONE_STAGE_COMP, NULL, 0); + + pMultiMeta->rawLen = pMultiMeta->contLen; + if (len == -1 || len + sizeof(SMultiTableMeta) >= pMultiMeta->contLen + 2) { // compress failed, do not compress this binary data + pMultiMeta->compressed = 0; + memcpy(tmp, pMultiMeta, sizeof(SMultiTableMeta) + pMultiMeta->contLen); + } else { + pMultiMeta->compressed = 1; + pMultiMeta->contLen = sizeof(SMultiTableMeta) + len; + + // copy the header and the compressed payload + memcpy(tmp, pMultiMeta, sizeof(SMultiTableMeta)); + } + + pMsg->rpcRsp.rsp = tmp; + pMsg->rpcRsp.len = pMultiMeta->contLen; + + SMultiTableMeta* p = (SMultiTableMeta*) tmp; + + mDebug("multiTable info build completed, original:%d, compressed:%d, comp:%d", p->rawLen, p->contLen, p->compressed); + _end: tfree(str); tfree(nameList); taosArrayDestroy(pList); pMsg->pTable = NULL; pMsg->pVgroup = NULL; - - if (code != TSDB_CODE_SUCCESS) { - rpcFreeCont(pMultiMeta); - } + tfree(pMultiMeta); return code; } From c8308dbafd0f5212e944a47cec9e18f13ac99328 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 15 Jul 2021 17:36:16 +0800 Subject: [PATCH 29/38] [td-225]add malloc buffer check. 
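
A minimal sketch (illustration only, not part of the applied diff) of the allocation guard this commit adds: check the buffer intended for the compressed table-meta reply and jump to the shared cleanup label on failure. Plain malloc() stands in for the rpc allocator here.

```c
/* Illustration only: check-allocation-then-goto-cleanup pattern. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define CODE_SUCCESS        0
#define CODE_OUT_OF_MEMORY  (-1)

static int buildReply(size_t payloadLen) {
    int   code = CODE_SUCCESS;
    char *tmp  = malloc(payloadLen + 2);   /* stands in for rpcMallocCont() */
    if (tmp == NULL) {                     /* the guard this commit introduces */
        code = CODE_OUT_OF_MEMORY;
        goto _end;
    }

    memset(tmp, 0, payloadLen + 2);        /* ... compress / copy payload ... */

_end:
    free(tmp);                             /* free(NULL) is a no-op */
    return code;
}

int main(void) {
    printf("buildReply -> %d\n", buildReply(128));
    return 0;
}
```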
--- src/mnode/src/mnodeTable.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/mnode/src/mnodeTable.c b/src/mnode/src/mnodeTable.c index ea5611e683..6dc2f8ad28 100644 --- a/src/mnode/src/mnodeTable.c +++ b/src/mnode/src/mnodeTable.c @@ -3028,6 +3028,11 @@ static int32_t mnodeProcessMultiTableMetaMsg(SMnodeMsg *pMsg) { code = TSDB_CODE_SUCCESS; char* tmp = rpcMallocCont(pMultiMeta->contLen + 2); + if (tmp == NULL) { + code = TSDB_CODE_MND_OUT_OF_MEMORY; + goto _end; + } + int32_t len = tsCompressString(pMultiMeta->meta, (int32_t)pMultiMeta->contLen - sizeof(SMultiTableMeta), 1, tmp + sizeof(SMultiTableMeta), (int32_t)pMultiMeta->contLen - sizeof(SMultiTableMeta) + 2, ONE_STAGE_COMP, NULL, 0); From eabbe88f0e19a7da34e4a403924871879eda6647 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 15 Jul 2021 17:45:42 +0800 Subject: [PATCH 30/38] [td-255] --- src/query/src/qExecutor.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index dbcea4e90c..ecbfe36ee3 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -2340,6 +2340,7 @@ static void updateDataCheckOrder(SQInfo *pQInfo, SQueryTableMsg* pQueryMsg, bool } pQueryAttr->order.order = TSDB_ORDER_ASC; + pQueryAttr->needReverseScan = false; } else if (onlyLastQuery(pQueryAttr) && notContainSessionOrStateWindow(pQueryAttr)) { if (QUERY_IS_ASC_QUERY(pQueryAttr)) { qDebug(msg, pQInfo, "only-last", pQueryAttr->order.order, TSDB_ORDER_DESC, pQueryAttr->window.skey, @@ -2350,9 +2351,9 @@ static void updateDataCheckOrder(SQInfo *pQInfo, SQueryTableMsg* pQueryMsg, bool } pQueryAttr->order.order = TSDB_ORDER_DESC; + pQueryAttr->needReverseScan = false; } - pQueryAttr->needReverseScan = false; } else { // interval query if (stableQuery) { if (onlyFirstQuery(pQueryAttr)) { From 4b233396c3aef9d7ba0dd940060a1bffc0186dec Mon Sep 17 00:00:00 2001 From: Zhiyu Yang <69311263+zyyang-taosdata@users.noreply.github.com> Date: Thu, 15 Jul 2021 20:28:07 +0800 Subject: [PATCH 31/38] [TD-5286]: fix test case (#6871) * [TD-5286]: test insert and query, the ts should be the same * print timestamp * fix the setBytes test cases --- .../java/com/taosdata/jdbc/TSDBPreparedStatementTest.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBPreparedStatementTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBPreparedStatementTest.java index 40ff5c23ef..e48237755f 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBPreparedStatementTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBPreparedStatementTest.java @@ -841,13 +841,13 @@ public class TSDBPreparedStatementTest { } @Test - public void setBytes() throws SQLException, IOException { + public void setBytes() throws SQLException { // given long ts = System.currentTimeMillis(); byte[] f8 = "{\"name\": \"john\", \"age\": 10, \"address\": \"192.168.1.100\"}".getBytes(); // when - pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis())); + pstmt_insert.setTimestamp(1, new Timestamp(ts)); pstmt_insert.setBytes(9, f8); int result = pstmt_insert.executeUpdate(); From 8f19dea06d7c56ff46472655e4fa20a6e5d98dd0 Mon Sep 17 00:00:00 2001 From: jiajingbin Date: Thu, 15 Jul 2021 21:46:34 +0800 Subject: [PATCH 32/38] [TD-5229]: cname display is incomplete add method in util/sql.py---getColNameList() and checkColNameList add testcase query/queryCnameDisplay.py --- 
tests/pytest/query/queryCnameDisplay.py | 99 +++++++++++++++++++++++++ tests/pytest/util/sql.py | 23 ++++++ 2 files changed, 122 insertions(+) diff --git a/tests/pytest/query/queryCnameDisplay.py b/tests/pytest/query/queryCnameDisplay.py index e69de29bb2..45903d54f8 100644 --- a/tests/pytest/query/queryCnameDisplay.py +++ b/tests/pytest/query/queryCnameDisplay.py @@ -0,0 +1,99 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- +import string +import random +import sys +import taos +from util.log import tdLog +from util.cases import tdCases +from util.sql import tdSql + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def getLongName(self, len, mode = "mixed"): + """ + generate long str + """ + chars = ''.join(random.choice(string.ascii_letters.lower()) for i in range(len)) + return chars + + def checkRegularTableCname(self): + """ + check regular table cname + """ + # len(colName) <=64, generate cname list and make first param = 63 and second param = 65 + cname_list = [] + for i in range(10): + cname_list.append(self.getLongName(64)) + cname_list[0] = self.getLongName(63) + cname_list[1] = self.getLongName(65) + + # create table and insert data + tdSql.execute("CREATE TABLE regular_table_cname_check (ts timestamp, pi1 int, pi2 bigint, pf1 float, pf2 double, ps1 binary(10), pi3 smallint, pi4 tinyint, pb1 bool, ps2 nchar(20))") + tdSql.execute('insert into regular_table_cname_check values (now, 1, 2, 1.1, 2.2, "a", 1, 1, true, "aa");') + tdSql.execute('insert into regular_table_cname_check values (now, 2, 3, 1.2, 2.3, "b", 2, 1, false, "aa");') + tdSql.execute('insert into regular_table_cname_check values (now, 3, 4, 1.3, 2.4, "c", 1, 3, true, "bb");') + + # select as cname with cname_list + sql_seq = f'select count(ts) as {cname_list[0]}, sum(pi1) as {cname_list[1]}, avg(pi2) as {cname_list[2]}, count(pf1) as {cname_list[3]}, count(pf2) as {cname_list[4]}, count(ps1) as {cname_list[5]}, min(pi3) as {cname_list[6]}, max(pi4) as {cname_list[7]}, count(pb1) as {cname_list[8]}, count(ps2) as {cname_list[9]} from regular_table_cname_check' + res = tdSql.getColNameList(sql_seq) + + # cname[1] > 64, it is expected to be equal to 64 + cname_list_1_expected = cname_list[1][:-1] + cname_list[1] = cname_list_1_expected + checkColNameList = tdSql.checkColNameList(res, cname_list) + + def checkSuperTableCname(self): + """ + check super table cname + """ + # len(colName) <=64, generate cname list and make first param = 63 and second param = 65 + cname_list = [] + for i in range(19): + cname_list.append(self.getLongName(64)) + cname_list[0] = self.getLongName(63) + cname_list[1] = self.getLongName(65) + + # create table and insert data + tdSql.execute("create table super_table_cname_check (ts timestamp, pi1 int, pi2 bigint, pf1 float, pf2 double, ps1 binary(10), pi3 smallint, pi4 tinyint, pb1 bool, ps2 nchar(20)) tags (si1 int, si2 bigint, sf1 float, sf2 double, ss1 binary(10), si3 smallint, si4 tinyint, sb1 bool, ss2 nchar(20));") + tdSql.execute('create table st1 
using super_table_cname_check tags (1, 2, 1.1, 2.2, "a", 1, 1, true, "aa");') + tdSql.execute('insert into st1 values (now, 1, 2, 1.1, 2.2, "a", 1, 1, true, "aa");') + tdSql.execute('insert into st1 values (now, 1, 1, 1.4, 2.3, "b", 3, 2, true, "aa");') + tdSql.execute('insert into st1 values (now, 1, 2, 1.1, 2.2, "a", 1, 1, false, "bb");') + + # select as cname with cname_list + sql_seq = f'select count(ts) as {cname_list[0]}, sum(pi1) as {cname_list[1]}, avg(pi2) as {cname_list[2]}, count(pf1) as {cname_list[3]}, count(pf2) as {cname_list[4]}, count(ps1) as {cname_list[5]}, min(pi3) as {cname_list[6]}, max(pi4) as {cname_list[7]}, count(pb1) as {cname_list[8]}, count(ps2) as {cname_list[9]}, count(si1) as {cname_list[10]}, count(si2) as {cname_list[11]}, count(sf1) as {cname_list[12]}, count(sf2) as {cname_list[13]}, count(ss1) as {cname_list[14]}, count(si3) as {cname_list[15]}, count(si4) as {cname_list[16]}, count(sb1) as {cname_list[17]}, count(ss2) as {cname_list[18]} from super_table_cname_check' + res = tdSql.getColNameList(sql_seq) + + # cname[1] > 64, it is expected to be equal to 64 + cname_list_1_expected = cname_list[1][:-1] + cname_list[1] = cname_list_1_expected + checkColNameList = tdSql.checkColNameList(res, cname_list) + + def run(self): + tdSql.prepare() + self.checkRegularTableCname() + self.checkSuperTableCname() + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) + diff --git a/tests/pytest/util/sql.py b/tests/pytest/util/sql.py index 913c158d05..4eb0c8f857 100644 --- a/tests/pytest/util/sql.py +++ b/tests/pytest/util/sql.py @@ -79,6 +79,21 @@ class TDSql: raise Exception(repr(e)) return self.queryRows + def getColNameList(self, sql): + self.sql = sql + try: + col_name_list = [] + self.cursor.execute(sql) + self.queryCols = self.cursor.description + for query_col in self.queryCols: + col_name_list.append(query_col[0]) + except Exception as e: + caller = inspect.getframeinfo(inspect.stack()[1][0]) + args = (caller.filename, caller.lineno, sql, repr(e)) + tdLog.notice("%s(%d) failed: sql:%s, %s" % args) + raise Exception(repr(e)) + return col_name_list + def waitedQuery(self, sql, expectRows, timeout): tdLog.info("sql: %s, try to retrieve %d rows in %d seconds" % (sql, expectRows, timeout)) self.sql = sql @@ -209,6 +224,14 @@ class TDSql: tdLog.info("sql:%s, affectedRows:%d == expect:%d" % (self.sql, self.affectedRows, expectAffectedRows)) + def checkColNameList(self, col_name_list, expect_col_name_list): + if col_name_list == expect_col_name_list: + tdLog.info("sql:%s, col_name_list:%s == expect_col_name_list:%s" % (self.sql, col_name_list, expect_col_name_list)) + else: + caller = inspect.getframeinfo(inspect.stack()[1][0]) + args = (caller.filename, caller.lineno, self.sql, col_name_list, expect_col_name_list) + tdLog.exit("%s(%d) failed: sql:%s, col_name_list:%s != expect_col_name_list:%s" % args) + def taosdStatus(self, state): tdLog.sleep(5) pstate = 0 From 81bc57224a99c69165960ad7bd3b0a1875a65664 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Thu, 15 Jul 2021 23:49:39 +0800 Subject: [PATCH 33/38] [TD-5300]: taosdemo stmt debug print. (#6873) * [TD-5300]: taosdemo stmt debug print. * fix default iface is unknown. 
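
A minimal sketch (illustration only, not part of the applied diff) of the default-interface fallback: when no interface was specified on the command line (INTERFACE_BUT), the insert path now behaves as if taosc had been chosen. The enum and helpers below are simplified stand-ins for the taosdemo code.

```c
/* Illustration only: fall back to the taosc interface when none was given. */
#include <stdio.h>

typedef enum { TAOSC_IFACE, REST_IFACE, STMT_IFACE, INTERFACE_BUT } IfaceType;

static IfaceType effectiveIface(IfaceType requested) {
    /* INTERFACE_BUT means the user did not choose an interface */
    return (requested == INTERFACE_BUT) ? TAOSC_IFACE : requested;
}

static const char *ifaceName(IfaceType iface) {
    return (iface == TAOSC_IFACE) ? "taosc"
         : (iface == REST_IFACE)  ? "rest"
                                  : "stmt";
}

int main(void) {
    printf("unspecified -> %s\n", ifaceName(effectiveIface(INTERFACE_BUT)));
    printf("rest        -> %s\n", ifaceName(effectiveIface(REST_IFACE)));
    return 0;
}
```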
--- src/kit/taosdemo/taosdemo.c | 34 ++++++++++++++++++++++++---------- 1 file changed, 24 insertions(+), 10 deletions(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 9c547ff755..0327df5a62 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -569,7 +569,7 @@ SArguments g_args = { 0, // test_mode "127.0.0.1", // host 6030, // port - TAOSC_IFACE, // iface + INTERFACE_BUT, // iface "root", // user #ifdef _TD_POWER_ "powerdb", // password @@ -1429,8 +1429,13 @@ static int printfInsertMeta() { else printf("\ntaosdemo is simulating random data as you request..\n\n"); - printf("interface: \033[33m%s\033[0m\n", - (g_args.iface==TAOSC_IFACE)?"taosc":(g_args.iface==REST_IFACE)?"rest":"stmt"); + if (g_args.iface != INTERFACE_BUT) { + // first time if no iface specified + printf("interface: \033[33m%s\033[0m\n", + (g_args.iface==TAOSC_IFACE)?"taosc": + (g_args.iface==REST_IFACE)?"rest":"stmt"); + } + printf("host: \033[33m%s:%u\033[0m\n", g_Dbs.host, g_Dbs.port); printf("user: \033[33m%s\033[0m\n", g_Dbs.user); @@ -5038,13 +5043,17 @@ static int32_t execInsert(threadInfo *pThreadInfo, uint32_t k) uint16_t iface; if (superTblInfo) iface = superTblInfo->iface; - else - iface = g_args.iface; + else { + if (g_args.iface == INTERFACE_BUT) + iface = TAOSC_IFACE; + else + iface = g_args.iface; + } debugPrint("[%d] %s() LN%d %s\n", pThreadInfo->threadID, __func__, __LINE__, - (g_args.iface==TAOSC_IFACE)? - "taosc":(g_args.iface==REST_IFACE)?"rest":"stmt"); + (iface==TAOSC_IFACE)? + "taosc":(iface==REST_IFACE)?"rest":"stmt"); switch(iface) { case TAOSC_IFACE: @@ -5884,7 +5893,7 @@ static void printStatPerThread(threadInfo *pThreadInfo) pThreadInfo->threadID, pThreadInfo->totalInsertRows, pThreadInfo->totalAffectedRows, - (double)(pThreadInfo->totalAffectedRows / (pThreadInfo->totalDelay/1000.0))); + (pThreadInfo->totalDelay)?(double)((pThreadInfo->totalAffectedRows / (pThreadInfo->totalDelay)/1000.0)): FLT_MAX); } // sync write interlace data @@ -6463,7 +6472,7 @@ static int convertHostToServAddr(char *host, uint16_t port, struct sockaddr_in * } static void startMultiThreadInsertData(int threads, char* db_name, - char* precision,SSuperTable* superTblInfo) { + char* precision, SSuperTable* superTblInfo) { int32_t timePrec = TSDB_TIME_PRECISION_MILLI; if (0 != precision[0]) { @@ -7936,7 +7945,12 @@ static void setParaFromArg(){ tstrncpy(g_Dbs.db[0].superTbls[0].childTblPrefix, g_args.tb_prefix, TSDB_TABLE_NAME_LEN - 20); tstrncpy(g_Dbs.db[0].superTbls[0].dataSource, "rand", MAX_TB_NAME_SIZE); - g_Dbs.db[0].superTbls[0].iface = g_args.iface; + + if (g_args.iface == INTERFACE_BUT) { + g_Dbs.db[0].superTbls[0].iface = TAOSC_IFACE; + } else { + g_Dbs.db[0].superTbls[0].iface = g_args.iface; + } tstrncpy(g_Dbs.db[0].superTbls[0].startTimestamp, "2017-07-14 10:40:00.000", MAX_TB_NAME_SIZE); g_Dbs.db[0].superTbls[0].timeStampStep = DEFAULT_TIMESTAMP_STEP; From 95ff65436aa1b228fe9107d72c34f0fcf1c8340c Mon Sep 17 00:00:00 2001 From: zhaoyanggh Date: Fri, 16 Jul 2021 09:19:49 +0800 Subject: [PATCH 34/38] [TD-4433]script to control cluster used for testing robust --- tests/{cluster => robust}/cluster.sh | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename tests/{cluster => robust}/cluster.sh (100%) diff --git a/tests/cluster/cluster.sh b/tests/robust/cluster.sh similarity index 100% rename from tests/cluster/cluster.sh rename to tests/robust/cluster.sh From ea62c29dcd91086f68b74ca364b193bfa85fc4cb Mon Sep 17 00:00:00 2001 From: jiajingbin Date: Fri, 16 Jul 
2021 09:23:00 +0800 Subject: [PATCH 35/38] [TD-5229]: cname display is incomplete add method in util/sql.py---getColNameList() and checkColNameList add testcase query/queryCnameDisplay.py --- tests/pytest/fulltest.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh index 1f45cab13a..ac22d3ee4a 100755 --- a/tests/pytest/fulltest.sh +++ b/tests/pytest/fulltest.sh @@ -240,6 +240,7 @@ python3 ./test.py -f query/nestedQuery/queryInterval.py python3 ./test.py -f query/queryStateWindow.py python3 ./test.py -f query/nestedQuery/queryWithOrderLimit.py python3 ./test.py -f query/nestquery_last_row.py +python3 ./test.py -f query/queryCnameDisplay.py #stream From 9d89c37d70e1f3c417bd9aaede91515e77c07477 Mon Sep 17 00:00:00 2001 From: Zhiyu Yang <69311263+zyyang-taosdata@users.noreply.github.com> Date: Fri, 16 Jul 2021 09:35:43 +0800 Subject: [PATCH 36/38] Feature/td 5265 (#6878) * [TD-5265]: add test case for executeBatch in Statement * [TD-5265]: support continue process in Statement.executeBatch method * change * change * change * change --- src/connector/jdbc/readme.md | 492 ++++++++++++++---- .../com/taosdata/jdbc/AbstractStatement.java | 42 +- .../java/com/taosdata/jdbc/TSDBConstants.java | 2 + .../java/com/taosdata/jdbc/TSDBDriver.java | 5 + .../jdbc/cases/BatchErrorIgnoreTest.java | 144 +++++ 5 files changed, 567 insertions(+), 118 deletions(-) create mode 100644 src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/BatchErrorIgnoreTest.java diff --git a/src/connector/jdbc/readme.md b/src/connector/jdbc/readme.md index e81f078c15..3c52ebb00a 100644 --- a/src/connector/jdbc/readme.md +++ b/src/connector/jdbc/readme.md @@ -1,54 +1,62 @@ +# Java Connector -## TAOS-JDBCDriver 概述 +TDengine 提供了遵循 JDBC 标准(3.0)API 规范的 `taos-jdbcdriver` 实现,可在 maven 的中央仓库 [Sonatype Repository][1] 搜索下载。 -TDengine 为了方便 Java 应用使用,提供了遵循 JDBC 标准(3.0)API 规范的 `taos-jdbcdriver` 实现。目前可以通过 [Sonatype Repository][1] 搜索并下载。 +`taos-jdbcdriver` 的实现包括 2 种形式: JDBC-JNI 和 JDBC-RESTful(taos-jdbcdriver-2.0.18 开始支持 JDBC-RESTful)。 JDBC-JNI 通过调用客户端 libtaos.so(或 taos.dll )的本地方法实现, JDBC-RESTful 则在内部封装了 RESTful 接口实现。 -由于 TDengine 是使用 c 语言开发的,使用 taos-jdbcdriver 驱动包时需要依赖系统对应的本地函数库。 +![tdengine-connector](https://www.taosdata.com/cn/documentation/user/pages/images/tdengine-jdbc-connector.png) -* libtaos.so - 在 linux 系统中成功安装 TDengine 后,依赖的本地函数库 libtaos.so 文件会被自动拷贝至 /usr/lib/libtaos.so,该目录包含在 Linux 自动扫描路径上,无需单独指定。 - -* taos.dll - 在 windows 系统中安装完客户端之后,驱动包依赖的 taos.dll 文件会自动拷贝到系统默认搜索路径 C:/Windows/System32 下,同样无需要单独指定。 - -> 注意:在 windows 环境开发时需要安装 TDengine 对应的 windows 版本客户端,由于目前没有提供 Linux 环境单独的客户端,需要安装 TDengine 才能使用。 +上图显示了 3 种 Java 应用使用连接器访问 TDengine 的方式: -TDengine 的 JDBC 驱动实现尽可能的与关系型数据库驱动保持一致,但时序空间数据库与关系对象型数据库服务的对象和技术特征的差异导致 taos-jdbcdriver 并未完全实现 JDBC 标准规范。在使用时需要注意以下几点: +* JDBC-JNI:Java 应用在物理节点1(pnode1)上使用 JDBC-JNI 的 API ,直接调用客户端 API(libtaos.so 或 taos.dll)将写入和查询请求发送到位于物理节点2(pnode2)上的 taosd 实例。 +* RESTful:应用将 SQL 发送给位于物理节点2(pnode2)上的 RESTful 连接器,再调用客户端 API(libtaos.so)。 +* JDBC-RESTful:Java 应用通过 JDBC-RESTful 的 API ,将 SQL 封装成一个 RESTful 请求,发送给物理节点2的 RESTful 连接器。 -* TDengine 不提供针对单条数据记录的删除和修改的操作,驱动中也没有支持相关方法。 -* 由于不支持删除和修改,所以也不支持事务操作。 -* 目前不支持表间的 union 操作。 -* 目前不支持嵌套查询(nested query),`对每个 Connection 的实例,至多只能有一个打开的 ResultSet 实例;如果在 ResultSet还没关闭的情况下执行了新的查询,TSDBJDBCDriver 则会自动关闭上一个 ResultSet`。 +TDengine 的 JDBC 驱动实现尽可能与关系型数据库驱动保持一致,但时序空间数据库与关系对象型数据库服务的对象和技术特征存在差异,导致 `taos-jdbcdriver` 与传统的 JDBC driver 也存在一定差异。在使用时需要注意以下几点: + +* TDengine 目前不支持针对单条数据记录的删除操作。 +* 目前不支持事务操作。 +* 目前不支持嵌套查询(nested query)。 +* 对每个 
Connection 的实例,至多只能有一个打开的 ResultSet 实例;如果在 ResultSet 还没关闭的情况下执行了新的查询,taos-jdbcdriver 会自动关闭上一个 ResultSet。 -## TAOS-JDBCDriver 版本以及支持的 TDengine 版本和 JDK 版本 +## JDBC-JNI和JDBC-RESTful的对比 -| taos-jdbcdriver 版本 | TDengine 版本 | JDK 版本 | -| --- | --- | --- | -| 1.0.3 | 1.6.1.x 及以上 | 1.8.x | -| 1.0.2 | 1.6.1.x 及以上 | 1.8.x | -| 1.0.1 | 1.6.1.x 及以上 | 1.8.x | + + + + + + + + + + + + + + + + + + + + + + + + + +
+| 对比项 | JDBC-JNI | JDBC-RESTful |
+| --- | --- | --- |
+| 支持的操作系统 | linux、windows | 全平台 |
+| 是否需要安装 client | 需要 | 不需要 |
+| server 升级后是否需要升级 client | 需要 | 不需要 |
+| 写入性能 | JDBC-RESTful 是 JDBC-JNI 的 50%~90% | |
+| 查询性能 | JDBC-RESTful 与 JDBC-JNI 没有差别 | |
-## TDengine DataType 和 Java DataType +注意:与 JNI 方式不同,RESTful 接口是无状态的,因此 `USE db_name` 指令没有效果,RESTful 下所有对表名、超级表名的引用都需要指定数据库名前缀。 -TDengine 目前支持时间戳、数字、字符、布尔类型,与 Java 对应类型转换如下: - -| TDengine DataType | Java DataType | -| --- | --- | -| TIMESTAMP | java.sql.Timestamp | -| INT | java.lang.Integer | -| BIGINT | java.lang.Long | -| FLOAT | java.lang.Float | -| DOUBLE | java.lang.Double | -| SMALLINT, TINYINT |java.lang.Short | -| BOOL | java.lang.Boolean | -| BINARY, NCHAR | java.lang.String | - -## 如何获取 TAOS-JDBCDriver +## 如何获取 taos-jdbcdriver ### maven 仓库 目前 taos-jdbcdriver 已经发布到 [Sonatype Repository][1] 仓库,且各大仓库都已同步。 + * [sonatype][8] * [mvnrepository][9] * [maven.aliyun][10] @@ -56,56 +64,86 @@ TDengine 目前支持时间戳、数字、字符、布尔类型,与 Java 对 maven 项目中使用如下 pom.xml 配置即可: ```xml - - - com.taosdata.jdbc - taos-jdbcdriver - 1.0.3 - - + + com.taosdata.jdbc + taos-jdbcdriver + 2.0.18 + ``` ### 源码编译打包 -下载 [TDengine][3] 源码之后,进入 taos-jdbcdriver 源码目录 `src/connector/jdbc` 执行 `mvn clean package` 即可生成相应 jar 包。 +下载 [TDengine][3] 源码之后,进入 taos-jdbcdriver 源码目录 `src/connector/jdbc` 执行 `mvn clean package -Dmaven.test.skip=true` 即可生成相应 jar 包。 -## 使用说明 + +## JDBC的使用说明 ### 获取连接 -如下所示配置即可获取 TDengine Connection: +#### 指定URL获取连接 + +通过指定URL获取连接,如下所示: + ```java -Class.forName("com.taosdata.jdbc.TSDBDriver"); -String jdbcUrl = "jdbc:TAOS://127.0.0.1:6030/log?user=root&password=taosdata"; +Class.forName("com.taosdata.jdbc.rs.RestfulDriver"); +String jdbcUrl = "jdbc:TAOS-RS://taosdemo.com:6041/test?user=root&password=taosdata"; Connection conn = DriverManager.getConnection(jdbcUrl); ``` -> 端口 6030 为默认连接端口,JDBC URL 中的 log 为系统本身的监控数据库。 + +以上示例,使用 **JDBC-RESTful** 的 driver,建立了到 hostname 为 taosdemo.com,端口为 6041,数据库名为 test 的连接。这个 URL 中指定用户名(user)为 root,密码(password)为 taosdata。 + +使用 JDBC-RESTful 接口,不需要依赖本地函数库。与 JDBC-JNI 相比,仅需要: + +1. driverClass 指定为“com.taosdata.jdbc.rs.RestfulDriver”; +2. jdbcUrl 以“jdbc:TAOS-RS://”开头; +3. 
使用 6041 作为连接端口。 + +如果希望获得更好的写入和查询性能,Java 应用可以使用 **JDBC-JNI** 的driver,如下所示: + +```java +Class.forName("com.taosdata.jdbc.TSDBDriver"); +String jdbcUrl = "jdbc:TAOS://taosdemo.com:6030/test?user=root&password=taosdata"; +Connection conn = DriverManager.getConnection(jdbcUrl); +``` + +以上示例,使用了 JDBC-JNI 的 driver,建立了到 hostname 为 taosdemo.com,端口为 6030(TDengine 的默认端口),数据库名为 test 的连接。这个 URL 中指定用户名(user)为 root,密码(password)为 taosdata。 + +**注意**:使用 JDBC-JNI 的 driver,taos-jdbcdriver 驱动包时需要依赖系统对应的本地函数库。 + +* libtaos.so + 在 linux 系统中成功安装 TDengine 后,依赖的本地函数库 libtaos.so 文件会被自动拷贝至 /usr/lib/libtaos.so,该目录包含在 Linux 自动扫描路径上,无需单独指定。 + +* taos.dll + 在 windows 系统中安装完客户端之后,驱动包依赖的 taos.dll 文件会自动拷贝到系统默认搜索路径 C:/Windows/System32 下,同样无需要单独指定。 + +> 在 windows 环境开发时需要安装 TDengine 对应的 [windows 客户端][14],Linux 服务器安装完 TDengine 之后默认已安装 client,也可以单独安装 [Linux 客户端][15] 连接远程 TDengine Server。 + +JDBC-JNI 的使用请参见[视频教程](https://www.taosdata.com/blog/2020/11/11/1955.html)。 TDengine 的 JDBC URL 规范格式为: -`jdbc:TSDB://{host_ip}:{port}/[database_name]?[user={user}|&password={password}|&charset={charset}|&cfgdir={config_dir}|&locale={locale}|&timezone={timezone}]` - -其中,`{}` 中的内容必须,`[]` 中为可选。配置参数说明如下: +`jdbc:[TAOS|TAOS-RS]://[host_name]:[port]/[database_name]?[user={user}|&password={password}|&charset={charset}|&cfgdir={config_dir}|&locale={locale}|&timezone={timezone}]` +url中的配置参数如下: * user:登录 TDengine 用户名,默认值 root。 * password:用户登录密码,默认值 taosdata。 -* charset:客户端使用的字符集,默认值为系统字符集。 * cfgdir:客户端配置文件目录路径,Linux OS 上默认值 /etc/taos ,Windows OS 上默认值 C:/TDengine/cfg。 +* charset:客户端使用的字符集,默认值为系统字符集。 * locale:客户端语言环境,默认值系统当前 locale。 * timezone:客户端使用的时区,默认值为系统当前时区。 -以上参数可以在 3 处配置,`优先级由高到低`分别如下: -1. JDBC URL 参数 - 如上所述,可以在 JDBC URL 的参数中指定。 -2. java.sql.DriverManager.getConnection(String jdbcUrl, Properties connProps) + + +#### 指定URL和Properties获取连接 + +除了通过指定的 URL 获取连接,还可以使用 Properties 指定建立连接时的参数,如下所示: ```java public Connection getConn() throws Exception{ Class.forName("com.taosdata.jdbc.TSDBDriver"); - String jdbcUrl = "jdbc:TAOS://127.0.0.1:0/log?user=root&password=taosdata"; + // Class.forName("com.taosdata.jdbc.rs.RestfulDriver"); + String jdbcUrl = "jdbc:TAOS://taosdemo.com:6030/test?user=root&password=taosdata"; + // String jdbcUrl = "jdbc:TAOS-RS://taosdemo.com:6041/test?user=root&password=taosdata"; Properties connProps = new Properties(); - connProps.setProperty(TSDBDriver.PROPERTY_KEY_USER, "root"); - connProps.setProperty(TSDBDriver.PROPERTY_KEY_PASSWORD, "taosdata"); - connProps.setProperty(TSDBDriver.PROPERTY_KEY_CONFIG_DIR, "/etc/taos"); connProps.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); connProps.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8"); connProps.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8"); @@ -114,22 +152,68 @@ public Connection getConn() throws Exception{ } ``` -3. 
客户端配置文件 taos.cfg +以上示例,建立一个到 hostname 为 taosdemo.com,端口为 6030,数据库名为 test 的连接。注释为使用 JDBC-RESTful 时的方法。这个连接在 url 中指定了用户名(user)为 root,密码(password)为 taosdata,并在 connProps 中指定了使用的字符集、语言环境、时区等信息。 - linux 系统默认配置文件为 /var/lib/taos/taos.cfg,windows 系统默认配置文件路径为 C:\TDengine\cfg\taos.cfg。 -```properties -# client default username -# defaultUser root +properties 中的配置参数如下: +* TSDBDriver.PROPERTY_KEY_USER:登录 TDengine 用户名,默认值 root。 +* TSDBDriver.PROPERTY_KEY_PASSWORD:用户登录密码,默认值 taosdata。 +* TSDBDriver.PROPERTY_KEY_CONFIG_DIR:客户端配置文件目录路径,Linux OS 上默认值 /etc/taos ,Windows OS 上默认值 C:/TDengine/cfg。 +* TSDBDriver.PROPERTY_KEY_CHARSET:客户端使用的字符集,默认值为系统字符集。 +* TSDBDriver.PROPERTY_KEY_LOCALE:客户端语言环境,默认值系统当前 locale。 +* TSDBDriver.PROPERTY_KEY_TIME_ZONE:客户端使用的时区,默认值为系统当前时区。 -# client default password -# defaultPass taosdata + + +#### 使用客户端配置文件建立连接 + +当使用 JDBC-JNI 连接 TDengine 集群时,可以使用客户端配置文件,在客户端配置文件中指定集群的 firstEp、secondEp参数。 +如下所示: + +1. 在 Java 应用中不指定 hostname 和 port + +```java +public Connection getConn() throws Exception{ + Class.forName("com.taosdata.jdbc.TSDBDriver"); + String jdbcUrl = "jdbc:TAOS://:/test?user=root&password=taosdata"; + Properties connProps = new Properties(); + connProps.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); + connProps.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8"); + connProps.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8"); + Connection conn = DriverManager.getConnection(jdbcUrl, connProps); + return conn; +} +``` + +2. 在配置文件中指定 firstEp 和 secondEp + +``` +# first fully qualified domain name (FQDN) for TDengine system +firstEp cluster_node1:6030 + +# second fully qualified domain name (FQDN) for TDengine system, for cluster only +secondEp cluster_node2:6030 # default system charset -# charset UTF-8 +# charset UTF-8 # system locale # locale en_US.UTF-8 ``` + +以上示例,jdbc 会使用客户端的配置文件,建立到 hostname 为 cluster_node1、端口为 6030、数据库名为 test 的连接。当集群中 firstEp 节点失效时,JDBC 会尝试使用 secondEp 连接集群。 +TDengine 中,只要保证 firstEp 和 secondEp 中一个节点有效,就可以正常建立到集群的连接。 + +> 注意:这里的配置文件指的是调用 JDBC Connector 的应用程序所在机器上的配置文件,Linux OS 上默认值 /etc/taos/taos.cfg ,Windows OS 上默认值 C://TDengine/cfg/taos.cfg。 + +#### 配置参数的优先级 + +通过以上 3 种方式获取连接,如果配置参数在 url、Properties、客户端配置文件中有重复,则参数的`优先级由高到低`分别如下: +1. JDBC URL 参数,如上所述,可以在 JDBC URL 的参数中指定。 +2. Properties connProps +3. 
客户端配置文件 taos.cfg + +例如:在 url 中指定了 password 为 taosdata,在 Properties 中指定了 password 为 taosdemo,那么,JDBC 会使用 url 中的 password 建立连接。 + > 更多详细配置请参考[客户端配置][13] ### 创建数据库和表 @@ -146,6 +230,7 @@ stmt.executeUpdate("use db"); // create table stmt.executeUpdate("create table if not exists tb (ts timestamp, temperature int, humidity float)"); ``` + > 注意:如果不使用 `use db` 指定数据库,则后续对表的操作都需要增加数据库名称作为前缀,如 db.tb。 ### 插入数据 @@ -156,6 +241,7 @@ int affectedRows = stmt.executeUpdate("insert into tb values(now, 23, 10.3) (now System.out.println("insert " + affectedRows + " rows."); ``` + > now 为系统内部函数,默认为服务器当前时间。 > `now + 1s` 代表服务器当前时间往后加 1 秒,数字后面代表时间单位:a(毫秒), s(秒), m(分), h(小时), d(天),w(周), n(月), y(年)。 @@ -177,8 +263,150 @@ while(resultSet.next()){ System.out.printf("%s, %d, %s\n", ts, temperature, humidity); } ``` + > 查询和操作关系型数据库一致,使用下标获取返回字段内容时从 1 开始,建议使用字段名称获取。 +### 处理异常 + +在报错后,通过SQLException可以获取到错误的信息和错误码: + +```java +try (Statement statement = connection.createStatement()) { + // executeQuery + ResultSet resultSet = statement.executeQuery(sql); + // print result + printResult(resultSet); +} catch (SQLException e) { + System.out.println("ERROR Message: " + e.getMessage()); + System.out.println("ERROR Code: " + e.getErrorCode()); + e.printStackTrace(); +} +``` + +JDBC连接器可能报错的错误码包括3种:JDBC driver本身的报错(错误码在0x2301到0x2350之间),JNI方法的报错(错误码在0x2351到0x2400之间),TDengine其他功能模块的报错。 +具体的错误码请参考: +* https://github.com/taosdata/TDengine/blob/develop/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java +* https://github.com/taosdata/TDengine/blob/develop/src/inc/taoserror.h + +### 通过参数绑定写入数据 + +从 2.1.2.0 版本开始,TDengine 的 **JDBC-JNI** 实现大幅改进了参数绑定方式对数据写入(INSERT)场景的支持。采用这种方式写入数据时,能避免 SQL 语法解析的资源消耗,从而在很多情况下显著提升写入性能。(注意:**JDBC-RESTful** 实现并不提供参数绑定这种使用方式。) + +```java +Statement stmt = conn.createStatement(); +Random r = new Random(); + +// INSERT 语句中,VALUES 部分允许指定具体的数据列;如果采取自动建表,则 TAGS 部分需要设定全部 TAGS 列的参数值: +TSDBPreparedStatement s = (TSDBPreparedStatement) conn.prepareStatement("insert into ? using weather_test tags (?, ?) 
(ts, c1, c2) values(?, ?, ?)"); + +// 设定数据表名: +s.setTableName("w1"); +// 设定 TAGS 取值: +s.setTagInt(0, r.nextInt(10)); +s.setTagString(1, "Beijing"); + +int numOfRows = 10; + +// VALUES 部分以逐列的方式进行设置: +ArrayList ts = new ArrayList<>(); +for (int i = 0; i < numOfRows; i++){ + ts.add(System.currentTimeMillis() + i); +} +s.setTimestamp(0, ts); + +ArrayList s1 = new ArrayList<>(); +for (int i = 0; i < numOfRows; i++){ + s1.add(r.nextInt(100)); +} +s.setInt(1, s1); + +ArrayList s2 = new ArrayList<>(); +for (int i = 0; i < numOfRows; i++){ + s2.add("test" + r.nextInt(100)); +} +s.setString(2, s2, 10); + +// AddBatch 之后,缓存并未清空。为避免混乱,并不推荐在 ExecuteBatch 之前再次绑定新一批的数据: +s.columnDataAddBatch(); +// 执行绑定数据后的语句: +s.columnDataExecuteBatch(); +// 执行语句后清空缓存。在清空之后,可以复用当前的对象,绑定新的一批数据(可以是新表名、新 TAGS 值、新 VALUES 值): +s.columnDataClearBatch(); +// 执行完毕,释放资源: +s.columnDataCloseBatch(); +``` + +用于设定 TAGS 取值的方法总共有: +```java +public void setTagNull(int index, int type) +public void setTagBoolean(int index, boolean value) +public void setTagInt(int index, int value) +public void setTagByte(int index, byte value) +public void setTagShort(int index, short value) +public void setTagLong(int index, long value) +public void setTagTimestamp(int index, long value) +public void setTagFloat(int index, float value) +public void setTagDouble(int index, double value) +public void setTagString(int index, String value) +public void setTagNString(int index, String value) +``` + +用于设定 VALUES 数据列的取值的方法总共有: +```java +public void setInt(int columnIndex, ArrayList list) throws SQLException +public void setFloat(int columnIndex, ArrayList list) throws SQLException +public void setTimestamp(int columnIndex, ArrayList list) throws SQLException +public void setLong(int columnIndex, ArrayList list) throws SQLException +public void setDouble(int columnIndex, ArrayList list) throws SQLException +public void setBoolean(int columnIndex, ArrayList list) throws SQLException +public void setByte(int columnIndex, ArrayList list) throws SQLException +public void setShort(int columnIndex, ArrayList list) throws SQLException +public void setString(int columnIndex, ArrayList list, int size) throws SQLException +public void setNString(int columnIndex, ArrayList list, int size) throws SQLException +``` +其中 setString 和 setNString 都要求用户在 size 参数里声明表定义中对应列的列宽。 + +### 订阅 + +#### 创建 + +```java +TSDBSubscribe sub = ((TSDBConnection)conn).subscribe("topic", "select * from meters", false); +``` + +`subscribe` 方法的三个参数含义如下: + +* topic:订阅的主题(即名称),此参数是订阅的唯一标识 +* sql:订阅的查询语句,此语句只能是 `select` 语句,只应查询原始数据,只能按时间正序查询数据 +* restart:如果订阅已经存在,是重新开始,还是继续之前的订阅 + +如上面的例子将使用 SQL 语句 `select * from meters` 创建一个名为 `topic` 的订阅,如果这个订阅已经存在,将继续之前的查询进度,而不是从头开始消费所有的数据。 + +#### 消费数据 + +```java +int total = 0; +while(true) { + TSDBResultSet rs = sub.consume(); + int count = 0; + while(rs.next()) { + count++; + } + total += count; + System.out.printf("%d rows consumed, total %d\n", count, total); + Thread.sleep(1000); +} +``` + +`consume` 方法返回一个结果集,其中包含从上次 `consume` 到目前为止的所有新数据。请务必按需选择合理的调用 `consume` 的频率(如例子中的 `Thread.sleep(1000)`),否则会给服务端造成不必要的压力。 + +#### 关闭订阅 + +```java +sub.close(true); +``` + +`close` 方法关闭一个订阅。如果其参数为 `true` 表示保留订阅进度信息,后续可以创建同名订阅继续消费数据;如为 `false` 则不保留订阅进度。 ### 关闭资源 @@ -187,12 +415,17 @@ resultSet.close(); stmt.close(); conn.close(); ``` + > `注意务必要将 connection 进行关闭`,否则会出现连接泄露。 + + + ## 与连接池使用 **HikariCP** * 引入相应 HikariCP maven 依赖: + ```xml com.zaxxer @@ -202,31 +435,34 @@ conn.close(); ``` * 使用示例如下: + ```java public static void main(String[] args) throws SQLException { 
HikariConfig config = new HikariConfig(); + // jdbc properties config.setJdbcUrl("jdbc:TAOS://127.0.0.1:6030/log"); config.setUsername("root"); config.setPassword("taosdata"); - - config.setMinimumIdle(3); //minimum number of idle connection + // connection pool configurations + config.setMinimumIdle(10); //minimum number of idle connection config.setMaximumPoolSize(10); //maximum number of connection in the pool - config.setConnectionTimeout(10000); //maximum wait milliseconds for get connection from pool - config.setIdleTimeout(60000); // max idle time for recycle idle connection - config.setConnectionTestQuery("describe log.dn"); //validation query - config.setValidationTimeout(3000); //validation query timeout + config.setConnectionTimeout(30000); //maximum wait milliseconds for get connection from pool + config.setMaxLifetime(0); // maximum life time for each connection + config.setIdleTimeout(0); // max idle time for recycle idle connection + config.setConnectionTestQuery("select server_status()"); //validation query HikariDataSource ds = new HikariDataSource(config); //create datasource - + Connection connection = ds.getConnection(); // get connection Statement statement = connection.createStatement(); // get statement - - //query or insert + + //query or insert // ... - + connection.close(); // put back to conneciton pool } ``` + > 通过 HikariDataSource.getConnection() 获取连接后,使用完成后需要调用 close() 方法,实际上它并不会关闭连接,只是放回连接池中。 > 更多 HikariCP 使用问题请查看[官方说明][5] @@ -243,40 +479,32 @@ conn.close(); ``` * 使用示例如下: + ```java public static void main(String[] args) throws Exception { - Properties properties = new Properties(); - properties.put("driverClassName","com.taosdata.jdbc.TSDBDriver"); - properties.put("url","jdbc:TAOS://127.0.0.1:6030/log"); - properties.put("username","root"); - properties.put("password","taosdata"); - properties.put("maxActive","10"); //maximum number of connection in the pool - properties.put("initialSize","3");//initial number of connection - properties.put("maxWait","10000");//maximum wait milliseconds for get connection from pool - properties.put("minIdle","3");//minimum number of connection in the pool - - properties.put("timeBetweenEvictionRunsMillis","3000");// the interval milliseconds to test connection - - properties.put("minEvictableIdleTimeMillis","60000");//the minimum milliseconds to keep idle - properties.put("maxEvictableIdleTimeMillis","90000");//the maximum milliseconds to keep idle - - properties.put("validationQuery","describe log.dn"); //validation query - properties.put("testWhileIdle","true"); // test connection while idle - properties.put("testOnBorrow","false"); // don't need while testWhileIdle is true - properties.put("testOnReturn","false"); // don't need while testWhileIdle is true - - //create druid datasource - DataSource ds = DruidDataSourceFactory.createDataSource(properties); - Connection connection = ds.getConnection(); // get connection + DruidDataSource dataSource = new DruidDataSource(); + // jdbc properties + dataSource.setDriverClassName("com.taosdata.jdbc.TSDBDriver"); + dataSource.setUrl(url); + dataSource.setUsername("root"); + dataSource.setPassword("taosdata"); + // pool configurations + dataSource.setInitialSize(10); + dataSource.setMinIdle(10); + dataSource.setMaxActive(10); + dataSource.setMaxWait(30000); + dataSource.setValidationQuery("select server_status()"); + + Connection connection = dataSource.getConnection(); // get connection Statement statement = connection.createStatement(); // get statement - //query or insert // ... 
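+            // for example (illustrative only): ResultSet rs = statement.executeQuery("select server_status()");
+            // note: `url` passed to setUrl() above is assumed to be a TDengine JDBC URL,
+            // e.g. "jdbc:TAOS://127.0.0.1:6030/log".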
connection.close(); // put back to conneciton pool } ``` + > 更多 druid 使用问题请查看[官方说明][6] **注意事项** @@ -291,29 +519,64 @@ server_status()| Query OK, 1 row(s) in set (0.000141s) ``` + + ## 与框架使用 * Spring JdbcTemplate 中使用 taos-jdbcdriver,可参考 [SpringJdbcTemplate][11] * Springboot + Mybatis 中使用,可参考 [springbootdemo][12] + + +## TAOS-JDBCDriver 版本以及支持的 TDengine 版本和 JDK 版本 + +| taos-jdbcdriver 版本 | TDengine 版本 | JDK 版本 | +| -------------------- | ----------------- | -------- | +| 2.0.31 | 2.1.3.0 及以上 | 1.8.x | +| 2.0.22 - 2.0.30 | 2.0.18.0 - 2.1.2.x | 1.8.x | +| 2.0.12 - 2.0.21 | 2.0.8.0 - 2.0.17.x | 1.8.x | +| 2.0.4 - 2.0.11 | 2.0.0.0 - 2.0.7.x | 1.8.x | +| 1.0.3 | 1.6.1.x 及以上 | 1.8.x | +| 1.0.2 | 1.6.1.x 及以上 | 1.8.x | +| 1.0.1 | 1.6.1.x 及以上 | 1.8.x | + + + +## TDengine DataType 和 Java DataType + +TDengine 目前支持时间戳、数字、字符、布尔类型,与 Java 对应类型转换如下: + +| TDengine DataType | Java DataType | +| ----------------- | ------------------ | +| TIMESTAMP | java.sql.Timestamp | +| INT | java.lang.Integer | +| BIGINT | java.lang.Long | +| FLOAT | java.lang.Float | +| DOUBLE | java.lang.Double | +| SMALLINT | java.lang.Short | +| TINYINT | java.lang.Byte | +| BOOL | java.lang.Boolean | +| BINARY | byte array | +| NCHAR | java.lang.String | + + + ## 常见问题 * java.lang.UnsatisfiedLinkError: no taos in java.library.path - + **原因**:程序没有找到依赖的本地函数库 taos。 - - **解决方法**:windows 下可以将 C:\TDengine\driver\taos.dll 拷贝到 C:\Windows\System32\ 目录下,linux 下将建立如下软链 ` ln -s /usr/local/taos/driver/libtaos.so.x.x.x.x /usr/lib/libtaos.so` 即可。 - + + **解决方法**:windows 下可以将 C:\TDengine\driver\taos.dll 拷贝到 C:\Windows\System32\ 目录下,linux 下将建立如下软链 `ln -s /usr/local/taos/driver/libtaos.so.x.x.x.x /usr/lib/libtaos.so` 即可。 + * java.lang.UnsatisfiedLinkError: taos.dll Can't load AMD 64 bit on a IA 32-bit platform - + **原因**:目前 TDengine 只支持 64 位 JDK。 - + **解决方法**:重新安装 64 位 JDK。 * 其它问题请参考 [Issues][7] - - [1]: https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver [2]: https://mvnrepository.com/artifact/com.taosdata.jdbc/taos-jdbcdriver [3]: https://github.com/taosdata/TDengine @@ -324,6 +587,9 @@ Query OK, 1 row(s) in set (0.000141s) [8]: https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver [9]: https://mvnrepository.com/artifact/com.taosdata.jdbc/taos-jdbcdriver [10]: https://maven.aliyun.com/mvn/search -[11]: https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/SpringJdbcTemplate +[11]: https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/SpringJdbcTemplate [12]: https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/springbootdemo -[13]: https://www.taosdata.com/cn/documentation/administrator/#%E5%AE%A2%E6%88%B7%E7%AB%AF%E9%85%8D%E7%BD%AE \ No newline at end of file +[13]: https://www.taosdata.com/cn/documentation/administrator/#client +[14]: https://www.taosdata.com/cn/all-downloads/#TDengine-Windows-Client +[15]: https://www.taosdata.com/cn/getting-started/#%E5%AE%A2%E6%88%B7%E7%AB%AF + diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractStatement.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractStatement.java index 8b6c074d1b..a801f5a674 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractStatement.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractStatement.java @@ -2,6 +2,7 @@ package com.taosdata.jdbc; import java.sql.*; import java.util.ArrayList; +import java.util.Arrays; import java.util.List; public abstract class AbstractStatement extends WrapperImpl implements Statement { @@ -196,13 +197,44 @@ 
public abstract class AbstractStatement extends WrapperImpl implements Statement if (batchedArgs == null || batchedArgs.isEmpty()) throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_BATCH_IS_EMPTY); + String clientInfo = getConnection().getClientInfo(TSDBDriver.PROPERTY_KEY_BATCH_ERROR_IGNORE); + boolean batchErrorIgnore = clientInfo == null ? TSDBConstants.DEFAULT_BATCH_ERROR_IGNORE : Boolean.parseBoolean(clientInfo); + + if (batchErrorIgnore) { + return executeBatchIgnoreException(); + } + return executeBatchThrowException(); + } + + private int[] executeBatchIgnoreException() { + return batchedArgs.stream().mapToInt(sql -> { + try { + boolean isSelect = execute(sql); + if (isSelect) { + return SUCCESS_NO_INFO; + } else { + return getUpdateCount(); + } + } catch (SQLException e) { + return EXECUTE_FAILED; + } + }).toArray(); + } + + private int[] executeBatchThrowException() throws BatchUpdateException { int[] res = new int[batchedArgs.size()]; for (int i = 0; i < batchedArgs.size(); i++) { - boolean isSelect = execute(batchedArgs.get(i)); - if (isSelect) { - res[i] = SUCCESS_NO_INFO; - } else { - res[i] = getUpdateCount(); + try { + boolean isSelect = execute(batchedArgs.get(i)); + if (isSelect) { + res[i] = SUCCESS_NO_INFO; + } else { + res[i] = getUpdateCount(); + } + } catch (SQLException e) { + String reason = e.getMessage(); + int[] updateCounts = Arrays.copyOfRange(res, 0, i); + throw new BatchUpdateException(reason, updateCounts, e); } } return res; diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConstants.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConstants.java index 5b5128e720..f3f04eff12 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConstants.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConstants.java @@ -74,6 +74,8 @@ public abstract class TSDBConstants { public static final String DEFAULT_PRECISION = "ms"; + public static final boolean DEFAULT_BATCH_ERROR_IGNORE = false; + public static int typeName2JdbcType(String type) { switch (type.toUpperCase()) { case "TIMESTAMP": diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDriver.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDriver.java index 98a7d1929b..f5f16758c1 100755 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDriver.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDriver.java @@ -100,6 +100,11 @@ public class TSDBDriver extends AbstractDriver { */ public static final String PROPERTY_KEY_TIMESTAMP_FORMAT = "timestampFormat"; + /** + * continue process commands in executeBatch + */ + public static final String PROPERTY_KEY_BATCH_ERROR_IGNORE = "batchErrorIgnore"; + private TSDBDatabaseMetaData dbMetaData = null; static { diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/BatchErrorIgnoreTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/BatchErrorIgnoreTest.java new file mode 100644 index 0000000000..2934b54b5b --- /dev/null +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/BatchErrorIgnoreTest.java @@ -0,0 +1,144 @@ +package com.taosdata.jdbc.cases; + +import org.junit.*; + +import java.sql.*; +import java.util.stream.IntStream; + +public class BatchErrorIgnoreTest { + + private static final String host = "127.0.0.1"; + + @Test + public void batchErrorThrowException() throws SQLException { + // given + Connection conn = DriverManager.getConnection("jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata"); + + // 
when + try (Statement stmt = conn.createStatement()) { + IntStream.range(1, 6).mapToObj(i -> "insert into test.t" + i + " values(now, " + i + ")").forEach(sql -> { + try { + stmt.addBatch(sql); + } catch (SQLException e) { + e.printStackTrace(); + } + }); + stmt.addBatch("insert into t11 values(now, 11)"); + IntStream.range(6, 11).mapToObj(i -> "insert into test.t" + i + " values(now, " + i + "),(now + 1s, " + (10 * i) + ")").forEach(sql -> { + try { + stmt.addBatch(sql); + } catch (SQLException e) { + e.printStackTrace(); + } + }); + stmt.addBatch("select count(*) from test.weather"); + + stmt.executeBatch(); + } catch (BatchUpdateException e) { + int[] updateCounts = e.getUpdateCounts(); + Assert.assertEquals(5, updateCounts.length); + Assert.assertEquals(1, updateCounts[0]); + Assert.assertEquals(1, updateCounts[1]); + Assert.assertEquals(1, updateCounts[2]); + Assert.assertEquals(1, updateCounts[3]); + Assert.assertEquals(1, updateCounts[4]); + } + + } + + @Test + public void batchErrorIgnore() throws SQLException { + // given + Connection conn = DriverManager.getConnection("jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata&batchErrorIgnore=true"); + + // when + int[] results = null; + try (Statement stmt = conn.createStatement()) { + IntStream.range(1, 6).mapToObj(i -> "insert into test.t" + i + " values(now, " + i + ")").forEach(sql -> { + try { + stmt.addBatch(sql); + } catch (SQLException e) { + e.printStackTrace(); + } + }); + stmt.addBatch("insert into t11 values(now, 11)"); + IntStream.range(6, 11).mapToObj(i -> "insert into test.t" + i + " values(now, " + i + "),(now + 1s, " + (10 * i) + ")").forEach(sql -> { + try { + stmt.addBatch(sql); + } catch (SQLException e) { + e.printStackTrace(); + } + }); + stmt.addBatch("select count(*) from test.weather"); + + results = stmt.executeBatch(); + } catch (SQLException e) { + e.printStackTrace(); + } + + // then + assert results != null; + Assert.assertEquals(12, results.length); + Assert.assertEquals(1, results[0]); + Assert.assertEquals(1, results[1]); + Assert.assertEquals(1, results[2]); + Assert.assertEquals(1, results[3]); + Assert.assertEquals(1, results[4]); + Assert.assertEquals(Statement.EXECUTE_FAILED, results[5]); + Assert.assertEquals(2, results[6]); + Assert.assertEquals(2, results[7]); + Assert.assertEquals(2, results[8]); + Assert.assertEquals(2, results[9]); + Assert.assertEquals(2, results[10]); + Assert.assertEquals(Statement.SUCCESS_NO_INFO, results[11]); + } + + @Before + public void before() { + try { + Connection conn = DriverManager.getConnection("jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata"); + Statement stmt = conn.createStatement(); + stmt.execute("use test"); + stmt.execute("drop table if exists weather"); + stmt.execute("create table weather (ts timestamp, f1 float) tags(t1 int)"); + IntStream.range(1, 11).mapToObj(i -> "create table t" + i + " using weather tags(" + i + ")").forEach(sql -> { + try { + stmt.execute(sql); + } catch (SQLException e) { + e.printStackTrace(); + } + }); + stmt.close(); + conn.close(); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @BeforeClass + public static void beforeClass() { + try { + Connection conn = DriverManager.getConnection("jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata"); + Statement stmt = conn.createStatement(); + stmt.execute("drop database if exists test"); + stmt.execute("create database if not exists test"); + stmt.close(); + conn.close(); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + 
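+    // A minimal sketch (not part of the original test cases): enabling batchErrorIgnore through
+    // connection Properties instead of a URL parameter. It assumes the driver also reads
+    // TSDBDriver.PROPERTY_KEY_BATCH_ERROR_IGNORE from the Properties passed to getConnection,
+    // mirroring how the URL parameter is handled.
+    private Connection connectWithBatchErrorIgnore() throws SQLException {
+        java.util.Properties props = new java.util.Properties();
+        props.setProperty(com.taosdata.jdbc.TSDBDriver.PROPERTY_KEY_BATCH_ERROR_IGNORE, "true");
+        return DriverManager.getConnection("jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata", props);
+    }
+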
@AfterClass + public static void afterClass() { + try { + Connection conn = DriverManager.getConnection("jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata"); + Statement stmt = conn.createStatement(); + stmt.execute("drop database if exists test"); + stmt.close(); + conn.close(); + } catch (SQLException e) { + e.printStackTrace(); + } + } +} From 607ff5408b967c414b27bb68525c280dbe323c45 Mon Sep 17 00:00:00 2001 From: jiajingbin Date: Fri, 16 Jul 2021 11:32:20 +0800 Subject: [PATCH 37/38] [TD-5229]: cname display is incomplete add method in util/sql.py---getColNameList() and checkColNameList add testcase query/queryCnameDisplay.py --- tests/pytest/query/queryCnameDisplay.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/tests/pytest/query/queryCnameDisplay.py b/tests/pytest/query/queryCnameDisplay.py index 45903d54f8..8864c0e376 100644 --- a/tests/pytest/query/queryCnameDisplay.py +++ b/tests/pytest/query/queryCnameDisplay.py @@ -41,7 +41,6 @@ class TDTestCase: cname_list.append(self.getLongName(64)) cname_list[0] = self.getLongName(63) cname_list[1] = self.getLongName(65) - # create table and insert data tdSql.execute("CREATE TABLE regular_table_cname_check (ts timestamp, pi1 int, pi2 bigint, pf1 float, pf2 double, ps1 binary(10), pi3 smallint, pi4 tinyint, pb1 bool, ps2 nchar(20))") tdSql.execute('insert into regular_table_cname_check values (now, 1, 2, 1.1, 2.2, "a", 1, 1, true, "aa");') @@ -50,12 +49,15 @@ class TDTestCase: # select as cname with cname_list sql_seq = f'select count(ts) as {cname_list[0]}, sum(pi1) as {cname_list[1]}, avg(pi2) as {cname_list[2]}, count(pf1) as {cname_list[3]}, count(pf2) as {cname_list[4]}, count(ps1) as {cname_list[5]}, min(pi3) as {cname_list[6]}, max(pi4) as {cname_list[7]}, count(pb1) as {cname_list[8]}, count(ps2) as {cname_list[9]} from regular_table_cname_check' + sql_seq_no_as = sql_seq.replace('as ', '') res = tdSql.getColNameList(sql_seq) + res_no_as = tdSql.getColNameList(sql_seq_no_as) # cname[1] > 64, it is expected to be equal to 64 cname_list_1_expected = cname_list[1][:-1] cname_list[1] = cname_list_1_expected checkColNameList = tdSql.checkColNameList(res, cname_list) + checkColNameList = tdSql.checkColNameList(res_no_as, cname_list) def checkSuperTableCname(self): """ @@ -77,12 +79,15 @@ class TDTestCase: # select as cname with cname_list sql_seq = f'select count(ts) as {cname_list[0]}, sum(pi1) as {cname_list[1]}, avg(pi2) as {cname_list[2]}, count(pf1) as {cname_list[3]}, count(pf2) as {cname_list[4]}, count(ps1) as {cname_list[5]}, min(pi3) as {cname_list[6]}, max(pi4) as {cname_list[7]}, count(pb1) as {cname_list[8]}, count(ps2) as {cname_list[9]}, count(si1) as {cname_list[10]}, count(si2) as {cname_list[11]}, count(sf1) as {cname_list[12]}, count(sf2) as {cname_list[13]}, count(ss1) as {cname_list[14]}, count(si3) as {cname_list[15]}, count(si4) as {cname_list[16]}, count(sb1) as {cname_list[17]}, count(ss2) as {cname_list[18]} from super_table_cname_check' + sql_seq_no_as = sql_seq.replace('as ', '') res = tdSql.getColNameList(sql_seq) + res_no_as = tdSql.getColNameList(sql_seq_no_as) # cname[1] > 64, it is expected to be equal to 64 cname_list_1_expected = cname_list[1][:-1] cname_list[1] = cname_list_1_expected checkColNameList = tdSql.checkColNameList(res, cname_list) + checkColNameList = tdSql.checkColNameList(res_no_as, cname_list) def run(self): tdSql.prepare() From 20e3b5a24c615d42ec2126e6dbe92094f7951f8d Mon Sep 17 00:00:00 2001 From: Hongze Cheng Date: Fri, 16 Jul 2021 15:11:22 +0800 
Subject: [PATCH 38/38] Revert "[feature]: implement feature TD-4700" --- src/util/src/tlog.c | 11 +---------- 1 file changed, 1 insertion(+), 10 deletions(-) diff --git a/src/util/src/tlog.c b/src/util/src/tlog.c index b8430f220c..45ff14ffa4 100644 --- a/src/util/src/tlog.c +++ b/src/util/src/tlog.c @@ -96,7 +96,6 @@ static SLogBuff *taosLogBuffNew(int32_t bufSize); static void taosCloseLogByFd(int32_t oldFd); static int32_t taosOpenLogFile(char *fn, int32_t maxLines, int32_t maxFileNum); extern void taosPrintGlobalCfg(); -static volatile int8_t tsNoDisk = 0; static int32_t taosStartLog() { pthread_attr_t threadAttr; @@ -366,17 +365,9 @@ static int32_t taosOpenLogFile(char *fn, int32_t maxLines, int32_t maxFileNum) { void taosPrintLog(const char *flags, int32_t dflag, const char *format, ...) { if (tsTotalLogDirGB != 0 && tsAvailLogDirGB < tsMinimalLogDirGB) { - char buf[256] = "\0"; - sprintf(buf, "server disk:%s space remain %.3f GB, total %.1f GB, stop print log.\n", tsLogDir, tsAvailLogDirGB, - tsTotalLogDirGB); - if (atomic_val_compare_exchange_8(&tsNoDisk, 0, 1) == 1) { - taosWrite(tsLogObj.logHandle->fd, buf, (uint32_t)strlen(buf)); - } - puts(buf); + printf("server disk:%s space remain %.3f GB, total %.1f GB, stop print log.\n", tsLogDir, tsAvailLogDirGB, tsTotalLogDirGB); fflush(stdout); return; - } else { - atomic_store_8(&tsNoDisk, 0); } va_list argpointer;