From a214d562abf578423524b98ddc66a55b0a5dc9f6 Mon Sep 17 00:00:00 2001 From: Hui Li Date: Tue, 23 Jun 2020 09:57:06 +0800 Subject: [PATCH 01/42] [modify for coverity scan] --- src/util/src/ttime.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/util/src/ttime.c b/src/util/src/ttime.c index 02d72dd1f4..5feda312b1 100644 --- a/src/util/src/ttime.c +++ b/src/util/src/ttime.c @@ -56,7 +56,7 @@ int64_t user_mktime64(const unsigned int year0, const unsigned int mon0, year -= 1; } - int64_t res = (((((int64_t) (year/4 - year/100 + year/400 + 367*mon/12 + day) + + int64_t res = (((((int64_t) (year/4 - year/100 + year/400 + (int64_t)(367*mon)/12 + day) + year*365 - 719499)*24 + hour)*60 + min)*60 + sec); return (res + timezone); From 8808272bedbc4ade32a03d96e4510d082bd2d3b7 Mon Sep 17 00:00:00 2001 From: Hui Li Date: Tue, 23 Jun 2020 11:21:26 +0800 Subject: [PATCH 02/42] [modify for coverity scan] --- src/kit/taosdump/taosdump.c | 29 +++++++++++++++++++---------- src/util/src/tlog.c | 9 +++++---- 2 files changed, 24 insertions(+), 14 deletions(-) diff --git a/src/kit/taosdump/taosdump.c b/src/kit/taosdump/taosdump.c index 678de7daa7..63cdf259d6 100644 --- a/src/kit/taosdump/taosdump.c +++ b/src/kit/taosdump/taosdump.c @@ -644,14 +644,15 @@ int taosDumpDb(SDbInfo *dbInfo, SDumpArguments *arguments, FILE *fp) { (void)lseek(fd, 0, SEEK_SET); + STableRecord tableInfo; while (1) { - memset(&tableRecord, 0, sizeof(STableRecord)); - ssize_t ret = read(fd, &tableRecord, sizeof(STableRecord)); + memset(&tableInfo, 0, sizeof(STableRecord)); + ssize_t ret = read(fd, &tableInfo, sizeof(STableRecord)); if (ret <= 0) break; - tableRecord.name[sizeof(tableRecord.name) - 1] = 0; - tableRecord.metric[sizeof(tableRecord.metric) - 1] = 0; - taosDumpTable(tableRecord.name, tableRecord.metric, arguments, fp); + tableInfo.name[sizeof(tableInfo.name) - 1] = 0; + tableInfo.metric[sizeof(tableInfo.metric) - 1] = 0; + taosDumpTable(tableInfo.name, tableInfo.metric, arguments, fp); } close(fd); @@ -910,14 +911,22 @@ int32_t taosDumpMetric(char *metric, SDumpArguments *arguments, FILE *fp) { (void)lseek(fd, 0, SEEK_SET); + STableRecord tableInfo; + char tableName[TSDB_TABLE_NAME_LEN] ; + char metricName[TSDB_TABLE_NAME_LEN]; while (1) { - memset(&tableRecord, 0, sizeof(STableRecord)); - ssize_t ret = read(fd, &tableRecord, sizeof(STableRecord)); + memset(&tableInfo, 0, sizeof(STableRecord)); + memset(tableName, 0, TSDB_TABLE_NAME_LEN); + memset(metricName, 0, TSDB_TABLE_NAME_LEN); + ssize_t ret = read(fd, &tableInfo, sizeof(STableRecord)); if (ret <= 0) break; - tableRecord.name[sizeof(tableRecord.name) - 1] = 0; - tableRecord.metric[sizeof(tableRecord.metric) - 1] = 0; - taosDumpTable(tableRecord.name, tableRecord.metric, arguments, fp); + //tableInfo.name[sizeof(tableInfo.name) - 1] = 0; + //tableInfo.metric[sizeof(tableInfo.metric) - 1] = 0; + //taosDumpTable(tableInfo.name, tableInfo.metric, arguments, fp); + tstrncpy(tableName, tableInfo.name, TSDB_TABLE_NAME_LEN-1); + tstrncpy(metricName, tableInfo.metric, TSDB_TABLE_NAME_LEN-1); + taosDumpTable(tableName, metricName, arguments, fp); } close(fd); diff --git a/src/util/src/tlog.c b/src/util/src/tlog.c index 50dae7b177..581c4e2e9f 100644 --- a/src/util/src/tlog.c +++ b/src/util/src/tlog.c @@ -276,14 +276,15 @@ static int32_t taosOpenLogFile(char *fn, int32_t maxLines, int32_t maxFileNum) { } } - sprintf(name, "%s.%d", tsLogObj.logName, tsLogObj.flag); + char fileName[LOG_FILE_NAME_LEN + 50] = "\0"; + sprintf(fileName, "%s.%d", tsLogObj.logName, 
tsLogObj.flag); pthread_mutex_init(&tsLogObj.logMutex, NULL); umask(0); - tsLogObj.logHandle->fd = open(name, O_WRONLY | O_CREAT, S_IRWXU | S_IRWXG | S_IRWXO); + tsLogObj.logHandle->fd = open(fileName, O_WRONLY | O_CREAT, S_IRWXU | S_IRWXG | S_IRWXO); if (tsLogObj.logHandle->fd < 0) { - printf("\nfailed to open log file:%s, reason:%s\n", name, strerror(errno)); + printf("\nfailed to open log file:%s, reason:%s\n", fileName, strerror(errno)); return -1; } taosLockFile(tsLogObj.logHandle->fd); @@ -291,7 +292,7 @@ static int32_t taosOpenLogFile(char *fn, int32_t maxLines, int32_t maxFileNum) { // only an estimate for number of lines struct stat filestat; if (fstat(tsLogObj.logHandle->fd, &filestat) < 0) { - printf("\nfailed to fstat log file:%s, reason:%s\n", name, strerror(errno)); + printf("\nfailed to fstat log file:%s, reason:%s\n", fileName, strerror(errno)); return -1; } size = (int32_t)filestat.st_size; From f74a895febd21bc580d6cd01fb9c25e7aa404882 Mon Sep 17 00:00:00 2001 From: Hui Li Date: Wed, 24 Jun 2020 09:32:47 +0800 Subject: [PATCH 03/42] [modify for coverity scan] --- src/os/linux/src/linuxSysPara.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/os/linux/src/linuxSysPara.c b/src/os/linux/src/linuxSysPara.c index a1d013fa72..fce22d6160 100644 --- a/src/os/linux/src/linuxSysPara.c +++ b/src/os/linux/src/linuxSysPara.c @@ -160,7 +160,7 @@ static void taosGetSystemTimezone() { /* load time zone string from /etc/timezone */ FILE *f = fopen("/etc/timezone", "r"); - char buf[65] = {0}; + char buf[68] = {0}; if (f != NULL) { int len = fread(buf, 64, 1, f); if(len < 64 && ferror(f)) { From 850b1fe3fac7a5c78eb203b6ea72ed2b2c1d8745 Mon Sep 17 00:00:00 2001 From: Hui Li Date: Wed, 24 Jun 2020 10:36:32 +0800 Subject: [PATCH 04/42] [modify for coverity scan] --- src/os/linux/src/linuxSysPara.c | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/src/os/linux/src/linuxSysPara.c b/src/os/linux/src/linuxSysPara.c index fce22d6160..96a8d1cb81 100644 --- a/src/os/linux/src/linuxSysPara.c +++ b/src/os/linux/src/linuxSysPara.c @@ -170,18 +170,17 @@ static void taosGetSystemTimezone() { } fclose(f); - } - char *lineEnd = strstr(buf, "\n"); - if (lineEnd != NULL) { - *lineEnd = 0; - } + char *lineEnd = strstr(buf, "\n"); + if (lineEnd != NULL) { + *lineEnd = 0; + } - // for CentOS system, /etc/timezone does not exist. Ignore the TZ environment variables - if (strlen(buf) > 0) { - setenv("TZ", buf, 1); + // for CentOS system, /etc/timezone does not exist. 
Ignore the TZ environment variables + if (strlen(buf) > 0) { + setenv("TZ", buf, 1); + } } - // get and set default timezone tzset(); From 19db51164d32e9b16684abf04ac3852931e55236 Mon Sep 17 00:00:00 2001 From: Ping Xiao Date: Thu, 23 Jul 2020 13:51:33 +0800 Subject: [PATCH 05/42] test coverity --- tests/pytest/util/dnodes.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/pytest/util/dnodes.py b/tests/pytest/util/dnodes.py index 582bd0abae..ec3865f4f2 100644 --- a/tests/pytest/util/dnodes.py +++ b/tests/pytest/util/dnodes.py @@ -351,7 +351,7 @@ class TDDnodes: psCmd = "ps -ef|grep -w taosd| grep -v grep | awk '{print $2}'" processID = subprocess.check_output(psCmd, shell=True).decode("utf-8") while(processID): - killCmd = "kill -KILL %s > /dev/null 2>&1" % processID + killCmd = "kill -TERM %s > /dev/null 2>&1" % processID os.system(killCmd) time.sleep(1) processID = subprocess.check_output( @@ -360,7 +360,7 @@ class TDDnodes: psCmd = "ps -ef|grep -w valgrind.bin| grep -v grep | awk '{print $2}'" processID = subprocess.check_output(psCmd, shell=True).decode("utf-8") while(processID): - killCmd = "kill -KILL %s > /dev/null 2>&1" % processID + killCmd = "kill -TERM %s > /dev/null 2>&1" % processID os.system(killCmd) time.sleep(1) processID = subprocess.check_output( @@ -467,7 +467,7 @@ class TDDnodes: psCmd = "ps -ef|grep -w taosd| grep -v grep | awk '{print $2}'" processID = subprocess.check_output(psCmd, shell=True).decode("utf-8") while(processID): - killCmd = "kill -KILL %s > /dev/null 2>&1" % processID + killCmd = "kill -TERM %s > /dev/null 2>&1" % processID os.system(killCmd) time.sleep(1) processID = subprocess.check_output( @@ -476,7 +476,7 @@ class TDDnodes: psCmd = "ps -ef|grep -w valgrind.bin| grep -v grep | awk '{print $2}'" processID = subprocess.check_output(psCmd, shell=True).decode("utf-8") while(processID): - killCmd = "kill -KILL %s > /dev/null 2>&1" % processID + killCmd = "kill -TERM %s > /dev/null 2>&1" % processID os.system(killCmd) time.sleep(1) processID = subprocess.check_output( From a9c951bd1243b03600429d8daa07e7029f9272df Mon Sep 17 00:00:00 2001 From: Ping Xiao Date: Tue, 18 Aug 2020 14:28:35 +0800 Subject: [PATCH 06/42] just a test --- src/tsdb/src/tsdbRead.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/tsdb/src/tsdbRead.c b/src/tsdb/src/tsdbRead.c index ac3a6dac07..2ba4a08c84 100644 --- a/src/tsdb/src/tsdbRead.c +++ b/src/tsdb/src/tsdbRead.c @@ -2430,7 +2430,7 @@ int32_t tsdbQuerySTableByTagCond(TSDB_REPO_T* tsdb, uint64_t uid, TSKEY skey, co } CATCH( code ) { CLEANUP_EXECUTE(); terrno = code; - tsdbUnlockRepoMeta(tsdb); // unlock tsdb in any cases + //tsdbUnlockRepoMeta(tsdb); // unlock tsdb in any cases goto _error; // TODO: more error handling From 36d126c7cec90907b7d4dc0fe13e613aeab7c924 Mon Sep 17 00:00:00 2001 From: Hui Li Date: Tue, 22 Sep 2020 12:51:37 +0800 Subject: [PATCH 07/42] update version.inc to 2.0.4.0 --- cmake/version.inc | 2 +- src/connector/go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/cmake/version.inc b/cmake/version.inc index 52d62fca65..aa8a4b6463 100644 --- a/cmake/version.inc +++ b/cmake/version.inc @@ -4,7 +4,7 @@ PROJECT(TDengine) IF (DEFINED VERNUMBER) SET(TD_VER_NUMBER ${VERNUMBER}) ELSE () - SET(TD_VER_NUMBER "2.0.3.0") + SET(TD_VER_NUMBER "2.0.4.0") ENDIF () IF (DEFINED VERCOMPATIBLE) diff --git a/src/connector/go b/src/connector/go index 8c58c512b6..567b7b12f3 160000 --- a/src/connector/go +++ b/src/connector/go @@ -1 +1 @@ -Subproject commit 
8c58c512b6acda8bcdfa48fdc7140227b5221766 +Subproject commit 567b7b12f3fd2775c718d284beffc8c38dd6c219 From fb064f5337835778bf15efa36f684eb7a894b1fb Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Tue, 22 Sep 2020 10:52:43 +0000 Subject: [PATCH 08/42] minor changes --- cmake/version.inc | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/cmake/version.inc b/cmake/version.inc index aa8a4b6463..52fbe3ca58 100644 --- a/cmake/version.inc +++ b/cmake/version.inc @@ -42,6 +42,12 @@ IF (DEFINED CPUTYPE) ELSE () IF (TD_WINDOWS_32) SET(TD_VER_CPUTYPE "x86") + ELSEIF (TD_LINUX_32) + SET(TD_VER_CPUTYPE "x86") + ELSEIF (TD_ARM_32) + SET(TD_VER_CPUTYPE "x86") + ELSEIF (TD_MIPS_32) + SET(TD_VER_CPUTYPE "x86") ELSE () SET(TD_VER_CPUTYPE "x64") ENDIF () From a5f68b0ff8722575c1af901fd8e49319cd0a8b2d Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 25 Sep 2020 14:22:49 +0800 Subject: [PATCH 09/42] [td-1604]. --- src/client/src/tscParseInsert.c | 44 ++++++++++++++++----------------- src/client/src/tscPrepare.c | 6 ----- 2 files changed, 21 insertions(+), 29 deletions(-) diff --git a/src/client/src/tscParseInsert.c b/src/client/src/tscParseInsert.c index 327aac22d1..c7f168fa59 100644 --- a/src/client/src/tscParseInsert.c +++ b/src/client/src/tscParseInsert.c @@ -406,7 +406,7 @@ static int32_t tsCheckTimestamp(STableDataBlocks *pDataBlocks, const char *start return TSDB_CODE_SUCCESS; } -int tsParseOneRowData(char **str, STableDataBlocks *pDataBlocks, SSchema schema[], SParsedDataColInfo *spd, char *error, +int tsParseOneRowData(char **str, STableDataBlocks *pDataBlocks, SSchema schema[], SParsedDataColInfo *spd, SSqlCmd* pCmd, int16_t timePrec, int32_t *code, char *tmpTokenBuf) { int32_t index = 0; SStrToken sToken = {0}; @@ -426,12 +426,17 @@ int tsParseOneRowData(char **str, STableDataBlocks *pDataBlocks, SSchema schema[ *str += index; if (sToken.type == TK_QUESTION) { + if (pCmd->insertType != TSDB_QUERY_TYPE_STMT_INSERT) { + *code = tscSQLSyntaxErrMsg(pCmd->payload, "? only allowed in binding insertion", *str); + return -1; + } + uint32_t offset = (uint32_t)(start - pDataBlocks->pData); if (tscAddParamToDataBlock(pDataBlocks, pSchema->type, (uint8_t)timePrec, pSchema->bytes, offset) != NULL) { continue; } - strcpy(error, "client out of memory"); + strcpy(pCmd->payload, "client out of memory"); *code = TSDB_CODE_TSC_OUT_OF_MEMORY; return -1; } @@ -439,8 +444,7 @@ int tsParseOneRowData(char **str, STableDataBlocks *pDataBlocks, SSchema schema[ int16_t type = sToken.type; if ((type != TK_NOW && type != TK_INTEGER && type != TK_STRING && type != TK_FLOAT && type != TK_BOOL && type != TK_NULL && type != TK_HEX && type != TK_OCT && type != TK_BIN) || (sToken.n == 0) || (type == TK_RP)) { - tscSQLSyntaxErrMsg(error, "invalid data or symbol", sToken.z); - *code = TSDB_CODE_TSC_SQL_SYNTAX_ERROR; + *code = tscSQLSyntaxErrMsg(pCmd->payload, "invalid data or symbol", sToken.z); return -1; } @@ -470,14 +474,14 @@ int tsParseOneRowData(char **str, STableDataBlocks *pDataBlocks, SSchema schema[ } bool isPrimaryKey = (colIndex == PRIMARYKEY_TIMESTAMP_COL_INDEX); - int32_t ret = tsParseOneColumnData(pSchema, &sToken, start, error, str, isPrimaryKey, timePrec); + int32_t ret = tsParseOneColumnData(pSchema, &sToken, start, pCmd->payload, str, isPrimaryKey, timePrec); if (ret != TSDB_CODE_SUCCESS) { *code = TSDB_CODE_TSC_SQL_SYNTAX_ERROR; return -1; // NOTE: here 0 mean error! 
} if (isPrimaryKey && tsCheckTimestamp(pDataBlocks, start) != TSDB_CODE_SUCCESS) { - tscInvalidSQLErrMsg(error, "client time/server time can not be mixed up", sToken.z); + tscInvalidSQLErrMsg(pCmd->payload, "client time/server time can not be mixed up", sToken.z); *code = TSDB_CODE_TSC_INVALID_TIME_STAMP; return -1; } @@ -522,7 +526,7 @@ static int32_t rowDataCompar(const void *lhs, const void *rhs) { } int tsParseValues(char **str, STableDataBlocks *pDataBlock, STableMeta *pTableMeta, int maxRows, - SParsedDataColInfo *spd, char *error, int32_t *code, char *tmpTokenBuf) { + SParsedDataColInfo *spd, SSqlCmd* pCmd, int32_t *code, char *tmpTokenBuf) { int32_t index = 0; SStrToken sToken; @@ -534,8 +538,7 @@ int tsParseValues(char **str, STableDataBlocks *pDataBlock, STableMeta *pTableMe int32_t precision = tinfo.precision; if (spd->hasVal[0] == false) { - strcpy(error, "primary timestamp column can not be null"); - *code = TSDB_CODE_TSC_INVALID_SQL; + *code = tscInvalidSQLErrMsg(pCmd->payload, "primary timestamp column can not be null", *str); return -1; } @@ -547,17 +550,17 @@ int tsParseValues(char **str, STableDataBlocks *pDataBlock, STableMeta *pTableMe *str += index; if (numOfRows >= maxRows || pDataBlock->size + tinfo.rowSize >= pDataBlock->nAllocSize) { int32_t tSize; - int32_t retcode = tscAllocateMemIfNeed(pDataBlock, tinfo.rowSize, &tSize); - if (retcode != TSDB_CODE_SUCCESS) { //TODO pass the correct error code to client - strcpy(error, "client out of memory"); - *code = retcode; + *code = tscAllocateMemIfNeed(pDataBlock, tinfo.rowSize, &tSize); + if (*code != TSDB_CODE_SUCCESS) { //TODO pass the correct error code to client + strcpy(pCmd->payload, "client out of memory"); return -1; } + ASSERT(tSize > maxRows); maxRows = tSize; } - int32_t len = tsParseOneRowData(str, pDataBlock, pSchema, spd, error, precision, code, tmpTokenBuf); + int32_t len = tsParseOneRowData(str, pDataBlock, pSchema, spd, pCmd, precision, code, tmpTokenBuf); if (len <= 0) { // error message has been set in tsParseOneRowData return -1; } @@ -568,7 +571,7 @@ int tsParseValues(char **str, STableDataBlocks *pDataBlock, STableMeta *pTableMe sToken = tStrGetToken(*str, &index, false, 0, NULL); *str += index; if (sToken.n == 0 || sToken.type != TK_RP) { - tscSQLSyntaxErrMsg(error, ") expected", *str); + tscSQLSyntaxErrMsg(pCmd->payload, ") expected", *str); *code = TSDB_CODE_TSC_SQL_SYNTAX_ERROR; return -1; } @@ -577,7 +580,7 @@ int tsParseValues(char **str, STableDataBlocks *pDataBlock, STableMeta *pTableMe } if (numOfRows <= 0) { - strcpy(error, "no any data points"); + strcpy(pCmd->payload, "no any data points"); *code = TSDB_CODE_TSC_SQL_SYNTAX_ERROR; return -1; } else { @@ -704,7 +707,7 @@ static int32_t doParseInsertStatement(SSqlObj *pSql, void *pTableList, char **st return TSDB_CODE_TSC_OUT_OF_MEMORY; } - int32_t numOfRows = tsParseValues(str, dataBuf, pTableMeta, maxNumOfRows, spd, pCmd->payload, &code, tmpTokenBuf); + int32_t numOfRows = tsParseValues(str, dataBuf, pTableMeta, maxNumOfRows, spd, pCmd, &code, tmpTokenBuf); free(tmpTokenBuf); if (numOfRows <= 0) { return code; @@ -724,10 +727,6 @@ static int32_t doParseInsertStatement(SSqlObj *pSql, void *pTableList, char **st dataBuf->vgId = pTableMeta->vgroupInfo.vgId; dataBuf->numOfTables = 1; - /* - * the value of pRes->numOfRows does not affect the true result of AFFECTED ROWS, - * which is actually returned from server. 
- */ *totalNum += numOfRows; return TSDB_CODE_SUCCESS; } @@ -1460,8 +1459,7 @@ static void parseFileSendDataBlock(void *param, TAOS_RES *tres, int code) { char *lineptr = line; strtolower(line, line); - int32_t len = - tsParseOneRowData(&lineptr, pTableDataBlock, pSchema, &spd, pCmd->payload, tinfo.precision, &code, tokenBuf); + int32_t len = tsParseOneRowData(&lineptr, pTableDataBlock, pSchema, &spd, pCmd, tinfo.precision, &code, tokenBuf); if (len <= 0 || pTableDataBlock->numOfParams > 0) { pSql->res.code = code; break; diff --git a/src/client/src/tscPrepare.c b/src/client/src/tscPrepare.c index c4ca6793ff..73425a81b3 100644 --- a/src/client/src/tscPrepare.c +++ b/src/client/src/tscPrepare.c @@ -43,10 +43,6 @@ typedef struct SNormalStmt { tVariant* params; } SNormalStmt; -//typedef struct SInsertStmt { -// -//} SInsertStmt; - typedef struct STscStmt { bool isInsert; STscObj* taos; @@ -54,7 +50,6 @@ typedef struct STscStmt { SNormalStmt normal; } STscStmt; - static int normalStmtAddPart(SNormalStmt* stmt, bool isParam, char* str, uint32_t len) { uint16_t size = stmt->numParts + 1; if (size > stmt->sizeParts) { @@ -514,7 +509,6 @@ int taos_stmt_prepare(TAOS_STMT* stmt, const char* sql, unsigned long length) { SSqlObj* pSql = pStmt->pSql; size_t sqlLen = strlen(sql); - //doAsyncQuery(pObj, pSql, waitForQueryRsp, taos, sqlstr, sqlLen); SSqlCmd *pCmd = &pSql->cmd; SSqlRes *pRes = &pSql->res; pSql->param = (void*) pSql; From f82c9afcbcde398aaabaea6f7b05b24073f326a8 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Sun, 27 Sep 2020 06:41:22 +0000 Subject: [PATCH 10/42] hotfix for invalid table id --- src/mnode/src/mnodeInt.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/mnode/src/mnodeInt.c b/src/mnode/src/mnodeInt.c index 91c8dcb6e5..fb1b8741a9 100644 --- a/src/mnode/src/mnodeInt.c +++ b/src/mnode/src/mnodeInt.c @@ -39,6 +39,11 @@ void mnodeCreateMsg(SMnodeMsg *pMsg, SRpcMsg *rpcMsg) { } int32_t mnodeInitMsg(SMnodeMsg *pMsg) { + if (pMsg->pUser != NULL) { + mDebug("app:%p:%p, user info already inited", pMsg->rpcMsg.ahandle, pMsg); + return TSDB_CODE_SUCCESS; + } + pMsg->pUser = mnodeGetUserFromConn(pMsg->rpcMsg.handle); if (pMsg->pUser == NULL) { return TSDB_CODE_MND_INVALID_USER; From 9c7aecc3d0ea8f4e9637b09835911712ba78299b Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sun, 27 Sep 2020 15:00:35 +0800 Subject: [PATCH 11/42] [td-1619] --- src/tsdb/src/tsdbRead.c | 41 +++++++++++++++++++++++++++++++++-------- 1 file changed, 33 insertions(+), 8 deletions(-) diff --git a/src/tsdb/src/tsdbRead.c b/src/tsdb/src/tsdbRead.c index a3bc0de272..6c1602a857 100644 --- a/src/tsdb/src/tsdbRead.c +++ b/src/tsdb/src/tsdbRead.c @@ -697,22 +697,41 @@ static int32_t doLoadFileDataBlock(STsdbQueryHandle* pQueryHandle, SCompBlock* p pCheckInfo->pDataCols = tdNewDataCols(pMeta->maxRowBytes, pMeta->maxCols, pRepo->config.maxRowsPerFileBlock); if (pCheckInfo->pDataCols == NULL) { - tsdbError("%p failed to malloc buf, %p", pQueryHandle, pQueryHandle->qinfo); + tsdbError("%p failed to malloc buf for pDataCols, %p", pQueryHandle, pQueryHandle->qinfo); terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; - return terrno; + goto _error; } } STSchema* pSchema = tsdbGetTableSchema(pCheckInfo->pTableObj); - tdInitDataCols(pCheckInfo->pDataCols, pSchema); - tdInitDataCols(pQueryHandle->rhelper.pDataCols[0], pSchema); - tdInitDataCols(pQueryHandle->rhelper.pDataCols[1], pSchema); + int32_t code = tdInitDataCols(pCheckInfo->pDataCols, pSchema); + if (code != TSDB_CODE_SUCCESS) { + tsdbError("%p failed to malloc buf for 
pDataCols, %p", pQueryHandle, pQueryHandle->qinfo); + terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; + goto _error; + } + + code = tdInitDataCols(pQueryHandle->rhelper.pDataCols[0], pSchema); + if (code != TSDB_CODE_SUCCESS) { + tsdbError("%p failed to malloc buf for rhelper.pDataCols[0], %p", pQueryHandle, pQueryHandle->qinfo); + terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; + goto _error; + } + + code = tdInitDataCols(pQueryHandle->rhelper.pDataCols[1], pSchema); + if (code != TSDB_CODE_SUCCESS) { + tsdbError("%p failed to malloc buf for rhelper.pDataCols[1], %p", pQueryHandle, pQueryHandle->qinfo); + terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; + goto _error; + } int16_t* colIds = pQueryHandle->defaultLoadColumn->pData; int32_t ret = tsdbLoadBlockDataCols(&(pQueryHandle->rhelper), pBlock, pCheckInfo->pCompInfo, colIds, (int)(QH_GET_NUM_OF_COLS(pQueryHandle))); if (ret != TSDB_CODE_SUCCESS) { - return terrno; + int32_t c = terrno; + assert(c != TSDB_CODE_SUCCESS); + goto _error; } SDataBlockLoadInfo* pBlockLoadInfo = &pQueryHandle->dataBlockLoadInfo; @@ -729,10 +748,16 @@ static int32_t doLoadFileDataBlock(STsdbQueryHandle* pQueryHandle, SCompBlock* p int64_t elapsedTime = (taosGetTimestampUs() - st); pQueryHandle->cost.blockLoadTime += elapsedTime; - tsdbDebug("%p load file block into buffer, index:%d, brange:%"PRId64"-%"PRId64" , rows:%d, elapsed time:%"PRId64 " us, %p", + tsdbDebug("%p load file block into buffer, index:%d, brange:%"PRId64"-%"PRId64", rows:%d, elapsed time:%"PRId64 " us, %p", pQueryHandle, slotIndex, pBlock->keyFirst, pBlock->keyLast, pBlock->numOfRows, elapsedTime, pQueryHandle->qinfo); - return TSDB_CODE_SUCCESS; + +_error: + pBlock->numOfRows = 0; + + tsdbError("%p error occurs in loading file block, index:%d, brange:%"PRId64"-%"PRId64", rows:%d, %p", + pQueryHandle, slotIndex, pBlock->keyFirst, pBlock->keyLast, pBlock->numOfRows, pQueryHandle->qinfo); + return terrno; } static int32_t getEndPosInDataBlock(STsdbQueryHandle* pQueryHandle, SDataBlockInfo* pBlockInfo); From 2811c96b307b977236514b66822dbc5ee742de05 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Mon, 28 Sep 2020 10:29:49 +0800 Subject: [PATCH 12/42] TD-1530 --- cmake/define.inc | 12 ++++- src/client/src/TSDBJNIConnector.c | 10 ++-- src/client/src/tscServer.c | 4 +- src/client/src/tscStream.c | 4 +- src/common/src/tdataformat.c | 8 +-- src/connector/go | 2 +- src/os/inc/os.h | 4 ++ src/os/inc/osArm32.h | 90 +++++++++++++++++++++++++++++++ src/os/inc/osWindows.h | 4 +- src/os/src/detail/osSysinfo.c | 1 - src/query/src/qExecutor.c | 4 +- src/rpc/src/rpcMain.c | 2 +- src/rpc/src/rpcUdp.c | 2 +- src/util/src/hash.c | 2 +- 14 files changed, 125 insertions(+), 24 deletions(-) create mode 100644 src/os/inc/osArm32.h diff --git a/cmake/define.inc b/cmake/define.inc index 84df6f5405..98b54bfb50 100755 --- a/cmake/define.inc +++ b/cmake/define.inc @@ -48,6 +48,7 @@ ENDIF () IF (TD_LINUX_64) ADD_DEFINITIONS(-D_M_X64) ADD_DEFINITIONS(-D_TD_LINUX_64) + MESSAGE(STATUS "linux64 is defined") SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -g3 -gdwarf-2 -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE") ADD_DEFINITIONS(-DUSE_LIBICONV) ENDIF () @@ -55,6 +56,7 @@ ENDIF () IF (TD_LINUX_32) ADD_DEFINITIONS(-D_TD_LINUX_32) ADD_DEFINITIONS(-DUSE_LIBICONV) + MESSAGE(STATUS "linux32 is defined") SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -g -fsigned-char -munaligned-access -fpack-struct=8 -latomic -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE") ENDIF () @@ -63,22 +65,26 @@ IF (TD_ARM_64) ADD_DEFINITIONS(-D_TD_ARM_64_) ADD_DEFINITIONS(-D_TD_ARM_) 
ADD_DEFINITIONS(-DUSE_LIBICONV) + MESSAGE(STATUS "arm64 is defined") SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -g -fsigned-char -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE") ENDIF () IF (TD_ARM_32) ADD_DEFINITIONS(-D_TD_ARM_32_) ADD_DEFINITIONS(-D_TD_ARM_) - SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -g -fsigned-char -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE") + MESSAGE(STATUS "arm32 is defined") + SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -g -fsigned-char -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE -Wno-pointer-to-int-cast -Wno-int-to-pointer-cast -Wno-incompatible-pointer-types ") ENDIF () IF (TD_MIPS_64) ADD_DEFINITIONS(-D_TD_MIPS_64_) + MESSAGE(STATUS "mips64 is defined") SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -g3 -gdwarf-2 -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE") ENDIF () IF (TD_MIPS_32) ADD_DEFINITIONS(-D_TD_MIPS_32_) + MESSAGE(STATUS "mips32 is defined") SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -g3 -gdwarf-2 -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE") ENDIF () @@ -86,6 +92,7 @@ IF (TD_APLHINE) SET(COMMON_FLAGS "${COMMON_FLAGS} -largp") link_libraries(/usr/lib/libargp.a) ADD_DEFINITIONS(-D_ALPINE) + MESSAGE(STATUS "aplhine is defined") ENDIF () IF (TD_LINUX) @@ -118,6 +125,7 @@ IF (TD_DARWIN_64) ADD_DEFINITIONS(-DDARWIN) ADD_DEFINITIONS(-D_REENTRANT -D__USE_POSIX -D_LIBC_REENTRANT) ADD_DEFINITIONS(-DUSE_LIBICONV) + MESSAGE(STATUS "darwin64 is defined") SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -Wno-missing-braces -fPIC -g -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE") SET(DEBUG_FLAGS "-O0 -DDEBUG") SET(RELEASE_FLAGS "-O0") @@ -147,11 +155,13 @@ IF (TD_WINDOWS_64) ADD_DEFINITIONS(-D_M_X64) ADD_DEFINITIONS(-D_TD_WINDOWS_64) ADD_DEFINITIONS(-DUSE_LIBICONV) + MESSAGE(STATUS "windows64 is defined") ENDIF () IF (TD_WINDOWS_32) ADD_DEFINITIONS(-D_TD_WINDOWS_32) ADD_DEFINITIONS(-DUSE_LIBICONV) + MESSAGE(STATUS "windows32 is defined") ENDIF () INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/inc) diff --git a/src/client/src/TSDBJNIConnector.c b/src/client/src/TSDBJNIConnector.c index bd980b75a3..9368faa0ee 100644 --- a/src/client/src/TSDBJNIConnector.c +++ b/src/client/src/TSDBJNIConnector.c @@ -149,7 +149,7 @@ JNIEXPORT void JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_initImp(JNIEnv *e JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_setOptions(JNIEnv *env, jobject jobj, jint optionIndex, jstring optionValue) { if (optionValue == NULL) { - jniDebug("option index:%d value is null", optionIndex); + jniDebug("option index:%d value is null", (int32_t)optionIndex); return 0; } @@ -183,7 +183,7 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_setOptions(JNIEnv } (*env)->ReleaseStringUTFChars(env, optionValue, tz1); } else { - jniError("option index:%d is not found", optionIndex); + jniError("option index:%d is not found", (int32_t)optionIndex); } return res; @@ -227,10 +227,10 @@ JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_connectImp(JNIEn ret = (jlong)taos_connect((char *)host, (char *)user, (char *)pass, (char *)dbname, (uint16_t)jport); if (ret == 0) { jniError("jobj:%p, conn:%p, connect to database failed, host=%s, user=%s, dbname=%s, port=%d", jobj, (void *)ret, - (char *)host, (char *)user, (char *)dbname, jport); + (char *)host, (char *)user, (char *)dbname, (int32_t)jport); } else { jniDebug("jobj:%p, conn:%p, connect to database succeed, host=%s, user=%s, dbname=%s, port=%d", jobj, (void *)ret, - (char *)host, (char *)user, (char *)dbname, jport); + (char 
*)host, (char *)user, (char *)dbname, (int32_t)jport); } if (host != NULL) (*env)->ReleaseStringUTFChars(env, jhost, host); @@ -385,7 +385,7 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getAffectedRowsIm } jint ret = taos_affected_rows((SSqlObj *)res); - jniDebug("jobj:%p, conn:%p, sql:%p, res: %p, affect rows:%d", jobj, tscon, (void *)con, (void *)res, ret); + jniDebug("jobj:%p, conn:%p, sql:%p, res: %p, affect rows:%d", jobj, tscon, (void *)con, (void *)res, (int32_t)ret); return ret; } diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c index 0a5cfef80a..e3bd30fb71 100644 --- a/src/client/src/tscServer.c +++ b/src/client/src/tscServer.c @@ -642,14 +642,14 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) { size_t numOfSrcCols = taosArrayGetSize(pQueryInfo->colList); if (numOfSrcCols <= 0 && !tscQueryTags(pQueryInfo)) { - tscError("%p illegal value of numOfCols in query msg: %"PRIu64", table cols:%d", pSql, numOfSrcCols, + tscError("%p illegal value of numOfCols in query msg: %" PRIu64 ", table cols:%d", pSql, (int64_t)numOfSrcCols, tscGetNumOfColumns(pTableMeta)); return TSDB_CODE_TSC_INVALID_SQL; } if (pQueryInfo->interval.interval < 0) { - tscError("%p illegal value of aggregation time interval in query msg: %ld", pSql, pQueryInfo->interval.interval); + tscError("%p illegal value of aggregation time interval in query msg: %" PRId64, pSql, (int64_t)pQueryInfo->interval.interval); return TSDB_CODE_TSC_INVALID_SQL; } diff --git a/src/client/src/tscStream.c b/src/client/src/tscStream.c index 81b8cf7359..d01ede279a 100644 --- a/src/client/src/tscStream.c +++ b/src/client/src/tscStream.c @@ -398,8 +398,8 @@ static void tscSetSlidingWindowInfo(SSqlObj *pSql, SSqlStream *pStream) { SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); if (pQueryInfo->interval.intervalUnit != 'n' && pQueryInfo->interval.intervalUnit!= 'y' && pQueryInfo->interval.interval < minIntervalTime) { - tscWarn("%p stream:%p, original sample interval:%ld too small, reset to:%" PRId64, pSql, pStream, - pQueryInfo->interval.interval, minIntervalTime); + tscWarn("%p stream:%p, original sample interval:%" PRId64 " too small, reset to:%" PRId64, pSql, pStream, + (int64_t)pQueryInfo->interval.interval, minIntervalTime); pQueryInfo->interval.interval = minIntervalTime; } diff --git a/src/common/src/tdataformat.c b/src/common/src/tdataformat.c index e7f40442a0..f669803263 100644 --- a/src/common/src/tdataformat.c +++ b/src/common/src/tdataformat.c @@ -313,13 +313,13 @@ void dataColSetOffset(SDataCol *pCol, int nEle) { SDataCols *tdNewDataCols(int maxRowSize, int maxCols, int maxRows) { SDataCols *pCols = (SDataCols *)calloc(1, sizeof(SDataCols)); if (pCols == NULL) { - uDebug("malloc failure, size:%"PRId64" failed, reason:%s", sizeof(SDataCols), strerror(errno)); + uDebug("malloc failure, size:%"PRId64" failed, reason:%s", (int64_t)sizeof(SDataCols), strerror(errno)); return NULL; } pCols->cols = (SDataCol *)calloc(maxCols, sizeof(SDataCol)); if (pCols->cols == NULL) { - uDebug("malloc failure, size:%"PRId64" failed, reason:%s", sizeof(SDataCol) * maxCols, strerror(errno)); + uDebug("malloc failure, size:%"PRId64" failed, reason:%s", (int64_t)sizeof(SDataCol) * maxCols, strerror(errno)); tdFreeDataCols(pCols); return NULL; } @@ -331,7 +331,7 @@ SDataCols *tdNewDataCols(int maxRowSize, int maxCols, int maxRows) { pCols->buf = malloc(pCols->bufSize); if (pCols->buf == NULL) { - uDebug("malloc failure, size:%"PRId64" failed, reason:%s", sizeof(SDataCol) * maxCols, 
strerror(errno)); + uDebug("malloc failure, size:%"PRId64" failed, reason:%s", (int64_t)sizeof(SDataCol) * maxCols, strerror(errno)); tdFreeDataCols(pCols); return NULL; } @@ -716,4 +716,4 @@ SKVRow tdGetKVRowFromBuilder(SKVRowBuilder *pBuilder) { memcpy(kvRowValues(row), pBuilder->buf, pBuilder->size); return row; -} \ No newline at end of file +} diff --git a/src/connector/go b/src/connector/go index 8d7bf74385..06ec30a0f1 160000 --- a/src/connector/go +++ b/src/connector/go @@ -1 +1 @@ -Subproject commit 8d7bf743852897110cbdcc7c4322cd7a74d4167b +Subproject commit 06ec30a0f1762e8169bf6b9045c82bcaa52bcdf0 diff --git a/src/os/inc/os.h b/src/os/inc/os.h index d4b71173a0..8ce49d6750 100644 --- a/src/os/inc/os.h +++ b/src/os/inc/os.h @@ -28,6 +28,10 @@ extern "C" { #include "osArm64.h" #endif +#ifdef _TD_ARM_32_ +#include "osArm32.h" +#endif + #ifdef _TD_LINUX_64 #include "osLinux64.h" #endif diff --git a/src/os/inc/osArm32.h b/src/os/inc/osArm32.h new file mode 100644 index 0000000000..9a2b8c1b55 --- /dev/null +++ b/src/os/inc/osArm32.h @@ -0,0 +1,90 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#ifndef TDENGINE_OS_ARM32_H +#define TDENGINE_OS_ARM32_H + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define TAOS_OS_FUNC_LZ4 +#define BUILDIN_CLZL(val) __builtin_clzll(val) +#define BUILDIN_CTZL(val) __builtin_ctzll(val) +#define BUILDIN_CLZ(val) __builtin_clz(val) +#define BUILDIN_CTZ(val) __builtin_ctz(val) + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/src/os/inc/osWindows.h b/src/os/inc/osWindows.h index d4f3d6d2af..dc1da35037 100644 --- a/src/os/inc/osWindows.h +++ b/src/os/inc/osWindows.h @@ -51,8 +51,6 @@ extern "C" { #endif -#define TAOS_OS_FUNC_ATOMIC - #define TAOS_OS_FUNC_LZ4 int32_t BUILDIN_CLZL(uint64_t val); int32_t BUILDIN_CLZ(uint32_t val); @@ -351,4 +349,4 @@ void wordfree(wordexp_t *pwordexp); #ifdef __cplusplus } #endif -#endif \ No newline at end of file +#endif diff --git a/src/os/src/detail/osSysinfo.c b/src/os/src/detail/osSysinfo.c index 83ecd85809..f6470fc3e1 100644 --- a/src/os/src/detail/osSysinfo.c +++ b/src/os/src/detail/osSysinfo.c @@ -569,7 +569,6 @@ int taosSystem(const char *cmd) { } } -int _sysctl(struct __sysctl_args *args ); void taosSetCoreDump() { if (0 == tsEnableCoreFile) { return; diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index f2d324e376..d4c1c8560b 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -5050,8 
+5050,8 @@ static void sequentialTableProcess(SQInfo *pQInfo) { } qDebug( - "QInfo %p numOfTables:%"PRIu64", index:%d, numOfGroups:%" PRIzu ", %"PRId64" points returned, total:%"PRId64", offset:%" PRId64, - pQInfo, pQInfo->tableqinfoGroupInfo.numOfTables, pQInfo->tableIndex, numOfGroups, pQuery->rec.rows, pQuery->rec.total, + "QInfo %p numOfTables:%" PRIu64 ", index:%d, numOfGroups:%" PRIzu ", %" PRId64 " points returned, total:%" PRId64 ", offset:%" PRId64, + pQInfo, (uint64_t)pQInfo->tableqinfoGroupInfo.numOfTables, pQInfo->tableIndex, numOfGroups, pQuery->rec.rows, pQuery->rec.total, pQuery->limit.offset); } diff --git a/src/rpc/src/rpcMain.c b/src/rpc/src/rpcMain.c index 50b1507a56..f0b8c996c5 100644 --- a/src/rpc/src/rpcMain.c +++ b/src/rpc/src/rpcMain.c @@ -240,7 +240,7 @@ void *rpcOpen(const SRpcInit *pInit) { size_t size = sizeof(SRpcConn) * pRpc->sessions; pRpc->connList = (SRpcConn *)calloc(1, size); if (pRpc->connList == NULL) { - tError("%s failed to allocate memory for taos connections, size:%ld", pRpc->label, size); + tError("%s failed to allocate memory for taos connections, size:%" PRId64, pRpc->label, (int64_t)size); rpcClose(pRpc); return NULL; } diff --git a/src/rpc/src/rpcUdp.c b/src/rpc/src/rpcUdp.c index 8e24aed8f7..4ea47582b9 100644 --- a/src/rpc/src/rpcUdp.c +++ b/src/rpc/src/rpcUdp.c @@ -211,7 +211,7 @@ static void *taosRecvUdpData(void *param) { char *tmsg = malloc(dataLen + tsRpcOverhead); if (NULL == tmsg) { - tError("%s failed to allocate memory, size:%ld", pConn->label, dataLen); + tError("%s failed to allocate memory, size:%" PRId64, pConn->label, (int64_t)dataLen); continue; } else { tDebug("UDP malloc mem: %p", tmsg); diff --git a/src/util/src/hash.c b/src/util/src/hash.c index 7427a2e4f3..625d4af1ac 100644 --- a/src/util/src/hash.c +++ b/src/util/src/hash.c @@ -738,7 +738,7 @@ void taosHashTableResize(SHashObj *pHashObj) { int64_t et = taosGetTimestampUs(); - uDebug("hash table resize completed, new capacity:%"PRId64", load factor:%f, elapsed time:%fms", pHashObj->capacity, + uDebug("hash table resize completed, new capacity:%d, load factor:%f, elapsed time:%fms", (int32_t)pHashObj->capacity, ((double)pHashObj->size) / pHashObj->capacity, (et - st) / 1000.0); } From 077c8ff41290320adf1100ba1200f3218f8b61f9 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Mon, 28 Sep 2020 02:55:59 +0000 Subject: [PATCH 13/42] TD-1530 --- src/os/inc/osArm32.h | 99 +++++++++++++++++++++++++++++++++++++++++-- src/os/inc/osNingsi.h | 2 - 2 files changed, 95 insertions(+), 6 deletions(-) diff --git a/src/os/inc/osArm32.h b/src/os/inc/osArm32.h index 9a2b8c1b55..51d305bb9a 100644 --- a/src/os/inc/osArm32.h +++ b/src/os/inc/osArm32.h @@ -78,10 +78,101 @@ extern "C" { #include #define TAOS_OS_FUNC_LZ4 -#define BUILDIN_CLZL(val) __builtin_clzll(val) -#define BUILDIN_CTZL(val) __builtin_ctzll(val) -#define BUILDIN_CLZ(val) __builtin_clz(val) -#define BUILDIN_CTZ(val) __builtin_ctz(val) + #define BUILDIN_CLZL(val) __builtin_clzll(val) + #define BUILDIN_CTZL(val) __builtin_ctzll(val) + #define BUILDIN_CLZ(val) __builtin_clz(val) + #define BUILDIN_CTZ(val) __builtin_ctz(val) + +#define TAOS_OS_FUNC_ATOMIC + #define atomic_load_8(ptr) __atomic_load_n((ptr), __ATOMIC_SEQ_CST) + #define atomic_load_16(ptr) __atomic_load_n((ptr), __ATOMIC_SEQ_CST) + #define atomic_load_32(ptr) __atomic_load_n((ptr), __ATOMIC_SEQ_CST) + #define atomic_load_64(ptr) __atomic_load_n((ptr), __ATOMIC_SEQ_CST) + #define atomic_load_ptr(ptr) __atomic_load_n((ptr), __ATOMIC_SEQ_CST) + + //specify for arm32 + 
#define atomic_store_8(ptr, val) (*(ptr)=(val)) + + #define atomic_store_16(ptr, val) __atomic_store_n((ptr), (val), __ATOMIC_SEQ_CST) + #define atomic_store_32(ptr, val) __atomic_store_n((ptr), (val), __ATOMIC_SEQ_CST) + #define atomic_store_64(ptr, val) __atomic_store_n((ptr), (val), __ATOMIC_SEQ_CST) + #define atomic_store_ptr(ptr, val) __atomic_store_n((ptr), (val), __ATOMIC_SEQ_CST) + + #define atomic_exchange_8(ptr, val) __atomic_exchange_n((ptr), (val), __ATOMIC_SEQ_CST) + #define atomic_exchange_16(ptr, val) __atomic_exchange_n((ptr), (val), __ATOMIC_SEQ_CST) + #define atomic_exchange_32(ptr, val) __atomic_exchange_n((ptr), (val), __ATOMIC_SEQ_CST) + #define atomic_exchange_64(ptr, val) __atomic_exchange_n((ptr), (val), __ATOMIC_SEQ_CST) + #define atomic_exchange_ptr(ptr, val) __atomic_exchange_n((ptr), (val), __ATOMIC_SEQ_CST) + + #define atomic_val_compare_exchange_8 __sync_val_compare_and_swap + #define atomic_val_compare_exchange_16 __sync_val_compare_and_swap + #define atomic_val_compare_exchange_32 __sync_val_compare_and_swap + #define atomic_val_compare_exchange_64 __sync_val_compare_and_swap + #define atomic_val_compare_exchange_ptr __sync_val_compare_and_swap + + #define atomic_add_fetch_8(ptr, val) __atomic_add_fetch((ptr), (val), __ATOMIC_SEQ_CST) + #define atomic_add_fetch_16(ptr, val) __atomic_add_fetch((ptr), (val), __ATOMIC_SEQ_CST) + #define atomic_add_fetch_32(ptr, val) __atomic_add_fetch((ptr), (val), __ATOMIC_SEQ_CST) + #define atomic_add_fetch_64(ptr, val) __atomic_add_fetch((ptr), (val), __ATOMIC_SEQ_CST) + #define atomic_add_fetch_ptr(ptr, val) __atomic_add_fetch((ptr), (val), __ATOMIC_SEQ_CST) + + // specify for arm32 + #define atomic_fetch_add_8(ptr, val) __sync_fetch_and_add((ptr), (val)) + + #define atomic_fetch_add_16(ptr, val) __atomic_fetch_add((ptr), (val), __ATOMIC_SEQ_CST) + #define atomic_fetch_add_32(ptr, val) __atomic_fetch_add((ptr), (val), __ATOMIC_SEQ_CST) + #define atomic_fetch_add_64(ptr, val) __atomic_fetch_add((ptr), (val), __ATOMIC_SEQ_CST) + #define atomic_fetch_add_ptr(ptr, val) __atomic_fetch_add((ptr), (val), __ATOMIC_SEQ_CST) + + #define atomic_sub_fetch_8(ptr, val) __atomic_sub_fetch((ptr), (val), __ATOMIC_SEQ_CST) + #define atomic_sub_fetch_16(ptr, val) __atomic_sub_fetch((ptr), (val), __ATOMIC_SEQ_CST) + #define atomic_sub_fetch_32(ptr, val) __atomic_sub_fetch((ptr), (val), __ATOMIC_SEQ_CST) + #define atomic_sub_fetch_64(ptr, val) __atomic_sub_fetch((ptr), (val), __ATOMIC_SEQ_CST) + #define atomic_sub_fetch_ptr(ptr, val) __atomic_sub_fetch((ptr), (val), __ATOMIC_SEQ_CST) + + // specify for arm32 + #define atomic_fetch_sub_8(ptr, val) __sync_fetch_and_sub((ptr), (val)) + + #define atomic_fetch_sub_16(ptr, val) __atomic_fetch_sub((ptr), (val), __ATOMIC_SEQ_CST) + #define atomic_fetch_sub_32(ptr, val) __atomic_fetch_sub((ptr), (val), __ATOMIC_SEQ_CST) + #define atomic_fetch_sub_64(ptr, val) __atomic_fetch_sub((ptr), (val), __ATOMIC_SEQ_CST) + #define atomic_fetch_sub_ptr(ptr, val) __atomic_fetch_sub((ptr), (val), __ATOMIC_SEQ_CST) + + #define atomic_and_fetch_8(ptr, val) __atomic_and_fetch((ptr), (val), __ATOMIC_SEQ_CST) + #define atomic_and_fetch_16(ptr, val) __atomic_and_fetch((ptr), (val), __ATOMIC_SEQ_CST) + #define atomic_and_fetch_32(ptr, val) __atomic_and_fetch((ptr), (val), __ATOMIC_SEQ_CST) + #define atomic_and_fetch_64(ptr, val) __atomic_and_fetch((ptr), (val), __ATOMIC_SEQ_CST) + #define atomic_and_fetch_ptr(ptr, val) __atomic_and_fetch((ptr), (val), __ATOMIC_SEQ_CST) + + #define atomic_fetch_and_8(ptr, val) 
__atomic_fetch_and((ptr), (val), __ATOMIC_SEQ_CST) + #define atomic_fetch_and_16(ptr, val) __atomic_fetch_and((ptr), (val), __ATOMIC_SEQ_CST) + #define atomic_fetch_and_32(ptr, val) __atomic_fetch_and((ptr), (val), __ATOMIC_SEQ_CST) + #define atomic_fetch_and_64(ptr, val) __atomic_fetch_and((ptr), (val), __ATOMIC_SEQ_CST) + #define atomic_fetch_and_ptr(ptr, val) __atomic_fetch_and((ptr), (val), __ATOMIC_SEQ_CST) + + #define atomic_or_fetch_8(ptr, val) __atomic_or_fetch((ptr), (val), __ATOMIC_SEQ_CST) + #define atomic_or_fetch_16(ptr, val) __atomic_or_fetch((ptr), (val), __ATOMIC_SEQ_CST) + #define atomic_or_fetch_32(ptr, val) __atomic_or_fetch((ptr), (val), __ATOMIC_SEQ_CST) + #define atomic_or_fetch_64(ptr, val) __atomic_or_fetch((ptr), (val), __ATOMIC_SEQ_CST) + #define atomic_or_fetch_ptr(ptr, val) __atomic_or_fetch((ptr), (val), __ATOMIC_SEQ_CST) + + #define atomic_fetch_or_8(ptr, val) __atomic_fetch_or((ptr), (val), __ATOMIC_SEQ_CST) + #define atomic_fetch_or_16(ptr, val) __atomic_fetch_or((ptr), (val), __ATOMIC_SEQ_CST) + #define atomic_fetch_or_32(ptr, val) __atomic_fetch_or((ptr), (val), __ATOMIC_SEQ_CST) + #define atomic_fetch_or_64(ptr, val) __atomic_fetch_or((ptr), (val), __ATOMIC_SEQ_CST) + #define atomic_fetch_or_ptr(ptr, val) __atomic_fetch_or((ptr), (val), __ATOMIC_SEQ_CST) + + #define atomic_xor_fetch_8(ptr, val) __atomic_xor_fetch((ptr), (val), __ATOMIC_SEQ_CST) + #define atomic_xor_fetch_16(ptr, val) __atomic_xor_fetch((ptr), (val), __ATOMIC_SEQ_CST) + #define atomic_xor_fetch_32(ptr, val) __atomic_xor_fetch((ptr), (val), __ATOMIC_SEQ_CST) + #define atomic_xor_fetch_64(ptr, val) __atomic_xor_fetch((ptr), (val), __ATOMIC_SEQ_CST) + #define atomic_xor_fetch_ptr(ptr, val) __atomic_xor_fetch((ptr), (val), __ATOMIC_SEQ_CST) + + #define atomic_fetch_xor_8(ptr, val) __atomic_fetch_xor((ptr), (val), __ATOMIC_SEQ_CST) + #define atomic_fetch_xor_16(ptr, val) __atomic_fetch_xor((ptr), (val), __ATOMIC_SEQ_CST) + #define atomic_fetch_xor_32(ptr, val) __atomic_fetch_xor((ptr), (val), __ATOMIC_SEQ_CST) + #define atomic_fetch_xor_64(ptr, val) __atomic_fetch_xor((ptr), (val), __ATOMIC_SEQ_CST) + #define atomic_fetch_xor_ptr(ptr, val) __atomic_fetch_xor((ptr), (val), __ATOMIC_SEQ_CST) #ifdef __cplusplus } diff --git a/src/os/inc/osNingsi.h b/src/os/inc/osNingsi.h index da7f796b59..d88d279745 100644 --- a/src/os/inc/osNingsi.h +++ b/src/os/inc/osNingsi.h @@ -129,8 +129,6 @@ void* atomic_exchange_ptr_impl( void **ptr, void *val ); #define atomic_fetch_xor_64(ptr, val) __sync_fetch_and_xor((ptr), (val)) #define atomic_fetch_xor_ptr(ptr, val) __sync_fetch_and_xor((ptr), (val)) - - #ifdef __cplusplus } #endif From ac004dc40adb59ddadcabadce07b16bffb846b70 Mon Sep 17 00:00:00 2001 From: Bo Xiao <69349626+boxiaobj@users.noreply.github.com> Date: Mon, 28 Sep 2020 11:19:32 +0800 Subject: [PATCH 14/42] Add alter user privilledge in Document add alter user privilledge --- documentation20/webdocs/markdowndocs/administrator-ch.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/documentation20/webdocs/markdowndocs/administrator-ch.md b/documentation20/webdocs/markdowndocs/administrator-ch.md index 50b388650b..ee978aa79a 100644 --- a/documentation20/webdocs/markdowndocs/administrator-ch.md +++ b/documentation20/webdocs/markdowndocs/administrator-ch.md @@ -233,6 +233,12 @@ ALTER USER PASS <'password'>; 修改用户密码, 为避免被转换为小写,密码需要用单引号引用,单引号为英文半角 +``` +ALTER USER PRIVILEDGE <'super'|'write'|'read'>; +``` + +修改用户权限为:super/write/read。 为避免被转换为小写,密码需要用单引号引用,单引号为英文半角 + ``` SHOW USERS; ``` From 
6a1825905b50005d8e1b3978c84814d4421db8f7 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Mon, 28 Sep 2020 03:22:31 +0000 Subject: [PATCH 15/42] minor changes --- src/os/inc/osArm32.h | 91 -------------------------------------- src/os/src/linux/aarch32.c | 34 ++++++++++++++ 2 files changed, 34 insertions(+), 91 deletions(-) create mode 100644 src/os/src/linux/aarch32.c diff --git a/src/os/inc/osArm32.h b/src/os/inc/osArm32.h index 51d305bb9a..17b4d2dbd5 100644 --- a/src/os/inc/osArm32.h +++ b/src/os/inc/osArm32.h @@ -83,97 +83,6 @@ extern "C" { #define BUILDIN_CLZ(val) __builtin_clz(val) #define BUILDIN_CTZ(val) __builtin_ctz(val) -#define TAOS_OS_FUNC_ATOMIC - #define atomic_load_8(ptr) __atomic_load_n((ptr), __ATOMIC_SEQ_CST) - #define atomic_load_16(ptr) __atomic_load_n((ptr), __ATOMIC_SEQ_CST) - #define atomic_load_32(ptr) __atomic_load_n((ptr), __ATOMIC_SEQ_CST) - #define atomic_load_64(ptr) __atomic_load_n((ptr), __ATOMIC_SEQ_CST) - #define atomic_load_ptr(ptr) __atomic_load_n((ptr), __ATOMIC_SEQ_CST) - - //specify for arm32 - #define atomic_store_8(ptr, val) (*(ptr)=(val)) - - #define atomic_store_16(ptr, val) __atomic_store_n((ptr), (val), __ATOMIC_SEQ_CST) - #define atomic_store_32(ptr, val) __atomic_store_n((ptr), (val), __ATOMIC_SEQ_CST) - #define atomic_store_64(ptr, val) __atomic_store_n((ptr), (val), __ATOMIC_SEQ_CST) - #define atomic_store_ptr(ptr, val) __atomic_store_n((ptr), (val), __ATOMIC_SEQ_CST) - - #define atomic_exchange_8(ptr, val) __atomic_exchange_n((ptr), (val), __ATOMIC_SEQ_CST) - #define atomic_exchange_16(ptr, val) __atomic_exchange_n((ptr), (val), __ATOMIC_SEQ_CST) - #define atomic_exchange_32(ptr, val) __atomic_exchange_n((ptr), (val), __ATOMIC_SEQ_CST) - #define atomic_exchange_64(ptr, val) __atomic_exchange_n((ptr), (val), __ATOMIC_SEQ_CST) - #define atomic_exchange_ptr(ptr, val) __atomic_exchange_n((ptr), (val), __ATOMIC_SEQ_CST) - - #define atomic_val_compare_exchange_8 __sync_val_compare_and_swap - #define atomic_val_compare_exchange_16 __sync_val_compare_and_swap - #define atomic_val_compare_exchange_32 __sync_val_compare_and_swap - #define atomic_val_compare_exchange_64 __sync_val_compare_and_swap - #define atomic_val_compare_exchange_ptr __sync_val_compare_and_swap - - #define atomic_add_fetch_8(ptr, val) __atomic_add_fetch((ptr), (val), __ATOMIC_SEQ_CST) - #define atomic_add_fetch_16(ptr, val) __atomic_add_fetch((ptr), (val), __ATOMIC_SEQ_CST) - #define atomic_add_fetch_32(ptr, val) __atomic_add_fetch((ptr), (val), __ATOMIC_SEQ_CST) - #define atomic_add_fetch_64(ptr, val) __atomic_add_fetch((ptr), (val), __ATOMIC_SEQ_CST) - #define atomic_add_fetch_ptr(ptr, val) __atomic_add_fetch((ptr), (val), __ATOMIC_SEQ_CST) - - // specify for arm32 - #define atomic_fetch_add_8(ptr, val) __sync_fetch_and_add((ptr), (val)) - - #define atomic_fetch_add_16(ptr, val) __atomic_fetch_add((ptr), (val), __ATOMIC_SEQ_CST) - #define atomic_fetch_add_32(ptr, val) __atomic_fetch_add((ptr), (val), __ATOMIC_SEQ_CST) - #define atomic_fetch_add_64(ptr, val) __atomic_fetch_add((ptr), (val), __ATOMIC_SEQ_CST) - #define atomic_fetch_add_ptr(ptr, val) __atomic_fetch_add((ptr), (val), __ATOMIC_SEQ_CST) - - #define atomic_sub_fetch_8(ptr, val) __atomic_sub_fetch((ptr), (val), __ATOMIC_SEQ_CST) - #define atomic_sub_fetch_16(ptr, val) __atomic_sub_fetch((ptr), (val), __ATOMIC_SEQ_CST) - #define atomic_sub_fetch_32(ptr, val) __atomic_sub_fetch((ptr), (val), __ATOMIC_SEQ_CST) - #define atomic_sub_fetch_64(ptr, val) __atomic_sub_fetch((ptr), (val), __ATOMIC_SEQ_CST) - #define 
atomic_sub_fetch_ptr(ptr, val) __atomic_sub_fetch((ptr), (val), __ATOMIC_SEQ_CST) - - // specify for arm32 - #define atomic_fetch_sub_8(ptr, val) __sync_fetch_and_sub((ptr), (val)) - - #define atomic_fetch_sub_16(ptr, val) __atomic_fetch_sub((ptr), (val), __ATOMIC_SEQ_CST) - #define atomic_fetch_sub_32(ptr, val) __atomic_fetch_sub((ptr), (val), __ATOMIC_SEQ_CST) - #define atomic_fetch_sub_64(ptr, val) __atomic_fetch_sub((ptr), (val), __ATOMIC_SEQ_CST) - #define atomic_fetch_sub_ptr(ptr, val) __atomic_fetch_sub((ptr), (val), __ATOMIC_SEQ_CST) - - #define atomic_and_fetch_8(ptr, val) __atomic_and_fetch((ptr), (val), __ATOMIC_SEQ_CST) - #define atomic_and_fetch_16(ptr, val) __atomic_and_fetch((ptr), (val), __ATOMIC_SEQ_CST) - #define atomic_and_fetch_32(ptr, val) __atomic_and_fetch((ptr), (val), __ATOMIC_SEQ_CST) - #define atomic_and_fetch_64(ptr, val) __atomic_and_fetch((ptr), (val), __ATOMIC_SEQ_CST) - #define atomic_and_fetch_ptr(ptr, val) __atomic_and_fetch((ptr), (val), __ATOMIC_SEQ_CST) - - #define atomic_fetch_and_8(ptr, val) __atomic_fetch_and((ptr), (val), __ATOMIC_SEQ_CST) - #define atomic_fetch_and_16(ptr, val) __atomic_fetch_and((ptr), (val), __ATOMIC_SEQ_CST) - #define atomic_fetch_and_32(ptr, val) __atomic_fetch_and((ptr), (val), __ATOMIC_SEQ_CST) - #define atomic_fetch_and_64(ptr, val) __atomic_fetch_and((ptr), (val), __ATOMIC_SEQ_CST) - #define atomic_fetch_and_ptr(ptr, val) __atomic_fetch_and((ptr), (val), __ATOMIC_SEQ_CST) - - #define atomic_or_fetch_8(ptr, val) __atomic_or_fetch((ptr), (val), __ATOMIC_SEQ_CST) - #define atomic_or_fetch_16(ptr, val) __atomic_or_fetch((ptr), (val), __ATOMIC_SEQ_CST) - #define atomic_or_fetch_32(ptr, val) __atomic_or_fetch((ptr), (val), __ATOMIC_SEQ_CST) - #define atomic_or_fetch_64(ptr, val) __atomic_or_fetch((ptr), (val), __ATOMIC_SEQ_CST) - #define atomic_or_fetch_ptr(ptr, val) __atomic_or_fetch((ptr), (val), __ATOMIC_SEQ_CST) - - #define atomic_fetch_or_8(ptr, val) __atomic_fetch_or((ptr), (val), __ATOMIC_SEQ_CST) - #define atomic_fetch_or_16(ptr, val) __atomic_fetch_or((ptr), (val), __ATOMIC_SEQ_CST) - #define atomic_fetch_or_32(ptr, val) __atomic_fetch_or((ptr), (val), __ATOMIC_SEQ_CST) - #define atomic_fetch_or_64(ptr, val) __atomic_fetch_or((ptr), (val), __ATOMIC_SEQ_CST) - #define atomic_fetch_or_ptr(ptr, val) __atomic_fetch_or((ptr), (val), __ATOMIC_SEQ_CST) - - #define atomic_xor_fetch_8(ptr, val) __atomic_xor_fetch((ptr), (val), __ATOMIC_SEQ_CST) - #define atomic_xor_fetch_16(ptr, val) __atomic_xor_fetch((ptr), (val), __ATOMIC_SEQ_CST) - #define atomic_xor_fetch_32(ptr, val) __atomic_xor_fetch((ptr), (val), __ATOMIC_SEQ_CST) - #define atomic_xor_fetch_64(ptr, val) __atomic_xor_fetch((ptr), (val), __ATOMIC_SEQ_CST) - #define atomic_xor_fetch_ptr(ptr, val) __atomic_xor_fetch((ptr), (val), __ATOMIC_SEQ_CST) - - #define atomic_fetch_xor_8(ptr, val) __atomic_fetch_xor((ptr), (val), __ATOMIC_SEQ_CST) - #define atomic_fetch_xor_16(ptr, val) __atomic_fetch_xor((ptr), (val), __ATOMIC_SEQ_CST) - #define atomic_fetch_xor_32(ptr, val) __atomic_fetch_xor((ptr), (val), __ATOMIC_SEQ_CST) - #define atomic_fetch_xor_64(ptr, val) __atomic_fetch_xor((ptr), (val), __ATOMIC_SEQ_CST) - #define atomic_fetch_xor_ptr(ptr, val) __atomic_fetch_xor((ptr), (val), __ATOMIC_SEQ_CST) - #ifdef __cplusplus } #endif diff --git a/src/os/src/linux/aarch32.c b/src/os/src/linux/aarch32.c new file mode 100644 index 0000000000..62ef437685 --- /dev/null +++ b/src/os/src/linux/aarch32.c @@ -0,0 +1,34 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. 
+ * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#define _DEFAULT_SOURCE +#include "os.h" + +#ifdef _TD_ARM_32_ + +int8_t atomic_store_8(void *ptr, int8_t val) {} + return __atomic_store_n((ptr), (val), __ATOMIC_SEQ_CST) +} + +int8_t atomic_fetch_sub_8(void *ptr, int8_t val) {} + return __atomic_fetch_sub((ptr), (val), __ATOMIC_SEQ_CST) +} + +int8_t atomic_fetch_add_8(void *ptr, int8_t val) {} + return __atomic_fetch_add((ptr), (val), __ATOMIC_SEQ_CST) +} + +#endif + From ca028a3d17ce4cc15d34fe61ff032ef2416f2760 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Mon, 28 Sep 2020 04:20:40 +0000 Subject: [PATCH 16/42] TD-1530 --- src/os/src/detail/CMakeLists.txt | 4 ++++ src/os/src/linux/aarch32.c | 34 -------------------------------- 2 files changed, 4 insertions(+), 34 deletions(-) delete mode 100644 src/os/src/linux/aarch32.c diff --git a/src/os/src/detail/CMakeLists.txt b/src/os/src/detail/CMakeLists.txt index afb8935453..cbb8d46da8 100644 --- a/src/os/src/detail/CMakeLists.txt +++ b/src/os/src/detail/CMakeLists.txt @@ -9,3 +9,7 @@ SET_SOURCE_FILES_PROPERTIES(osCoredump.c PROPERTIES COMPILE_FLAGS -w) ADD_LIBRARY(osdetail ${SRC}) TARGET_LINK_LIBRARIES(osdetail os) + +IF (TD_ARM_64) + TARGET_LINK_LIBRARIES(osdetail atomic) +ENDIF () \ No newline at end of file diff --git a/src/os/src/linux/aarch32.c b/src/os/src/linux/aarch32.c deleted file mode 100644 index 62ef437685..0000000000 --- a/src/os/src/linux/aarch32.c +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Copyright (c) 2019 TAOS Data, Inc. - * - * This program is free software: you can use, redistribute, and/or modify - * it under the terms of the GNU Affero General Public License, version 3 - * or later ("AGPL"), as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . 
- */ - -#define _DEFAULT_SOURCE -#include "os.h" - -#ifdef _TD_ARM_32_ - -int8_t atomic_store_8(void *ptr, int8_t val) {} - return __atomic_store_n((ptr), (val), __ATOMIC_SEQ_CST) -} - -int8_t atomic_fetch_sub_8(void *ptr, int8_t val) {} - return __atomic_fetch_sub((ptr), (val), __ATOMIC_SEQ_CST) -} - -int8_t atomic_fetch_add_8(void *ptr, int8_t val) {} - return __atomic_fetch_add((ptr), (val), __ATOMIC_SEQ_CST) -} - -#endif - From e32e507e1f949443cb9eafe05cdf352cb402d92f Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Mon, 28 Sep 2020 14:29:23 +0800 Subject: [PATCH 17/42] TD-1530 --- cmake/define.inc | 6 +++--- src/common/src/ttypes.c | 8 ++++---- src/common/src/tvariant.c | 16 ++++++++-------- src/inc/taosdef.h | 2 +- src/os/inc/os.h | 6 +++--- src/os/src/detail/CMakeLists.txt | 4 ++-- src/os/src/linux/ningsi.c | 2 +- src/plugins/http/src/httpGcJson.c | 2 +- src/plugins/http/src/httpJson.c | 10 +++++----- src/sync/src/syncRetrieve.c | 2 +- src/sync/test/syncServer.c | 2 +- src/wal/test/waltest.c | 4 ++-- tests/comparisonTest/tdengine/tdengineTest.c | 2 +- tests/examples/c/demo.c | 3 ++- tests/tsim/src/simExe.c | 4 ++-- 15 files changed, 37 insertions(+), 36 deletions(-) diff --git a/cmake/define.inc b/cmake/define.inc index 98b54bfb50..5a5a667cd8 100755 --- a/cmake/define.inc +++ b/cmake/define.inc @@ -62,7 +62,7 @@ ENDIF () IF (TD_ARM_64) ADD_DEFINITIONS(-D_M_X64) - ADD_DEFINITIONS(-D_TD_ARM_64_) + ADD_DEFINITIONS(-D_TD_ARM_64) ADD_DEFINITIONS(-D_TD_ARM_) ADD_DEFINITIONS(-DUSE_LIBICONV) MESSAGE(STATUS "arm64 is defined") @@ -70,7 +70,7 @@ IF (TD_ARM_64) ENDIF () IF (TD_ARM_32) - ADD_DEFINITIONS(-D_TD_ARM_32_) + ADD_DEFINITIONS(-D_TD_ARM_32) ADD_DEFINITIONS(-D_TD_ARM_) MESSAGE(STATUS "arm32 is defined") SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -g -fsigned-char -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE -Wno-pointer-to-int-cast -Wno-int-to-pointer-cast -Wno-incompatible-pointer-types ") @@ -102,7 +102,7 @@ IF (TD_LINUX) ADD_DEFINITIONS(-D_REENTRANT -D__USE_POSIX -D_LIBC_REENTRANT) IF (TD_NINGSI_60) - ADD_DEFINITIONS(-D_TD_NINGSI_60_) + ADD_DEFINITIONS(-D_TD_NINGSI_60) MESSAGE(STATUS "set ningsi macro to true") ENDIF () diff --git a/src/common/src/ttypes.c b/src/common/src/ttypes.c index ff417b6cde..45ec20ce45 100644 --- a/src/common/src/ttypes.c +++ b/src/common/src/ttypes.c @@ -235,7 +235,7 @@ static void getStatics_f(const TSKEY *primaryKey, const void *pData, int32_t num double csum = 0; csum = GET_DOUBLE_VAL(sum); csum += dsum; -#ifdef _TD_ARM_32_ +#ifdef _TD_ARM_32 SET_DOUBLE_VAL_ALIGN(sum, &csum); SET_DOUBLE_VAL_ALIGN(max, &fmax); SET_DOUBLE_VAL_ALIGN(min, &fmin); @@ -282,7 +282,7 @@ static void getStatics_d(const TSKEY *primaryKey, const void *pData, int32_t num csum += dsum; -#ifdef _TD_ARM_32_ +#ifdef _TD_ARM_32 SET_DOUBLE_VAL_ALIGN(sum, &csum); SET_DOUBLE_VAL_ALIGN(max, &dmax); SET_DOUBLE_VAL_ALIGN(min, &dmin); @@ -494,7 +494,7 @@ void assignVal(char *val, const char *src, int32_t len, int32_t type) { break; } case TSDB_DATA_TYPE_FLOAT: { -#ifdef _TD_ARM_32_ +#ifdef _TD_ARM_32 float fv = GET_FLOAT_VAL(src); SET_FLOAT_VAL_ALIGN(val, &fv); #else @@ -503,7 +503,7 @@ void assignVal(char *val, const char *src, int32_t len, int32_t type) { break; }; case TSDB_DATA_TYPE_DOUBLE: { -#ifdef _TD_ARM_32_ +#ifdef _TD_ARM_32 double dv = GET_DOUBLE_VAL(src); SET_DOUBLE_VAL_ALIGN(val, &dv); #else diff --git a/src/common/src/tvariant.c b/src/common/src/tvariant.c index 069d8eb251..ba118d2ccb 100644 --- a/src/common/src/tvariant.c +++ b/src/common/src/tvariant.c @@ -709,7 
+709,7 @@ int32_t tVariantDump(tVariant *pVariant, char *payload, int16_t type, bool inclu return -1; } -#ifdef _TD_ARM_32_ +#ifdef _TD_ARM_32 //memcpy(&payload, &value, sizeof(float)); float fv = (float)value; SET_FLOAT_VAL_ALIGN(payload, &fv); @@ -718,7 +718,7 @@ int32_t tVariantDump(tVariant *pVariant, char *payload, int16_t type, bool inclu #endif } } else if (pVariant->nType >= TSDB_DATA_TYPE_BOOL && pVariant->nType <= TSDB_DATA_TYPE_BIGINT) { -#ifdef _TD_ARM_32_ +#ifdef _TD_ARM_32 //memcpy(&payload, &pVariant->i64Key, sizeof(float)); float fv = (float)pVariant->i64Key; SET_FLOAT_VAL_ALIGN(payload, &fv); @@ -726,7 +726,7 @@ int32_t tVariantDump(tVariant *pVariant, char *payload, int16_t type, bool inclu *((float *)payload) = (float)pVariant->i64Key; #endif } else if (pVariant->nType == TSDB_DATA_TYPE_DOUBLE || pVariant->nType == TSDB_DATA_TYPE_FLOAT) { -#ifdef _TD_ARM_32_ +#ifdef _TD_ARM_32 //memcpy(&payload, &pVariant->dKey, sizeof(float)); float fv = (float)pVariant->dKey; SET_FLOAT_VAL_ALIGN(payload, &fv); @@ -738,7 +738,7 @@ int32_t tVariantDump(tVariant *pVariant, char *payload, int16_t type, bool inclu return 0; } -#ifdef _TD_ARM_32_ +#ifdef _TD_ARM_32 float fv = GET_FLOAT_VAL(payload); if (isinf(fv) || isnan(fv) || fv > FLT_MAX || fv < -FLT_MAX) { return -1; @@ -765,21 +765,21 @@ int32_t tVariantDump(tVariant *pVariant, char *payload, int16_t type, bool inclu return -1; } -#ifdef _TD_ARM_32_ +#ifdef _TD_ARM_32 SET_DOUBLE_VAL_ALIGN(payload, &value); #else *((double *)payload) = value; #endif } } else if (pVariant->nType >= TSDB_DATA_TYPE_BOOL && pVariant->nType <= TSDB_DATA_TYPE_BIGINT) { -#ifdef _TD_ARM_32_ +#ifdef _TD_ARM_32 double dv = (double)(pVariant->i64Key); SET_DOUBLE_VAL_ALIGN(payload, &dv); #else *((double *)payload) = (double)pVariant->i64Key; #endif } else if (pVariant->nType == TSDB_DATA_TYPE_DOUBLE || pVariant->nType == TSDB_DATA_TYPE_FLOAT) { -#ifdef _TD_ARM_32_ +#ifdef _TD_ARM_32 double dv = (double)(pVariant->dKey); SET_DOUBLE_VAL_ALIGN(payload, &dv); #else @@ -790,7 +790,7 @@ int32_t tVariantDump(tVariant *pVariant, char *payload, int16_t type, bool inclu return 0; } -#ifdef _TD_ARM_32_ +#ifdef _TD_ARM_32 double dv = GET_DOUBLE_VAL(payload); if (isinf(dv) || isnan(dv) || dv > DBL_MAX || dv < -DBL_MAX) { return -1; diff --git a/src/inc/taosdef.h b/src/inc/taosdef.h index 228aba2a5e..f636cde8e5 100644 --- a/src/inc/taosdef.h +++ b/src/inc/taosdef.h @@ -131,7 +131,7 @@ do { \ #define GET_INT16_VAL(x) (*(int16_t *)(x)) #define GET_INT32_VAL(x) (*(int32_t *)(x)) #define GET_INT64_VAL(x) (*(int64_t *)(x)) -#ifdef _TD_ARM_32_ +#ifdef _TD_ARM_32 #define GET_FLOAT_VAL(x) taos_align_get_float(x) #define GET_DOUBLE_VAL(x) taos_align_get_double(x) diff --git a/src/os/inc/os.h b/src/os/inc/os.h index 8ce49d6750..86e16db8b1 100644 --- a/src/os/inc/os.h +++ b/src/os/inc/os.h @@ -24,11 +24,11 @@ extern "C" { #include "osDarwin.h" #endif -#ifdef _TD_ARM_64_ +#ifdef _TD_ARM_64 #include "osArm64.h" #endif -#ifdef _TD_ARM_32_ +#ifdef _TD_ARM_32 #include "osArm32.h" #endif @@ -44,7 +44,7 @@ extern "C" { #include "osAlpine.h" #endif -#ifdef _TD_NINGSI_60_ +#ifdef _TD_NINGSI_60 #include "osNingsi.h" #endif diff --git a/src/os/src/detail/CMakeLists.txt b/src/os/src/detail/CMakeLists.txt index cbb8d46da8..3c6810fcd5 100644 --- a/src/os/src/detail/CMakeLists.txt +++ b/src/os/src/detail/CMakeLists.txt @@ -10,6 +10,6 @@ SET_SOURCE_FILES_PROPERTIES(osCoredump.c PROPERTIES COMPILE_FLAGS -w) ADD_LIBRARY(osdetail ${SRC}) TARGET_LINK_LIBRARIES(osdetail os) -IF (TD_ARM_64) +IF (TD_ARM_32) 
TARGET_LINK_LIBRARIES(osdetail atomic) -ENDIF () \ No newline at end of file +ENDIF () diff --git a/src/os/src/linux/ningsi.c b/src/os/src/linux/ningsi.c index 793ccac84a..2514c38ece 100644 --- a/src/os/src/linux/ningsi.c +++ b/src/os/src/linux/ningsi.c @@ -16,7 +16,7 @@ #define _DEFAULT_SOURCE #include "os.h" -#ifdef _TD_NINGSI_60_ +#ifdef _TD_NINGSI_60 void* atomic_exchange_ptr_impl(void** ptr, void* val ) { void *old; do { diff --git a/src/plugins/http/src/httpGcJson.c b/src/plugins/http/src/httpGcJson.c index a291641dc3..fde8ae2176 100644 --- a/src/plugins/http/src/httpGcJson.c +++ b/src/plugins/http/src/httpGcJson.c @@ -145,7 +145,7 @@ bool gcBuildQueryJson(HttpContext *pContext, HttpSqlCmd *cmd, TAOS_RES *result, len += snprintf(target + len, HTTP_GC_TARGET_SIZE - len, "%s:%d,", fields[i].name, *((int32_t *)row[i])); break; case TSDB_DATA_TYPE_BIGINT: - len += snprintf(target + len, HTTP_GC_TARGET_SIZE - len, "%s:%ld", fields[i].name, *((int64_t *)row[i])); + len += snprintf(target + len, HTTP_GC_TARGET_SIZE - len, "%s:%" PRId64, fields[i].name, *((int64_t *)row[i])); break; case TSDB_DATA_TYPE_FLOAT: len += snprintf(target + len, HTTP_GC_TARGET_SIZE - len, "%s:%.5f", fields[i].name, *((float *)row[i])); diff --git a/src/plugins/http/src/httpJson.c b/src/plugins/http/src/httpJson.c index 7600fb3e43..1aa6cfac4b 100644 --- a/src/plugins/http/src/httpJson.c +++ b/src/plugins/http/src/httpJson.c @@ -113,7 +113,7 @@ int32_t httpWriteJsonBufBody(JsonBuf* buf, bool isTheLast) { httpTrace("context:%p, fd:%d, no data need dump", buf->pContext, buf->pContext->fd); return 0; // there is no data to dump. } else { - int32_t len = sprintf(sLen, "%lx\r\n", srcLen); + int32_t len = sprintf(sLen, "%" PRIx64 "\r\n", srcLen); httpTrace("context:%p, fd:%d, write body, chunkSize:%" PRIu64 ", response:\n%s", buf->pContext, buf->pContext->fd, srcLen, buf->buf); httpWriteBufNoTrace(buf->pContext, sLen, len); @@ -267,9 +267,9 @@ void httpJsonTimestamp(JsonBuf* buf, int64_t t, bool us) { ptm = localtime(&tt); int32_t length = (int32_t) strftime(ts, 35, "%Y-%m-%d %H:%M:%S", ptm); if (us) { - length += snprintf(ts + length, 8, ".%06ld", t % precision); + length += snprintf(ts + length, 8, ".%06" PRId64, t % precision); } else { - length += snprintf(ts + length, 5, ".%03ld", t % precision); + length += snprintf(ts + length, 5, ".%03" PRId64, t % precision); } httpJsonString(buf, ts, length); @@ -287,9 +287,9 @@ void httpJsonUtcTimestamp(JsonBuf* buf, int64_t t, bool us) { ptm = localtime(&tt); int32_t length = (int32_t)strftime(ts, 40, "%Y-%m-%dT%H:%M:%S", ptm); if (us) { - length += snprintf(ts + length, 8, ".%06ld", t % precision); + length += snprintf(ts + length, 8, ".%06" PRId64, t % precision); } else { - length += snprintf(ts + length, 5, ".%03ld", t % precision); + length += snprintf(ts + length, 5, ".%03" PRId64, t % precision); } length += (int32_t)strftime(ts + length, 40 - length, "%z", ptm); diff --git a/src/sync/src/syncRetrieve.c b/src/sync/src/syncRetrieve.c index 6d0b847afe..60625d75ec 100644 --- a/src/sync/src/syncRetrieve.c +++ b/src/sync/src/syncRetrieve.c @@ -325,7 +325,7 @@ static int syncProcessLastWal(SSyncPeer *pPeer, char *wname, uint32_t index) { // if all data up to fversion is read out, it is over if (pPeer->sversion >= fversion && fversion > 0) { code = 0; - sDebug("%s, data up to fversion:%ld has been read out, bytes:%d", pPeer->id, fversion, bytes); + sDebug("%s, data up to fversion:%" PRId64 " has been read out, bytes:%d", pPeer->id, fversion, bytes); break; } diff --git 
a/src/sync/test/syncServer.c b/src/sync/test/syncServer.c index 8e769a461e..aa07779845 100644 --- a/src/sync/test/syncServer.c +++ b/src/sync/test/syncServer.c @@ -386,7 +386,7 @@ int main(int argc, char *argv[]) { printf(" [-m msgSize]: message body size, default is:%d\n", msgSize); printf(" [-o compSize]: compression message size, default is:%d\n", tsCompressMsgSize); printf(" [-w write]: write received data to file(0, 1, 2), default is:%d\n", commit); - printf(" [-v version]: initial node version, default is:%ld\n", syncInfo.version); + printf(" [-v version]: initial node version, default is:%" PRId64 "\n", syncInfo.version); printf(" [-r replica]: replicacation number, default is:%d\n", pCfg->replica); printf(" [-q quorum]: quorum, default is:%d\n", pCfg->quorum); printf(" [-d debugFlag]: debug flag, default:%d\n", rpcDebugFlag); diff --git a/src/wal/test/waltest.c b/src/wal/test/waltest.c index 073dbf72af..bbee1347b8 100644 --- a/src/wal/test/waltest.c +++ b/src/wal/test/waltest.c @@ -71,7 +71,7 @@ int main(int argc, char *argv[]) { printf(" [-t total]: total wal files, default is:%d\n", total); printf(" [-r rows]: rows of records per wal file, default is:%d\n", rows); printf(" [-k keep]: keep the wal after closing, default is:%d\n", keep); - printf(" [-v version]: initial version, default is:%ld\n", ver); + printf(" [-v version]: initial version, default is:%" PRId64 "\n", ver); printf(" [-d debugFlag]: debug flag, default:%d\n", dDebugFlag); printf(" [-h help]: print out this help\n\n"); exit(0); @@ -97,7 +97,7 @@ int main(int argc, char *argv[]) { exit(-1); } - printf("version starts from:%ld\n", ver); + printf("version starts from:%" PRId64 "\n", ver); int contLen = sizeof(SWalHead) + size; SWalHead *pHead = (SWalHead *) malloc(contLen); diff --git a/tests/comparisonTest/tdengine/tdengineTest.c b/tests/comparisonTest/tdengine/tdengineTest.c index 3d78a3d0a5..237403f525 100644 --- a/tests/comparisonTest/tdengine/tdengineTest.c +++ b/tests/comparisonTest/tdengine/tdengineTest.c @@ -278,7 +278,7 @@ void writeData() { free(threads); - printf("---- Spent %f seconds to insert %ld records, speed: %f Rows/Second\n", seconds, statis.totalRows, rs); + printf("---- Spent %f seconds to insert %" PRId64 " records, speed: %f Rows/Second\n", seconds, statis.totalRows, rs); } void readDataImp(void *param) diff --git a/tests/examples/c/demo.c b/tests/examples/c/demo.c index f0e970c332..d64c0de1ce 100644 --- a/tests/examples/c/demo.c +++ b/tests/examples/c/demo.c @@ -19,6 +19,7 @@ #include #include #include +#include #include // TAOS header file int main(int argc, char *argv[]) { @@ -67,7 +68,7 @@ int main(int argc, char *argv[]) { // insert 10 records int i = 0; for (i = 0; i < 10; ++i) { - sprintf(qstr, "insert into m1 values (%ld, %d, %d, %d, %d, %f, %lf, '%s')", 1546300800000 + i * 1000, i, i, i, i*10000000, i*1.0, i*2.0, "hello"); + sprintf(qstr, "insert into m1 values (%" PRId64 ", %d, %d, %d, %d, %f, %lf, '%s')", 1546300800000 + i * 1000, i, i, i, i*10000000, i*1.0, i*2.0, "hello"); printf("qstr: %s\n", qstr); if (taos_query(taos, qstr)) { printf("insert row: %i, reason:%s\n", i, taos_errstr(taos)); diff --git a/tests/tsim/src/simExe.c b/tests/tsim/src/simExe.c index 463dc33c7c..4bb0cbe86f 100644 --- a/tests/tsim/src/simExe.c +++ b/tests/tsim/src/simExe.c @@ -751,7 +751,7 @@ bool simExecuteNativeSqlCommand(SScript *script, char *rest, bool isSlow) { sprintf(value, "%" PRId64, *((int64_t *)row[i])); break; case TSDB_DATA_TYPE_FLOAT:{ -#ifdef _TD_ARM_32_ +#ifdef _TD_ARM_32 float fv = 0; 
*(int32_t*)(&fv) = *(int32_t*)row[i]; sprintf(value, "%.5f", fv); @@ -761,7 +761,7 @@ bool simExecuteNativeSqlCommand(SScript *script, char *rest, bool isSlow) { } break; case TSDB_DATA_TYPE_DOUBLE: { -#ifdef _TD_ARM_32_ +#ifdef _TD_ARM_32 double dv = 0; *(int64_t*)(&dv) = *(int64_t*)row[i]; sprintf(value, "%.9lf", dv); From 3b724adeb3cad451d30a6643deb92bd33b757785 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Mon, 28 Sep 2020 07:26:39 +0000 Subject: [PATCH 18/42] TD-1530 --- src/plugins/http/src/httpContext.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/plugins/http/src/httpContext.c b/src/plugins/http/src/httpContext.c index 6abd382f8e..fb8ab48498 100644 --- a/src/plugins/http/src/httpContext.c +++ b/src/plugins/http/src/httpContext.c @@ -130,7 +130,7 @@ HttpContext *httpCreateContext(int32_t fd) { HttpContext *httpGetContext(void *ptr) { uint64_t handleVal = (uint64_t)ptr; - HttpContext **ppContext = taosCacheAcquireByKey(tsHttpServer.contextCache, &handleVal, sizeof(HttpContext *)); + HttpContext **ppContext = taosCacheAcquireByKey(tsHttpServer.contextCache, &handleVal, sizeof(uint64_t)); if (ppContext) { HttpContext *pContext = *ppContext; From dec6288006b3cc66124d09f8f09b579f22112062 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Mon, 28 Sep 2020 07:59:35 +0000 Subject: [PATCH 19/42] TD-1530 --- src/plugins/http/src/httpContext.c | 10 +++++----- src/util/inc/tcache.h | 10 ++++++++++ 2 files changed, 15 insertions(+), 5 deletions(-) diff --git a/src/plugins/http/src/httpContext.c b/src/plugins/http/src/httpContext.c index fb8ab48498..106ba9a772 100644 --- a/src/plugins/http/src/httpContext.c +++ b/src/plugins/http/src/httpContext.c @@ -67,7 +67,7 @@ static void httpDestroyContext(void *data) { } bool httpInitContexts() { - tsHttpServer.contextCache = taosCacheInit(TSDB_DATA_TYPE_BIGINT, 2, true, httpDestroyContext, "restc"); + tsHttpServer.contextCache = taosCacheInit(TSDB_CACHE_PTR_KEY, 2, true, httpDestroyContext, "restc"); if (tsHttpServer.contextCache == NULL) { httpError("failed to init context cache"); return false; @@ -117,8 +117,8 @@ HttpContext *httpCreateContext(int32_t fd) { pContext->state = HTTP_CONTEXT_STATE_READY; pContext->parser = httpCreateParser(pContext); - uint64_t handleVal = (uint64_t)pContext; - HttpContext **ppContext = taosCachePut(tsHttpServer.contextCache, &handleVal, sizeof(int64_t), &pContext, sizeof(int64_t), 3000); + TSDB_CACHE_PTR_TYPE handleVal = (TSDB_CACHE_PTR_TYPE)pContext; + HttpContext **ppContext = taosCachePut(tsHttpServer.contextCache, &handleVal, TSDB_CACHE_PTR_LEN, &pContext, TSDB_CACHE_PTR_LEN, 3000); pContext->ppContext = ppContext; httpDebug("context:%p, fd:%d, is created, data:%p", pContext, fd, ppContext); @@ -129,8 +129,8 @@ HttpContext *httpCreateContext(int32_t fd) { } HttpContext *httpGetContext(void *ptr) { - uint64_t handleVal = (uint64_t)ptr; - HttpContext **ppContext = taosCacheAcquireByKey(tsHttpServer.contextCache, &handleVal, sizeof(uint64_t)); + TSDB_CACHE_PTR_TYPE handleVal = (TSDB_CACHE_PTR_TYPE)ptr; + HttpContext **ppContext = taosCacheAcquireByKey(tsHttpServer.contextCache, &handleVal, TSDB_CACHE_PTR_LEN); if (ppContext) { HttpContext *pContext = *ppContext; diff --git a/src/util/inc/tcache.h b/src/util/inc/tcache.h index af5f30c7c3..6ef02b63d7 100644 --- a/src/util/inc/tcache.h +++ b/src/util/inc/tcache.h @@ -24,6 +24,16 @@ extern "C" { #include "tlockfree.h" #include "hash.h" +#if defined(_TD_ARM_32) + #define TSDB_CACHE_PTR_KEY TSDB_DATA_TYPE_INT + #define TSDB_CACHE_PTR_TYPE 
int32_t + #define TSDB_CACHE_PTR_LEN sizeof(int32_t) +#else + #define TSDB_CACHE_PTR_KEY TSDB_DATA_TYPE_BIGINT + #define TSDB_CACHE_PTR_TYPE int64_t + #define TSDB_CACHE_PTR_LEN sizeof(int64_t) +#endif + typedef void (*__cache_free_fn_t)(void*); typedef struct SCacheStatis { From f4900138fe8e8e00c0be22a34c84d15ab29e3dfd Mon Sep 17 00:00:00 2001 From: Hui Li Date: Mon, 28 Sep 2020 17:09:07 +0800 Subject: [PATCH 20/42] [TD-1502] --- tests/examples/go/taosdemo.go | 409 ++++++++++++++++++++++++++++++++++ 1 file changed, 409 insertions(+) create mode 100644 tests/examples/go/taosdemo.go diff --git a/tests/examples/go/taosdemo.go b/tests/examples/go/taosdemo.go new file mode 100644 index 0000000000..b42e1e6d70 --- /dev/null +++ b/tests/examples/go/taosdemo.go @@ -0,0 +1,409 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ +package main + +import ( + "database/sql" + "fmt" + _ "github.com/taosdata/driver-go/taosSql" + "os" + "sync" + "runtime" + "strconv" + "time" + "flag" + "math/rand" + //"golang.org/x/sys/unix" +) + +const ( + maxLocationSize = 32 + maxSqlBufSize = 65480 +) + +var locations = [maxLocationSize]string { + "Beijing", "Shanghai", "Guangzhou", "Shenzhen", + "HangZhou", "Tianjin", "Wuhan", "Changsha", + "Nanjing", "Xian"} + +type config struct { + hostName string + serverPort int + user string + password string + dbName string + supTblName string + tablePrefix string + numOftables int + numOfRecordsPerTable int + numOfRecordsPerReq int + numOfThreads int + startTimestamp string + startTs int64 + + keep int + days int +} + +var configPara config +var taosDriverName = "taosSql" +var url string + +func init() { + flag.StringVar(&configPara.hostName, "h", "127.0.0.1","The host to connect to TDengine server.") + flag.IntVar(&configPara.serverPort, "p", 6030, "The TCP/IP port number to use for the connection to TDengine server.") + flag.StringVar(&configPara.user, "u", "root", "The TDengine user name to use when connecting to the server.") + flag.StringVar(&configPara.password, "P", "taosdata", "The password to use when connecting to the server.") + flag.StringVar(&configPara.dbName, "d", "test", "Destination database.") + flag.StringVar(&configPara.tablePrefix, "m", "d", "Table prefix name.") + flag.IntVar(&configPara.numOftables, "t", 2, "The number of tables.") + flag.IntVar(&configPara.numOfRecordsPerTable, "n", 10, "The number of records per table.") + flag.IntVar(&configPara.numOfRecordsPerReq, "r", 3, "The number of records per request.") + flag.IntVar(&configPara.numOfThreads, "T", 1, "The number of threads.") + flag.StringVar(&configPara.startTimestamp, "s", "2020-10-01 08:00:00", "The start timestamp for one table.") + flag.Parse() + + configPara.keep = 365 * 20 + configPara.days = 30 + configPara.supTblName = "meters" + + startTs, err := time.ParseInLocation("2006-01-02 15:04:05", configPara.startTimestamp, time.Local) + if err==nil { + configPara.startTs = startTs.UnixNano() / 1e6 + } +} + +func printAllArgs() { + fmt.Printf("\n============= 
args parse result: =============\n") + fmt.Printf("dbName: %v\n", configPara.hostName) + fmt.Printf("serverPort: %v\n", configPara.serverPort) + fmt.Printf("usr: %v\n", configPara.user) + fmt.Printf("password: %v\n", configPara.password) + fmt.Printf("dbName: %v\n", configPara.dbName) + fmt.Printf("tablePrefix: %v\n", configPara.tablePrefix) + fmt.Printf("numOftables: %v\n", configPara.numOftables) + fmt.Printf("numOfRecordsPerTable: %v\n", configPara.numOfRecordsPerTable) + fmt.Printf("numOfRecordsPerReq: %v\n", configPara.numOfRecordsPerReq) + fmt.Printf("numOfThreads: %v\n", configPara.numOfThreads) + fmt.Printf("startTimestamp: %v[%v]\n", configPara.startTimestamp, configPara.startTs) + fmt.Printf("================================================\n") +} + +func main() { + printAllArgs() + fmt.Printf("Please press enter key to continue....\n") + fmt.Scanln() + + url = "root:taosdata@/tcp(" + configPara.hostName + ":" + strconv.Itoa(configPara.serverPort) + ")/" + //url = fmt.Sprintf("%s:%s@/tcp(%s:%d)/%s?interpolateParams=true", configPara.user, configPara.password, configPara.hostName, configPara.serverPort, configPara.dbName) + // open connect to taos server + //db, err := sql.Open(taosDriverName, url) + //if err != nil { + // fmt.Println("Open database error: %s\n", err) + // os.Exit(1) + //} + //defer db.Close() + + createDatabase(configPara.dbName, configPara.supTblName) + fmt.Printf("======== create database success! ========\n\n") + + //create_table(db, stblName) + multiThreadCreateTable(configPara.numOfThreads, configPara.numOftables, configPara.dbName, configPara.tablePrefix) + fmt.Printf("======== create super table and child tables success! ========\n\n") + + //insert_data(db, demot) + multiThreadInsertData(configPara.numOfThreads, configPara.numOftables, configPara.dbName, configPara.tablePrefix) + fmt.Printf("======== insert data into child tables success! ========\n\n") + + //select_data(db, demot) + selectTest(configPara.dbName, configPara.tablePrefix, configPara.supTblName) + fmt.Printf("======== select data success! ========\n\n") + + fmt.Printf("======== end demo ========\n") +} + +func createDatabase(dbName string, supTblName string) { + db, err := sql.Open(taosDriverName, url) + if err != nil { + fmt.Println("Open database error: %s\n", err) + os.Exit(1) + } + defer db.Close() + + // drop database if exists + sqlStr := "drop database if exists " + dbName + _, err = db.Exec(sqlStr) + checkErr(err, sqlStr) + + time.Sleep(time.Second) + + // create database + sqlStr = "create database " + dbName + " keep " + strconv.Itoa(configPara.keep) + " days " + strconv.Itoa(configPara.days) + _, err = db.Exec(sqlStr) + checkErr(err, sqlStr) + + // use database + //sqlStr = "use " + dbName + //_, err = db.Exec(sqlStr) + //checkErr(err, sqlStr) + + sqlStr = "create table if not exists " + dbName + "." 
+ supTblName + " (ts timestamp, current float, voltage int, phase float) tags(location binary(64), groupId int);" + _, err = db.Exec(sqlStr) + checkErr(err, sqlStr) +} + +func multiThreadCreateTable(threads int, ntables int, dbName string, tablePrefix string) { + st := time.Now().UnixNano() + + if (threads < 1) { + threads = 1; + } + + a := ntables / threads; + if (a < 1) { + threads = ntables; + a = 1; + } + + b := ntables % threads; + + last := 0; + endTblId := 0 + wg := sync.WaitGroup{} + for i := 0; i < threads; i++ { + startTblId := last + if (i < b ) { + endTblId = last + a + } else { + endTblId = last + a - 1 + } + last = endTblId + 1 + wg.Add(1) + go createTable(dbName, tablePrefix, startTblId, endTblId, &wg) + } + wg.Wait() + + et := time.Now().UnixNano() + fmt.Printf("create tables spent duration: %6.6fs\n", (float32(et-st))/1e9) +} + +func createTable(dbName string, childTblPrefix string, startTblId int, endTblId int, wg *sync.WaitGroup) { + //fmt.Printf("subThread[%d]: create table from %d to %d \n", unix.Gettid(), startTblId, endTblId) + // windows.GetCurrentThreadId() + + db, err := sql.Open(taosDriverName, url) + if err != nil { + fmt.Println("Open database error: %s\n", err) + os.Exit(1) + } + defer db.Close() + + for i := startTblId; i <= endTblId; i++ { + sqlStr := "create table if not exists " + dbName + "." + childTblPrefix + strconv.Itoa(i) + " using " + dbName + ".meters tags('" + locations[i%maxLocationSize] + "', " + strconv.Itoa(i) + ");" + //fmt.Printf("sqlStr: %v\n", sqlStr) + _, err = db.Exec(sqlStr) + checkErr(err, sqlStr) + } + wg.Done() + runtime.Goexit() +} + +func generateRowData(ts int64) string { + voltage := rand.Int() % 1000 + current := 200 + rand.Float32() + phase := rand.Float32() + values := "( " + strconv.FormatInt(ts, 10) + ", " + strconv.FormatFloat(float64(current), 'f', 6, 64) + ", " + strconv.Itoa(voltage) + ", " + strconv.FormatFloat(float64(phase), 'f', 6, 64) + " ) " + return values +} +func insertData(dbName string, childTblPrefix string, startTblId int, endTblId int, wg *sync.WaitGroup) { + //fmt.Printf("subThread[%d]: insert data to table from %d to %d \n", unix.Gettid(), startTblId, endTblId) + // windows.GetCurrentThreadId() + + db, err := sql.Open(taosDriverName, url) + if err != nil { + fmt.Println("Open database error: %s\n", err) + os.Exit(1) + } + defer db.Close() + + tmpTs := configPara.startTs; + //rand.New(rand.NewSource(time.Now().UnixNano())) + for tID := startTblId; tID <= endTblId; tID++{ + totalNum := 0 + for { + sqlStr := "insert into " + dbName + "." 
+ childTblPrefix + strconv.Itoa(tID) + " values " + currRowNum := 0 + for { + tmpTs += 1000 + valuesOfRow := generateRowData(tmpTs) + currRowNum += 1 + totalNum += 1 + + sqlStr = fmt.Sprintf("%s %s", sqlStr, valuesOfRow) + + if (currRowNum >= configPara.numOfRecordsPerReq || totalNum >= configPara.numOfRecordsPerTable) { + break + } + } + + res, err := db.Exec(sqlStr) + checkErr(err, sqlStr) + + count, err := res.RowsAffected() + checkErr(err, "rows affected") + + if (count != int64(currRowNum)) { + fmt.Printf("insert data, expect affected:%d, actual:%d\n", currRowNum, count) + os.Exit(1) + } + + if (totalNum >= configPara.numOfRecordsPerTable) { + break + } + } + } + + wg.Done() + runtime.Goexit() +} +func multiThreadInsertData(threads int, ntables int, dbName string, tablePrefix string) { + st := time.Now().UnixNano() + + if (threads < 1) { + threads = 1; + } + + a := ntables / threads; + if (a < 1) { + threads = ntables; + a = 1; + } + + b := ntables % threads; + + last := 0; + endTblId := 0 + wg := sync.WaitGroup{} + for i := 0; i < threads; i++ { + startTblId := last + if (i < b ) { + endTblId = last + a + } else { + endTblId = last + a - 1 + } + last = endTblId + 1 + wg.Add(1) + go insertData(dbName, tablePrefix, startTblId , endTblId, &wg) + } + wg.Wait() + + et := time.Now().UnixNano() + fmt.Printf("insert data spent duration: %6.6fs\n", (float32(et-st))/1e9) +} +func selectTest(dbName string, tbPrefix string, supTblName string){ + db, err := sql.Open(taosDriverName, url) + if err != nil { + fmt.Println("Open database error: %s\n", err) + os.Exit(1) + } + defer db.Close() + + // select sql 1 + limit := 3 + offset := 0 + sqlStr := "select * from " + dbName + "." + supTblName + " limit " + strconv.Itoa(limit) + " offset " + strconv.Itoa(offset) + rows, err := db.Query(sqlStr) + checkErr(err, sqlStr) + + defer rows.Close() + fmt.Printf("query sql: %s\n", sqlStr) + for rows.Next() { + var ( + ts string + current float32 + voltage int + phase float32 + location string + groupid int + ) + err := rows.Scan(&ts, ¤t, &voltage, &phase, &location, &groupid) + if err != nil { + checkErr(err, "rows scan fail") + } + + fmt.Printf("ts:%s\t current:%f\t voltage:%d\t phase:%f\t location:%s\t groupid:%d\n", ts, current, voltage, phase, location, groupid) + } + // check iteration error + if rows.Err() != nil { + checkErr(err, "rows next iteration error") + } + + // select sql 2 + sqlStr = "select avg(voltage), min(voltage), max(voltage) from " + dbName + "." + tbPrefix + strconv.Itoa( rand.Int() % configPara.numOftables) + rows, err = db.Query(sqlStr) + checkErr(err, sqlStr) + + defer rows.Close() + fmt.Printf("\nquery sql: %s\n", sqlStr) + for rows.Next() { + var ( + voltageAvg float32 + voltageMin int + voltageMax int + ) + err := rows.Scan(&voltageAvg, &voltageMin, &voltageMax) + if err != nil { + checkErr(err, "rows scan fail") + } + + fmt.Printf("avg(voltage):%f\t min(voltage):%d\t max(voltage):%d\n", voltageAvg, voltageMin, voltageMax) + } + // check iteration error + if rows.Err() != nil { + checkErr(err, "rows next iteration error") + } + + // select sql 3 + sqlStr = "select last(*) from " + dbName + "." 
+ supTblName + rows, err = db.Query(sqlStr) + checkErr(err, sqlStr) + + defer rows.Close() + fmt.Printf("\nquery sql: %s\n", sqlStr) + for rows.Next() { + var ( + lastTs string + lastCurrent float32 + lastVoltage int + lastPhase float32 + ) + err := rows.Scan(&lastTs, &lastCurrent, &lastVoltage, &lastPhase) + if err != nil { + checkErr(err, "rows scan fail") + } + + fmt.Printf("last(ts):%s\t last(current):%f\t last(voltage):%d\t last(phase):%f\n", lastTs, lastCurrent, lastVoltage, lastPhase) + } + // check iteration error + if rows.Err() != nil { + checkErr(err, "rows next iteration error") + } +} +func checkErr(err error, prompt string) { + if err != nil { + fmt.Printf("%s\n", prompt) + panic(err) + } +} From 17190c6e3b653fa65c426e9e02ba4162f384378d Mon Sep 17 00:00:00 2001 From: Hui Li Date: Mon, 28 Sep 2020 17:09:47 +0800 Subject: [PATCH 21/42] [del old go demo file] --- tests/examples/go/src/taosapp/taosapp.go | 302 ----------------------- 1 file changed, 302 deletions(-) delete mode 100644 tests/examples/go/src/taosapp/taosapp.go diff --git a/tests/examples/go/src/taosapp/taosapp.go b/tests/examples/go/src/taosapp/taosapp.go deleted file mode 100644 index 30126ea571..0000000000 --- a/tests/examples/go/src/taosapp/taosapp.go +++ /dev/null @@ -1,302 +0,0 @@ -/* - * Copyright (c) 2019 TAOS Data, Inc. - * - * This program is free software: you can use, redistribute, and/or modify - * it under the terms of the GNU Affero General Public License, version 3 - * or later ("AGPL"), as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . 
- */ -package main - -import ( - "database/sql" - "time" - "log" - "fmt" - _ "github.com/taosdata/driver-go/taosSql" -) - -func main() { - taosDriverName := "taosSql" - demodb := "demodb" - demot := "demot" - - fmt.Printf("\n======== start demo test ========\n") - // open connect to taos server - db, err := sql.Open(taosDriverName, "root:taosdata@/tcp(127.0.0.1:0)/") - if err != nil { - log.Fatalf("Open database error: %s\n", err) - } - defer db.Close() - - drop_database(db, demodb) - create_database(db, demodb) - use_database(db, demodb) - create_table(db, demot) - insert_data(db, demot) - select_data(db, demot) - - fmt.Printf("\n======== start stmt mode test ========\n") - - demodbStmt := "demodbStmt" - demotStmt := "demotStmt" - drop_database_stmt(db, demodbStmt) - create_database_stmt(db, demodbStmt) - use_database_stmt(db, demodbStmt) - create_table_stmt(db, demotStmt) - insert_data_stmt(db, demotStmt) - select_data_stmt(db, demotStmt) - - fmt.Printf("\n======== end demo test ========\n") -} - -func drop_database(db *sql.DB, demodb string) { - st := time.Now().Nanosecond() - res, err := db.Exec("drop database if exists " + demodb) - checkErr(err, "drop database if exists " + demodb) - - affectd, err := res.RowsAffected() - checkErr(err, "drop db, res.RowsAffected") - - et := time.Now().Nanosecond() - - fmt.Printf("drop database result:\n %d row(s) affectd (%6.6fs)\n\n", affectd, (float32(et-st))/1E9) -} - -func create_database(db *sql.DB, demodb string) { - st := time.Now().Nanosecond() - // create database - res, err := db.Exec("create database " + demodb) - checkErr(err, "create db, db.Exec") - - affectd, err := res.RowsAffected() - checkErr(err, "create db, res.RowsAffected") - - et := time.Now().Nanosecond() - - fmt.Printf("create database result:\n %d row(s) affectd (%6.6fs)\n\n", affectd, (float32(et-st))/1E9) - - return -} - -func use_database(db *sql.DB, demodb string) { - st := time.Now().Nanosecond() - // use database - res, err := db.Exec("use " + demodb) // notes: must no quote to db name - checkErr(err, "use db db.Exec") - - affectd, err := res.RowsAffected() - checkErr(err, "use db, res.RowsAffected") - - et := time.Now().Nanosecond() - - fmt.Printf("use database result:\n %d row(s) affectd (%6.6fs)\n\n", affectd, (float32(et-st))/1E9) -} - -func create_table(db *sql.DB, demot string) { - st := time.Now().Nanosecond() - // create table - res, err := db.Exec("create table " + demot + " (ts timestamp, id int, name binary(8), len tinyint, flag bool, notes binary(8), fv float, dv double)") - checkErr(err, "create table db.Exec") - - affectd, err := res.RowsAffected() - checkErr(err, "create table res.RowsAffected") - - et := time.Now().Nanosecond() - fmt.Printf("create table result:\n %d row(s) affectd (%6.6fs)\n\n", affectd, (float32(et-st))/1E9) -} - -func insert_data(db *sql.DB, demot string) { - st := time.Now().Nanosecond() - // insert data - res, err := db.Exec("insert into " + demot + - " values (now, 100, 'beijing', 10, true, 'one', 123.456, 123.456)" + - " (now+1s, 101, 'shanghai', 11, true, 'two', 789.123, 789.123)" + - " (now+2s, 102, 'shenzhen', 12, false, 'three', 456.789, 456.789)") - - checkErr(err, "insert data, db.Exec") - - affectd, err := res.RowsAffected() - checkErr(err, "insert data res.RowsAffected") - - et := time.Now().Nanosecond() - fmt.Printf("insert data result:\n %d row(s) affectd (%6.6fs)\n\n", affectd, (float32(et-st))/1E9) -} - -func select_data(db *sql.DB, demot string) { - st := time.Now().Nanosecond() - - rows, err := db.Query("select * 
from ? " , demot) // go text mode - checkErr(err, "select db.Query") - - fmt.Printf("%10s%s%8s %5s %9s%s %s %8s%s %7s%s %8s%s %4s%s %5s%s\n", " ","ts", " ", "id"," ", "name"," ","len", " ","flag"," ", "notes", " ", "fv", " ", " ", "dv") - var affectd int - for rows.Next() { - var ts string - var name string - var id int - var len int8 - var flag bool - var notes string - var fv float32 - var dv float64 - - err = rows.Scan(&ts, &id, &name, &len, &flag, ¬es, &fv, &dv) - checkErr(err, "select rows.Scan") - - fmt.Printf("%s\t", ts) - fmt.Printf("%d\t",id) - fmt.Printf("%10s\t",name) - fmt.Printf("%d\t",len) - fmt.Printf("%t\t",flag) - fmt.Printf("%s\t",notes) - fmt.Printf("%06.3f\t",fv) - fmt.Printf("%09.6f\n",dv) - - affectd++ - } - - et := time.Now().Nanosecond() - fmt.Printf("insert data result:\n %d row(s) affectd (%6.6fs)\n\n", affectd, (float32(et-st))/1E9) - fmt.Printf("insert data result:\n %d row(s) affectd (%6.6fs)\n\n", affectd, (float32(et-st))/1E9) -} - -func drop_database_stmt(db *sql.DB,demodb string) { - st := time.Now().Nanosecond() - // drop test db - res, err := db.Exec("drop database if exists " + demodb) - checkErr(err, "drop database " + demodb) - - affectd, err := res.RowsAffected() - checkErr(err, "drop db, res.RowsAffected") - - et := time.Now().Nanosecond() - fmt.Printf("drop database result:\n %d row(s) affectd (%6.6fs)\n\n", affectd, (float32(et-st))/1E9) -} - -func create_database_stmt(db *sql.DB,demodb string) { - st := time.Now().Nanosecond() - // create database - //var stmt interface{} - stmt, err := db.Prepare("create database ?") - checkErr(err, "create db, db.Prepare") - - //var res driver.Result - res, err := stmt.Exec(demodb) - checkErr(err, "create db, stmt.Exec") - - //fmt.Printf("Query OK, %d row(s) affected()", res.RowsAffected()) - affectd, err := res.RowsAffected() - checkErr(err, "create db, res.RowsAffected") - - et := time.Now().Nanosecond() - fmt.Printf("create database result:\n %d row(s) affectd (%6.6fs)\n\n", affectd, (float32(et-st))/1E9) -} - -func use_database_stmt (db *sql.DB,demodb string) { - st := time.Now().Nanosecond() - // create database - //var stmt interface{} - stmt, err := db.Prepare("use " + demodb) - checkErr(err, "use db, db.Prepare") - - res, err := stmt.Exec() - checkErr(err, "use db, stmt.Exec") - - affectd, err := res.RowsAffected() - checkErr(err, "use db, res.RowsAffected") - - et := time.Now().Nanosecond() - fmt.Printf("use database result:\n %d row(s) affectd (%6.6fs)\n\n", affectd, (float32(et-st))/1E9) -} - -func create_table_stmt (db *sql.DB,demot string) { - st := time.Now().Nanosecond() - // create table - // (ts timestamp, id int, name binary(8), len tinyint, flag bool, notes binary(8), fv float, dv double) - stmt, err := db.Prepare("create table ? (? timestamp, ? int, ? binary(10), ? tinyint, ? bool, ? binary(8), ? float, ? double)") - checkErr(err, "create table db.Prepare") - - res, err := stmt.Exec(demot, "ts", "id", "name", "len", "flag", "notes", "fv", "dv") - checkErr(err, "create table stmt.Exec") - - affectd, err := res.RowsAffected() - checkErr(err, "create table res.RowsAffected") - - et := time.Now().Nanosecond() - fmt.Printf("create table result:\n %d row(s) affectd (%6.6fs)\n\n", affectd, (float32(et-st))/1E9) -} - -func insert_data_stmt(db *sql.DB,demot string) { - st := time.Now().Nanosecond() - // insert data into table - stmt, err := db.Prepare("insert into ? values(?, ?, ?, ?, ?, ?, ?, ?) (?, ?, ?, ?, ?, ?, ?, ?) 
(?, ?, ?, ?, ?, ?, ?, ?)") - checkErr(err, "insert db.Prepare") - - res, err := stmt.Exec(demot, "now" , 1000, "'haidian'" , 6, true, "'AI world'", 6987.654, 321.987, - "now+1s", 1001, "'changyang'" , 7, false, "'DeepMode'", 12356.456, 128634.456, - "now+2s", 1002, "'chuangping'" , 8, true, "'database'", 3879.456, 65433478.456,) - checkErr(err, "insert data, stmt.Exec") - - affectd, err := res.RowsAffected() - checkErr(err, "res.RowsAffected") - - et := time.Now().Nanosecond() - fmt.Printf("insert data result:\n %d row(s) affectd (%6.6fs)\n\n", affectd, (float32(et-st))/1E9) -} - -func select_data_stmt(db *sql.DB, demot string) { - st := time.Now().Nanosecond() - - stmt, err := db.Prepare("select ?, ?, ?, ?, ?, ?, ?, ? from ?" ) // go binary mode - checkErr(err, "db.Prepare") - - rows, err := stmt.Query("ts", "id","name","len", "flag","notes", "fv", "dv", demot) - checkErr(err, "stmt.Query") - - fmt.Printf("%10s%s%8s %5s %8s%s %s %10s%s %7s%s %8s%s %11s%s %14s%s\n", " ","ts", " ", "id"," ", "name"," ","len", " ","flag"," ", "notes", " ", "fv", " ", " ", "dv") - var affectd int - for rows.Next() { - var ts string - var name string - var id int - var len int8 - var flag bool - var notes string - var fv float32 - var dv float64 - - err = rows.Scan(&ts, &id, &name, &len, &flag, ¬es, &fv, &dv) - //fmt.Println("start scan fields from row.rs, &fv:", &fv) - //err = rows.Scan(&fv) - checkErr(err, "rows.Scan") - - fmt.Printf("%s\t", ts) - fmt.Printf("%d\t",id) - fmt.Printf("%10s\t",name) - fmt.Printf("%d\t",len) - fmt.Printf("%t\t",flag) - fmt.Printf("%s\t",notes) - fmt.Printf("%06.3f\t",fv) - fmt.Printf("%09.6f\n",dv) - - affectd++ - - } - - et := time.Now().Nanosecond() - fmt.Printf("insert data result:\n %d row(s) affectd (%6.6fs)\n\n", affectd, (float32(et-st))/1E9) -} - -func checkErr(err error, prompt string) { - if err != nil { - fmt.Printf("%s\n", prompt) - panic(err) - } -} From fed24001bc1a191f3f126a8bd3c61f03db2453fd Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Mon, 28 Sep 2020 18:36:10 +0800 Subject: [PATCH 22/42] TD1530 --- src/plugins/http/src/httpContext.c | 5 +++-- src/util/inc/tcache.h | 2 -- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/src/plugins/http/src/httpContext.c b/src/plugins/http/src/httpContext.c index 106ba9a772..ec60b984b2 100644 --- a/src/plugins/http/src/httpContext.c +++ b/src/plugins/http/src/httpContext.c @@ -118,7 +118,8 @@ HttpContext *httpCreateContext(int32_t fd) { pContext->parser = httpCreateParser(pContext); TSDB_CACHE_PTR_TYPE handleVal = (TSDB_CACHE_PTR_TYPE)pContext; - HttpContext **ppContext = taosCachePut(tsHttpServer.contextCache, &handleVal, TSDB_CACHE_PTR_LEN, &pContext, TSDB_CACHE_PTR_LEN, 3000); + HttpContext **ppContext = taosCachePut(tsHttpServer.contextCache, &handleVal, sizeof(TSDB_CACHE_PTR_TYPE), &pContext, + sizeof(TSDB_CACHE_PTR_TYPE), 3000); pContext->ppContext = ppContext; httpDebug("context:%p, fd:%d, is created, data:%p", pContext, fd, ppContext); @@ -130,7 +131,7 @@ HttpContext *httpCreateContext(int32_t fd) { HttpContext *httpGetContext(void *ptr) { TSDB_CACHE_PTR_TYPE handleVal = (TSDB_CACHE_PTR_TYPE)ptr; - HttpContext **ppContext = taosCacheAcquireByKey(tsHttpServer.contextCache, &handleVal, TSDB_CACHE_PTR_LEN); + HttpContext **ppContext = taosCacheAcquireByKey(tsHttpServer.contextCache, &handleVal, sizeof(TSDB_CACHE_PTR_TYPE)); if (ppContext) { HttpContext *pContext = *ppContext; diff --git a/src/util/inc/tcache.h b/src/util/inc/tcache.h index 6ef02b63d7..efd51f90ce 100644 --- a/src/util/inc/tcache.h +++ 
b/src/util/inc/tcache.h @@ -27,11 +27,9 @@ extern "C" { #if defined(_TD_ARM_32) #define TSDB_CACHE_PTR_KEY TSDB_DATA_TYPE_INT #define TSDB_CACHE_PTR_TYPE int32_t - #define TSDB_CACHE_PTR_LEN sizeof(int32_t) #else #define TSDB_CACHE_PTR_KEY TSDB_DATA_TYPE_BIGINT #define TSDB_CACHE_PTR_TYPE int64_t - #define TSDB_CACHE_PTR_LEN sizeof(int64_t) #endif typedef void (*__cache_free_fn_t)(void*); From 05687d1eba69af601ed9c741cdb332538f92ad76 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Mon, 28 Sep 2020 22:30:34 +0800 Subject: [PATCH 23/42] TD-1530 minor changes --- cmake/define.inc | 2 +- src/client/src/tscServer.c | 2 +- src/common/src/tdataformat.c | 6 +++--- src/os/src/detail/CMakeLists.txt | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/cmake/define.inc b/cmake/define.inc index 5a5a667cd8..7cc8dc7a26 100755 --- a/cmake/define.inc +++ b/cmake/define.inc @@ -57,7 +57,7 @@ IF (TD_LINUX_32) ADD_DEFINITIONS(-D_TD_LINUX_32) ADD_DEFINITIONS(-DUSE_LIBICONV) MESSAGE(STATUS "linux32 is defined") - SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -g -fsigned-char -munaligned-access -fpack-struct=8 -latomic -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE") + SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -g -fsigned-char -munaligned-access -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE") ENDIF () IF (TD_ARM_64) diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c index e3bd30fb71..2e92b67181 100644 --- a/src/client/src/tscServer.c +++ b/src/client/src/tscServer.c @@ -642,7 +642,7 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) { size_t numOfSrcCols = taosArrayGetSize(pQueryInfo->colList); if (numOfSrcCols <= 0 && !tscQueryTags(pQueryInfo)) { - tscError("%p illegal value of numOfCols in query msg: %" PRIu64 ", table cols:%d", pSql, (int64_t)numOfSrcCols, + tscError("%p illegal value of numOfCols in query msg: %" PRIu64 ", table cols:%d", pSql, (uint64_t)numOfSrcCols, tscGetNumOfColumns(pTableMeta)); return TSDB_CODE_TSC_INVALID_SQL; diff --git a/src/common/src/tdataformat.c b/src/common/src/tdataformat.c index f669803263..9c241c5c43 100644 --- a/src/common/src/tdataformat.c +++ b/src/common/src/tdataformat.c @@ -313,13 +313,13 @@ void dataColSetOffset(SDataCol *pCol, int nEle) { SDataCols *tdNewDataCols(int maxRowSize, int maxCols, int maxRows) { SDataCols *pCols = (SDataCols *)calloc(1, sizeof(SDataCols)); if (pCols == NULL) { - uDebug("malloc failure, size:%"PRId64" failed, reason:%s", (int64_t)sizeof(SDataCols), strerror(errno)); + uDebug("malloc failure, size:%" PRId64 " failed, reason:%s", (int64_t)sizeof(SDataCols), strerror(errno)); return NULL; } pCols->cols = (SDataCol *)calloc(maxCols, sizeof(SDataCol)); if (pCols->cols == NULL) { - uDebug("malloc failure, size:%"PRId64" failed, reason:%s", (int64_t)sizeof(SDataCol) * maxCols, strerror(errno)); + uDebug("malloc failure, size:%" PRId64 " failed, reason:%s", (int64_t)sizeof(SDataCol) * maxCols, strerror(errno)); tdFreeDataCols(pCols); return NULL; } @@ -331,7 +331,7 @@ SDataCols *tdNewDataCols(int maxRowSize, int maxCols, int maxRows) { pCols->buf = malloc(pCols->bufSize); if (pCols->buf == NULL) { - uDebug("malloc failure, size:%"PRId64" failed, reason:%s", (int64_t)sizeof(SDataCol) * maxCols, strerror(errno)); + uDebug("malloc failure, size:%" PRId64 " failed, reason:%s", (int64_t)sizeof(SDataCol) * maxCols, strerror(errno)); tdFreeDataCols(pCols); return NULL; } diff --git a/src/os/src/detail/CMakeLists.txt b/src/os/src/detail/CMakeLists.txt index 3c6810fcd5..0d5c130d6e 100644 --- 
a/src/os/src/detail/CMakeLists.txt +++ b/src/os/src/detail/CMakeLists.txt @@ -10,6 +10,6 @@ SET_SOURCE_FILES_PROPERTIES(osCoredump.c PROPERTIES COMPILE_FLAGS -w) ADD_LIBRARY(osdetail ${SRC}) TARGET_LINK_LIBRARIES(osdetail os) -IF (TD_ARM_32) +IF (TD_ARM_32 OR TD_LINUX_32) TARGET_LINK_LIBRARIES(osdetail atomic) ENDIF () From 283d144d1ce0f3aa30fbc08432b4b2edc9f34349 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Mon, 28 Sep 2020 23:57:00 +0800 Subject: [PATCH 24/42] TD-1530 --- src/common/src/ttypes.c | 61 ++++++++++----------------------------- src/common/src/tvariant.c | 58 ++++--------------------------------- src/inc/taosdef.h | 27 +++++++++++------ tests/tsim/src/simExe.c | 22 +++----------- 4 files changed, 44 insertions(+), 124 deletions(-) diff --git a/src/common/src/ttypes.c b/src/common/src/ttypes.c index 45ec20ce45..3efe8cd5ea 100644 --- a/src/common/src/ttypes.c +++ b/src/common/src/ttypes.c @@ -235,15 +235,10 @@ static void getStatics_f(const TSKEY *primaryKey, const void *pData, int32_t num double csum = 0; csum = GET_DOUBLE_VAL(sum); csum += dsum; -#ifdef _TD_ARM_32 - SET_DOUBLE_VAL_ALIGN(sum, &csum); - SET_DOUBLE_VAL_ALIGN(max, &fmax); - SET_DOUBLE_VAL_ALIGN(min, &fmin); -#else - *(double*)sum = csum; - *(double*)max = fmax; - *(double*)min = fmin; -#endif + + SET_DOUBLE_VAL(sum, csum); + SET_DOUBLE_VAL(max, fmax); + SET_DOUBLE_VAL(min, fmin); } static void getStatics_d(const TSKEY *primaryKey, const void *pData, int32_t numOfRow, int64_t *min, int64_t *max, @@ -281,16 +276,9 @@ static void getStatics_d(const TSKEY *primaryKey, const void *pData, int32_t num csum = GET_DOUBLE_VAL(sum); csum += dsum; - -#ifdef _TD_ARM_32 - SET_DOUBLE_VAL_ALIGN(sum, &csum); - SET_DOUBLE_VAL_ALIGN(max, &dmax); - SET_DOUBLE_VAL_ALIGN(min, &dmin); -#else - *(double*) sum = csum; - *(double*) max = dmax; - *(double*) min = dmin; -#endif + SET_DOUBLE_PTR(sum, &csum); + SET_DOUBLE_PTR(max, &dmax); + SET_DOUBLE_PTR(min, &dmin); } static void getStatics_bin(const TSKEY *primaryKey, const void *pData, int32_t numOfRow, int64_t *min, int64_t *max, @@ -493,46 +481,29 @@ void assignVal(char *val, const char *src, int32_t len, int32_t type) { *((int32_t *)val) = GET_INT32_VAL(src); break; } - case TSDB_DATA_TYPE_FLOAT: { -#ifdef _TD_ARM_32 - float fv = GET_FLOAT_VAL(src); - SET_FLOAT_VAL_ALIGN(val, &fv); -#else - *((float *)val) = GET_FLOAT_VAL(src); -#endif + case TSDB_DATA_TYPE_FLOAT: + SET_FLOAT_VAL(val, GET_FLOAT_VAL(src)); break; - }; - case TSDB_DATA_TYPE_DOUBLE: { -#ifdef _TD_ARM_32 - double dv = GET_DOUBLE_VAL(src); - SET_DOUBLE_VAL_ALIGN(val, &dv); -#else - *((double *)val) = GET_DOUBLE_VAL(src); -#endif + case TSDB_DATA_TYPE_DOUBLE: + SET_DOUBLE_VAL(val, GET_DOUBLE_VAL(src)); break; - }; case TSDB_DATA_TYPE_TIMESTAMP: - case TSDB_DATA_TYPE_BIGINT: { + case TSDB_DATA_TYPE_BIGINT: *((int64_t *)val) = GET_INT64_VAL(src); break; - }; - case TSDB_DATA_TYPE_SMALLINT: { + case TSDB_DATA_TYPE_SMALLINT: *((int16_t *)val) = GET_INT16_VAL(src); break; - }; case TSDB_DATA_TYPE_BOOL: - case TSDB_DATA_TYPE_TINYINT: { + case TSDB_DATA_TYPE_TINYINT: *((int8_t *)val) = GET_INT8_VAL(src); break; - }; - case TSDB_DATA_TYPE_BINARY: { + case TSDB_DATA_TYPE_BINARY: varDataCopy(val, src); break; - }; - case TSDB_DATA_TYPE_NCHAR: { + case TSDB_DATA_TYPE_NCHAR: varDataCopy(val, src); break; - }; default: { memcpy(val, src, len); break; diff --git a/src/common/src/tvariant.c b/src/common/src/tvariant.c index ba118d2ccb..005def6dc5 100644 --- a/src/common/src/tvariant.c +++ b/src/common/src/tvariant.c @@ -709,46 
+709,21 @@ int32_t tVariantDump(tVariant *pVariant, char *payload, int16_t type, bool inclu return -1; } -#ifdef _TD_ARM_32 - //memcpy(&payload, &value, sizeof(float)); - float fv = (float)value; - SET_FLOAT_VAL_ALIGN(payload, &fv); -#else - *((float *)payload) = (float)value; -#endif + SET_FLOAT_VAL(payload, value); } } else if (pVariant->nType >= TSDB_DATA_TYPE_BOOL && pVariant->nType <= TSDB_DATA_TYPE_BIGINT) { -#ifdef _TD_ARM_32 - //memcpy(&payload, &pVariant->i64Key, sizeof(float)); - float fv = (float)pVariant->i64Key; - SET_FLOAT_VAL_ALIGN(payload, &fv); -#else - *((float *)payload) = (float)pVariant->i64Key; -#endif + SET_FLOAT_VAL(payload, pVariant->i64Key); } else if (pVariant->nType == TSDB_DATA_TYPE_DOUBLE || pVariant->nType == TSDB_DATA_TYPE_FLOAT) { -#ifdef _TD_ARM_32 - //memcpy(&payload, &pVariant->dKey, sizeof(float)); - float fv = (float)pVariant->dKey; - SET_FLOAT_VAL_ALIGN(payload, &fv); -#else - *((float *)payload) = (float)pVariant->dKey; -#endif + SET_FLOAT_VAL(payload, pVariant->dKey); } else if (pVariant->nType == TSDB_DATA_TYPE_NULL) { *((int32_t *)payload) = TSDB_DATA_FLOAT_NULL; return 0; } -#ifdef _TD_ARM_32 float fv = GET_FLOAT_VAL(payload); if (isinf(fv) || isnan(fv) || fv > FLT_MAX || fv < -FLT_MAX) { return -1; } -#else - if (isinf(*((float *)payload)) || isnan(*((float *)payload)) || *((float *)payload) > FLT_MAX || - *((float *)payload) < -FLT_MAX) { - return -1; - } -#endif break; } case TSDB_DATA_TYPE_DOUBLE: { @@ -765,42 +740,21 @@ int32_t tVariantDump(tVariant *pVariant, char *payload, int16_t type, bool inclu return -1; } -#ifdef _TD_ARM_32 - SET_DOUBLE_VAL_ALIGN(payload, &value); -#else - *((double *)payload) = value; -#endif + SET_DOUBLE_VAL(payload, value); } } else if (pVariant->nType >= TSDB_DATA_TYPE_BOOL && pVariant->nType <= TSDB_DATA_TYPE_BIGINT) { -#ifdef _TD_ARM_32 - double dv = (double)(pVariant->i64Key); - SET_DOUBLE_VAL_ALIGN(payload, &dv); -#else - *((double *)payload) = (double)pVariant->i64Key; -#endif + SET_DOUBLE_VAL(payload, pVariant->i64Key); } else if (pVariant->nType == TSDB_DATA_TYPE_DOUBLE || pVariant->nType == TSDB_DATA_TYPE_FLOAT) { -#ifdef _TD_ARM_32 - double dv = (double)(pVariant->dKey); - SET_DOUBLE_VAL_ALIGN(payload, &dv); -#else - *((double *)payload) = pVariant->dKey; -#endif + SET_DOUBLE_VAL(payload, pVariant->dKey); } else if (pVariant->nType == TSDB_DATA_TYPE_NULL) { *((int64_t *)payload) = TSDB_DATA_DOUBLE_NULL; return 0; } -#ifdef _TD_ARM_32 double dv = GET_DOUBLE_VAL(payload); if (isinf(dv) || isnan(dv) || dv > DBL_MAX || dv < -DBL_MAX) { return -1; } -#else - if (isinf(*((double *)payload)) || isnan(*((double *)payload)) || *((double *)payload) > DBL_MAX || - *((double *)payload) < -DBL_MAX) { - return -1; - } -#endif break; } diff --git a/src/inc/taosdef.h b/src/inc/taosdef.h index f636cde8e5..75d1e39a92 100644 --- a/src/inc/taosdef.h +++ b/src/inc/taosdef.h @@ -132,21 +132,30 @@ do { \ #define GET_INT32_VAL(x) (*(int32_t *)(x)) #define GET_INT64_VAL(x) (*(int64_t *)(x)) #ifdef _TD_ARM_32 - #define GET_FLOAT_VAL(x) taos_align_get_float(x) - #define GET_DOUBLE_VAL(x) taos_align_get_double(x) - - float taos_align_get_float(const char* pBuf); - double taos_align_get_double(const char* pBuf); //#define __float_align_declear() float __underlyFloat = 0.0; //#define __float_align_declear() //#define GET_FLOAT_VAL_ALIGN(x) (*(int32_t*)&(__underlyFloat) = *(int32_t*)(x); __underlyFloat); // notes: src must be float or double type variable !!! 
- #define SET_FLOAT_VAL_ALIGN(dst, src) (*(int32_t*) dst = *(int32_t*)src); - #define SET_DOUBLE_VAL_ALIGN(dst, src) (*(int64_t*) dst = *(int64_t*)src); + //#define SET_FLOAT_VAL_ALIGN(dst, src) (*(int32_t*) dst = *(int32_t*)src); + //#define SET_DOUBLE_VAL_ALIGN(dst, src) (*(int64_t*) dst = *(int64_t*)src); + + float taos_align_get_float(const char* pBuf); + double taos_align_get_double(const char* pBuf); + + #define GET_FLOAT_VAL(x) taos_align_get_float(x) + #define GET_DOUBLE_VAL(x) taos_align_get_double(x) + #define SET_FLOAT_VAL(x, y) { float z = (float)(y); (*(int32_t*) x = *(int32_t*)z); } + #define SET_DOUBLE_VAL(x, y) { double z = (double)(y); (*(int64_t*) x = *(int64_t*)z); } + #define SET_FLOAT_PTR(x, y) { (*(int32_t*) x = *(int32_t*)y); } + #define SET_DOUBLE_PTR(x, y) { (*(int64_t*) x = *(int64_t*)y); } #else - #define GET_FLOAT_VAL(x) (*(float *)(x)) - #define GET_DOUBLE_VAL(x) (*(double *)(x)) + #define GET_FLOAT_VAL(x) (*(float *)(x)) + #define GET_DOUBLE_VAL(x) (*(double *)(x)) + #define SET_FLOAT_VAL(x, y) { (*(float *)(x)) = (float)(y); } + #define SET_DOUBLE_VAL(x, y) { (*(double *)(x)) = (double)(y); } + #define SET_FLOAT_PTR(x, y) { (*(float *)(x)) = (*(float *)(y)); } + #define SET_DOUBLE_PTR(x, y) { (*(double *)(x)) = (*(double *)(y)); } #endif typedef struct tDataTypeDescriptor { diff --git a/tests/tsim/src/simExe.c b/tests/tsim/src/simExe.c index 4bb0cbe86f..4048f58362 100644 --- a/tests/tsim/src/simExe.c +++ b/tests/tsim/src/simExe.c @@ -750,25 +750,11 @@ bool simExecuteNativeSqlCommand(SScript *script, char *rest, bool isSlow) { case TSDB_DATA_TYPE_BIGINT: sprintf(value, "%" PRId64, *((int64_t *)row[i])); break; - case TSDB_DATA_TYPE_FLOAT:{ -#ifdef _TD_ARM_32 - float fv = 0; - *(int32_t*)(&fv) = *(int32_t*)row[i]; - sprintf(value, "%.5f", fv); -#else - sprintf(value, "%.5f", *((float *)row[i])); -#endif - } + case TSDB_DATA_TYPE_FLOAT: + sprintf(value, "%.5f", GET_FLOAT_VAL(row[i])); break; - case TSDB_DATA_TYPE_DOUBLE: { -#ifdef _TD_ARM_32 - double dv = 0; - *(int64_t*)(&dv) = *(int64_t*)row[i]; - sprintf(value, "%.9lf", dv); -#else - sprintf(value, "%.9lf", *((double *)row[i])); -#endif - } + case TSDB_DATA_TYPE_DOUBLE: + sprintf(value, "%.9lf", GET_DOUBLE_VAL(row[i])); break; case TSDB_DATA_TYPE_BINARY: case TSDB_DATA_TYPE_NCHAR: From dcd2eb4bf63383fb8dbdfaed52ae0327cd922523 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Tue, 29 Sep 2020 08:46:22 +0800 Subject: [PATCH 25/42] Update administrator-ch.md --- .../webdocs/markdowndocs/administrator-ch.md | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/documentation20/webdocs/markdowndocs/administrator-ch.md b/documentation20/webdocs/markdowndocs/administrator-ch.md index ee978aa79a..828b91f083 100644 --- a/documentation20/webdocs/markdowndocs/administrator-ch.md +++ b/documentation20/webdocs/markdowndocs/administrator-ch.md @@ -130,9 +130,21 @@ TDengine集群中加入一个新的dnode时,涉及集群相关的一些参数 - statusInterval: dnode向mnode报告状态时长。单位为秒,默认值:1。 - maxTablesPerVnode: 每个vnode中能够创建的最大表个数。默认值:1000000。 - maxVgroupsPerDb: 每个数据库中能够使用的最大vnode个数。 -- arbitrator: 系统中裁决器的end point,缺省为空 +- arbitrator: 系统中裁决器的end point,缺省为空。 - timezone、locale、charset 的配置见客户端配置。 +为方便调试,可通过SQL语句临时调整每个dnode的日志配置,系统重启后会失效: + +```mysql +ALTER DNODE +``` + +- dnode_id: 可以通过SQL语句"SHOW DNODES"命令获取 +- config: 要调整的日志参数,在如下列表中取值 + > resetlog 截断旧日志文件,创建一个新日志文件 + > debugFlag < 131 | 135 | 143 > 设置debugFlag为131、135或者143 + + ## 客户端配置 TDengine系统的前台交互客户端应用程序为taos,它与taosd共享同一个配置文件taos.cfg。运行taos时,使用参数-c指定配置文件目录,如taos -c 
/home/cfg,表示使用/home/cfg/目录下的taos.cfg配置文件中的参数,缺省目录是/etc/taos。本节主要说明 taos 客户端应用在配置文件 taos.cfg 文件中使用到的参数。 @@ -392,5 +404,5 @@ TDengine的所有可执行文件默认存放在 _/usr/local/taos/bin_ 目录下 您可以通过修改系统配置文件taos.cfg来配置不同的数据目录和日志目录。 - +## From f191236d958dddabe2c50b3f7fdaa095b252a7b4 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Tue, 29 Sep 2020 08:49:55 +0800 Subject: [PATCH 26/42] Update administrator-ch.md --- documentation20/webdocs/markdowndocs/administrator-ch.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/documentation20/webdocs/markdowndocs/administrator-ch.md b/documentation20/webdocs/markdowndocs/administrator-ch.md index 828b91f083..d1ad107db6 100644 --- a/documentation20/webdocs/markdowndocs/administrator-ch.md +++ b/documentation20/webdocs/markdowndocs/administrator-ch.md @@ -144,6 +144,10 @@ ALTER DNODE > resetlog 截断旧日志文件,创建一个新日志文件 > debugFlag < 131 | 135 | 143 > 设置debugFlag为131、135或者143 +例如: +``` + alter dnode 1 debugFlag 135; +``` ## 客户端配置 From 66b25f1a3f5c90179002d8d107e6d7df5a6c035a Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Tue, 29 Sep 2020 08:50:33 +0800 Subject: [PATCH 27/42] Update administrator-ch.md From 226c826a54fd8ba4abfcedf71a1203f379cef319 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Tue, 29 Sep 2020 08:55:59 +0800 Subject: [PATCH 28/42] Update faq-ch.md --- documentation20/webdocs/markdowndocs/faq-ch.md | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/documentation20/webdocs/markdowndocs/faq-ch.md b/documentation20/webdocs/markdowndocs/faq-ch.md index 7bbf7531c8..a47f315490 100644 --- a/documentation20/webdocs/markdowndocs/faq-ch.md +++ b/documentation20/webdocs/markdowndocs/faq-ch.md @@ -108,4 +108,8 @@ Connection = DriverManager.getConnection(url, properties); 附上必要的问题描述,以及发生该问题的执行操作,出现问题的表征及大概的时间,在 GitHub提交Issue。 -为了保证有足够的debug信息,如果问题能够重复,请修改/etc/taos/taos.cfg文件,最后面添加一行“debugFlag 135"(不带引号本身),然后重启taosd, 重复问题,然后再递交。但系统正常运行时,请一定将debugFlag设置为131,否则会产生大量的日志信息,降低系统效率。 +为了保证有足够的debug信息,如果问题能够重复,请修改/etc/taos/taos.cfg文件,最后面添加一行“debugFlag 135"(不带引号本身),然后重启taosd, 重复问题,然后再递交。也可以通过执行SQL语句 +``` + alter dnode debugFlag 135; +``` +临时设置taosd的日志级别。但系统正常运行时,请一定将debugFlag设置为131,否则会产生大量的日志信息,降低系统效率。 From 21792262f5bbcf14d5f3188c85e194a79a2243ab Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Tue, 29 Sep 2020 01:23:14 +0000 Subject: [PATCH 29/42] TD-1617 --- src/sync/src/syncMain.c | 4 ++-- src/sync/test/syncServer.c | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/sync/src/syncMain.c b/src/sync/src/syncMain.c index c7b8959926..0daf0b9620 100644 --- a/src/sync/src/syncMain.c +++ b/src/sync/src/syncMain.c @@ -491,7 +491,7 @@ static SSyncPeer *syncAddPeer(SSyncNode *pNode, const SNodeInfo *pInfo) { uint32_t ip = taosGetIpFromFqdn(pInfo->nodeFqdn); if (ip == -1) return NULL; - SSyncPeer *pPeer = (SSyncPeer *)calloc(1, sizeof(SSyncPeer)); + SSyncPeer *pPeer = calloc(1, sizeof(SSyncPeer)); if (pPeer == NULL) return NULL; pPeer->nodeId = pInfo->nodeId; @@ -499,7 +499,7 @@ static SSyncPeer *syncAddPeer(SSyncNode *pNode, const SNodeInfo *pInfo) { pPeer->ip = ip; pPeer->port = pInfo->nodePort; pPeer->fqdn[sizeof(pPeer->fqdn) - 1] = 0; - snprintf(pPeer->id, sizeof(pPeer->id), "vgId:%d peer:%s:%d", pNode->vgId, pPeer->fqdn, pPeer->port); + snprintf(pPeer->id, sizeof(pPeer->id), "vgId:%d peer:%s:%u", pNode->vgId, pPeer->fqdn, pPeer->port); pPeer->peerFd = -1; pPeer->syncFd = -1; diff --git a/src/sync/test/syncServer.c b/src/sync/test/syncServer.c index aa07779845..380b971fa8 100644 --- a/src/sync/test/syncServer.c +++ 
b/src/sync/test/syncServer.c @@ -109,7 +109,6 @@ int processRpcMsg(void *item) { if (pCfg->quorum <= 1) { rpcFreeCont(pMsg->pCont); - taosFreeQitem(item); SRpcMsg rpcMsg = {0}; rpcMsg.pCont = rpcMallocCont(msgSize); @@ -117,6 +116,7 @@ int processRpcMsg(void *item) { rpcMsg.handle = pMsg->handle; rpcMsg.code = code; rpcSendResponse(&rpcMsg); + taosFreeQitem(item); } return code; From c2446494ed1753168e943bddabb276fd8b8b046f Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Tue, 29 Sep 2020 09:44:36 +0800 Subject: [PATCH 30/42] TD-1530 --- src/client/src/tscServer.c | 5 ++--- src/client/src/tscSystem.c | 2 +- src/client/src/tscUtil.c | 4 ++-- src/mnode/src/mnodeShow.c | 10 +++++----- src/query/src/qExecutor.c | 12 ++++++------ 5 files changed, 16 insertions(+), 17 deletions(-) diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c index 2e92b67181..d3a16a5d6d 100644 --- a/src/client/src/tscServer.c +++ b/src/client/src/tscServer.c @@ -234,9 +234,8 @@ int tscSendMsgToServer(SSqlObj *pSql) { } void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) { - uint64_t handle = (uint64_t) rpcMsg->ahandle; - - void** p = taosCacheAcquireByKey(tscObjCache, &handle, sizeof(uint64_t)); + TSDB_CACHE_PTR_TYPE handle = (TSDB_CACHE_PTR_TYPE) rpcMsg->ahandle; + void** p = taosCacheAcquireByKey(tscObjCache, &handle, sizeof(TSDB_CACHE_PTR_TYPE)); if (p == NULL) { rpcFreeCont(rpcMsg->pCont); return; diff --git a/src/client/src/tscSystem.c b/src/client/src/tscSystem.c index 620fe13a9f..85c8a57058 100644 --- a/src/client/src/tscSystem.c +++ b/src/client/src/tscSystem.c @@ -141,7 +141,7 @@ void taos_init_imp(void) { int64_t refreshTime = 10; // 10 seconds by default if (tscMetaCache == NULL) { tscMetaCache = taosCacheInit(TSDB_DATA_TYPE_BINARY, refreshTime, false, NULL, "tableMeta"); - tscObjCache = taosCacheInit(TSDB_DATA_TYPE_BIGINT, refreshTime/2, false, tscFreeSqlObjInCache, "sqlObj"); + tscObjCache = taosCacheInit(TSDB_CACHE_PTR_KEY, refreshTime / 2, false, tscFreeSqlObjInCache, "sqlObj"); } tscDebug("client is initialized successfully"); diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index a98b0272b6..d00b39e68b 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -1788,8 +1788,8 @@ void registerSqlObj(SSqlObj* pSql) { int32_t ref = T_REF_INC(pSql->pTscObj); tscDebug("%p add to tscObj:%p, ref:%d", pSql, pSql->pTscObj, ref); - uint64_t p = (uint64_t) pSql; - pSql->self = taosCachePut(tscObjCache, &p, sizeof(uint64_t), &p, sizeof(uint64_t), DEFAULT_LIFE_TIME); + TSDB_CACHE_PTR_TYPE p = (TSDB_CACHE_PTR_TYPE) pSql; + pSql->self = taosCachePut(tscObjCache, &p, sizeof(TSDB_CACHE_PTR_TYPE), &p, sizeof(TSDB_CACHE_PTR_TYPE), DEFAULT_LIFE_TIME); } SSqlObj* createSimpleSubObj(SSqlObj* pSql, void (*fp)(), void* param, int32_t cmd) { diff --git a/src/mnode/src/mnodeShow.c b/src/mnode/src/mnodeShow.c index f2caf30564..e587758e46 100644 --- a/src/mnode/src/mnodeShow.c +++ b/src/mnode/src/mnodeShow.c @@ -65,7 +65,7 @@ int32_t mnodeInitShow() { mnodeAddReadMsgHandle(TSDB_MSG_TYPE_CM_CONNECT, mnodeProcessConnectMsg); mnodeAddReadMsgHandle(TSDB_MSG_TYPE_CM_USE_DB, mnodeProcessUseMsg); - tsMnodeShowCache = taosCacheInit(TSDB_DATA_TYPE_BIGINT, 5, true, mnodeFreeShowObj, "show"); + tsMnodeShowCache = taosCacheInit(TSDB_CACHE_PTR_KEY, 5, true, mnodeFreeShowObj, "show"); return 0; } @@ -378,8 +378,8 @@ static bool mnodeCheckShowFinished(SShowObj *pShow) { } static bool mnodeAccquireShowObj(SShowObj *pShow) { - uint64_t handleVal = (uint64_t)pShow; - SShowObj **ppShow = 
taosCacheAcquireByKey(tsMnodeShowCache, &handleVal, sizeof(int64_t)); + TSDB_CACHE_PTR_TYPE handleVal = (TSDB_CACHE_PTR_TYPE)pShow; + SShowObj **ppShow = taosCacheAcquireByKey(tsMnodeShowCache, &handleVal, sizeof(TSDB_CACHE_PTR_TYPE)); if (ppShow) { mDebug("%p, show is accquired from cache, data:%p, index:%d", pShow, ppShow, pShow->index); return true; @@ -393,8 +393,8 @@ static void* mnodePutShowObj(SShowObj *pShow) { if (tsMnodeShowCache != NULL) { pShow->index = atomic_add_fetch_32(&tsShowObjIndex, 1); - uint64_t handleVal = (uint64_t)pShow; - SShowObj **ppShow = taosCachePut(tsMnodeShowCache, &handleVal, sizeof(int64_t), &pShow, sizeof(int64_t), DEFAULT_SHOWHANDLE_LIFE_SPAN); + TSDB_CACHE_PTR_TYPE handleVal = (TSDB_CACHE_PTR_TYPE)pShow; + SShowObj **ppShow = taosCachePut(tsMnodeShowCache, &handleVal, sizeof(TSDB_CACHE_PTR_TYPE), &pShow, sizeof(TSDB_CACHE_PTR_TYPE), DEFAULT_SHOWHANDLE_LIFE_SPAN); pShow->ppShow = (void**)ppShow; mDebug("%p, show is put into cache, data:%p index:%d", pShow, ppShow, pShow->index); return pShow; diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index d4c1c8560b..52765260f5 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -7032,7 +7032,7 @@ void* qOpenQueryMgmt(int32_t vgId) { return NULL; } - pQueryMgmt->qinfoPool = taosCacheInit(TSDB_DATA_TYPE_BIGINT, REFRESH_HANDLE_INTERVAL, true, freeqinfoFn, cacheName); + pQueryMgmt->qinfoPool = taosCacheInit(TSDB_CACHE_PTR_KEY, REFRESH_HANDLE_INTERVAL, true, freeqinfoFn, cacheName); pQueryMgmt->closed = false; pQueryMgmt->vgId = vgId; @@ -7101,23 +7101,23 @@ void** qRegisterQInfo(void* pMgmt, uint64_t qInfo) { qError("QInfo:%p failed to add qhandle into cache, since qMgmt is colsing", (void *)qInfo); return NULL; } else { - uint64_t handleVal = (uint64_t) qInfo; - - void** handle = taosCachePut(pQueryMgmt->qinfoPool, &handleVal, sizeof(int64_t), &qInfo, POINTER_BYTES, DEFAULT_QHANDLE_LIFE_SPAN); + TSDB_CACHE_PTR_TYPE handleVal = (TSDB_CACHE_PTR_TYPE) qInfo; + void** handle = taosCachePut(pQueryMgmt->qinfoPool, &handleVal, sizeof(TSDB_CACHE_PTR_TYPE), &qInfo, sizeof(TSDB_CACHE_PTR_TYPE), DEFAULT_QHANDLE_LIFE_SPAN); // pthread_mutex_unlock(&pQueryMgmt->lock); return handle; } } -void** qAcquireQInfo(void* pMgmt, uint64_t key) { +void** qAcquireQInfo(void* pMgmt, uint64_t _key) { SQueryMgmt *pQueryMgmt = pMgmt; if (pQueryMgmt->qinfoPool == NULL || pQueryMgmt->closed) { return NULL; } - void** handle = taosCacheAcquireByKey(pQueryMgmt->qinfoPool, &key, sizeof(uint64_t)); + TSDB_CACHE_PTR_TYPE key = (TSDB_CACHE_PTR_TYPE)_key; + void** handle = taosCacheAcquireByKey(pQueryMgmt->qinfoPool, &key, sizeof(TSDB_CACHE_PTR_TYPE)); if (handle == NULL || *handle == NULL) { return NULL; } else { From f9c9699ec93fdcd8c416c835ff0f7dafbaf7a309 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Tue, 29 Sep 2020 09:46:42 +0800 Subject: [PATCH 31/42] Update faq-ch.md --- documentation20/webdocs/markdowndocs/faq-ch.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/documentation20/webdocs/markdowndocs/faq-ch.md b/documentation20/webdocs/markdowndocs/faq-ch.md index a47f315490..6a4d4be118 100644 --- a/documentation20/webdocs/markdowndocs/faq-ch.md +++ b/documentation20/webdocs/markdowndocs/faq-ch.md @@ -108,8 +108,8 @@ Connection = DriverManager.getConnection(url, properties); 附上必要的问题描述,以及发生该问题的执行操作,出现问题的表征及大概的时间,在 GitHub提交Issue。 -为了保证有足够的debug信息,如果问题能够重复,请修改/etc/taos/taos.cfg文件,最后面添加一行“debugFlag 135"(不带引号本身),然后重启taosd, 重复问题,然后再递交。也可以通过执行SQL语句 
+为了保证有足够的debug信息,如果问题能够重复,请修改/etc/taos/taos.cfg文件,最后面添加一行“debugFlag 135"(不带引号本身),然后重启taosd, 重复问题,然后再递交。也可以通过如下SQL语句,临时设置taosd的日志级别。 ``` alter dnode debugFlag 135; ``` -临时设置taosd的日志级别。但系统正常运行时,请一定将debugFlag设置为131,否则会产生大量的日志信息,降低系统效率。 +但系统正常运行时,请一定将debugFlag设置为131,否则会产生大量的日志信息,降低系统效率。 From b1ac114510bc6d1c19da8c15429914002b479113 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Tue, 29 Sep 2020 10:26:50 +0800 Subject: [PATCH 32/42] TD-1530 --- src/inc/taosdef.h | 4 ++-- src/plugins/http/src/httpGcJson.c | 6 +++--- src/plugins/http/src/httpRestJson.c | 4 ++-- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/src/inc/taosdef.h b/src/inc/taosdef.h index 75d1e39a92..115630d1a5 100644 --- a/src/inc/taosdef.h +++ b/src/inc/taosdef.h @@ -145,8 +145,8 @@ do { \ #define GET_FLOAT_VAL(x) taos_align_get_float(x) #define GET_DOUBLE_VAL(x) taos_align_get_double(x) - #define SET_FLOAT_VAL(x, y) { float z = (float)(y); (*(int32_t*) x = *(int32_t*)z); } - #define SET_DOUBLE_VAL(x, y) { double z = (double)(y); (*(int64_t*) x = *(int64_t*)z); } + #define SET_FLOAT_VAL(x, y) { float z = (float)(y); (*(int32_t*) x = *(int32_t*)(&z)); } + #define SET_DOUBLE_VAL(x, y) { double z = (double)(y); (*(int64_t*) x = *(int64_t*)(&z)); } #define SET_FLOAT_PTR(x, y) { (*(int32_t*) x = *(int32_t*)y); } #define SET_DOUBLE_PTR(x, y) { (*(int64_t*) x = *(int64_t*)y); } #else diff --git a/src/plugins/http/src/httpGcJson.c b/src/plugins/http/src/httpGcJson.c index fde8ae2176..d50a38201f 100644 --- a/src/plugins/http/src/httpGcJson.c +++ b/src/plugins/http/src/httpGcJson.c @@ -148,10 +148,10 @@ bool gcBuildQueryJson(HttpContext *pContext, HttpSqlCmd *cmd, TAOS_RES *result, len += snprintf(target + len, HTTP_GC_TARGET_SIZE - len, "%s:%" PRId64, fields[i].name, *((int64_t *)row[i])); break; case TSDB_DATA_TYPE_FLOAT: - len += snprintf(target + len, HTTP_GC_TARGET_SIZE - len, "%s:%.5f", fields[i].name, *((float *)row[i])); + len += snprintf(target + len, HTTP_GC_TARGET_SIZE - len, "%s:%.5f", fields[i].name, GET_FLOAT_VAL(row[i])); break; case TSDB_DATA_TYPE_DOUBLE: - len += snprintf(target + len, HTTP_GC_TARGET_SIZE - len, "%s:%.9f", fields[i].name, *((double *)row[i])); + len += snprintf(target + len, HTTP_GC_TARGET_SIZE - len, "%s:%.9f", fields[i].name, GET_DOUBLE_VAL(row[i])); break; case TSDB_DATA_TYPE_BINARY: case TSDB_DATA_TYPE_NCHAR: @@ -213,7 +213,7 @@ bool gcBuildQueryJson(HttpContext *pContext, HttpSqlCmd *cmd, TAOS_RES *result, httpJsonFloat(jsonBuf, *((float *)row[i])); break; case TSDB_DATA_TYPE_DOUBLE: - httpJsonDouble(jsonBuf, *((double *)row[i])); + httpJsonDouble(jsonBuf, GET_DOUBLE_VAL(row[i])); break; case TSDB_DATA_TYPE_BINARY: case TSDB_DATA_TYPE_NCHAR: diff --git a/src/plugins/http/src/httpRestJson.c b/src/plugins/http/src/httpRestJson.c index f8912331a3..954678c24c 100644 --- a/src/plugins/http/src/httpRestJson.c +++ b/src/plugins/http/src/httpRestJson.c @@ -124,10 +124,10 @@ bool restBuildSqlJson(HttpContext *pContext, HttpSqlCmd *cmd, TAOS_RES *result, httpJsonInt64(jsonBuf, *((int64_t *)row[i])); break; case TSDB_DATA_TYPE_FLOAT: - httpJsonFloat(jsonBuf, *((float *)row[i])); + httpJsonFloat(jsonBuf, GET_FLOAT_VAL(row[i])); break; case TSDB_DATA_TYPE_DOUBLE: - httpJsonDouble(jsonBuf, *((double *)row[i])); + httpJsonDouble(jsonBuf, GET_DOUBLE_VAL(row[i])); break; case TSDB_DATA_TYPE_BINARY: case TSDB_DATA_TYPE_NCHAR: From 531ef740e8d079b971e0a35da7051248ff9dfe05 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Tue, 29 Sep 2020 10:32:00 +0800 Subject: [PATCH 33/42] TD-1530 --- 
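Note (applies to PATCH 32/42 and this patch): float and double cells in the HTTP JSON builders are now read through GET_FLOAT_VAL / GET_DOUBLE_VAL instead of dereferencing row[i] directly, and PATCH 32/42 also fixes SET_FLOAT_VAL / SET_DOUBLE_VAL to copy through the address of the temporary rather than casting its value to a pointer. A minimal sketch of the underlying idea, byte-wise copies for values that may not be naturally aligned, is shown below; the read_*_unaligned helpers are illustrative only and are not the macros defined in src/inc/taosdef.h.

#include <stdio.h>
#include <string.h>

/* Hypothetical helpers (not the TDengine macros): read a float/double that
 * may not be naturally aligned by copying its bytes into a local variable. */
static float read_float_unaligned(const void *p) {
  float v;
  memcpy(&v, p, sizeof(v));
  return v;
}

static double read_double_unaligned(const void *p) {
  double v;
  memcpy(&v, p, sizeof(v));
  return v;
}

int main(void) {
  unsigned char buf[1 + sizeof(double)];
  double d = 220.5;
  float  f = 3.5f;

  /* place the double at an odd offset so buf + 1 is misaligned */
  memcpy(buf + 1, &d, sizeof(d));

  /* *(double *)(buf + 1) may fault or be slow on strict-alignment targets
   * such as some ARM/MIPS builds; the byte-wise copy is always well-defined */
  printf("%.1f\n", read_double_unaligned(buf + 1));
  printf("%.1f\n", read_float_unaligned(&f));
  return 0;
}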
src/plugins/http/src/httpGcJson.c | 2 +- tests/tsim/src/simExe.c | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/plugins/http/src/httpGcJson.c b/src/plugins/http/src/httpGcJson.c index d50a38201f..80e4ae3bc2 100644 --- a/src/plugins/http/src/httpGcJson.c +++ b/src/plugins/http/src/httpGcJson.c @@ -210,7 +210,7 @@ bool gcBuildQueryJson(HttpContext *pContext, HttpSqlCmd *cmd, TAOS_RES *result, httpJsonInt64(jsonBuf, *((int64_t *)row[i])); break; case TSDB_DATA_TYPE_FLOAT: - httpJsonFloat(jsonBuf, *((float *)row[i])); + httpJsonFloat(jsonBuf, GET_FLOAT_VAL(row[i])); break; case TSDB_DATA_TYPE_DOUBLE: httpJsonDouble(jsonBuf, GET_DOUBLE_VAL(row[i])); diff --git a/tests/tsim/src/simExe.c b/tests/tsim/src/simExe.c index 4048f58362..7f786dfaa9 100644 --- a/tests/tsim/src/simExe.c +++ b/tests/tsim/src/simExe.c @@ -739,13 +739,13 @@ bool simExecuteNativeSqlCommand(SScript *script, char *rest, bool isSlow) { ((((int)(*((char *)row[i]))) == 1) ? "1" : "0")); break; case TSDB_DATA_TYPE_TINYINT: - sprintf(value, "%d", (int)(*((char *)row[i]))); + sprintf(value, "%d", *((int8_t *)row[i])); break; case TSDB_DATA_TYPE_SMALLINT: - sprintf(value, "%d", (int)(*((short *)row[i]))); + sprintf(value, "%d", *((int16_t *)row[i])); break; case TSDB_DATA_TYPE_INT: - sprintf(value, "%d", *((int *)row[i])); + sprintf(value, "%d", *((int32_t *)row[i])); break; case TSDB_DATA_TYPE_BIGINT: sprintf(value, "%" PRId64, *((int64_t *)row[i])); From fe1cbc31af1eccbbdf572bb78b0ff50f2c9a61d5 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Tue, 29 Sep 2020 10:39:19 +0800 Subject: [PATCH 34/42] Update tsdbRead.c --- src/tsdb/src/tsdbRead.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/tsdb/src/tsdbRead.c b/src/tsdb/src/tsdbRead.c index 1db2f20a42..a3bc0de272 100644 --- a/src/tsdb/src/tsdbRead.c +++ b/src/tsdb/src/tsdbRead.c @@ -2551,7 +2551,7 @@ int32_t tsdbQuerySTableByTagCond(TSDB_REPO_T* tsdb, uint64_t uid, TSKEY skey, co } CATCH( code ) { CLEANUP_EXECUTE(); terrno = code; - //tsdbUnlockRepoMeta(tsdb); // unlock tsdb in any cases + tsdbUnlockRepoMeta(tsdb); // unlock tsdb in any cases goto _error; // TODO: more error handling From 2f346de95ebd7c4d0186cbfa8a2cf657ed516182 Mon Sep 17 00:00:00 2001 From: Hongze Cheng Date: Tue, 29 Sep 2020 03:16:47 +0000 Subject: [PATCH 35/42] Fix TD-1635 --- src/inc/taoserror.h | 1 + src/mnode/src/mnodeSdb.c | 6 ++++- src/wal/src/walMain.c | 48 ++++++++++++++++++++++++++++++++-------- 3 files changed, 45 insertions(+), 10 deletions(-) diff --git a/src/inc/taoserror.h b/src/inc/taoserror.h index 67e2d43c98..17b2d24e90 100644 --- a/src/inc/taoserror.h +++ b/src/inc/taoserror.h @@ -246,6 +246,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_SYN_NOT_ENABLED, 0, 0x0901, "Sync modul // wal TAOS_DEFINE_ERROR(TSDB_CODE_WAL_APP_ERROR, 0, 0x1000, "Unexpected generic error in wal") +TAOS_DEFINE_ERROR(TSDB_CODE_WAL_FILE_CORRUPTED, 0, 0x1001, "WAL file is corrupted") // http TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_SERVER_OFFLINE, 0, 0x1100, "http server is not onlin") diff --git a/src/mnode/src/mnodeSdb.c b/src/mnode/src/mnodeSdb.c index 646c17b2b8..4c672eb557 100644 --- a/src/mnode/src/mnodeSdb.c +++ b/src/mnode/src/mnodeSdb.c @@ -185,7 +185,11 @@ static int32_t sdbInitWal() { } sdbInfo("open sdb wal for restore"); - walRestore(tsSdbObj.wal, NULL, sdbWrite); + int code = walRestore(tsSdbObj.wal, NULL, sdbWrite); + if (code != TSDB_CODE_SUCCESS) { + sdbError("failed to open wal for restore, reason:%s", tstrerror(code)); + return -1; + } return 0; } diff --git 
a/src/wal/src/walMain.c b/src/wal/src/walMain.c index bebad69f32..4987ba2116 100644 --- a/src/wal/src/walMain.c +++ b/src/wal/src/walMain.c @@ -347,9 +347,10 @@ static void walRelease(SWal *pWal) { static int walRestoreWalFile(SWal *pWal, void *pVnode, FWalWrite writeFp) { char *name = pWal->name; + int size = 1024 * 1024; // default 1M buffer size terrno = 0; - char *buffer = malloc(1024000); // size for one record + char *buffer = malloc(size); if (buffer == NULL) { terrno = TAOS_SYSTEM_ERROR(errno); return terrno; @@ -357,7 +358,7 @@ static int walRestoreWalFile(SWal *pWal, void *pVnode, FWalWrite writeFp) { SWalHead *pHead = (SWalHead *)buffer; - int fd = open(name, O_RDONLY); + int fd = open(name, O_RDWR); if (fd < 0) { wError("wal:%s, failed to open for restore(%s)", name, strerror(errno)); terrno = TAOS_SYSTEM_ERROR(errno); @@ -367,29 +368,58 @@ static int walRestoreWalFile(SWal *pWal, void *pVnode, FWalWrite writeFp) { wDebug("wal:%s, start to restore", name); + size_t offset = 0; while (1) { int ret = taosTRead(fd, pHead, sizeof(SWalHead)); - if ( ret == 0) break; + if (ret == 0) break; - if (ret != sizeof(SWalHead)) { - wWarn("wal:%s, failed to read head, skip, ret:%d(%s)", name, ret, strerror(errno)); + if (ret < 0) { + wError("wal:%s, failed to read wal head part since %s", name, strerror(errno)); terrno = TAOS_SYSTEM_ERROR(errno); break; } + if (ret < sizeof(SWalHead)) { + wError("wal:%s, failed to read head, ret:%d, skip the rest of file", name, ret); + taosFtruncate(fd, offset); + fsync(fd); + break; + } + if (!taosCheckChecksumWhole((uint8_t *)pHead, sizeof(SWalHead))) { wWarn("wal:%s, cksum is messed up, skip the rest of file", name); - terrno = TAOS_SYSTEM_ERROR(errno); + terrno = TSDB_CODE_WAL_FILE_CORRUPTED; + ASSERT(false); break; - } + } + + if (pHead->len > size - sizeof(SWalHead)) { + size = sizeof(SWalHead) + pHead->len; + buffer = realloc(buffer, size); + if (buffer == NULL) { + terrno = TAOS_SYSTEM_ERROR(errno); + break; + } + + pHead = (SWalHead *)buffer; + } ret = taosTRead(fd, pHead->cont, pHead->len); - if ( ret != pHead->len) { - wWarn("wal:%s, failed to read body, skip, len:%d ret:%d", name, pHead->len, ret); + if (ret < 0) { + wError("wal:%s failed to read wal body part since %s", name, strerror(errno)); terrno = TAOS_SYSTEM_ERROR(errno); break; } + if (ret < pHead->len) { + wError("wal:%s, failed to read body, len:%d ret:%d, skip the rest of file", name, pHead->len, ret); + taosFtruncate(fd, offset); + fsync(fd); + break; + } + + offset = offset + sizeof(SWalHead) + pHead->len; + if (pWal->keep) pWal->version = pHead->version; (*writeFp)(pVnode, pHead, TAOS_QTYPE_WAL); } From 5f97a7b40004d1809511b9abb63a2da31e50a220 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Tue, 29 Sep 2020 13:43:17 +0800 Subject: [PATCH 36/42] TD-1530 --- cmake/define.inc | 3 +++ 1 file changed, 3 insertions(+) diff --git a/cmake/define.inc b/cmake/define.inc index 7cc8dc7a26..6e64c2709a 100755 --- a/cmake/define.inc +++ b/cmake/define.inc @@ -72,18 +72,21 @@ ENDIF () IF (TD_ARM_32) ADD_DEFINITIONS(-D_TD_ARM_32) ADD_DEFINITIONS(-D_TD_ARM_) + ADD_DEFINITIONS(-DUSE_LIBICONV) MESSAGE(STATUS "arm32 is defined") SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -g -fsigned-char -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE -Wno-pointer-to-int-cast -Wno-int-to-pointer-cast -Wno-incompatible-pointer-types ") ENDIF () IF (TD_MIPS_64) ADD_DEFINITIONS(-D_TD_MIPS_64_) + ADD_DEFINITIONS(-DUSE_LIBICONV) MESSAGE(STATUS "mips64 is defined") SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror 
-fPIC -g3 -gdwarf-2 -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE") ENDIF () IF (TD_MIPS_32) ADD_DEFINITIONS(-D_TD_MIPS_32_) + ADD_DEFINITIONS(-DUSE_LIBICONV) MESSAGE(STATUS "mips32 is defined") SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -g3 -gdwarf-2 -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE") ENDIF () From 7beef60c643646e1085dac0780b3d230a93e46d8 Mon Sep 17 00:00:00 2001 From: zyyang-taosdata Date: Tue, 29 Sep 2020 14:04:16 +0800 Subject: [PATCH 37/42] [TD-1313]: implement getColumns method in the TSDBDatabaseMetadata and update the next method in DatabaseMetaDataResultSet --- src/connector/jdbc/pom.xml | 1 + .../jdbc/DatabaseMetaDataResultSet.java | 24 +- .../taosdata/jdbc/TSDBDatabaseMetaData.java | 1354 +++++++++-------- 3 files changed, 777 insertions(+), 602 deletions(-) diff --git a/src/connector/jdbc/pom.xml b/src/connector/jdbc/pom.xml index 36e2fa426b..99409fe277 100755 --- a/src/connector/jdbc/pom.xml +++ b/src/connector/jdbc/pom.xml @@ -55,6 +55,7 @@ 4.13 test + diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/DatabaseMetaDataResultSet.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/DatabaseMetaDataResultSet.java index 86938031f6..f82c064e75 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/DatabaseMetaDataResultSet.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/DatabaseMetaDataResultSet.java @@ -67,14 +67,23 @@ public class DatabaseMetaDataResultSet implements ResultSet { @Override public boolean next() throws SQLException { +// boolean ret = false; +// if (rowDataList.size() > 0) { +// ret = rowDataList.iterator().hasNext(); +// if (ret) { +// rowCursor = rowDataList.iterator().next(); +// cursorRowNumber++; +// } +// } +// return ret; + + /**** add by zyyang 2020-09-29 ****************/ boolean ret = false; - if (rowDataList.size() > 0) { - ret = rowDataList.iterator().hasNext(); - if (ret) { - rowCursor = rowDataList.iterator().next(); - cursorRowNumber++; - } + if (!rowDataList.isEmpty() && cursorRowNumber < rowDataList.size()) { + rowCursor = rowDataList.get(cursorRowNumber++); + ret = true; } + return ret; } @@ -91,7 +100,8 @@ public class DatabaseMetaDataResultSet implements ResultSet { @Override public String getString(int columnIndex) throws SQLException { columnIndex--; - return rowCursor.getString(columnIndex, columnMetaDataList.get(columnIndex).getColType()); + int colType = columnMetaDataList.get(columnIndex).getColType(); + return rowCursor.getString(columnIndex, colType); } @Override diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDatabaseMetaData.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDatabaseMetaData.java index 15f66fa202..e5515c24b7 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDatabaseMetaData.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDatabaseMetaData.java @@ -20,519 +20,519 @@ import java.util.List; public class TSDBDatabaseMetaData implements java.sql.DatabaseMetaData { - private String dbProductName = null; - private String url = null; - private String userName = null; - private Connection conn = null; - - public TSDBDatabaseMetaData(String dbProductName, String url, String userName) { - this.dbProductName = dbProductName; - this.url = url; - this.userName = userName; - } + private String dbProductName = null; + private String url = null; + private String userName = null; + private Connection conn = null; + + public TSDBDatabaseMetaData(String dbProductName, String url, String userName) { + 
this.dbProductName = dbProductName; + this.url = url; + this.userName = userName; + } - public void setConnection(Connection conn) { - this.conn = conn; - } + public void setConnection(Connection conn) { + this.conn = conn; + } - public T unwrap(Class iface) throws SQLException { - return null; - } + public T unwrap(Class iface) throws SQLException { + return null; + } - public boolean isWrapperFor(Class iface) throws SQLException { - return false; - } + public boolean isWrapperFor(Class iface) throws SQLException { + return false; + } - public boolean allProceduresAreCallable() throws SQLException { - return false; - } + public boolean allProceduresAreCallable() throws SQLException { + return false; + } - public boolean allTablesAreSelectable() throws SQLException { - return false; - } + public boolean allTablesAreSelectable() throws SQLException { + return false; + } - public String getURL() throws SQLException { - return this.url; - } + public String getURL() throws SQLException { + return this.url; + } - public String getUserName() throws SQLException { - return this.userName; - } + public String getUserName() throws SQLException { + return this.userName; + } - public boolean isReadOnly() throws SQLException { - return false; - } + public boolean isReadOnly() throws SQLException { + return false; + } - public boolean nullsAreSortedHigh() throws SQLException { - return false; - } + public boolean nullsAreSortedHigh() throws SQLException { + return false; + } - public boolean nullsAreSortedLow() throws SQLException { - return !nullsAreSortedHigh(); - } + public boolean nullsAreSortedLow() throws SQLException { + return !nullsAreSortedHigh(); + } - public boolean nullsAreSortedAtStart() throws SQLException { - return true; - } + public boolean nullsAreSortedAtStart() throws SQLException { + return true; + } - public boolean nullsAreSortedAtEnd() throws SQLException { - return !nullsAreSortedAtStart(); - } + public boolean nullsAreSortedAtEnd() throws SQLException { + return !nullsAreSortedAtStart(); + } - public String getDatabaseProductName() throws SQLException { - return this.dbProductName; - } + public String getDatabaseProductName() throws SQLException { + return this.dbProductName; + } - public String getDatabaseProductVersion() throws SQLException { - return "1.5.1"; - } + public String getDatabaseProductVersion() throws SQLException { + return "1.5.1"; + } - public String getDriverName() throws SQLException { - return TSDBDriver.class.getName(); - } + public String getDriverName() throws SQLException { + return TSDBDriver.class.getName(); + } - public String getDriverVersion() throws SQLException { - return "1.0.0"; - } + public String getDriverVersion() throws SQLException { + return "1.0.0"; + } - public int getDriverMajorVersion() { - return 0; - } + public int getDriverMajorVersion() { + return 0; + } - public int getDriverMinorVersion() { - return 0; - } + public int getDriverMinorVersion() { + return 0; + } - public boolean usesLocalFiles() throws SQLException { - return false; - } + public boolean usesLocalFiles() throws SQLException { + return false; + } - public boolean usesLocalFilePerTable() throws SQLException { - return false; - } + public boolean usesLocalFilePerTable() throws SQLException { + return false; + } - public boolean supportsMixedCaseIdentifiers() throws SQLException { - return false; - } + public boolean supportsMixedCaseIdentifiers() throws SQLException { + return false; + } - public boolean storesUpperCaseIdentifiers() throws SQLException { - return 
false; - } + public boolean storesUpperCaseIdentifiers() throws SQLException { + return false; + } - public boolean storesLowerCaseIdentifiers() throws SQLException { - return false; - } + public boolean storesLowerCaseIdentifiers() throws SQLException { + return false; + } - public boolean storesMixedCaseIdentifiers() throws SQLException { - return false; - } + public boolean storesMixedCaseIdentifiers() throws SQLException { + return false; + } - public boolean supportsMixedCaseQuotedIdentifiers() throws SQLException { - return false; - } + public boolean supportsMixedCaseQuotedIdentifiers() throws SQLException { + return false; + } - public boolean storesUpperCaseQuotedIdentifiers() throws SQLException { - return false; - } + public boolean storesUpperCaseQuotedIdentifiers() throws SQLException { + return false; + } - public boolean storesLowerCaseQuotedIdentifiers() throws SQLException { - return false; - } + public boolean storesLowerCaseQuotedIdentifiers() throws SQLException { + return false; + } - public boolean storesMixedCaseQuotedIdentifiers() throws SQLException { - return false; - } + public boolean storesMixedCaseQuotedIdentifiers() throws SQLException { + return false; + } - public String getIdentifierQuoteString() throws SQLException { - return " "; - } + public String getIdentifierQuoteString() throws SQLException { + return " "; + } - public String getSQLKeywords() throws SQLException { - return null; - } + public String getSQLKeywords() throws SQLException { + return null; + } - public String getNumericFunctions() throws SQLException { - return null; - } + public String getNumericFunctions() throws SQLException { + return null; + } - public String getStringFunctions() throws SQLException { - return null; - } + public String getStringFunctions() throws SQLException { + return null; + } - public String getSystemFunctions() throws SQLException { - return null; - } + public String getSystemFunctions() throws SQLException { + return null; + } - public String getTimeDateFunctions() throws SQLException { - return null; - } + public String getTimeDateFunctions() throws SQLException { + return null; + } - public String getSearchStringEscape() throws SQLException { - return null; - } + public String getSearchStringEscape() throws SQLException { + return null; + } - public String getExtraNameCharacters() throws SQLException { - return null; - } + public String getExtraNameCharacters() throws SQLException { + return null; + } - public boolean supportsAlterTableWithAddColumn() throws SQLException { - return true; - } + public boolean supportsAlterTableWithAddColumn() throws SQLException { + return true; + } - public boolean supportsAlterTableWithDropColumn() throws SQLException { - return true; - } + public boolean supportsAlterTableWithDropColumn() throws SQLException { + return true; + } - public boolean supportsColumnAliasing() throws SQLException { - return true; - } + public boolean supportsColumnAliasing() throws SQLException { + return true; + } - public boolean nullPlusNonNullIsNull() throws SQLException { - return false; - } + public boolean nullPlusNonNullIsNull() throws SQLException { + return false; + } - public boolean supportsConvert() throws SQLException { - return false; - } + public boolean supportsConvert() throws SQLException { + return false; + } - public boolean supportsConvert(int fromType, int toType) throws SQLException { - return false; - } + public boolean supportsConvert(int fromType, int toType) throws SQLException { + return false; + } - public boolean 
supportsTableCorrelationNames() throws SQLException { - return false; - } + public boolean supportsTableCorrelationNames() throws SQLException { + return false; + } - public boolean supportsDifferentTableCorrelationNames() throws SQLException { - return false; - } + public boolean supportsDifferentTableCorrelationNames() throws SQLException { + return false; + } - public boolean supportsExpressionsInOrderBy() throws SQLException { - return false; - } + public boolean supportsExpressionsInOrderBy() throws SQLException { + return false; + } - public boolean supportsOrderByUnrelated() throws SQLException { - return false; - } + public boolean supportsOrderByUnrelated() throws SQLException { + return false; + } - public boolean supportsGroupBy() throws SQLException { - return false; - } + public boolean supportsGroupBy() throws SQLException { + return false; + } - public boolean supportsGroupByUnrelated() throws SQLException { - return false; - } + public boolean supportsGroupByUnrelated() throws SQLException { + return false; + } - public boolean supportsGroupByBeyondSelect() throws SQLException { - return false; - } + public boolean supportsGroupByBeyondSelect() throws SQLException { + return false; + } - public boolean supportsLikeEscapeClause() throws SQLException { - return false; - } + public boolean supportsLikeEscapeClause() throws SQLException { + return false; + } - public boolean supportsMultipleResultSets() throws SQLException { - return false; - } + public boolean supportsMultipleResultSets() throws SQLException { + return false; + } - public boolean supportsMultipleTransactions() throws SQLException { - return false; - } + public boolean supportsMultipleTransactions() throws SQLException { + return false; + } - public boolean supportsNonNullableColumns() throws SQLException { - return false; - } + public boolean supportsNonNullableColumns() throws SQLException { + return false; + } - public boolean supportsMinimumSQLGrammar() throws SQLException { - return false; - } + public boolean supportsMinimumSQLGrammar() throws SQLException { + return false; + } - public boolean supportsCoreSQLGrammar() throws SQLException { - return false; - } + public boolean supportsCoreSQLGrammar() throws SQLException { + return false; + } - public boolean supportsExtendedSQLGrammar() throws SQLException { - return false; - } + public boolean supportsExtendedSQLGrammar() throws SQLException { + return false; + } - public boolean supportsANSI92EntryLevelSQL() throws SQLException { - return false; - } + public boolean supportsANSI92EntryLevelSQL() throws SQLException { + return false; + } - public boolean supportsANSI92IntermediateSQL() throws SQLException { - return false; - } + public boolean supportsANSI92IntermediateSQL() throws SQLException { + return false; + } - public boolean supportsANSI92FullSQL() throws SQLException { - return false; - } + public boolean supportsANSI92FullSQL() throws SQLException { + return false; + } - public boolean supportsIntegrityEnhancementFacility() throws SQLException { - return false; - } + public boolean supportsIntegrityEnhancementFacility() throws SQLException { + return false; + } - public boolean supportsOuterJoins() throws SQLException { - return false; - } + public boolean supportsOuterJoins() throws SQLException { + return false; + } - public boolean supportsFullOuterJoins() throws SQLException { - return false; - } + public boolean supportsFullOuterJoins() throws SQLException { + return false; + } - public boolean supportsLimitedOuterJoins() throws 
SQLException { - return false; - } + public boolean supportsLimitedOuterJoins() throws SQLException { + return false; + } - public String getSchemaTerm() throws SQLException { - return null; - } + public String getSchemaTerm() throws SQLException { + return null; + } - public String getProcedureTerm() throws SQLException { - return null; - } + public String getProcedureTerm() throws SQLException { + return null; + } - public String getCatalogTerm() throws SQLException { - return "database"; - } + public String getCatalogTerm() throws SQLException { + return "database"; + } - public boolean isCatalogAtStart() throws SQLException { - return true; - } + public boolean isCatalogAtStart() throws SQLException { + return true; + } - public String getCatalogSeparator() throws SQLException { - return "."; - } + public String getCatalogSeparator() throws SQLException { + return "."; + } - public boolean supportsSchemasInDataManipulation() throws SQLException { - return false; - } + public boolean supportsSchemasInDataManipulation() throws SQLException { + return false; + } - public boolean supportsSchemasInProcedureCalls() throws SQLException { - return false; - } + public boolean supportsSchemasInProcedureCalls() throws SQLException { + return false; + } - public boolean supportsSchemasInTableDefinitions() throws SQLException { - return false; - } + public boolean supportsSchemasInTableDefinitions() throws SQLException { + return false; + } - public boolean supportsSchemasInIndexDefinitions() throws SQLException { - return false; - } + public boolean supportsSchemasInIndexDefinitions() throws SQLException { + return false; + } - public boolean supportsSchemasInPrivilegeDefinitions() throws SQLException { - return false; - } + public boolean supportsSchemasInPrivilegeDefinitions() throws SQLException { + return false; + } - public boolean supportsCatalogsInDataManipulation() throws SQLException { - return true; - } + public boolean supportsCatalogsInDataManipulation() throws SQLException { + return true; + } - public boolean supportsCatalogsInProcedureCalls() throws SQLException { - return false; - } + public boolean supportsCatalogsInProcedureCalls() throws SQLException { + return false; + } - public boolean supportsCatalogsInTableDefinitions() throws SQLException { - return false; - } + public boolean supportsCatalogsInTableDefinitions() throws SQLException { + return false; + } - public boolean supportsCatalogsInIndexDefinitions() throws SQLException { - return false; - } + public boolean supportsCatalogsInIndexDefinitions() throws SQLException { + return false; + } - public boolean supportsCatalogsInPrivilegeDefinitions() throws SQLException { - return false; - } + public boolean supportsCatalogsInPrivilegeDefinitions() throws SQLException { + return false; + } - public boolean supportsPositionedDelete() throws SQLException { - return false; - } + public boolean supportsPositionedDelete() throws SQLException { + return false; + } - public boolean supportsPositionedUpdate() throws SQLException { - return false; - } + public boolean supportsPositionedUpdate() throws SQLException { + return false; + } - public boolean supportsSelectForUpdate() throws SQLException { - return false; - } + public boolean supportsSelectForUpdate() throws SQLException { + return false; + } - public boolean supportsStoredProcedures() throws SQLException { - return false; - } + public boolean supportsStoredProcedures() throws SQLException { + return false; + } - public boolean supportsSubqueriesInComparisons() throws 
SQLException { - return false; - } + public boolean supportsSubqueriesInComparisons() throws SQLException { + return false; + } - public boolean supportsSubqueriesInExists() throws SQLException { - return false; - } + public boolean supportsSubqueriesInExists() throws SQLException { + return false; + } - public boolean supportsSubqueriesInIns() throws SQLException { - return false; - } + public boolean supportsSubqueriesInIns() throws SQLException { + return false; + } - public boolean supportsSubqueriesInQuantifieds() throws SQLException { - return false; - } + public boolean supportsSubqueriesInQuantifieds() throws SQLException { + return false; + } - public boolean supportsCorrelatedSubqueries() throws SQLException { - return false; - } + public boolean supportsCorrelatedSubqueries() throws SQLException { + return false; + } - public boolean supportsUnion() throws SQLException { - return false; - } + public boolean supportsUnion() throws SQLException { + return false; + } - public boolean supportsUnionAll() throws SQLException { - return false; - } + public boolean supportsUnionAll() throws SQLException { + return false; + } - public boolean supportsOpenCursorsAcrossCommit() throws SQLException { - return false; - } + public boolean supportsOpenCursorsAcrossCommit() throws SQLException { + return false; + } - public boolean supportsOpenCursorsAcrossRollback() throws SQLException { - return false; - } + public boolean supportsOpenCursorsAcrossRollback() throws SQLException { + return false; + } - public boolean supportsOpenStatementsAcrossCommit() throws SQLException { - return false; - } + public boolean supportsOpenStatementsAcrossCommit() throws SQLException { + return false; + } - public boolean supportsOpenStatementsAcrossRollback() throws SQLException { - return false; - } + public boolean supportsOpenStatementsAcrossRollback() throws SQLException { + return false; + } - public int getMaxBinaryLiteralLength() throws SQLException { - return 0; - } + public int getMaxBinaryLiteralLength() throws SQLException { + return 0; + } - public int getMaxCharLiteralLength() throws SQLException { - return 0; - } + public int getMaxCharLiteralLength() throws SQLException { + return 0; + } - public int getMaxColumnNameLength() throws SQLException { - return 0; - } + public int getMaxColumnNameLength() throws SQLException { + return 0; + } - public int getMaxColumnsInGroupBy() throws SQLException { - return 0; - } + public int getMaxColumnsInGroupBy() throws SQLException { + return 0; + } - public int getMaxColumnsInIndex() throws SQLException { - return 0; - } + public int getMaxColumnsInIndex() throws SQLException { + return 0; + } - public int getMaxColumnsInOrderBy() throws SQLException { - return 0; - } + public int getMaxColumnsInOrderBy() throws SQLException { + return 0; + } - public int getMaxColumnsInSelect() throws SQLException { - return 0; - } - - public int getMaxColumnsInTable() throws SQLException { - return 0; - } - - public int getMaxConnections() throws SQLException { - return 0; - } - - public int getMaxCursorNameLength() throws SQLException { - return 0; - } - - public int getMaxIndexLength() throws SQLException { - return 0; - } - - public int getMaxSchemaNameLength() throws SQLException { - return 0; - } - - public int getMaxProcedureNameLength() throws SQLException { - return 0; - } - - public int getMaxCatalogNameLength() throws SQLException { - return 0; - } - - public int getMaxRowSize() throws SQLException { - return 0; - } - - public boolean 
doesMaxRowSizeIncludeBlobs() throws SQLException { - return false; - } - - public int getMaxStatementLength() throws SQLException { - return 0; - } - - public int getMaxStatements() throws SQLException { - return 0; - } - - public int getMaxTableNameLength() throws SQLException { - return 0; - } - - public int getMaxTablesInSelect() throws SQLException { - return 0; - } - - public int getMaxUserNameLength() throws SQLException { - return 0; - } - - public int getDefaultTransactionIsolation() throws SQLException { - return 0; - } - - public boolean supportsTransactions() throws SQLException { - return false; - } - - public boolean supportsTransactionIsolationLevel(int level) throws SQLException { - return false; - } - - public boolean supportsDataDefinitionAndDataManipulationTransactions() throws SQLException { - return false; - } - - public boolean supportsDataManipulationTransactionsOnly() throws SQLException { - return false; - } - - public boolean dataDefinitionCausesTransactionCommit() throws SQLException { - return false; - } - - public boolean dataDefinitionIgnoredInTransactions() throws SQLException { - return false; - } - - public ResultSet getProcedures(String catalog, String schemaPattern, String procedureNamePattern) - throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public ResultSet getProcedureColumns(String catalog, String schemaPattern, String procedureNamePattern, - String columnNamePattern) throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); - } - - public ResultSet getTables(String catalog, String schemaPattern, String tableNamePattern, String[] types) - throws SQLException { - Statement stmt = null; - if (null != conn && !conn.isClosed()) { - stmt = conn.createStatement(); - if (catalog == null || catalog.length() < 1) { - catalog = conn.getCatalog(); - } + public int getMaxColumnsInSelect() throws SQLException { + return 0; + } + + public int getMaxColumnsInTable() throws SQLException { + return 0; + } + + public int getMaxConnections() throws SQLException { + return 0; + } + + public int getMaxCursorNameLength() throws SQLException { + return 0; + } + + public int getMaxIndexLength() throws SQLException { + return 0; + } + + public int getMaxSchemaNameLength() throws SQLException { + return 0; + } + + public int getMaxProcedureNameLength() throws SQLException { + return 0; + } + + public int getMaxCatalogNameLength() throws SQLException { + return 0; + } + + public int getMaxRowSize() throws SQLException { + return 0; + } + + public boolean doesMaxRowSizeIncludeBlobs() throws SQLException { + return false; + } + + public int getMaxStatementLength() throws SQLException { + return 0; + } + + public int getMaxStatements() throws SQLException { + return 0; + } + + public int getMaxTableNameLength() throws SQLException { + return 0; + } + + public int getMaxTablesInSelect() throws SQLException { + return 0; + } + + public int getMaxUserNameLength() throws SQLException { + return 0; + } + + public int getDefaultTransactionIsolation() throws SQLException { + return 0; + } + + public boolean supportsTransactions() throws SQLException { + return false; + } + + public boolean supportsTransactionIsolationLevel(int level) throws SQLException { + return false; + } + + public boolean supportsDataDefinitionAndDataManipulationTransactions() throws SQLException { + return false; + } + + public boolean supportsDataManipulationTransactionsOnly() throws SQLException { + return false; + } + + 
public boolean dataDefinitionCausesTransactionCommit() throws SQLException { + return false; + } + + public boolean dataDefinitionIgnoredInTransactions() throws SQLException { + return false; + } + + public ResultSet getProcedures(String catalog, String schemaPattern, String procedureNamePattern) + throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public ResultSet getProcedureColumns(String catalog, String schemaPattern, String procedureNamePattern, + String columnNamePattern) throws SQLException { + throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + } + + public ResultSet getTables(String catalog, String schemaPattern, String tableNamePattern, String[] types) + throws SQLException { + Statement stmt = null; + if (null != conn && !conn.isClosed()) { + stmt = conn.createStatement(); + if (catalog == null || catalog.length() < 1) { + catalog = conn.getCatalog(); + } stmt.executeUpdate("use " + catalog); ResultSet resultSet0 = stmt.executeQuery("show tables"); GetTablesResultSet getTablesResultSet = new GetTablesResultSet(resultSet0, catalog, schemaPattern, tableNamePattern, types); @@ -540,29 +540,29 @@ public class TSDBDatabaseMetaData implements java.sql.DatabaseMetaData { } else { throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL)); } - } + } - public ResultSet getSchemas() throws SQLException { - return getEmptyResultSet(); - } + public ResultSet getSchemas() throws SQLException { + return getEmptyResultSet(); + } - public ResultSet getCatalogs() throws SQLException { + public ResultSet getCatalogs() throws SQLException { - if (conn != null && !conn.isClosed()) { - Statement stmt = conn.createStatement(); - ResultSet resultSet0 = stmt.executeQuery("show databases"); - CatalogResultSet resultSet = new CatalogResultSet(resultSet0); - return resultSet; - } else { - return getEmptyResultSet(); + if (conn != null && !conn.isClosed()) { + Statement stmt = conn.createStatement(); + ResultSet resultSet0 = stmt.executeQuery("show databases"); + CatalogResultSet resultSet = new CatalogResultSet(resultSet0); + return resultSet; + } else { + return getEmptyResultSet(); } - } + } - public ResultSet getTableTypes() throws SQLException { - DatabaseMetaDataResultSet resultSet = new DatabaseMetaDataResultSet(); + public ResultSet getTableTypes() throws SQLException { + DatabaseMetaDataResultSet resultSet = new DatabaseMetaDataResultSet(); - // set up ColumnMetaDataList - List columnMetaDataList = new ArrayList(1); + // set up ColumnMetaDataList + List columnMetaDataList = new ArrayList(1); ColumnMetaData colMetaData = new ColumnMetaData(); colMetaData.setColIndex(0); colMetaData.setColName("TABLE_TYPE"); @@ -582,220 +582,384 @@ public class TSDBDatabaseMetaData implements java.sql.DatabaseMetaData { resultSet.setColumnMetaDataList(columnMetaDataList); resultSet.setRowDataList(rowDataList); return resultSet; - } + } - public ResultSet getColumns(String catalog, String schemaPattern, String tableNamePattern, String columnNamePattern) - throws SQLException { - return getEmptyResultSet(); - } + public ResultSet getColumns(String catalog, String schemaPattern, String tableNamePattern, String columnNamePattern) + throws SQLException { - public ResultSet getColumnPrivileges(String catalog, String schema, String table, String columnNamePattern) - throws SQLException { - return getEmptyResultSet(); - } - public ResultSet getTablePrivileges(String catalog, String schemaPattern, String tableNamePattern) - 
throws SQLException { - return getEmptyResultSet(); - } + /** add by zyyang **********/ + Statement stmt = null; + if (null != conn && !conn.isClosed()) { + stmt = conn.createStatement(); + if (catalog == null || catalog.length() < 1) { + catalog = conn.getCatalog(); + } + stmt.executeUpdate("use " + catalog); - public ResultSet getBestRowIdentifier(String catalog, String schema, String table, int scope, boolean nullable) - throws SQLException { - return getEmptyResultSet(); - } + DatabaseMetaDataResultSet resultSet = new DatabaseMetaDataResultSet(); + // set up ColumnMetaDataList + List columnMetaDataList = new ArrayList<>(24); + columnMetaDataList.add(null); + columnMetaDataList.add(null); + // add TABLE_NAME + ColumnMetaData colMetaData = new ColumnMetaData(); + colMetaData.setColIndex(3); + colMetaData.setColName("TABLE_NAME"); + colMetaData.setColSize(193); + colMetaData.setColType(TSDBConstants.TSDB_DATA_TYPE_BINARY); + columnMetaDataList.add(colMetaData); + // add COLUMN_NAME + colMetaData = new ColumnMetaData(); + colMetaData.setColIndex(4); + colMetaData.setColName("COLUMN_NAME"); + colMetaData.setColSize(65); + colMetaData.setColType(TSDBConstants.TSDB_DATA_TYPE_BINARY); + columnMetaDataList.add(colMetaData); + // add DATA_TYPE + colMetaData = new ColumnMetaData(); + colMetaData.setColIndex(5); + colMetaData.setColName("DATA_TYPE"); + colMetaData.setColType(TSDBConstants.TSDB_DATA_TYPE_INT); + columnMetaDataList.add(colMetaData); + // add TYPE_NAME + colMetaData = new ColumnMetaData(); + colMetaData.setColIndex(6); + colMetaData.setColName("TYPE_NAME"); + colMetaData.setColType(TSDBConstants.TSDB_DATA_TYPE_BINARY); + columnMetaDataList.add(colMetaData); + // add COLUMN_SIZE + colMetaData = new ColumnMetaData(); + colMetaData.setColIndex(7); + colMetaData.setColName("COLUMN_SIZE"); + colMetaData.setColType(TSDBConstants.TSDB_DATA_TYPE_INT); + columnMetaDataList.add(colMetaData); + // add BUFFER_LENGTH ,not used + columnMetaDataList.add(null); + // add DECIMAL_DIGITS + colMetaData = new ColumnMetaData(); + colMetaData.setColIndex(9); + colMetaData.setColName("DECIMAL_DIGITS"); + colMetaData.setColType(TSDBConstants.TSDB_DATA_TYPE_INT); + columnMetaDataList.add(colMetaData); + // add NUM_PREC_RADIX + colMetaData = new ColumnMetaData(); + colMetaData.setColIndex(10); + colMetaData.setColName("NUM_PREC_RADIX"); + colMetaData.setColType(TSDBConstants.TSDB_DATA_TYPE_INT); + columnMetaDataList.add(colMetaData); + // add NULLABLE + colMetaData = new ColumnMetaData(); + colMetaData.setColIndex(11); + colMetaData.setColName("NULLABLE"); + colMetaData.setColType(TSDBConstants.TSDB_DATA_TYPE_INT); + columnMetaDataList.add(colMetaData); - public ResultSet getVersionColumns(String catalog, String schema, String table) throws SQLException { - return getEmptyResultSet(); - } + resultSet.setColumnMetaDataList(columnMetaDataList); - public ResultSet getPrimaryKeys(String catalog, String schema, String table) throws SQLException { - return getEmptyResultSet(); - } + // set up rowDataList + ResultSet resultSet0 = stmt.executeQuery("describe " + tableNamePattern); + List rowDataList = new ArrayList<>(); + int index = 0; + while (resultSet0.next()) { + TSDBResultSetRowData rowData = new TSDBResultSetRowData(24); + // set TABLE_NAME + rowData.setString(2, tableNamePattern); + // set COLUMN_NAME + rowData.setString(3, resultSet0.getString(1)); + // set DATA_TYPE + String typeName = resultSet0.getString(2); + rowData.setInt(4, getDataType(typeName)); + // set TYPE_NAME + rowData.setString(5, typeName); + 
// set COLUMN_SIZE + int length = resultSet0.getInt(3); + rowData.setInt(6, getColumnSize(typeName, length)); + // set DECIMAL_DIGITS + rowData.setInt(8, getDecimalDigits(typeName)); + // set NUM_PREC_RADIX + rowData.setInt(9, 10); + // set NULLABLE + rowData.setInt(10, getNullable(index, typeName)); + rowDataList.add(rowData); + index++; + } + resultSet.setRowDataList(rowDataList); - public ResultSet getImportedKeys(String catalog, String schema, String table) throws SQLException { - return getEmptyResultSet(); - } +// GetColumnsResultSet getColumnsResultSet = new GetColumnsResultSet(resultSet0, catalog, schemaPattern, tableNamePattern, columnNamePattern); +// return getColumnsResultSet; +// DatabaseMetaDataResultSet resultSet = new DatabaseMetaDataResultSet(); + return resultSet; + } else { + throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL)); + } - public ResultSet getExportedKeys(String catalog, String schema, String table) throws SQLException { - return getEmptyResultSet(); - } + /*************************/ - public ResultSet getCrossReference(String parentCatalog, String parentSchema, String parentTable, - String foreignCatalog, String foreignSchema, String foreignTable) throws SQLException { - return getEmptyResultSet(); - } +// return getEmptyResultSet(); + } - public ResultSet getTypeInfo() throws SQLException { - return getEmptyResultSet(); - } + private int getNullable(int index, String typeName) { + if (index == 0 && "TIMESTAMP".equals(typeName)) + return DatabaseMetaData.columnNoNulls; + return DatabaseMetaData.columnNullable; + } - public ResultSet getIndexInfo(String catalog, String schema, String table, boolean unique, boolean approximate) - throws SQLException { - return getEmptyResultSet(); - } + private int getColumnSize(String typeName, int length) { + switch (typeName) { + case "TIMESTAMP": + return 23; - public boolean supportsResultSetType(int type) throws SQLException { - return false; - } + default: + return 0; + } + } - public boolean supportsResultSetConcurrency(int type, int concurrency) throws SQLException { - return false; - } + private int getDecimalDigits(String typeName) { + switch (typeName) { + case "FLOAT": + return 5; + case "DOUBLE": + return 9; + default: + return 0; + } + } - public boolean ownUpdatesAreVisible(int type) throws SQLException { - return false; - } + private int getDataType(String typeName) { + switch (typeName) { + case "TIMESTAMP": + return Types.TIMESTAMP; + case "INT": + return Types.INTEGER; + case "BIGINT": + return Types.BIGINT; + case "FLOAT": + return Types.FLOAT; + case "DOUBLE": + return Types.DOUBLE; + case "BINARY": + return Types.BINARY; + case "SMALLINT": + return Types.SMALLINT; + case "TINYINT": + return Types.TINYINT; + case "BOOL": + return Types.BOOLEAN; + case "NCHAR": + return Types.NCHAR; + default: + return Types.NULL; + } + } - public boolean ownDeletesAreVisible(int type) throws SQLException { - return false; - } + public ResultSet getColumnPrivileges(String catalog, String schema, String table, String columnNamePattern) + throws SQLException { + return getEmptyResultSet(); + } - public boolean ownInsertsAreVisible(int type) throws SQLException { - return false; - } + public ResultSet getTablePrivileges(String catalog, String schemaPattern, String tableNamePattern) + throws SQLException { + return getEmptyResultSet(); + } - public boolean othersUpdatesAreVisible(int type) throws SQLException { - return false; - } + public ResultSet getBestRowIdentifier(String catalog, String 
schema, String table, int scope, boolean nullable) + throws SQLException { + return getEmptyResultSet(); + } - public boolean othersDeletesAreVisible(int type) throws SQLException { - return false; - } + public ResultSet getVersionColumns(String catalog, String schema, String table) throws SQLException { + return getEmptyResultSet(); + } - public boolean othersInsertsAreVisible(int type) throws SQLException { - return false; - } + public ResultSet getPrimaryKeys(String catalog, String schema, String table) throws SQLException { + return getEmptyResultSet(); + } - public boolean updatesAreDetected(int type) throws SQLException { - return false; - } + public ResultSet getImportedKeys(String catalog, String schema, String table) throws SQLException { + return getEmptyResultSet(); + } - public boolean deletesAreDetected(int type) throws SQLException { - return false; - } + public ResultSet getExportedKeys(String catalog, String schema, String table) throws SQLException { + return getEmptyResultSet(); + } - public boolean insertsAreDetected(int type) throws SQLException { - return false; - } + public ResultSet getCrossReference(String parentCatalog, String parentSchema, String parentTable, + String foreignCatalog, String foreignSchema, String foreignTable) throws SQLException { + return getEmptyResultSet(); + } - public boolean supportsBatchUpdates() throws SQLException { - return false; - } + public ResultSet getTypeInfo() throws SQLException { + return getEmptyResultSet(); + } - public ResultSet getUDTs(String catalog, String schemaPattern, String typeNamePattern, int[] types) - throws SQLException { - return getEmptyResultSet(); - } + public ResultSet getIndexInfo(String catalog, String schema, String table, boolean unique, boolean approximate) + throws SQLException { + return getEmptyResultSet(); + } - public Connection getConnection() throws SQLException { - return null; - } + public boolean supportsResultSetType(int type) throws SQLException { + return false; + } - public boolean supportsSavepoints() throws SQLException { - return false; - } + public boolean supportsResultSetConcurrency(int type, int concurrency) throws SQLException { + return false; + } - public boolean supportsNamedParameters() throws SQLException { - return false; - } + public boolean ownUpdatesAreVisible(int type) throws SQLException { + return false; + } - public boolean supportsMultipleOpenResults() throws SQLException { - return false; - } + public boolean ownDeletesAreVisible(int type) throws SQLException { + return false; + } - public boolean supportsGetGeneratedKeys() throws SQLException { - return false; - } + public boolean ownInsertsAreVisible(int type) throws SQLException { + return false; + } - public ResultSet getSuperTypes(String catalog, String schemaPattern, String typeNamePattern) throws SQLException { - return getEmptyResultSet(); - } + public boolean othersUpdatesAreVisible(int type) throws SQLException { + return false; + } - public ResultSet getSuperTables(String catalog, String schemaPattern, String tableNamePattern) throws SQLException { - return getEmptyResultSet(); - } + public boolean othersDeletesAreVisible(int type) throws SQLException { + return false; + } - public ResultSet getAttributes(String catalog, String schemaPattern, String typeNamePattern, - String attributeNamePattern) throws SQLException { - return getEmptyResultSet(); - } + public boolean othersInsertsAreVisible(int type) throws SQLException { + return false; + } - public boolean supportsResultSetHoldability(int holdability) 
throws SQLException { - return false; - } + public boolean updatesAreDetected(int type) throws SQLException { + return false; + } - public int getResultSetHoldability() throws SQLException { - return 0; - } + public boolean deletesAreDetected(int type) throws SQLException { + return false; + } - public int getDatabaseMajorVersion() throws SQLException { - return 0; - } + public boolean insertsAreDetected(int type) throws SQLException { + return false; + } - public int getDatabaseMinorVersion() throws SQLException { - return 0; - } + public boolean supportsBatchUpdates() throws SQLException { + return false; + } - public int getJDBCMajorVersion() throws SQLException { - return 0; - } + public ResultSet getUDTs(String catalog, String schemaPattern, String typeNamePattern, int[] types) + throws SQLException { + return getEmptyResultSet(); + } - public int getJDBCMinorVersion() throws SQLException { - return 0; - } + public Connection getConnection() throws SQLException { + return null; + } - public int getSQLStateType() throws SQLException { - return 0; - } + public boolean supportsSavepoints() throws SQLException { + return false; + } - public boolean locatorsUpdateCopy() throws SQLException { - return false; - } + public boolean supportsNamedParameters() throws SQLException { + return false; + } - public boolean supportsStatementPooling() throws SQLException { - return false; - } + public boolean supportsMultipleOpenResults() throws SQLException { + return false; + } - public RowIdLifetime getRowIdLifetime() throws SQLException { - return null; - } + public boolean supportsGetGeneratedKeys() throws SQLException { + return false; + } - public ResultSet getSchemas(String catalog, String schemaPattern) throws SQLException { - return null; - } + public ResultSet getSuperTypes(String catalog, String schemaPattern, String typeNamePattern) throws SQLException { + return getEmptyResultSet(); + } - public boolean supportsStoredFunctionsUsingCallSyntax() throws SQLException { - return false; - } + public ResultSet getSuperTables(String catalog, String schemaPattern, String tableNamePattern) throws SQLException { + return getEmptyResultSet(); + } - public boolean autoCommitFailureClosesAllResultSets() throws SQLException { - return false; - } + public ResultSet getAttributes(String catalog, String schemaPattern, String typeNamePattern, + String attributeNamePattern) throws SQLException { + return getEmptyResultSet(); + } - public ResultSet getClientInfoProperties() throws SQLException { - return getEmptyResultSet(); - } + public boolean supportsResultSetHoldability(int holdability) throws SQLException { + return false; + } - public ResultSet getFunctions(String catalog, String schemaPattern, String functionNamePattern) - throws SQLException { - return getEmptyResultSet(); - } + public int getResultSetHoldability() throws SQLException { + return 0; + } - public ResultSet getFunctionColumns(String catalog, String schemaPattern, String functionNamePattern, - String columnNamePattern) throws SQLException { - return getEmptyResultSet(); - } + public int getDatabaseMajorVersion() throws SQLException { + return 0; + } - public ResultSet getPseudoColumns(String catalog, String schemaPattern, String tableNamePattern, - String columnNamePattern) throws SQLException { - return getEmptyResultSet(); - } + public int getDatabaseMinorVersion() throws SQLException { + return 0; + } - public boolean generatedKeyAlwaysReturned() throws SQLException { - return false; - } + public int getJDBCMajorVersion() throws 
SQLException { + return 0; + } - private ResultSet getEmptyResultSet() { - return new EmptyResultSet(); - } + public int getJDBCMinorVersion() throws SQLException { + return 0; + } + + public int getSQLStateType() throws SQLException { + return 0; + } + + public boolean locatorsUpdateCopy() throws SQLException { + return false; + } + + public boolean supportsStatementPooling() throws SQLException { + return false; + } + + public RowIdLifetime getRowIdLifetime() throws SQLException { + return null; + } + + public ResultSet getSchemas(String catalog, String schemaPattern) throws SQLException { + return null; + } + + public boolean supportsStoredFunctionsUsingCallSyntax() throws SQLException { + return false; + } + + public boolean autoCommitFailureClosesAllResultSets() throws SQLException { + return false; + } + + public ResultSet getClientInfoProperties() throws SQLException { + return getEmptyResultSet(); + } + + public ResultSet getFunctions(String catalog, String schemaPattern, String functionNamePattern) + throws SQLException { + return getEmptyResultSet(); + } + + public ResultSet getFunctionColumns(String catalog, String schemaPattern, String functionNamePattern, + String columnNamePattern) throws SQLException { + return getEmptyResultSet(); + } + + public ResultSet getPseudoColumns(String catalog, String schemaPattern, String tableNamePattern, + String columnNamePattern) throws SQLException { + return getEmptyResultSet(); + } + + public boolean generatedKeyAlwaysReturned() throws SQLException { + return false; + } + + private ResultSet getEmptyResultSet() { + return new EmptyResultSet(); + } } \ No newline at end of file From d704d08918a1660711effe33fecc9e213903d990 Mon Sep 17 00:00:00 2001 From: root Date: Tue, 29 Sep 2020 14:11:34 +0800 Subject: [PATCH 38/42] fix queryInterval failure --- tests/pytest/query/queryInterval.py | 38 ++++++++++++++++------------- 1 file changed, 21 insertions(+), 17 deletions(-) diff --git a/tests/pytest/query/queryInterval.py b/tests/pytest/query/queryInterval.py index db2c3fdeec..9922201604 100644 --- a/tests/pytest/query/queryInterval.py +++ b/tests/pytest/query/queryInterval.py @@ -23,35 +23,39 @@ class TDTestCase: tdLog.debug("start to execute %s" % __file__) tdSql.init(conn.cursor(), logSql) - self.ts = 1538548685000 + self.ts = 1593548685000 def run(self): tdSql.prepare() tdSql.execute("create table st (ts timestamp, voltage int) tags (loc nchar(30))") - tdSql.execute("insert into t0 using st tags('beijing') values(now, 220) (now - 15d, 221) (now - 30d, 225) (now - 35d, 228) (now - 45d, 222)") - tdSql.execute("insert into t1 using st tags('shanghai') values(now, 220) (now - 60d, 221) (now - 50d, 225) (now - 40d, 228) (now - 20d, 222)") + tdSql.execute("insert into t0 using st tags('beijing') values(%d, 220) (%d, 221) (%d, 225) (%d, 228) (%d, 222)" + % (self.ts, self.ts + 1000000000, self.ts + 2000000000, self.ts + 3000000000, self.ts + 6000000000)) + tdSql.execute("insert into t1 using st tags('shanghai') values(%d, 220) (%d, 221) (%d, 225) (%d, 228) (%d, 222)" + % (self.ts, self.ts + 2000000000, self.ts + 4000000000, self.ts + 5000000000, self.ts + 7000000000)) tdSql.query("select avg(voltage) from st interval(1n)") tdSql.checkRows(3) - tdSql.checkData(0, 1, 223.0) - tdSql.checkData(1, 1, 225.0) - tdSql.checkData(2, 1, 220.333333) + tdSql.checkData(0, 1, 221.4) + tdSql.checkData(1, 1, 227.0) + tdSql.checkData(2, 1, 222.0) tdSql.query("select avg(voltage) from st interval(1n, 15d)") - tdSql.checkRows(3) - tdSql.checkData(0, 1, 224.8) - 
tdSql.checkData(1, 1, 222.666666) - tdSql.checkData(2, 1, 220.0) + tdSql.checkRows(4) + tdSql.checkData(0, 1, 220.333333) + tdSql.checkData(1, 1, 224.666666) + tdSql.checkData(2, 1, 225.0) + tdSql.checkData(3, 1, 222.0) tdSql.query("select avg(voltage) from st interval(1n, 15d) group by loc") - tdSql.checkRows(6) - tdSql.checkData(0, 1, 225.0) - tdSql.checkData(1, 1, 223.0) - tdSql.checkData(2, 1, 220.0) - tdSql.checkData(3, 1, 224.666666) - tdSql.checkData(4, 1, 222.0) - tdSql.checkData(5, 1, 220.0) + tdSql.checkRows(7) + tdSql.checkData(0, 1, 220.5) + tdSql.checkData(1, 1, 226.5) + tdSql.checkData(2, 1, 222.0) + tdSql.checkData(3, 1, 220.0) + tdSql.checkData(4, 1, 221.0) + tdSql.checkData(5, 1, 226.5) + tdSql.checkData(6, 1, 222.0) def stop(self): tdSql.close() From 61e27f7a985afb6f6ec8b67f01257a4b23aeb246 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Tue, 29 Sep 2020 14:28:02 +0800 Subject: [PATCH 39/42] TD-1530 --- src/client/src/TSDBJNIConnector.c | 6 +++--- src/client/src/tscLocal.c | 8 ++++---- src/client/src/tscSQLParser.c | 2 +- src/client/src/tscSql.c | 6 +++--- src/kit/shell/src/shellEngine.c | 16 ++++++++-------- tests/script/general/parser/first_last_query.sim | 3 +++ tests/script/general/parser/where.sim | 4 ++++ 7 files changed, 26 insertions(+), 19 deletions(-) diff --git a/src/client/src/TSDBJNIConnector.c b/src/client/src/TSDBJNIConnector.c index 9368faa0ee..4e2272eb05 100644 --- a/src/client/src/TSDBJNIConnector.c +++ b/src/client/src/TSDBJNIConnector.c @@ -490,13 +490,13 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_fetchRowImp(JNIEn (*env)->CallVoidMethod(env, rowobj, g_rowdataSetBooleanFp, i, (jboolean)(*((char *)row[i]) == 1)); break; case TSDB_DATA_TYPE_TINYINT: - (*env)->CallVoidMethod(env, rowobj, g_rowdataSetByteFp, i, (jbyte) * ((char *)row[i])); + (*env)->CallVoidMethod(env, rowobj, g_rowdataSetByteFp, i, (jbyte) * ((int8_t *)row[i])); break; case TSDB_DATA_TYPE_SMALLINT: - (*env)->CallVoidMethod(env, rowobj, g_rowdataSetShortFp, i, (jshort) * ((short *)row[i])); + (*env)->CallVoidMethod(env, rowobj, g_rowdataSetShortFp, i, (jshort) * ((int16_t *)row[i])); break; case TSDB_DATA_TYPE_INT: - (*env)->CallVoidMethod(env, rowobj, g_rowdataSetIntFp, i, (jint) * (int *)row[i]); + (*env)->CallVoidMethod(env, rowobj, g_rowdataSetIntFp, i, (jint) * (int32_t *)row[i]); break; case TSDB_DATA_TYPE_BIGINT: (*env)->CallVoidMethod(env, rowobj, g_rowdataSetLongFp, i, (jlong) * ((int64_t *)row[i])); diff --git a/src/client/src/tscLocal.c b/src/client/src/tscLocal.c index 030b033653..5a33e05a53 100644 --- a/src/client/src/tscLocal.c +++ b/src/client/src/tscLocal.c @@ -306,16 +306,16 @@ static int32_t tscGetNthFieldResult(TAOS_ROW row, TAOS_FIELD* fields, int *lengt switch (type) { case TSDB_DATA_TYPE_BOOL: - sprintf(result, "%s", ((((int)(*((char *)val))) == 1) ? "true" : "false")); + sprintf(result, "%s", ((((int32_t)(*((char *)val))) == 1) ? 
"true" : "false")); break; case TSDB_DATA_TYPE_TINYINT: - sprintf(result, "%d", (int)(*((char *)val))); + sprintf(result, "%d", *((int8_t *)val)); break; case TSDB_DATA_TYPE_SMALLINT: - sprintf(result, "%d", (int)(*((short *)val))); + sprintf(result, "%d", *((int16_t *)val)); break; case TSDB_DATA_TYPE_INT: - sprintf(result, "%d", *((int *)val)); + sprintf(result, "%d", *((int32_t *)val)); break; case TSDB_DATA_TYPE_BIGINT: sprintf(result, "%"PRId64, *((int64_t *)val)); diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index f690d13164..ad39f9869d 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -1699,7 +1699,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col if (pItem->pNode->pParam != NULL) { tSQLExprItem* pParamElem = &pItem->pNode->pParam->a[0]; SStrToken* pToken = &pParamElem->pNode->colInfo; - short sqlOptr = pParamElem->pNode->nSQLOptr; + int16_t sqlOptr = pParamElem->pNode->nSQLOptr; if ((pToken->z == NULL || pToken->n == 0) && (TK_INTEGER != sqlOptr)) /*select count(1) from table*/ { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3); diff --git a/src/client/src/tscSql.c b/src/client/src/tscSql.c index 430a762321..91f7fd4638 100644 --- a/src/client/src/tscSql.c +++ b/src/client/src/tscSql.c @@ -719,15 +719,15 @@ int taos_print_row(char *str, TAOS_ROW row, TAOS_FIELD *fields, int num_fields) switch (fields[i].type) { case TSDB_DATA_TYPE_TINYINT: - len += sprintf(str + len, "%d", *((char *)row[i])); + len += sprintf(str + len, "%d", *((int8_t *)row[i])); break; case TSDB_DATA_TYPE_SMALLINT: - len += sprintf(str + len, "%d", *((short *)row[i])); + len += sprintf(str + len, "%d", *((int16_t *)row[i])); break; case TSDB_DATA_TYPE_INT: - len += sprintf(str + len, "%d", *((int *)row[i])); + len += sprintf(str + len, "%d", *((int32_t *)row[i])); break; case TSDB_DATA_TYPE_BIGINT: diff --git a/src/kit/shell/src/shellEngine.c b/src/kit/shell/src/shellEngine.c index 1d1ca1c42b..d5e826fbaa 100644 --- a/src/kit/shell/src/shellEngine.c +++ b/src/kit/shell/src/shellEngine.c @@ -419,16 +419,16 @@ static void dumpFieldToFile(FILE* fp, const char* val, TAOS_FIELD* field, int32_ char buf[TSDB_MAX_BYTES_PER_ROW]; switch (field->type) { case TSDB_DATA_TYPE_BOOL: - fprintf(fp, "%d", ((((int)(*((char *)val))) == 1) ? 1 : 0)); + fprintf(fp, "%d", ((((int32_t)(*((char *)val))) == 1) ? 1 : 0)); break; case TSDB_DATA_TYPE_TINYINT: - fprintf(fp, "%d", (int)(*((char *)val))); + fprintf(fp, "%d", *((int8_t *)val)); break; case TSDB_DATA_TYPE_SMALLINT: - fprintf(fp, "%d", (int)(*((short *)val))); + fprintf(fp, "%d", *((int16_t *)val)); break; case TSDB_DATA_TYPE_INT: - fprintf(fp, "%d", *((int *)val)); + fprintf(fp, "%d", *((int32_t *)val)); break; case TSDB_DATA_TYPE_BIGINT: fprintf(fp, "%" PRId64, *((int64_t *)val)); @@ -559,16 +559,16 @@ static void printField(const char* val, TAOS_FIELD* field, int width, int32_t le char buf[TSDB_MAX_BYTES_PER_ROW]; switch (field->type) { case TSDB_DATA_TYPE_BOOL: - printf("%*s", width, ((((int)(*((char *)val))) == 1) ? "true" : "false")); + printf("%*s", width, ((((int32_t)(*((char *)val))) == 1) ? 
"true" : "false")); break; case TSDB_DATA_TYPE_TINYINT: - printf("%*d", width, (int)(*((char *)val))); + printf("%*d", width, *((int8_t *)val)); break; case TSDB_DATA_TYPE_SMALLINT: - printf("%*d", width, (int)(*((short *)val))); + printf("%*d", width, *((int16_t *)val)); break; case TSDB_DATA_TYPE_INT: - printf("%*d", width, *((int *)val)); + printf("%*d", width, *((int32_t *)val)); break; case TSDB_DATA_TYPE_BIGINT: printf("%*" PRId64, width, *((int64_t *)val)); diff --git a/tests/script/general/parser/first_last_query.sim b/tests/script/general/parser/first_last_query.sim index 8127c19230..d43cd52878 100644 --- a/tests/script/general/parser/first_last_query.sim +++ b/tests/script/general/parser/first_last_query.sim @@ -14,12 +14,14 @@ $i = 0 $db = $dbPrefix . $i $stb = $stbPrefix . $i +print use $db sql use $db ##### select first/last from table ## TBASE-331 print ====== select first/last from table $tb = $tbPrefix . 0 +print select first(*) from $tb sql select first(*) from $tb if $rows != 1 then return -1 @@ -58,6 +60,7 @@ if $data09 != NCHAR then return -1 endi +print select last(*) from $tb sql select last(*) from $tb if $rows != 1 then return -1 diff --git a/tests/script/general/parser/where.sim b/tests/script/general/parser/where.sim index f9fd919bd6..46442e65b1 100644 --- a/tests/script/general/parser/where.sim +++ b/tests/script/general/parser/where.sim @@ -94,7 +94,11 @@ endi ## select specified columns +print select c1 from $mt sql select c1 from $mt + +print rows $rows +print totalNum $totalNum if $rows != $totalNum then return -1 endi From e5b2cac15243745dcbdccf3754b80ca73bf18c5d Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Tue, 29 Sep 2020 14:29:45 +0800 Subject: [PATCH 40/42] hotfix: fix some chinese link mistake using. [TD-1638] --- .../markdowndocs/Getting Started-ch.md | 6 ++-- .../webdocs/markdowndocs/Model-ch.md | 2 +- .../More on System Architecture-ch.md | 4 +-- .../webdocs/markdowndocs/architecture-ch.md | 12 ++++---- .../webdocs/markdowndocs/connector-ch.md | 2 +- .../webdocs/markdowndocs/faq-ch.md | 6 ++-- .../webdocs/markdowndocs/replica-ch.md | 28 +++++++++---------- .../webdocs/markdowndocs/taosd-ch.md | 4 +-- 8 files changed, 32 insertions(+), 32 deletions(-) diff --git a/documentation20/webdocs/markdowndocs/Getting Started-ch.md b/documentation20/webdocs/markdowndocs/Getting Started-ch.md index 210f0921ba..0e751d8cd5 100644 --- a/documentation20/webdocs/markdowndocs/Getting Started-ch.md +++ b/documentation20/webdocs/markdowndocs/Getting Started-ch.md @@ -68,7 +68,7 @@ systemctl status taosd taos ``` -如果TDengine终端链接服务成功,将会打印出欢迎消息和版本信息。如果失败,则会打印错误消息出来(请参考[FAQ](https://www.taosdata.com/cn/faq/)来解决终端链接服务端失败的问题)。TDengine终端的提示符号如下: +如果TDengine终端连接服务成功,将会打印出欢迎消息和版本信息。如果失败,则会打印错误消息出来(请参考[FAQ](https://www.taosdata.com/cn/faq/)来解决终端连接服务端失败的问题)。TDengine终端的提示符号如下: ```cmd taos> @@ -99,8 +99,8 @@ Query OK, 2 row(s) in set (0.001700s) - -c, --config-dir: 指定配置文件目录,默认为_/etc/taos_ - -h, --host: 指定服务的IP地址,默认为本地服务 - -s, --commands: 在不进入终端的情况下运行TDengine命令 -- -u, -- user: 链接TDengine服务器的用户名,缺省为root -- -p, --password: 链接TDengine服务器的密码,缺省为taosdata +- -u, -- user: 连接TDengine服务器的用户名,缺省为root +- -p, --password: 连接TDengine服务器的密码,缺省为taosdata - -?, --help: 打印出所有命令行参数 示例: diff --git a/documentation20/webdocs/markdowndocs/Model-ch.md b/documentation20/webdocs/markdowndocs/Model-ch.md index bf04ce8388..d698e3daaf 100644 --- a/documentation20/webdocs/markdowndocs/Model-ch.md +++ b/documentation20/webdocs/markdowndocs/Model-ch.md @@ -19,7 +19,7 @@ CREATE DATABASE power KEEP 365 DAYS 10 
BLOCKS 4; USE power; ``` -就当前链接里操作的库换为power,否则对具体表操作前,需要使用“库名.表名”来指定库的名字。 +就当前连接里操作的库换为power,否则对具体表操作前,需要使用“库名.表名”来指定库的名字。 **注意:** diff --git a/documentation20/webdocs/markdowndocs/More on System Architecture-ch.md b/documentation20/webdocs/markdowndocs/More on System Architecture-ch.md index 8e5eeee1c5..44d572268d 100644 --- a/documentation20/webdocs/markdowndocs/More on System Architecture-ch.md +++ b/documentation20/webdocs/markdowndocs/More on System Architecture-ch.md @@ -196,7 +196,7 @@ TDengine是基于硬件、软件系统不可靠、一定会有故障的假设进 **对外服务地址**:TDengine集群可以容纳单台、多台甚至几千台物理节点。应用只需要向集群中任何一个物理节点的publicIp发起连接即可。启动CLI应用taos时,选项-h需要提供的就是publicIp。 -**master/secondIp**:每一个dnode都需要配置一个masterIp。dnode启动后,将对配置的masterIp发起加入集群的连接请求。masterIp是已经创建的集群中的任何一个节点的privateIp,对于集群中的第一个节点,就是它自己的privateIp。为保证连接成功,每个dnode还可配置secondIp, 该IP地址也是已创建的集群中的任何一个节点的privateIp。如果一个节点连接masterIp失败,它将试图链接secondIp。 +**master/secondIp**:每一个dnode都需要配置一个masterIp。dnode启动后,将对配置的masterIp发起加入集群的连接请求。masterIp是已经创建的集群中的任何一个节点的privateIp,对于集群中的第一个节点,就是它自己的privateIp。为保证连接成功,每个dnode还可配置secondIp, 该IP地址也是已创建的集群中的任何一个节点的privateIp。如果一个节点连接masterIp失败,它将试图连接secondIp。 dnode启动后,会获知集群的mnode IP列表,并且定时向mnode发送状态信息。 @@ -245,4 +245,4 @@ vnode(虚拟数据节点)保存采集的时序数据,而且查询、计算都 -**Note:**目前集群功能仅仅限于企业版 \ No newline at end of file +**Note:**目前集群功能仅仅限于企业版 diff --git a/documentation20/webdocs/markdowndocs/architecture-ch.md b/documentation20/webdocs/markdowndocs/architecture-ch.md index 7dfff1f21f..c7f3eba9ef 100644 --- a/documentation20/webdocs/markdowndocs/architecture-ch.md +++ b/documentation20/webdocs/markdowndocs/architecture-ch.md @@ -84,17 +84,17 @@ TDengine 分布式架构的逻辑结构图如下: **FQDN配置**:一个数据节点有一个或多个FQDN,可以在系统配置文件taos.cfg通过参数“fqdn"进行指定,如果没有指定,系统将自动获取FQDN。如果节点没有配置FQDN,可以直接将该节点的配置参数fqdn设置为它的IP地址。但不建议使用IP,因为IP地址可变,一旦变化,将让集群无法正常工作。一个数据节点的EP(End Point)由FQDN + Port组成。采用FQDN,需要保证DNS服务正常工作,或者在节点以及应用所在的节点配置好hosts文件。 -**端口配置:**一个数据节点对外的端口由TDengine的系统配置参数serverPort决定,对集群内部通讯的端口是serverPort+5。集群内数据节点之间的数据复制操作还占有一个TCP端口,是serverPort+10. 为支持多线程高效的处理UDP数据,每个对内和对外的UDP链接,都需要占用5个连续的端口。因此一个数据节点总的端口范围为serverPort到serverPort + 10,总共11个TCP/UDP端口。使用时,需要确保防火墙将这些端口打开。每个数据节点可以配置不同的serverPort。 +**端口配置:**一个数据节点对外的端口由TDengine的系统配置参数serverPort决定,对集群内部通讯的端口是serverPort+5。集群内数据节点之间的数据复制操作还占有一个TCP端口,是serverPort+10. 
为支持多线程高效的处理UDP数据,每个对内和对外的UDP连接,都需要占用5个连续的端口。因此一个数据节点总的端口范围为serverPort到serverPort + 10,总共11个TCP/UDP端口。使用时,需要确保防火墙将这些端口打开。每个数据节点可以配置不同的serverPort。 -**集群对外链接:** TDengine集群可以容纳单个、多个甚至几千个数据节点。应用只需要向集群中任何一个数据节点发起连接即可,链接需要提供的网络参数是一数据节点的End Point(FQDN加配置的端口号)。通过命令行CLI启动应用taos时,可以通过选项-h来指定数据节点的FQDN, -P来指定其配置的端口号,如果端口不配置,将采用TDengine的系统配置参数serverPort。 +**集群对外连接:** TDengine集群可以容纳单个、多个甚至几千个数据节点。应用只需要向集群中任何一个数据节点发起连接即可,连接需要提供的网络参数是一数据节点的End Point(FQDN加配置的端口号)。通过命令行CLI启动应用taos时,可以通过选项-h来指定数据节点的FQDN, -P来指定其配置的端口号,如果端口不配置,将采用TDengine的系统配置参数serverPort。 -**集群内部通讯**: 各个数据节点之间通过TCP/UDP进行链接。一个数据节点启动时,将获取mnode所在的dnode的EP信息,然后与系统中的mnode建立起链接,交换信息。获取mnode的EP信息有三步,1:检查mnodeEpList文件是否存在,如果不存在或不能正常打开获得mnode EP信息,进入第二步;2:检查系统配置文件taos.cfg, 获取mnode EP配置参数first, second,如果不存在或者taos.cfg里没有这两个配置参数,或无效,进入第三步;3:将自己的EP设为mnode EP, 并独立运行起来。获取mnode EP列表后,数据节点发起链接,如果链接成功,则成功加入进工作的集群,如果不成功,则尝试mnode EP列表中的下一个。如果都尝试了,但链接都仍然失败,则休眠几秒后,再进行尝试。 +**集群内部通讯**: 各个数据节点之间通过TCP/UDP进行连接。一个数据节点启动时,将获取mnode所在的dnode的EP信息,然后与系统中的mnode建立起连接,交换信息。获取mnode的EP信息有三步,1:检查mnodeEpList文件是否存在,如果不存在或不能正常打开获得mnode EP信息,进入第二步;2:检查系统配置文件taos.cfg, 获取mnode EP配置参数first, second,如果不存在或者taos.cfg里没有这两个配置参数,或无效,进入第三步;3:将自己的EP设为mnode EP, 并独立运行起来。获取mnode EP列表后,数据节点发起连接,如果连接成功,则成功加入进工作的集群,如果不成功,则尝试mnode EP列表中的下一个。如果都尝试了,但连接都仍然失败,则休眠几秒后,再进行尝试。 **MNODE的选择:** TDengine逻辑上有管理节点,但没有单独的执行代码,服务器侧只有一套执行代码taosd。那么哪个数据节点会是管理节点呢?这是系统自动决定的,无需任何人工干预。原则如下:一个数据节点启动时,会检查自己的End Point, 并与获取的mnode EP List进行比对,如果在其中,该数据节点认为自己应该启动mnode模块,成为mnode。如果自己的EP不在mnode EP List里,则不启动mnode模块。在系统的运行过程中,由于负载均衡、宕机等原因,mnode有可能迁移至新的dnode,但一切都是透明的,无需人工干预,配置参数的修改,是mnode自己根据资源做出的决定。 -**新数据节点的加入**:系统有了一个数据节点后,就已经成为一个工作的系统。添加新的节点进集群时,有两个步骤,第一步:使用TDengine CLI链接到现有工作的数据节点,然后用命令”create dnode"将新的数据节点的End Point添加进去; 第二步:在新的数据节点的系统配置参数文件taos.cfg里,将first, second参数设置为现有集群中任意两个数据节点的EP即可。具体添加的详细步骤请见详细的用户手册。这样就把集群一步一步的建立起来。 +**新数据节点的加入**:系统有了一个数据节点后,就已经成为一个工作的系统。添加新的节点进集群时,有两个步骤,第一步:使用TDengine CLI连接到现有工作的数据节点,然后用命令”create dnode"将新的数据节点的End Point添加进去; 第二步:在新的数据节点的系统配置参数文件taos.cfg里,将first, second参数设置为现有集群中任意两个数据节点的EP即可。具体添加的详细步骤请见详细的用户手册。这样就把集群一步一步的建立起来。 -**重定向**:无论是dnode还是taosc,最先都是要发起与mnode的链接,但mnode是系统自动创建并维护的,因此对于用户来说,并不知道哪个dnode在运行mnode。TDengine只要求向系统中任何一个工作的dnode发起链接即可。因为任何一个正在运行的dnode,都维护有目前运行的mnode EP List。当收到一个来自新启动的dnode或taosc的链接请求,如果自己不是mnode,则将mnode EP List回复给对方,taosc或新启动的dnode收到这个list, 就重新尝试建立链接。当mnode EP List发生改变,通过节点之间的消息交互,各个数据节点就很快获取最新列表,并通知taosc。 +**重定向**:无论是dnode还是taosc,最先都是要发起与mnode的连接,但mnode是系统自动创建并维护的,因此对于用户来说,并不知道哪个dnode在运行mnode。TDengine只要求向系统中任何一个工作的dnode发起连接即可。因为任何一个正在运行的dnode,都维护有目前运行的mnode EP List。当收到一个来自新启动的dnode或taosc的连接请求,如果自己不是mnode,则将mnode EP List回复给对方,taosc或新启动的dnode收到这个list, 就重新尝试建立连接。当mnode EP List发生改变,通过节点之间的消息交互,各个数据节点就很快获取最新列表,并通知taosc。 ### 一个典型的消息流程 为解释vnode, mnode, taosc和应用之间的关系以及各自扮演的角色,下面对写入数据这个典型操作的流程进行剖析。 @@ -197,7 +197,7 @@ Master Vnode遵循下面的写入流程: ### 主从选择 Vnode会保持一个数据版本号(Version),对内存数据进行持久化存储时,对该版本号也进行持久化存储。每个数据更新操作,无论是采集的时序数据还是元数据,这个版本号将增一。 -一个vnode启动时,角色(master、slave) 是不定的,数据是处于未同步状态,它需要与虚拟节点组内其他节点建立TCP链接,并互相交换status,其中包括version和自己的角色。通过status的交换,系统进入选主流程,规则如下: +一个vnode启动时,角色(master、slave) 是不定的,数据是处于未同步状态,它需要与虚拟节点组内其他节点建立TCP连接,并互相交换status,其中包括version和自己的角色。通过status的交换,系统进入选主流程,规则如下: 1. 如果只有一个副本,该副本永远就是master 2. 
所有副本都在线时,版本最高的被选为master diff --git a/documentation20/webdocs/markdowndocs/connector-ch.md b/documentation20/webdocs/markdowndocs/connector-ch.md index 7b6afb75a7..0e29b32487 100644 --- a/documentation20/webdocs/markdowndocs/connector-ch.md +++ b/documentation20/webdocs/markdowndocs/connector-ch.md @@ -142,7 +142,7 @@ C/C++的API类似于MySQL的C API。应用程序使用时,需要包含TDengine 获取最近一次API调用失败的原因,返回值为错误代码。 -**注意**:对于单个数据库连接,在同一时刻只能有一个线程使用该链接调用API,否则会有未定义的行为出现并可能导致客户端crash。客户端应用可以通过建立多个连接进行多线程的数据写入或查询处理。 +**注意**:对于单个数据库连接,在同一时刻只能有一个线程使用该连接调用API,否则会有未定义的行为出现并可能导致客户端crash。客户端应用可以通过建立多个连接进行多线程的数据写入或查询处理。 ### 异步查询API diff --git a/documentation20/webdocs/markdowndocs/faq-ch.md b/documentation20/webdocs/markdowndocs/faq-ch.md index 6a4d4be118..80deb889ef 100644 --- a/documentation20/webdocs/markdowndocs/faq-ch.md +++ b/documentation20/webdocs/markdowndocs/faq-ch.md @@ -21,7 +21,7 @@ ## 5. 遇到错误"Unable to establish connection", 我怎么办? -客户端遇到链接故障,请按照下面的步骤进行检查: +客户端遇到连接故障,请按照下面的步骤进行检查: 1. 检查网络环境 * 云服务器:检查云服务器的安全组是否打开TCP/UDP 端口6030-6042的访问权限 @@ -45,7 +45,7 @@ 9. 如果仍不能排除连接故障,请使用命令行工具nc来分别判断指定端口的TCP和UDP连接是否通畅 检查UDP端口连接是否工作:`nc -vuz {hostIP} {port} ` 检查服务器侧TCP端口连接是否工作:`nc -l {port}` - 检查客户端侧TCP端口链接是否工作:`nc {hostIP} {port}` + 检查客户端侧TCP端口连接是否工作:`nc {hostIP} {port}` 10. 也可以使用taos程序内嵌的网络连通检测功能,来验证服务器和客户端之间指定的端口连接是否通畅(包括TCP和UDP):[TDengine 内嵌网络检测工具使用指南](https://www.taosdata.com/blog/2020/09/08/1816.html)。 @@ -57,7 +57,7 @@ 1. 请检查连接的服务器的FQDN是否正确,FQDN配置参考:[一篇文章说清楚TDengine的FQDN](https://www.taosdata.com/blog/2020/09/11/1824.html)。 2. 如果网络配置有DNS server, 请检查是否正常工作 3. 如果网络没有配置DNS server, 请检查客户端所在机器的hosts文件,查看该FQDN是否配置,并是否有正确的IP地址。 -4. 如果网络配置OK,从客户端所在机器,你需要能Ping该连接的FQDN,否则客户端是无法链接服务器的 +4. 如果网络配置OK,从客户端所在机器,你需要能Ping该连接的FQDN,否则客户端是无法连接服务器的 ## 7. 虽然语法正确,为什么我还是得到 "Invalid SQL" 错误 diff --git a/documentation20/webdocs/markdowndocs/replica-ch.md b/documentation20/webdocs/markdowndocs/replica-ch.md index c32928b42d..1d80174455 100644 --- a/documentation20/webdocs/markdowndocs/replica-ch.md +++ b/documentation20/webdocs/markdowndocs/replica-ch.md @@ -66,21 +66,21 @@ TDengine采取的是Master-Slave模式进行同步,与流行的RAFT一致性 数据实时复制有三个主要流程:选主、数据转发、数据恢复。后续做详细讨论。 -## 虚拟节点之间的网络链接 +## 虚拟节点之间的网络连接 -虚拟节点之间通过TCP进行链接,节点之间的状态交换、数据包的转发都是通过这个TCP链接(peerFd)进行。为避免竞争,两个虚拟节点之间的TCP链接,总是由IP地址(UINT32)小的节点作为TCP客户端发起。一旦TCP链接被中断,虚拟节点能通过TCP socket自动检测到,将对方标为offline。如果监测到任何错误(比如数据恢复流程),虚拟节点将主动重置该链接。 +虚拟节点之间通过TCP进行连接,节点之间的状态交换、数据包的转发都是通过这个TCP连接(peerFd)进行。为避免竞争,两个虚拟节点之间的TCP连接,总是由IP地址(UINT32)小的节点作为TCP客户端发起。一旦TCP连接被中断,虚拟节点能通过TCP socket自动检测到,将对方标为offline。如果监测到任何错误(比如数据恢复流程),虚拟节点将主动重置该连接。 -一旦作为客户端的节点链接不成或中断,它将周期性的每隔一秒钟去试图去链接一次。因为TCP本身有心跳机制,虚拟节点之间不再另行提供心跳。 +一旦作为客户端的节点连接不成或中断,它将周期性的每隔一秒钟去试图去连接一次。因为TCP本身有心跳机制,虚拟节点之间不再另行提供心跳。 -如果一个unsynced节点要发起数据恢复流程,它与Master将建立起专有的TCP链接(syncFd)。数据恢复完成后,该链接会被关闭。而且为限制资源的使用,系统只容许一定数量(配置参数tsMaxSyncNum)的数据恢复的socket存在。如果超过这个数字,系统会将新的数据恢复请求延后处理。 +如果一个unsynced节点要发起数据恢复流程,它与Master将建立起专有的TCP连接(syncFd)。数据恢复完成后,该连接会被关闭。而且为限制资源的使用,系统只容许一定数量(配置参数tsMaxSyncNum)的数据恢复的socket存在。如果超过这个数字,系统会将新的数据恢复请求延后处理。 -任意一个节点,无论有多少虚拟节点,都会启动而且只会启动一个TCP server, 来接受来自其他虚拟节点的上述两类TCP的链接请求。当TCP socket建立起来,客户端侧发送的消息体里会带有vgId(全局唯一的vgroup ID), TCP 服务器侧会检查该vgId是否已经在该节点启动运行。如果已经启动运行,就接受其请求。如果不存在,就直接将链接请求关闭。在TDengine代码里,mnode group的vgId设置为1。 +任意一个节点,无论有多少虚拟节点,都会启动而且只会启动一个TCP server, 来接受来自其他虚拟节点的上述两类TCP的连接请求。当TCP socket建立起来,客户端侧发送的消息体里会带有vgId(全局唯一的vgroup ID), TCP 服务器侧会检查该vgId是否已经在该节点启动运行。如果已经启动运行,就接受其请求。如果不存在,就直接将连接请求关闭。在TDengine代码里,mnode group的vgId设置为1。 ## 选主流程 当同一组的两个虚拟节点之间(vnode A, vnode B)建立连接后,他们互换status消息。status消息里包含本地存储的同一虚拟节点组内所有虚拟节点的role和version。 -如果一个虚拟节点(vnode A)检测到与同一虚拟节点组内另外一虚拟节点(vnode 
B)的链接中断,vnode A将立即把vnode B的role设置为offline。无论是接收到另外一虚拟节点发来的status消息,还是检测与另外一虚拟节点的链接中断,该虚拟节点都将进入状态处理流程。状态处理流程的规则如下: +如果一个虚拟节点(vnode A)检测到与同一虚拟节点组内另外一虚拟节点(vnode B)的连接中断,vnode A将立即把vnode B的role设置为offline。无论是接收到另外一虚拟节点发来的status消息,还是检测与另外一虚拟节点的连接中断,该虚拟节点都将进入状态处理流程。状态处理流程的规则如下: 1. 如果检测到在线的节点数没有超过一半,则将自己的状态设置为unsynced. 2. 如果在线的虚拟节点数超过一半,会检查master节点是否存在,如果存在,则会决定是否将自己状态改为slave或启动数据恢复流程 @@ -118,7 +118,7 @@ TDengine采取的是Master-Slave模式进行同步,与流行的RAFT一致性 9. 如果quorum为1,上述6,7,8步不会发生。 10. 如果要等待slave的确认,master会启动2秒的定时器(可配置),如果超时,则认为失败。 -对于回复确认,sync模块提供的是异步回调函数,因此APP在调用syncForwardToPeer之后,无需等待,可以处理下一个操作。在Master与Slave的TCP链接管道里,可能有多个Forward消息,这些消息是严格按照应用提供的顺序排好的。对于Forward Response也是一样,TCP管道里存在多个,但都是排序好的。这个顺序,SYNC模块并没有做特别的事情,是由APP单线程顺序写来保证的(TDengine里每个vnode的写数据,都是单线程)。 +对于回复确认,sync模块提供的是异步回调函数,因此APP在调用syncForwardToPeer之后,无需等待,可以处理下一个操作。在Master与Slave的TCP连接管道里,可能有多个Forward消息,这些消息是严格按照应用提供的顺序排好的。对于Forward Response也是一样,TCP管道里存在多个,但都是排序好的。这个顺序,SYNC模块并没有做特别的事情,是由APP单线程顺序写来保证的(TDengine里每个vnode的写数据,都是单线程)。 ## 数据恢复流程 @@ -142,9 +142,9 @@ TDengine采取的是Master-Slave模式进行同步,与流行的RAFT一致性
-1. 通过已经建立的TCP链接,发送sync req给master节点 -2. master收到sync req后,以client的身份,向vnode B主动建立一新的专用于同步的TCP链接(syncFd) -3. 新的TCP链接建立成功后,master将开始retrieve流程,对应的,vnode B将同步启动restore流程 +1. 通过已经建立的TCP连接,发送sync req给master节点 +2. master收到sync req后,以client的身份,向vnode B主动建立一新的专用于同步的TCP连接(syncFd) +3. 新的TCP连接建立成功后,master将开始retrieve流程,对应的,vnode B将同步启动restore流程 4. Retrieve/Restore流程里,先处理所有archived data (vnode里的data, head, last文件),后处理WAL data。 5. 对于archived data,master将通过回调函数getFileInfo获取数据文件的基本信息,包括文件名、magic以及文件大小。 6. master 将获得的文件名、magic以及文件大小发给vnode B @@ -157,7 +157,7 @@ TDengine采取的是Master-Slave模式进行同步,与流行的RAFT一致性 1. master节点调用回调函数getWalInfo,获取WAL的文件名。 2. 如果getWalInfo返回值大于0,表示该文件还不是最后一个WAL,因此master调用sendfile一下把该文件发送给vnode B 3. 如果getWalInfo返回时为0,表示该文件是最后一个WAL,因为文件可能还处于写的状态中,sync模块要根据WAL Head的定义逐条读出记录,然后发往vnode B。 -4. vnode A读取TCP链接传来的数据,按照WAL Head,逐条读取,如果版本号比现有的大,调用回调函数writeToCache,交给应用处理。如果小,直接扔掉。 +4. vnode A读取TCP连接传来的数据,按照WAL Head,逐条读取,如果版本号比现有的大,调用回调函数writeToCache,交给应用处理。如果小,直接扔掉。 5. 上述流程循环,直到所有WAL文件都被处理完。处理完后,master就会将新来的数据包通过Forward消息转发给slave。 从同步文件启动起,sync模块会通过inotify监控所有处理过的file以及wal。一旦发现被处理过的文件有更新变化,同步流程将中止,会重新启动。因为有可能落盘操作正在进行(比如历史数据导入,内存数据落盘),把已经处理过的文件进行了修改,需要重新同步才行。 @@ -194,15 +194,15 @@ sync模块通过inotify监控LastWal文件的更新和关闭操作。而且在 因为写入失败,客户端会重新写入数据。但对于TDengine而言,是OK的。因为时序数据都是有时间戳的,时间戳相同的数据更新操作,第一次会执行,但第二次会自动扔掉。对于Meta Data(增加、删除库、表等等)的操作,也是OK的。一张表、库已经被创建或删除,再创建或删除,不会被执行的。 -在TDengine的设计里,虚拟节点与虚拟节点之间,是一个TCP链接,是一个pipeline,数据块一个接一个按顺序在这个pipeline里等待处理。一旦某个数据块的处理失败,这个链接会被重置,后续的数据块的处理都会失败。因此不会存在Pipeline里一个数据块更新失败,但下一个数据块成功的可能。 +在TDengine的设计里,虚拟节点与虚拟节点之间,是一个TCP连接,是一个pipeline,数据块一个接一个按顺序在这个pipeline里等待处理。一旦某个数据块的处理失败,这个连接会被重置,后续的数据块的处理都会失败。因此不会存在Pipeline里一个数据块更新失败,但下一个数据块成功的可能。 ## Split Brain的问题 选举流程中,有个强制要求,那就是一定有超过半数的虚拟节点在线。但是如果replication正好是偶数,这个时候,完全可能存在splt brain问题。 -为解决这个问题,TDengine提供Arbitrator的解决方法。Arbitrator是一个节点,它的任务就是接受任何虚拟节点的链接请求,并保持它。 +为解决这个问题,TDengine提供Arbitrator的解决方法。Arbitrator是一个节点,它的任务就是接受任何虚拟节点的连接请求,并保持它。 -在启动复制模块实例时,在配置参数中,应用可以提供Arbitrator的IP地址。如果是奇数个副本,复制模块不会与这个arbitrator去建立链接,但如果是偶数个副本,就会主动去建立链接。 +在启动复制模块实例时,在配置参数中,应用可以提供Arbitrator的IP地址。如果是奇数个副本,复制模块不会与这个arbitrator去建立连接,但如果是偶数个副本,就会主动去建立连接。 Arbitrator的程序tarbitrator.c在复制模块的同一目录, 编译整个系统时,会在bin目录生成。命令行参数“-?”查看可以配置的参数,比如绑定的IP地址,监听的端口号。 diff --git a/documentation20/webdocs/markdowndocs/taosd-ch.md b/documentation20/webdocs/markdowndocs/taosd-ch.md index 8143137c79..e90bc2233f 100644 --- a/documentation20/webdocs/markdowndocs/taosd-ch.md +++ b/documentation20/webdocs/markdowndocs/taosd-ch.md @@ -13,7 +13,7 @@ taosd的启动入口是dnode模块,dnode然后启动其他模块,包括可 该模块负责taosd与taosc, 以及其他数据节点之间的通讯。TDengine没有采取标准的HTTP或gRPC等第三方工具,而是实现了自己的通讯模块RPC。 -考虑到物联网场景下,数据写入的包一般不大,因此除支持TCP链接之外,RPC还支持UDP链接。当数据包小于15K时,RPC将采用UDP方式进行链接,否则将采用TCP链接。对于查询类的消息,RPC不管包的大小,总是采取TCP链接。对于UDP链接,RPC实现了自己的超时、重传、顺序检查等机制,以保证数据可靠传输。 +考虑到物联网场景下,数据写入的包一般不大,因此除支持TCP连接之外,RPC还支持UDP连接。当数据包小于15K时,RPC将采用UDP方式进行连接,否则将采用TCP连接。对于查询类的消息,RPC不管包的大小,总是采取TCP连接。对于UDP连接,RPC实现了自己的超时、重传、顺序检查等机制,以保证数据可靠传输。 RPC模块还提供数据压缩功能,如果数据包的字节数超过系统配置参数compressMsgSize, RPC在传输中将自动压缩数据,以节省带宽。 @@ -25,7 +25,7 @@ RPC模块还提供数据压缩功能,如果数据包的字节数超过系统 - 系统的初始化,包括 - 从文件taos.cfg读取系统配置参数,从文件dnodeCfg.json读取数据节点的配置参数; - - 启动RPC模块,并建立起与taosc通讯的server链接,与其他数据节点通讯的server链接; + - 启动RPC模块,并建立起与taosc通讯的server连接,与其他数据节点通讯的server连接; - 启动并初始化dnode的内部管理, 该模块将扫描该数据节点已有的vnode,并打开它们; - 初始化可配置的模块,如mnode, http, monitor等。 - 数据节点的管理,包括 From 89779e71c991f33c00ea6f37bad65373d7ec7219 Mon Sep 17 00:00:00 2001 From: jinzhao <467847281@qq.com> Date: Tue, 29 Sep 2020 15:48:40 +0800 Subject: [PATCH 41/42] fix cross compile failed in arm32 --- src/client/src/tscFunctionImpl.c | 12 ++++++------ 
src/common/src/ttypes.c | 18 +++++++++--------- src/os/inc/osDir.h | 2 ++ 3 files changed, 17 insertions(+), 15 deletions(-) diff --git a/src/client/src/tscFunctionImpl.c b/src/client/src/tscFunctionImpl.c index be38a7af71..60e9596ec4 100644 --- a/src/client/src/tscFunctionImpl.c +++ b/src/client/src/tscFunctionImpl.c @@ -525,7 +525,7 @@ static void do_sum(SQLFunctionCtx *pCtx) { *retVal += pCtx->preAggVals.statis.sum; } else if (pCtx->inputType == TSDB_DATA_TYPE_DOUBLE || pCtx->inputType == TSDB_DATA_TYPE_FLOAT) { double *retVal = (double*) pCtx->aOutputBuf; - *retVal += GET_DOUBLE_VAL(&(pCtx->preAggVals.statis.sum)); + *retVal += GET_DOUBLE_VAL((const char*)&(pCtx->preAggVals.statis.sum)); } } else { // computing based on the true data block void *pData = GET_INPUT_CHAR(pCtx); @@ -768,7 +768,7 @@ static void avg_function(SQLFunctionCtx *pCtx) { if (pCtx->inputType >= TSDB_DATA_TYPE_TINYINT && pCtx->inputType <= TSDB_DATA_TYPE_BIGINT) { *pVal += pCtx->preAggVals.statis.sum; } else if (pCtx->inputType == TSDB_DATA_TYPE_DOUBLE || pCtx->inputType == TSDB_DATA_TYPE_FLOAT) { - *pVal += GET_DOUBLE_VAL(&(pCtx->preAggVals.statis.sum)); + *pVal += GET_DOUBLE_VAL((const char *)&(pCtx->preAggVals.statis.sum)); } } else { void *pData = GET_INPUT_CHAR(pCtx); @@ -3516,12 +3516,12 @@ static void spread_function(SQLFunctionCtx *pCtx) { pInfo->max = (double)pCtx->preAggVals.statis.max; } } else if (pCtx->inputType == TSDB_DATA_TYPE_DOUBLE || pCtx->inputType == TSDB_DATA_TYPE_FLOAT) { - if (pInfo->min > GET_DOUBLE_VAL(&(pCtx->preAggVals.statis.min))) { - pInfo->min = GET_DOUBLE_VAL(&(pCtx->preAggVals.statis.min)); + if (pInfo->min > GET_DOUBLE_VAL((const char *)&(pCtx->preAggVals.statis.min))) { + pInfo->min = GET_DOUBLE_VAL((const char *)&(pCtx->preAggVals.statis.min)); } - if (pInfo->max < GET_DOUBLE_VAL(&(pCtx->preAggVals.statis.max))) { - pInfo->max = GET_DOUBLE_VAL(&(pCtx->preAggVals.statis.max)); + if (pInfo->max < GET_DOUBLE_VAL((const char *)&(pCtx->preAggVals.statis.max))) { + pInfo->max = GET_DOUBLE_VAL((const char *)&(pCtx->preAggVals.statis.max)); } } diff --git a/src/common/src/ttypes.c b/src/common/src/ttypes.c index 45ec20ce45..0e20ff19ad 100644 --- a/src/common/src/ttypes.c +++ b/src/common/src/ttypes.c @@ -219,7 +219,7 @@ static void getStatics_f(const TSKEY *primaryKey, const void *pData, int32_t num } float fv = 0; - fv = GET_FLOAT_VAL(&(data[i])); + fv = GET_FLOAT_VAL((const char*)&(data[i])); dsum += fv; if (fmin > fv) { fmin = fv; @@ -233,12 +233,12 @@ static void getStatics_f(const TSKEY *primaryKey, const void *pData, int32_t num } double csum = 0; - csum = GET_DOUBLE_VAL(sum); + csum = GET_DOUBLE_VAL((const char *)sum); csum += dsum; #ifdef _TD_ARM_32 - SET_DOUBLE_VAL_ALIGN(sum, &csum); - SET_DOUBLE_VAL_ALIGN(max, &fmax); - SET_DOUBLE_VAL_ALIGN(min, &fmin); + SET_DOUBLE_VAL_ALIGN((const char *)sum, &csum); + SET_DOUBLE_VAL_ALIGN((const char *)max, &fmax); + SET_DOUBLE_VAL_ALIGN((const char *)min, &fmin); #else *(double*)sum = csum; *(double*)max = fmax; @@ -264,7 +264,7 @@ static void getStatics_d(const TSKEY *primaryKey, const void *pData, int32_t num } double dv = 0; - dv = GET_DOUBLE_VAL(&(data[i])); + dv = GET_DOUBLE_VAL((const char*)&(data[i])); dsum += dv; if (dmin > dv) { dmin = dv; @@ -278,7 +278,7 @@ static void getStatics_d(const TSKEY *primaryKey, const void *pData, int32_t num } double csum = 0; - csum = GET_DOUBLE_VAL(sum); + csum = GET_DOUBLE_VAL((const char *)sum); csum += dsum; @@ -504,7 +504,7 @@ void assignVal(char *val, const char *src, int32_t len, int32_t type) 
{ }; case TSDB_DATA_TYPE_DOUBLE: { #ifdef _TD_ARM_32 - double dv = GET_DOUBLE_VAL(src); + double dv = GET_DOUBLE_VAL((const char *)src); SET_DOUBLE_VAL_ALIGN(val, &dv); #else *((double *)val) = GET_DOUBLE_VAL(src); @@ -579,4 +579,4 @@ void tsDataSwap(void *pLeft, void *pRight, int32_t type, int32_t size, void* buf break; } } -} \ No newline at end of file +} diff --git a/src/os/inc/osDir.h b/src/os/inc/osDir.h index 4a522dadb5..67cfdb3b53 100644 --- a/src/os/inc/osDir.h +++ b/src/os/inc/osDir.h @@ -20,6 +20,8 @@ extern "C" { #endif +#include + // TAOS_OS_FUNC_DIR void taosRemoveDir(char *rootDir); int taosMkDir(const char *pathname, mode_t mode); From 16ea473ddd90f3d51fe0559fc6d5633cc03421c7 Mon Sep 17 00:00:00 2001 From: Hui Li Date: Tue, 29 Sep 2020 15:54:11 +0800 Subject: [PATCH 42/42] [TD-1353] --- packaging/tools/makearbi.sh | 8 ++++---- packaging/tools/makearbi_power.sh | 8 ++++---- packaging/tools/makeclient.sh | 8 ++++---- packaging/tools/makeclient_power.sh | 8 ++++---- packaging/tools/makepkg.sh | 8 ++++---- packaging/tools/makepkg_power.sh | 8 ++++---- 6 files changed, 24 insertions(+), 24 deletions(-) diff --git a/packaging/tools/makearbi.sh b/packaging/tools/makearbi.sh index bc6179eff2..82808bcae9 100755 --- a/packaging/tools/makearbi.sh +++ b/packaging/tools/makearbi.sh @@ -25,9 +25,9 @@ release_dir="${top_dir}/release" #package_name='linux' if [ "$verMode" == "cluster" ]; then - install_dir="${release_dir}/TDengine-enterprise-arbitrator" + install_dir="${release_dir}/TDengine-enterprise-arbitrator-${version}" else - install_dir="${release_dir}/TDengine-arbitrator" + install_dir="${release_dir}/TDengine-arbitrator-${version}" fi # Directories and files. @@ -48,9 +48,9 @@ mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_rpm} ${install_dir} cd ${release_dir} if [ "$verMode" == "cluster" ]; then - pkg_name=${install_dir}-${version}-${osType}-${cpuType} + pkg_name=${install_dir}-${osType}-${cpuType} elif [ "$verMode" == "edge" ]; then - pkg_name=${install_dir}-${version}-${osType}-${cpuType} + pkg_name=${install_dir}-${osType}-${cpuType} else echo "unknow verMode, nor cluster or edge" exit 1 diff --git a/packaging/tools/makearbi_power.sh b/packaging/tools/makearbi_power.sh index 5296cc8e3f..fd50ecd438 100755 --- a/packaging/tools/makearbi_power.sh +++ b/packaging/tools/makearbi_power.sh @@ -25,9 +25,9 @@ release_dir="${top_dir}/release" #package_name='linux' if [ "$verMode" == "cluster" ]; then - install_dir="${release_dir}/PowerDB-enterprise-arbitrator" + install_dir="${release_dir}/PowerDB-enterprise-arbitrator-${version}" else - install_dir="${release_dir}/PowerDB-arbitrator" + install_dir="${release_dir}/PowerDB-arbitrator-${version}" fi # Directories and files. 
@@ -48,9 +48,9 @@ mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_rpm} ${install_dir} cd ${release_dir} if [ "$verMode" == "cluster" ]; then - pkg_name=${install_dir}-${version}-${osType}-${cpuType} + pkg_name=${install_dir}-${osType}-${cpuType} elif [ "$verMode" == "edge" ]; then - pkg_name=${install_dir}-${version}-${osType}-${cpuType} + pkg_name=${install_dir}-${osType}-${cpuType} else echo "unknow verMode, nor cluster or edge" exit 1 diff --git a/packaging/tools/makeclient.sh b/packaging/tools/makeclient.sh index d69a8e6007..e17c678f26 100755 --- a/packaging/tools/makeclient.sh +++ b/packaging/tools/makeclient.sh @@ -32,9 +32,9 @@ release_dir="${top_dir}/release" #package_name='linux' if [ "$verMode" == "cluster" ]; then - install_dir="${release_dir}/TDengine-enterprise-client" + install_dir="${release_dir}/TDengine-enterprise-client-${version}" else - install_dir="${release_dir}/TDengine-client" + install_dir="${release_dir}/TDengine-client-${version}" fi # Directories and files. @@ -125,9 +125,9 @@ fi cd ${release_dir} if [ "$verMode" == "cluster" ]; then - pkg_name=${install_dir}-${version}-${osType}-${cpuType} + pkg_name=${install_dir}-${osType}-${cpuType} elif [ "$verMode" == "edge" ]; then - pkg_name=${install_dir}-${version}-${osType}-${cpuType} + pkg_name=${install_dir}-${osType}-${cpuType} else echo "unknow verMode, nor cluster or edge" exit 1 diff --git a/packaging/tools/makeclient_power.sh b/packaging/tools/makeclient_power.sh index d4be52f679..b4416a68bb 100755 --- a/packaging/tools/makeclient_power.sh +++ b/packaging/tools/makeclient_power.sh @@ -32,9 +32,9 @@ release_dir="${top_dir}/release" #package_name='linux' if [ "$verMode" == "cluster" ]; then - install_dir="${release_dir}/PowerDB-enterprise-client" + install_dir="${release_dir}/PowerDB-enterprise-client-${version}" else - install_dir="${release_dir}/PowerDB-client" + install_dir="${release_dir}/PowerDB-client-${version}" fi # Directories and files. @@ -164,9 +164,9 @@ fi cd ${release_dir} if [ "$verMode" == "cluster" ]; then - pkg_name=${install_dir}-${version}-${osType}-${cpuType} + pkg_name=${install_dir}-${osType}-${cpuType} elif [ "$verMode" == "edge" ]; then - pkg_name=${install_dir}-${version}-${osType}-${cpuType} + pkg_name=${install_dir}-${osType}-${cpuType} else echo "unknow verMode, nor cluster or edge" exit 1 diff --git a/packaging/tools/makepkg.sh b/packaging/tools/makepkg.sh index 3958cff53b..75b45b544e 100755 --- a/packaging/tools/makepkg.sh +++ b/packaging/tools/makepkg.sh @@ -25,9 +25,9 @@ release_dir="${top_dir}/release" #package_name='linux' if [ "$verMode" == "cluster" ]; then - install_dir="${release_dir}/TDengine-enterprise-server" + install_dir="${release_dir}/TDengine-enterprise-server-${version}" else - install_dir="${release_dir}/TDengine-server" + install_dir="${release_dir}/TDengine-server-${version}" fi # Directories and files. 
@@ -138,9 +138,9 @@ fi cd ${release_dir} if [ "$verMode" == "cluster" ]; then - pkg_name=${install_dir}-${version}-${osType}-${cpuType} + pkg_name=${install_dir}-${osType}-${cpuType} elif [ "$verMode" == "edge" ]; then - pkg_name=${install_dir}-${version}-${osType}-${cpuType} + pkg_name=${install_dir}-${osType}-${cpuType} else echo "unknow verMode, nor cluster or edge" exit 1 diff --git a/packaging/tools/makepkg_power.sh b/packaging/tools/makepkg_power.sh index 744f78e514..3d625900c9 100755 --- a/packaging/tools/makepkg_power.sh +++ b/packaging/tools/makepkg_power.sh @@ -25,9 +25,9 @@ release_dir="${top_dir}/release" #package_name='linux' if [ "$verMode" == "cluster" ]; then - install_dir="${release_dir}/PowerDB-enterprise-server" + install_dir="${release_dir}/PowerDB-enterprise-server-${version}" else - install_dir="${release_dir}/PowerDB-server" + install_dir="${release_dir}/PowerDB-server-${version}" fi # Directories and files. @@ -184,9 +184,9 @@ fi cd ${release_dir} if [ "$verMode" == "cluster" ]; then - pkg_name=${install_dir}-${version}-${osType}-${cpuType} + pkg_name=${install_dir}-${osType}-${cpuType} elif [ "$verMode" == "edge" ]; then - pkg_name=${install_dir}-${version}-${osType}-${cpuType} + pkg_name=${install_dir}-${osType}-${cpuType} else echo "unknow verMode, nor cluster or edge" exit 1
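
For reference, a minimal bash sketch of the package naming these scripts end up producing (all values below are assumed placeholders, not taken from the patch): the version string lives in install_dir, so the final package name is built without repeating it.

    #!/bin/bash
    # Hypothetical example values for illustration only.
    release_dir="/tmp/release"
    version="2.0.5.1"
    osType="Linux"
    cpuType="x64"

    # The versioned directory name ...
    install_dir="${release_dir}/TDengine-server-${version}"
    # ... is reused directly, so the tarball name carries the version once.
    pkg_name=${install_dir}-${osType}-${cpuType}

    echo "${pkg_name}.tar.gz"
    # prints: /tmp/release/TDengine-server-2.0.5.1-Linux-x64.tar.gz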