From 3c566750aa5e16c75a82ec81130dd8be21a32663 Mon Sep 17 00:00:00 2001 From: Cary Xu Date: Fri, 6 Aug 2021 10:23:39 +0800 Subject: [PATCH 001/165] [TD-5765]:check max length when alter tag value --- src/client/src/tscSQLParser.c | 5 + tests/pytest/fulltest.sh | 1 + tests/pytest/tag_lite/TestModifyTag.py | 125 +++++++++++++++++++++++++ 3 files changed, 131 insertions(+) create mode 100644 tests/pytest/tag_lite/TestModifyTag.py diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index 7adc0812ae..8d89b7c3cc 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -5884,6 +5884,11 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { tVariantListItem* pItem = taosArrayGet(pVarList, 1); SSchema* pTagsSchema = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, columnIndex.columnIndex); + + if (IS_VAR_DATA_TYPE(pTagsSchema->type) && (pItem->pVar.nLen > pTagsSchema->bytes * TSDB_NCHAR_SIZE)) { + return invalidOperationMsg(pMsg, msg14); + } + pAlterSQL->tagData.data = calloc(1, pTagsSchema->bytes * TSDB_NCHAR_SIZE + VARSTR_HEADER_SIZE); if (tVariantDump(&pItem->pVar, pAlterSQL->tagData.data, pTagsSchema->type, true) != TSDB_CODE_SUCCESS) { diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh index 783ee98da3..3cbc80b69d 100755 --- a/tests/pytest/fulltest.sh +++ b/tests/pytest/fulltest.sh @@ -76,6 +76,7 @@ python3 ./test.py -f tag_lite/set.py python3 ./test.py -f tag_lite/smallint.py python3 ./test.py -f tag_lite/tinyint.py python3 ./test.py -f tag_lite/timestamp.py +python3 ./test.py -f tag_lite/TestModifyTag.py #python3 ./test.py -f dbmgmt/database-name-boundary.py python3 test.py -f dbmgmt/nanoSecondCheck.py diff --git a/tests/pytest/tag_lite/TestModifyTag.py b/tests/pytest/tag_lite/TestModifyTag.py new file mode 100644 index 0000000000..acf63695f6 --- /dev/null +++ b/tests/pytest/tag_lite/TestModifyTag.py @@ -0,0 +1,125 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import os +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + self.ts = 1625068800000000000 # this is timestamp "2021-07-01 00:00:00" + self.numberOfTables = 10 + self.numberOfRecords = 100 + + def checkCommunity(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + if ("community" in selfPath): + return False + else: + return True + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosdump" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root) - len("/build/bin")] + break + return buildPath + + + + def run(self): + + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosdump not found!") + else: + tdLog.info("taosdump found in %s" % buildPath) + binPath = buildPath + "/build/bin/" + + # basic test for alter tags + tdSql.execute("create database tagdb ") + tdSql.execute(" use tagdb") + tdSql.execute("create table st (ts timestamp , a int) tags (tg1 binary(20), tg2 binary(20), tg3 binary(20))") + tdSql.execute("insert into t using st (tg3, tg2, tg1) tags ('tg3', 'tg2', 'tg1') values (now, 1)") + tdSql.execute("alter table t set tag tg1='newtg1'") + res = tdSql.getResult("select tg1,tg2,tg3 from t") + + if res == [('newtg1', 'tg2', 'tg3')]: + tdLog.info(" alter tag check has pass!") + else: + tdLog.info(" alter tag failed , please check !") + + tdSql.error("alter stable st modify tag tg2 binary(2)") + tdSql.execute("alter stable st modify tag tg2 binary(30) ") + tdSql.execute("alter table t set tag tg2 = 'abcdefghijklmnopqrstuvwxyz1234'") + res = tdSql.getResult("select tg1,tg2,tg3 from t") + if res == [('newtg1', 'abcdefghijklmnopqrstuvwxyz1234', 'tg3')]: + tdLog.info(" alter tag check has pass!") + else: + tdLog.info(" alter tag failed , please check !") + + # test boundary about tags + tdSql.execute("create stable stb1 (ts timestamp , a int) tags (tg1 binary(16374))") + tdSql.error("create stable stb1 (ts timestamp , a int) tags (tg1 binary(16375))") + bound_sql = "create stable stb2 (ts timestamp , a int) tags (tg1 binary(10)," + for i in range(127): + bound_sql+="tag"+str(i)+" binary(10)," + sql1 = bound_sql[:-1]+")" + tdSql.execute(sql1) + sql2 = bound_sql[:-1]+"tag127 binary(10))" + tdSql.error(sql2) + tdSql.execute("create stable stb3 (ts timestamp , a int) tags (tg1 nchar(4093))") + tdSql.error("create stable stb3 (ts timestamp , a int) tags (tg1 nchar(4094))") + tdSql.execute("create stable stb4 (ts timestamp , a int) tags (tg1 nchar(4093),tag2 binary(8))") + tdSql.error("create stable stb4 (ts timestamp , a int) tags (tg1 nchar(4093),tag2 binary(9))") + tdSql.execute("create stable stb5 (ts timestamp , a int) tags (tg1 nchar(4093),tag2 binary(4),tag3 binary(2))") + tdSql.error("create stable stb5 (ts timestamp , a int) tags (tg1 nchar(4093),tag2 
binary(4),tag3 binary(3))") + + tdSql.execute("create table stt (ts timestamp , a binary(100)) tags (tg1 binary(20), tg2 binary(20), tg3 binary(20))") + tdSql.execute("insert into tt using stt (tg3, tg2, tg1) tags ('tg3', 'tg2', 'tg1') values (now, 1)") + tags = "t"*16337 + sql3 = "alter table tt set tag tg1=" +"'"+tags+"'" + tdSql.error(sql3) + tdSql.execute("alter stable stt modify tag tg1 binary(16337)") + tdSql.execute(sql3) + res = tdSql.getResult("select tg1,tg2,tg3 from tt") + if res == [(tags, 'tg2', 'tg3')]: + tdLog.info(" alter tag check has pass!") + else: + tdLog.info(" alter tag failed , please check !") + + os.system("rm -rf ./tag_lite/TestModifyTag.py.sql") + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file From cc69fcc49ced0ad13e3be106af5e99b88a996e4f Mon Sep 17 00:00:00 2001 From: xywang Date: Mon, 9 Aug 2021 11:01:43 +0800 Subject: [PATCH 002/165] [TD-5784]: fixed a wrong check --- src/plugins/http/src/httpUtil.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/plugins/http/src/httpUtil.c b/src/plugins/http/src/httpUtil.c index 27b95a4934..ade50bdad6 100644 --- a/src/plugins/http/src/httpUtil.c +++ b/src/plugins/http/src/httpUtil.c @@ -188,7 +188,7 @@ bool httpMallocMultiCmds(HttpContext *pContext, int32_t cmdSize, int32_t bufferS bool httpReMallocMultiCmdsSize(HttpContext *pContext, int32_t cmdSize) { HttpSqlCmds *multiCmds = pContext->multiCmds; - if (cmdSize <= 0 && cmdSize > HTTP_MAX_CMD_SIZE) { + if (cmdSize <= 0 || cmdSize > HTTP_MAX_CMD_SIZE) { httpError("context:%p, fd:%d, user:%s, mulitcmd size:%d large then %d", pContext, pContext->fd, pContext->user, cmdSize, HTTP_MAX_CMD_SIZE); return false; From e554181cfb0f2641bcfcdbf4dee02ff8968099f1 Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Tue, 10 Aug 2021 14:13:00 +0800 Subject: [PATCH 003/165] [TD-5921]:fix case problem in 'tbname in' syntax --- src/tsdb/src/tsdbRead.c | 1 + 1 file changed, 1 insertion(+) diff --git a/src/tsdb/src/tsdbRead.c b/src/tsdb/src/tsdbRead.c index e1d40aa7d0..260c3d67dc 100644 --- a/src/tsdb/src/tsdbRead.c +++ b/src/tsdb/src/tsdbRead.c @@ -3468,6 +3468,7 @@ void filterPrepare(void* expr, void* param) { SArray *arr = (SArray *)(pCond->arr); for (size_t i = 0; i < taosArrayGetSize(arr); i++) { char* p = taosArrayGetP(arr, i); + strtolower(varDataVal(p), varDataVal(p)); taosHashPut(pObj, varDataVal(p),varDataLen(p), &dummy, sizeof(dummy)); } } else { From fbf76a3705bcb6014a8f413d0a18a553aa4f5a2e Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Tue, 10 Aug 2021 18:46:55 +0800 Subject: [PATCH 004/165] [TD-5922]: add unitest cases --- tests/pytest/query/tbname.py | 3 +++ .../script/general/parser/tbnameIn_query.sim | 24 +++++++++++++++++++ 2 files changed, 27 insertions(+) diff --git a/tests/pytest/query/tbname.py b/tests/pytest/query/tbname.py index 08416ba3ed..30d90b1f9d 100644 --- a/tests/pytest/query/tbname.py +++ b/tests/pytest/query/tbname.py @@ -53,6 +53,9 @@ class TDTestCase: "select * from cars where id=0 and tbname in ('carzero', 'cartwo')") tdSql.checkRows(1) + tdSql.query("select * from cars where tbname in ('carZero', 'CARONE')") + tdSql.checkRows(2) + """ tdSql.query("select * from cars where tbname like 'car%'") tdSql.checkRows(2) diff --git a/tests/script/general/parser/tbnameIn_query.sim b/tests/script/general/parser/tbnameIn_query.sim index 65bb89d549..db27886bbf 100644 --- 
a/tests/script/general/parser/tbnameIn_query.sim +++ b/tests/script/general/parser/tbnameIn_query.sim @@ -101,6 +101,30 @@ if $data11 != 2 then return -1 endi +## tbname in can accpet Upper case table name +sql select count(*) from $stb where tbname in ('ti_tb0', 'TI_tb1', 'TI_TB2') group by t1 order by t1 +if $rows != 3 then + return -1 +endi +if $data00 != 10 then + return -1 +endi +if $data01 != 0 then + return -1 +endi +if $data10 != 10 then + return -1 +endi +if $data11 != 1 then + return -1 +endi +if $data20 != 10 then + return -1 +endi +if $data21 != 2 then + return -1 +endi + # multiple tbname in is not allowed NOW sql_error select count(*) from $stb where tbname in ('ti_tb1', 'ti_tb300') and tbname in ('ti_tb5', 'ti_tb1000') group by t1 order by t1 asc #if $rows != 4 then From 9bafb6436eb14c6b9fbbaf4c706a42142c0be87b Mon Sep 17 00:00:00 2001 From: Cary Xu Date: Wed, 11 Aug 2021 09:04:04 +0800 Subject: [PATCH 005/165] trigger CI --- src/client/src/tscSQLParser.c | 1 - 1 file changed, 1 deletion(-) diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index 8d89b7c3cc..afbecd3a10 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -5888,7 +5888,6 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { if (IS_VAR_DATA_TYPE(pTagsSchema->type) && (pItem->pVar.nLen > pTagsSchema->bytes * TSDB_NCHAR_SIZE)) { return invalidOperationMsg(pMsg, msg14); } - pAlterSQL->tagData.data = calloc(1, pTagsSchema->bytes * TSDB_NCHAR_SIZE + VARSTR_HEADER_SIZE); if (tVariantDump(&pItem->pVar, pAlterSQL->tagData.data, pTagsSchema->type, true) != TSDB_CODE_SUCCESS) { From 52848c4de5ac5d6415267e8a08fb7860acb8cc43 Mon Sep 17 00:00:00 2001 From: jiajingbin Date: Wed, 11 Aug 2021 14:55:45 +0800 Subject: [PATCH 006/165] [TD-5923]finish test--->where tbname in UPPER/lower/mIXed add query/queryTbnameUpperLower.py add util/common.py for adding common function in case of duplicate use --- tests/pytest/fulltest.sh | 1 + tests/pytest/query/queryTbnameUpperLower.py | 78 +++++++++++++++++++++ tests/pytest/util/common.py | 53 ++++++++++++++ 3 files changed, 132 insertions(+) create mode 100644 tests/pytest/query/queryTbnameUpperLower.py create mode 100644 tests/pytest/util/common.py diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh index fdcd0a3c42..17c2fc6915 100755 --- a/tests/pytest/fulltest.sh +++ b/tests/pytest/fulltest.sh @@ -381,6 +381,7 @@ python3 test.py -f alter/alter_create_exception.py python3 ./test.py -f insert/flushwhiledrop.py python3 ./test.py -f insert/schemalessInsert.py python3 ./test.py -f alter/alterColMultiTimes.py +python3 ./test.py -f query/queryTbnameUpperLower.py #======================p4-end=============== diff --git a/tests/pytest/query/queryTbnameUpperLower.py b/tests/pytest/query/queryTbnameUpperLower.py new file mode 100644 index 0000000000..bd4e85c5ca --- /dev/null +++ b/tests/pytest/query/queryTbnameUpperLower.py @@ -0,0 +1,78 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- +from util.log import tdLog +from util.cases import tdCases +from util.sql import tdSql +from util.common import tdCom + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def checkStbWhereIn(self): + ''' + where in ---> upper lower mixed + ''' + tdCom.cleanTb() + table_name = tdCom.getLongName(8, "letters_mixed") + table_name_sub = f'{table_name}_sub' + tb_name_lower = table_name_sub.lower() + tb_name_upper = table_name_sub.upper() + + ## create stb and tb + tdSql.execute(f'CREATE TABLE {table_name} (ts timestamp, id int, bi1 binary(20)) tags (si1 binary(20))') + tdSql.execute(f'create table {table_name_sub}1 using {table_name} tags ("{table_name_sub}1")') + tdSql.execute(f'create table {tb_name_lower}2 using {table_name} tags ("{tb_name_lower}2")') + tdSql.execute(f'create table {tb_name_upper}3 using {table_name} tags ("{tb_name_upper}3")') + + ## insert values + tdSql.execute(f'insert into {table_name_sub}1 values (now-1s, 1, "{table_name_sub}1")') + tdSql.execute(f'insert into {tb_name_lower}2 values (now-2s, 2, "{tb_name_lower}21")') + tdSql.execute(f'insert into {tb_name_lower}2 values (now-3s, 3, "{tb_name_lower}22")') + tdSql.execute(f'insert into {tb_name_upper}3 values (now-4s, 4, "{tb_name_upper}31")') + tdSql.execute(f'insert into {tb_name_upper}3 values (now-5s, 5, "{tb_name_upper}32")') + tdSql.execute(f'insert into {tb_name_upper}3 values (now-6s, 6, "{tb_name_upper}33")') + + ## query where tbname in single + tdSql.query(f'select * from {table_name} where tbname in ("{table_name_sub}1")') + tdSql.checkRows(1) + tdSql.query(f'select * from {table_name} where tbname in ("{table_name_sub.upper()}1")') + tdSql.checkRows(1) + tdSql.query(f'select * from {table_name} where tbname in ("{table_name_sub.lower()}1")') + tdSql.checkRows(1) + tdSql.query(f'select * from {table_name} where tbname in ("{tb_name_lower}2")') + tdSql.checkRows(2) + tdSql.query(f'select * from {table_name} where tbname in ("{tb_name_lower.upper()}2")') + tdSql.checkRows(2) + tdSql.query(f'select * from {table_name} where tbname in ("{tb_name_upper}3")') + tdSql.checkRows(3) + tdSql.query(f'select * from {table_name} where tbname in ("{tb_name_upper.lower()}3")') + tdSql.checkRows(3) + + ## query where tbname in multi + tdSql.query(f'select * from {table_name} where id=5 and tbname in ("{table_name_sub}1", "{tb_name_lower.upper()}2", "{tb_name_upper.lower()}3")') + tdSql.checkRows(1) + tdSql.query(f'select * from {table_name} where tbname in ("{table_name_sub}1", "{tb_name_lower.upper()}2", "{tb_name_upper.lower()}3")') + tdSql.checkRows(6) + + def run(self): + tdSql.prepare() + self.checkStbWhereIn() + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/pytest/util/common.py b/tests/pytest/util/common.py new file mode 100644 index 0000000000..1c7d94a8a4 --- /dev/null +++ b/tests/pytest/util/common.py @@ -0,0 +1,53 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. 
+# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import random +import string +from util.sql import tdSql + +class TDCom: + def init(self, conn, logSql): + tdSql.init(conn.cursor(), logSql) + + def cleanTb(self): + query_sql = "show stables" + res_row_list = tdSql.query(query_sql, True) + stb_list = map(lambda x: x[0], res_row_list) + for stb in stb_list: + tdSql.execute(f'drop table if exists {stb}') + + query_sql = "show tables" + res_row_list = tdSql.query(query_sql, True) + tb_list = map(lambda x: x[0], res_row_list) + for tb in tb_list: + tdSql.execute(f'drop table if exists {tb}') + + def getLongName(self, len, mode = "mixed"): + """ + generate long name + mode could be numbers/letters/letters_mixed/mixed + """ + if mode == "numbers": + chars = ''.join(random.choice(string.digits) for i in range(len)) + elif mode == "letters": + chars = ''.join(random.choice(string.ascii_letters.lower()) for i in range(len)) + elif mode == "letters_mixed": + chars = ''.join(random.choice(string.ascii_letters.upper() + string.ascii_letters.lower()) for i in range(len)) + else: + chars = ''.join(random.choice(string.ascii_letters.lower() + string.digits) for i in range(len)) + return chars + + def close(self): + self.cursor.close() + +tdCom = TDCom() \ No newline at end of file From 24eda742e42011331d69fc0c183e4d20a7f6246a Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Wed, 11 Aug 2021 18:50:07 +0800 Subject: [PATCH 007/165] [TD-5918] add the configuration of max length wild cards --- packaging/cfg/taos.cfg | 3 +++ src/client/src/tscSQLParser.c | 16 ++++++++++------ src/common/inc/tglobal.h | 1 + src/common/src/tglobal.c | 13 +++++++++++++ src/util/inc/tcompare.h | 2 +- src/util/src/tcompare.c | 19 ++++++++++--------- .../functions/showOfflineThresholdIs864000.py | 2 +- 7 files changed, 39 insertions(+), 17 deletions(-) diff --git a/packaging/cfg/taos.cfg b/packaging/cfg/taos.cfg index bbe6eae419..3ae4e9941e 100644 --- a/packaging/cfg/taos.cfg +++ b/packaging/cfg/taos.cfg @@ -144,6 +144,9 @@ keepColumnName 1 # max length of an SQL # maxSQLLength 65480 +# max length of WildCards +# maxWildCardsLength 100 + # the maximum number of records allowed for super table time sorting # maxNumOfOrderedRes 100000 diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index 6df724881e..490c8be468 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -3218,7 +3218,7 @@ int32_t setShowInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { pCmd->command = TSDB_SQL_SHOW; const char* msg1 = "invalid name"; - const char* msg2 = "pattern filter string too long"; + const char* msg2 = "wildcard string should be less than %d characters"; const char* msg3 = "database name too long"; const char* msg4 = "invalid ip address"; const char* msg5 = "database name is empty"; @@ -3262,8 +3262,10 @@ int32_t setShowInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg6); } - if (!tscValidateTableNameLength(pCmd->payloadLen)) { - return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2); + if (pPattern->n > tsMaxWildCardsLen){ + char tmp[64] = {0}; + sprintf(tmp, msg2, tsMaxWildCardsLen); + return 
invalidOperationMsg(tscGetErrorMsgPayload(pCmd), tmp); } } } else if (showType == TSDB_MGMT_TABLE_VNODES) { @@ -4394,15 +4396,17 @@ static int32_t validateNullExpr(tSqlExpr* pExpr, char* msgBuf) { // check for like expression static int32_t validateLikeExpr(tSqlExpr* pExpr, STableMeta* pTableMeta, int32_t index, char* msgBuf) { - const char* msg1 = "wildcard string should be less than 20 characters"; + const char* msg1 = "wildcard string should be less than %d characters"; const char* msg2 = "illegal column name"; tSqlExpr* pLeft = pExpr->pLeft; tSqlExpr* pRight = pExpr->pRight; if (pExpr->tokenId == TK_LIKE) { - if (pRight->value.nLen > TSDB_PATTERN_STRING_MAX_LEN) { - return invalidOperationMsg(msgBuf, msg1); + if (pRight->value.nLen > tsMaxWildCardsLen) { + char tmp[64] = {0}; + sprintf(tmp, msg1, tsMaxWildCardsLen); + return invalidOperationMsg(msgBuf, tmp); } SSchema* pSchema = tscGetTableSchema(pTableMeta); diff --git a/src/common/inc/tglobal.h b/src/common/inc/tglobal.h index 7290db6ec9..25d1c90ec5 100644 --- a/src/common/inc/tglobal.h +++ b/src/common/inc/tglobal.h @@ -70,6 +70,7 @@ extern int8_t tsKeepOriginalColumnName; // client extern int32_t tsMaxSQLStringLen; +extern int32_t tsMaxWildCardsLen; extern int8_t tsTscEnableRecordSql; extern int32_t tsMaxNumOfOrderedResults; extern int32_t tsMinSlidingTime; diff --git a/src/common/src/tglobal.c b/src/common/src/tglobal.c index a58303e9fc..f9135605bb 100644 --- a/src/common/src/tglobal.c +++ b/src/common/src/tglobal.c @@ -25,6 +25,7 @@ #include "tutil.h" #include "tlocale.h" #include "ttimezone.h" +#include "tcompare.h" // cluster char tsFirst[TSDB_EP_LEN] = {0}; @@ -75,6 +76,7 @@ int32_t tsCompressMsgSize = -1; // client int32_t tsMaxSQLStringLen = TSDB_MAX_ALLOWED_SQL_LEN; +int32_t tsMaxWildCardsLen = TSDB_PATTERN_STRING_MAX_LEN; int8_t tsTscEnableRecordSql = 0; // the maximum number of results for projection query on super table that are returned from @@ -984,6 +986,16 @@ static void doInitGlobalConfig(void) { cfg.unitType = TAOS_CFG_UTYPE_BYTE; taosInitConfigOption(cfg); + cfg.option = "maxWildCardsLength"; + cfg.ptr = &tsMaxWildCardsLen; + cfg.valType = TAOS_CFG_VTYPE_INT32; + cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLIENT | TSDB_CFG_CTYPE_B_SHOW; + cfg.minValue = 0; + cfg.maxValue = TSDB_MAX_FIELD_LEN; + cfg.ptrLength = 0; + cfg.unitType = TAOS_CFG_UTYPE_BYTE; + taosInitConfigOption(cfg); + cfg.option = "maxNumOfOrderedRes"; cfg.ptr = &tsMaxNumOfOrderedResults; cfg.valType = TAOS_CFG_VTYPE_INT32; @@ -1531,6 +1543,7 @@ static void doInitGlobalConfig(void) { cfg.unitType = TAOS_CFG_UTYPE_NONE; taosInitConfigOption(cfg); + assert(tsGlobalConfigNum <= TSDB_CFG_MAX_NUM); #ifdef TD_TSZ // lossy compress cfg.option = "lossyColumns"; diff --git a/src/util/inc/tcompare.h b/src/util/inc/tcompare.h index 612ce7ede0..fe46f00086 100644 --- a/src/util/inc/tcompare.h +++ b/src/util/inc/tcompare.h @@ -25,7 +25,7 @@ extern "C" { #define TSDB_PATTERN_MATCH 0 #define TSDB_PATTERN_NOMATCH 1 #define TSDB_PATTERN_NOWILDCARDMATCH 2 -#define TSDB_PATTERN_STRING_MAX_LEN 20 +#define TSDB_PATTERN_STRING_MAX_LEN 100 #define FLT_COMPAR_TOL_FACTOR 4 #define FLT_EQUAL(_x, _y) (fabs((_x) - (_y)) <= (FLT_COMPAR_TOL_FACTOR * FLT_EPSILON)) diff --git a/src/util/src/tcompare.c b/src/util/src/tcompare.c index e953f4c464..ebcee2edff 100644 --- a/src/util/src/tcompare.c +++ b/src/util/src/tcompare.c @@ -264,18 +264,19 @@ int WCSPatternMatch(const wchar_t *patterStr, const wchar_t *str, size_t size, c static int32_t compareStrPatternComp(const void* 
pLeft, const void* pRight) { SPatternCompareInfo pInfo = {'%', '_'}; - - char pattern[128] = {0}; + + assert(varDataLen(pRight) <= TSDB_MAX_FIELD_LEN); + char *pattern = calloc(varDataLen(pRight) + 1, sizeof(char)); memcpy(pattern, varDataVal(pRight), varDataLen(pRight)); - assert(varDataLen(pRight) < 128); size_t sz = varDataLen(pLeft); - char *buf = malloc(sz + 1); - memcpy(buf, varDataVal(pLeft), sz); + char *buf = malloc(sz + 1); + memcpy(buf, varDataVal(pLeft), sz); buf[sz] = 0; int32_t ret = patternMatch(pattern, buf, sz, &pInfo); free(buf); + free(pattern); return (ret == TSDB_PATTERN_MATCH) ? 0 : 1; } @@ -297,13 +298,13 @@ static int32_t compareFindItemInSet(const void *pLeft, const void* pRight) { static int32_t compareWStrPatternComp(const void* pLeft, const void* pRight) { SPatternCompareInfo pInfo = {'%', '_'}; - wchar_t pattern[128] = {0}; - assert(TSDB_PATTERN_STRING_MAX_LEN < 128); + assert(varDataLen(pRight) <= TSDB_MAX_FIELD_LEN * TSDB_NCHAR_SIZE); + wchar_t *pattern = calloc(varDataLen(pRight) + 1, sizeof(wchar_t)); memcpy(pattern, varDataVal(pRight), varDataLen(pRight)); - assert(varDataLen(pRight) < 128); - + int32_t ret = WCSPatternMatch(pattern, varDataVal(pLeft), varDataLen(pLeft)/TSDB_NCHAR_SIZE, &pInfo); + free(pattern); return (ret == TSDB_PATTERN_MATCH) ? 0 : 1; } diff --git a/tests/pytest/functions/showOfflineThresholdIs864000.py b/tests/pytest/functions/showOfflineThresholdIs864000.py index a7a1c2bf3f..57d0b1921b 100644 --- a/tests/pytest/functions/showOfflineThresholdIs864000.py +++ b/tests/pytest/functions/showOfflineThresholdIs864000.py @@ -25,7 +25,7 @@ class TDTestCase: def run(self): tdSql.query("show variables") - tdSql.checkData(53, 1, 864000) + tdSql.checkData(54, 1, 864000) def stop(self): tdSql.close() From 70f76974067260a1e6a95760bd121829ad807269 Mon Sep 17 00:00:00 2001 From: jiajingbin Date: Wed, 11 Aug 2021 19:17:08 +0800 Subject: [PATCH 008/165] [TD-5616]like wildcard max_length test--merge to master add getVariable() to util/sql.py add 4 testcases to query/queryWildcardLength.py --- tests/pytest/fulltest.sh | 1 + tests/pytest/query/queryWildcardLength.py | 207 ++++++++++++++++++++++ tests/pytest/util/sql.py | 16 ++ 3 files changed, 224 insertions(+) create mode 100644 tests/pytest/query/queryWildcardLength.py diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh index 5df37bb182..8b015727ea 100755 --- a/tests/pytest/fulltest.sh +++ b/tests/pytest/fulltest.sh @@ -382,6 +382,7 @@ python3 test.py -f alter/alter_create_exception.py python3 ./test.py -f insert/flushwhiledrop.py python3 ./test.py -f insert/schemalessInsert.py python3 ./test.py -f alter/alterColMultiTimes.py +python3 ./test.py -f query/queryWildcardLength.py #======================p4-end=============== diff --git a/tests/pytest/query/queryWildcardLength.py b/tests/pytest/query/queryWildcardLength.py new file mode 100644 index 0000000000..1fc46fe7d6 --- /dev/null +++ b/tests/pytest/query/queryWildcardLength.py @@ -0,0 +1,207 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- +from copy import deepcopy +import string +import random +from util.log import tdLog +from util.cases import tdCases +from util.sql import tdSql + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def cleanTb(self): + query_sql = "show stables" + res_row_list = tdSql.query(query_sql, True) + stb_list = map(lambda x: x[0], res_row_list) + for stb in stb_list: + tdSql.execute(f'drop table if exists {stb}') + + query_sql = "show tables" + res_row_list = tdSql.query(query_sql, True) + tb_list = map(lambda x: x[0], res_row_list) + for tb in tb_list: + tdSql.execute(f'drop table if exists {tb}') + + def getLongWildcardStr(self, len=None): + """ + generate long wildcard str + """ + maxWildCardsLength = int(tdSql.getVariable('maxWildCardsLength')[0]) + if len: + chars = ''.join(random.choice(string.ascii_letters.lower()) for i in range(len)) + else: + chars = ''.join(random.choice(string.ascii_letters.lower()) for i in range(maxWildCardsLength+1)) + return chars + + def genTableName(self): + ''' + generate table name + hp_name--->'%str' + lp_name--->'str%' + ul_name--->'st_r' + ''' + table_name = self.getLongWildcardStr() + table_name_list = list(table_name) + table_name_list.pop(-1) + + if len(table_name_list) > 1: + lp_name = deepcopy(table_name_list) + lp_name[-1] = '%' + lp_name = ''.join(lp_name) + + ul_name = list(lp_name) + ul_name[int(len(ul_name)/2)] = '_' + ul_name = ''.join(ul_name) + + table_name_list = list(table_name) + hp_name = deepcopy(table_name_list) + hp_name.pop(1) + hp_name[0] = '%' + hp_name = ''.join(hp_name) + else: + hp_name = '%' + lp_name = '%' + ul_name = '_' + return table_name, hp_name, lp_name, ul_name + + def checkRegularTableWildcardLength(self): + ''' + check regular table wildcard length with % and _ + ''' + self.cleanTb() + table_name, hp_name, lp_name, ul_name = self.genTableName() + tdSql.execute(f"CREATE TABLE {table_name} (ts timestamp, a1 int)") + sql_list = [f'show tables like "{hp_name}"', f'show tables like "{lp_name}"', f'show tables like "{ul_name}"'] + for sql in sql_list: + tdSql.query(sql) + if len(table_name) >= 1: + tdSql.checkRows(1) + else: + tdSql.error(sql) + + exceed_sql_list = [f'show tables like "%{hp_name}"', f'show tables like "{lp_name}%"', f'show tables like "{ul_name}%"'] + for sql in exceed_sql_list: + tdSql.error(sql) + + def checkSuperTableWildcardLength(self): + ''' + check super table wildcard length with % and _ + ''' + self.cleanTb() + table_name, hp_name, lp_name, ul_name = self.genTableName() + tdSql.execute(f"CREATE TABLE {table_name} (ts timestamp, c1 int) tags (t1 int)") + sql_list = [f'show stables like "{hp_name}"', f'show stables like "{lp_name}"', f'show stables like "{ul_name}"'] + for sql in sql_list: + tdSql.query(sql) + if len(table_name) >= 1: + tdSql.checkRows(1) + else: + tdSql.error(sql) + + exceed_sql_list = [f'show stables like "%{hp_name}"', f'show stables like "{lp_name}%"', f'show stables like "{ul_name}%"'] + for sql in exceed_sql_list: + tdSql.error(sql) + + def checkRegularWildcardSelectLength(self): + ''' + check regular table wildcard select length with % and _ + ''' + self.cleanTb() + table_name, hp_name, lp_name, ul_name = 
self.genTableName() + tdSql.execute(f"CREATE TABLE {table_name} (ts timestamp, bi1 binary(200), nc1 nchar(200))") + tdSql.execute(f'insert into {table_name} values (now, "{table_name}", "{table_name}")') + sql_list = [f'select * from {table_name} where bi1 like "{hp_name}"', + f'select * from {table_name} where bi1 like "{lp_name}"', + f'select * from {table_name} where bi1 like "{ul_name}"', + f'select * from {table_name} where nc1 like "{hp_name}"', + f'select * from {table_name} where nc1 like "{lp_name}"', + f'select * from {table_name} where nc1 like "{ul_name}"'] + for sql in sql_list: + tdSql.query(sql) + if len(table_name) >= 1: + tdSql.checkRows(1) + else: + tdSql.error(sql) + + exceed_sql_list = [f'select * from {table_name} where bi1 like "%{hp_name}"', + f'select * from {table_name} where bi1 like "{lp_name}%"', + f'select * from {table_name} where bi1 like "{ul_name}%"', + f'select * from {table_name} where nc1 like "%{hp_name}"', + f'select * from {table_name} where nc1 like "{lp_name}%"', + f'select * from {table_name} where nc1 like "{ul_name}%"'] + for sql in exceed_sql_list: + tdSql.error(sql) + + def checkStbWildcardSelectLength(self): + ''' + check stb wildcard select length with % and _ + ''' + self.cleanTb() + table_name, hp_name, lp_name, ul_name = self.genTableName() + + tdSql.execute(f'CREATE TABLE {table_name} (ts timestamp, bi1 binary(200), nc1 nchar(200)) tags (si1 binary(200), sc1 nchar(200))') + tdSql.execute(f'create table {table_name}_sub1 using {table_name} tags ("{table_name}", "{table_name}")') + tdSql.execute(f'insert into {table_name}_sub1 values (now, "{table_name}", "{table_name}");') + + sql_list = [f'select * from {table_name} where bi1 like "{hp_name}"', + f'select * from {table_name} where bi1 like "{lp_name}"', + f'select * from {table_name} where bi1 like "{ul_name}"', + f'select * from {table_name} where nc1 like "{hp_name}"', + f'select * from {table_name} where nc1 like "{lp_name}"', + f'select * from {table_name} where nc1 like "{ul_name}"', + f'select * from {table_name} where si1 like "{hp_name}"', + f'select * from {table_name} where si1 like "{lp_name}"', + f'select * from {table_name} where si1 like "{ul_name}"', + f'select * from {table_name} where sc1 like "{hp_name}"', + f'select * from {table_name} where sc1 like "{lp_name}"', + f'select * from {table_name} where sc1 like "{ul_name}"'] + + for sql in sql_list: + tdSql.query(sql) + if len(table_name) >= 1: + tdSql.checkRows(1) + else: + tdSql.error(sql) + exceed_sql_list = [f'select * from {table_name} where bi1 like "%{hp_name}"', + f'select * from {table_name} where bi1 like "{lp_name}%"', + f'select * from {table_name} where bi1 like "{ul_name}%"', + f'select * from {table_name} where nc1 like "%{hp_name}"', + f'select * from {table_name} where nc1 like "{lp_name}%"', + f'select * from {table_name} where nc1 like "{ul_name}%"', + f'select * from {table_name} where si1 like "%{hp_name}"', + f'select * from {table_name} where si1 like "{lp_name}%"', + f'select * from {table_name} where si1 like "{ul_name}%"', + f'select * from {table_name} where sc1 like "%{hp_name}"', + f'select * from {table_name} where sc1 like "{lp_name}%"', + f'select * from {table_name} where sc1 like "{ul_name}%"'] + for sql in exceed_sql_list: + tdSql.error(sql) + + def run(self): + tdSql.prepare() + self.checkRegularTableWildcardLength() + self.checkSuperTableWildcardLength() + self.checkRegularWildcardSelectLength() + self.checkStbWildcardSelectLength() + + def stop(self): + tdSql.close() + 
tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) + diff --git a/tests/pytest/util/sql.py b/tests/pytest/util/sql.py index b42af27d06..dfe1e4a582 100644 --- a/tests/pytest/util/sql.py +++ b/tests/pytest/util/sql.py @@ -81,6 +81,22 @@ class TDSql: return self.queryResult return self.queryRows + def getVariable(self, search_attr): + ''' + get variable of search_attr access "show variables" + ''' + try: + sql = 'show variables' + param_list = self.query(sql, row_tag=True) + for param in param_list: + if param[0] == search_attr: + return param[1], param_list + except Exception as e: + caller = inspect.getframeinfo(inspect.stack()[1][0]) + args = (caller.filename, caller.lineno, sql, repr(e)) + tdLog.notice("%s(%d) failed: sql:%s, %s" % args) + raise Exception(repr(e)) + def getColNameList(self, sql, col_tag=None): self.sql = sql try: From ad01fcb88497bcbb42549d3471e8640bf871ef88 Mon Sep 17 00:00:00 2001 From: shenglian zhou Date: Wed, 11 Aug 2021 08:54:43 +0800 Subject: [PATCH 009/165] [TD-5930]:_block_dist initialize client merge buffer with max steps --- src/query/src/qAggMain.c | 11 +++++++---- src/query/src/qExecutor.c | 3 +++ 2 files changed, 10 insertions(+), 4 deletions(-) diff --git a/src/query/src/qAggMain.c b/src/query/src/qAggMain.c index 9f081cfb2f..6eadedcaf3 100644 --- a/src/query/src/qAggMain.c +++ b/src/query/src/qAggMain.c @@ -4062,12 +4062,15 @@ static void mergeTableBlockDist(SResultRowCellInfo* pResInfo, const STableBlockD pDist->maxRows = pSrc->maxRows; pDist->minRows = pSrc->minRows; - int32_t numSteps = tsMaxRowsInFileBlock/TSDB_BLOCK_DIST_STEP_ROWS; - pDist->dataBlockInfos = taosArrayInit(numSteps, sizeof(SFileBlockInfo)); - taosArraySetSize(pDist->dataBlockInfos, numSteps); + int32_t maxSteps = TSDB_MAX_MAX_ROW_FBLOCK/TSDB_BLOCK_DIST_STEP_ROWS; + if (TSDB_MAX_MAX_ROW_FBLOCK % TSDB_BLOCK_DIST_STEP_ROWS != 0) { + ++maxSteps; + } + pDist->dataBlockInfos = taosArrayInit(maxSteps, sizeof(SFileBlockInfo)); + taosArraySetSize(pDist->dataBlockInfos, maxSteps); } - size_t steps = taosArrayGetSize(pDist->dataBlockInfos); + size_t steps = taosArrayGetSize(pSrc->dataBlockInfos); for (int32_t i = 0; i < steps; ++i) { int32_t srcNumBlocks = ((SFileBlockInfo*)taosArrayGet(pSrc->dataBlockInfos, i))->numBlocksOfStep; SFileBlockInfo* blockInfo = (SFileBlockInfo*)taosArrayGet(pDist->dataBlockInfos, i); diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index 52e5dbb7f3..7b6dd86f22 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -5024,6 +5024,9 @@ static SSDataBlock* doBlockInfoScan(void* param, bool* newgroup) { tableBlockDist.numOfTables = (int32_t)pOperator->pRuntimeEnv->tableqinfoGroupInfo.numOfTables; int32_t numRowSteps = tsMaxRowsInFileBlock / TSDB_BLOCK_DIST_STEP_ROWS; + if (tsMaxRowsInFileBlock % TSDB_BLOCK_DIST_STEP_ROWS != 0) { + ++numRowSteps; + } tableBlockDist.dataBlockInfos = taosArrayInit(numRowSteps, sizeof(SFileBlockInfo)); taosArraySetSize(tableBlockDist.dataBlockInfos, numRowSteps); tableBlockDist.maxRows = INT_MIN; From 6cc49faf82fa5d1d546128756561ab3f9e377a22 Mon Sep 17 00:00:00 2001 From: shenglian zhou Date: Tue, 10 Aug 2021 14:37:19 +0800 Subject: [PATCH 010/165] [TD-5931]:invalidate time range when no tables in table groups of tsdb query --- src/tsdb/src/tsdbRead.c | 12 ++++++++++++ tests/script/general/parser/interp.sim | 13 ++++++++++++- 2 files changed, 24 insertions(+), 1 deletion(-) diff --git a/src/tsdb/src/tsdbRead.c 
b/src/tsdb/src/tsdbRead.c index e1d40aa7d0..9cc9b7224c 100644 --- a/src/tsdb/src/tsdbRead.c +++ b/src/tsdb/src/tsdbRead.c @@ -691,6 +691,18 @@ static STableGroupInfo* trimTableGroup(STimeWindow* window, STableGroupInfo* pGr TsdbQueryHandleT tsdbQueryRowsInExternalWindow(STsdbRepo *tsdb, STsdbQueryCond* pCond, STableGroupInfo *groupList, uint64_t qId, SMemRef* pRef) { STableGroupInfo* pNew = trimTableGroup(&pCond->twindow, groupList); + if (pNew->numOfTables == 0) { + tsdbDebug("update query time range to invalidate time window"); + + assert(taosArrayGetSize(pNew->pGroupList) == 0); + bool asc = ASCENDING_TRAVERSE(pCond->order); + if (asc) { + pCond->twindow.ekey = pCond->twindow.skey - 1; + } else { + pCond->twindow.skey = pCond->twindow.ekey - 1; + } + } + STsdbQueryHandle *pQueryHandle = (STsdbQueryHandle*) tsdbQueryTables(tsdb, pCond, pNew, qId, pRef); pQueryHandle->loadExternalRow = true; pQueryHandle->currentLoadExternalRows = true; diff --git a/tests/script/general/parser/interp.sim b/tests/script/general/parser/interp.sim index 55c5701985..7626b15647 100644 --- a/tests/script/general/parser/interp.sim +++ b/tests/script/general/parser/interp.sim @@ -68,4 +68,15 @@ print ================== server restart completed run general/parser/interp_test.sim -#system sh/exec.sh -n dnode1 -s stop -x SIGINT +print ================= TD-5931 +sql create stable st5931(ts timestamp, f int) tags(t int) +sql create table ct5931 using st5931 tags(1) +sql create table nt5931(ts timestamp, f int) +sql select interp(*) from nt5931 where ts=now +sql select interp(*) from st5931 where ts=now +sql select interp(*) from ct5931 where ts=now +if $rows != 0 then + return -1 +endi + +system sh/exec.sh -n dnode1 -s stop -x SIGINT From 5cb50f7922bfe7c6c38fa30e68e837763040060c Mon Sep 17 00:00:00 2001 From: tickduan <417921451@qq.com> Date: Thu, 12 Aug 2021 10:25:06 +0800 Subject: [PATCH 011/165] fixed huffman tree memory leak --- deps/TSZ | 2 +- src/kit/taospack/taospack.c | 52 ++++++++++++++++++++++++++++++++++--- 2 files changed, 50 insertions(+), 4 deletions(-) diff --git a/deps/TSZ b/deps/TSZ index 0ca5b15a8e..ceda5bf9fc 160000 --- a/deps/TSZ +++ b/deps/TSZ @@ -1 +1 @@ -Subproject commit 0ca5b15a8eac40327dd737be52c926fa5675712c +Subproject commit ceda5bf9fcd7836509ac97dcc0056b3f1dd48cc5 diff --git a/src/kit/taospack/taospack.c b/src/kit/taospack/taospack.c index 33d779dfcf..ad188c3010 100644 --- a/src/kit/taospack/taospack.c +++ b/src/kit/taospack/taospack.c @@ -18,6 +18,7 @@ #include #include + #if defined(WINDOWS) int main(int argc, char *argv[]) { printf("welcome to use taospack tools v1.3 for windows.\n"); @@ -601,7 +602,6 @@ void test_threadsafe_double(int thread_count){ } - void unitTestFloat() { float ft1 [] = {1.11, 2.22, 3.333}; @@ -662,7 +662,50 @@ void unitTestFloat() { free(ft2); free(buff); free(output); - +} + +void leakFloat() { + + int cnt = sizeof(g_ft1)/sizeof(float); + float* floats = g_ft1; + int algorithm = 2; + + // compress + const char* input = (const char*)floats; + int input_len = cnt * sizeof(float); + int output_len = input_len + 1024; + char* output = (char*) malloc(output_len); + char* buff = (char*) malloc(input_len); + int buff_len = input_len; + + int ret_len = 0; + ret_len = tsCompressFloatLossy(input, input_len, cnt, output, output_len, algorithm, buff, buff_len); + + if(ret_len == 0) { + printf(" compress float error.\n"); + free(buff); + free(output); + return ; + } + + float* ft2 = (float*)malloc(input_len); + ret_len = tsDecompressFloatLossy(output, ret_len, cnt, 
(char*)ft2, input_len, algorithm, buff, buff_len); + if(ret_len == 0) { + printf(" decompress float error.\n"); + } + + free(ft2); + free(buff); + free(output); +} + + +void leakTest(){ + for(int i=0; i< 90000000000000; i++){ + if(i%10000==0) + printf(" ---------- %d ---------------- \n", i); + leakFloat(); + } } #define DB_CNT 500 @@ -689,7 +732,7 @@ extern char Compressor []; // ----------------- main ---------------------- // int main(int argc, char *argv[]) { - printf("welcome to use taospack tools v1.3\n"); + printf("welcome to use taospack tools v1.5\n"); //printf(" sizeof(int)=%d\n", (int)sizeof(int)); //printf(" sizeof(long)=%d\n", (int)sizeof(long)); @@ -753,6 +796,9 @@ int main(int argc, char *argv[]) { if(strcmp(argv[1], "-mem") == 0) { memTest(); } + else if(strcmp(argv[1], "-leak") == 0) { + leakTest(); + } } else{ unitTestFloat(); From 7573622bd2e2fb6a507b64d3d1926338caad0fd6 Mon Sep 17 00:00:00 2001 From: xywang Date: Thu, 12 Aug 2021 11:38:08 +0800 Subject: [PATCH 012/165] [TD-6012]: fixed no sql recordings via http while httpEnableRecordSql was on --- src/dnode/src/dnodeMain.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/dnode/src/dnodeMain.c b/src/dnode/src/dnodeMain.c index eac04fe7bb..5291fb73a0 100644 --- a/src/dnode/src/dnodeMain.c +++ b/src/dnode/src/dnodeMain.c @@ -166,7 +166,6 @@ int32_t dnodeInitSystem() { taosInitGlobalCfg(); taosReadGlobalLogCfg(); taosSetCoreDump(); - taosInitNotes(); dnodeInitTmr(); if (dnodeCreateDir(tsLogDir) < 0) { @@ -188,6 +187,8 @@ int32_t dnodeInitSystem() { dInfo("start to initialize TDengine"); + taosInitNotes(); + if (dnodeInitComponents() != 0) { return -1; } From 5b9e1cff99ee931bfc65d61c6325f15d004d2004 Mon Sep 17 00:00:00 2001 From: xywang Date: Thu, 12 Aug 2021 11:44:14 +0800 Subject: [PATCH 013/165] [TD-6012]: fixed no sql recordings via http while httpEnableRecordSql was on --- src/dnode/src/dnodeMain.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/dnode/src/dnodeMain.c b/src/dnode/src/dnodeMain.c index 22ce6c995a..ab2fcbea6a 100644 --- a/src/dnode/src/dnodeMain.c +++ b/src/dnode/src/dnodeMain.c @@ -129,7 +129,6 @@ int32_t dnodeInitSystem() { taosInitGlobalCfg(); taosReadGlobalLogCfg(); taosSetCoreDump(); - taosInitNotes(); dnodeInitTmr(); if (dnodeCreateDir(tsLogDir) < 0) { @@ -151,6 +150,8 @@ int32_t dnodeInitSystem() { dInfo("start to initialize TDengine"); + taosInitNotes(); + if (dnodeInitComponents() != 0) { return -1; } From 735e6c1ea1720e0cd0f8156be797bfd48e050775 Mon Sep 17 00:00:00 2001 From: Liu Jicong Date: Thu, 12 Aug 2021 11:53:53 +0800 Subject: [PATCH 014/165] fix: fix odr violation --- src/common/src/tglobal.c | 3 +++ src/tsdb/inc/tsdbFS.h | 5 ++++- src/tsdb/src/tsdbFS.c | 3 +-- 3 files changed, 8 insertions(+), 3 deletions(-) diff --git a/src/common/src/tglobal.c b/src/common/src/tglobal.c index a58303e9fc..b9a4f4ffea 100644 --- a/src/common/src/tglobal.c +++ b/src/common/src/tglobal.c @@ -26,6 +26,9 @@ #include "tlocale.h" #include "ttimezone.h" +// TSDB +bool tsdbForceKeepFile = false; + // cluster char tsFirst[TSDB_EP_LEN] = {0}; char tsSecond[TSDB_EP_LEN] = {0}; diff --git a/src/tsdb/inc/tsdbFS.h b/src/tsdb/inc/tsdbFS.h index d63aeb14ac..55db8d56ff 100644 --- a/src/tsdb/inc/tsdbFS.h +++ b/src/tsdb/inc/tsdbFS.h @@ -18,6 +18,9 @@ #define TSDB_FS_VERSION 0 +// ================== TSDB global config +extern bool tsdbForceKeepFile; + // ================== CURRENT file header info typedef struct { uint32_t version; // Current file system version (relating to 
code) @@ -109,4 +112,4 @@ static FORCE_INLINE int tsdbUnLockFS(STsdbFS* pFs) { return 0; } -#endif /* _TD_TSDB_FS_H_ */ \ No newline at end of file +#endif /* _TD_TSDB_FS_H_ */ diff --git a/src/tsdb/src/tsdbFS.c b/src/tsdb/src/tsdbFS.c index 68450301d8..70a5262138 100644 --- a/src/tsdb/src/tsdbFS.c +++ b/src/tsdb/src/tsdbFS.c @@ -38,7 +38,6 @@ static int tsdbProcessExpiredFS(STsdbRepo *pRepo); static int tsdbCreateMeta(STsdbRepo *pRepo); // For backward compatibility -bool tsdbForceKeepFile = false; // ================== CURRENT file header info static int tsdbEncodeFSHeader(void **buf, SFSHeader *pHeader) { int tlen = 0; @@ -1354,4 +1353,4 @@ static void tsdbScanAndTryFixDFilesHeader(STsdbRepo *pRepo, int32_t *nExpired) { tsdbCloseDFileSet(&fset); } -} \ No newline at end of file +} From 7a0d2c10c530aa215169e9f737026739840a2b3a Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 12 Aug 2021 14:05:38 +0800 Subject: [PATCH 015/165] [td-225]merge master. --- src/connector/go | 2 +- src/connector/grafanaplugin | 2 +- src/connector/hivemq-tdengine-extension | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/connector/go b/src/connector/go index b8f76da4a7..050667e5b4 160000 --- a/src/connector/go +++ b/src/connector/go @@ -1 +1 @@ -Subproject commit b8f76da4a708d158ec3cc4b844571dc4414e36b4 +Subproject commit 050667e5b4d0eafa5387e4283e713559b421203f diff --git a/src/connector/grafanaplugin b/src/connector/grafanaplugin index a44ec1ca49..32e2c97a4c 160000 --- a/src/connector/grafanaplugin +++ b/src/connector/grafanaplugin @@ -1 +1 @@ -Subproject commit a44ec1ca493ad01b2bf825b6418f69e11f548206 +Subproject commit 32e2c97a4cf7bedaa99f5d6dd8cb036e7f4470df diff --git a/src/connector/hivemq-tdengine-extension b/src/connector/hivemq-tdengine-extension index ce52010141..b62a26ecc1 160000 --- a/src/connector/hivemq-tdengine-extension +++ b/src/connector/hivemq-tdengine-extension @@ -1 +1 @@ -Subproject commit ce5201014136503d34fecbd56494b67b4961056c +Subproject commit b62a26ecc164a310104df57691691b237e091c89 From 8ecda5ba5404a21b755c0d6ecada820cbbe290ac Mon Sep 17 00:00:00 2001 From: shenglian zhou Date: Tue, 10 Aug 2021 14:37:19 +0800 Subject: [PATCH 016/165] [TD-5931]:invalidate time range when no tables in table groups of tsdb query --- src/tsdb/src/tsdbRead.c | 12 ++++++++++++ tests/script/general/parser/interp.sim | 17 ++++++++++++++++- 2 files changed, 28 insertions(+), 1 deletion(-) diff --git a/src/tsdb/src/tsdbRead.c b/src/tsdb/src/tsdbRead.c index e1d40aa7d0..9cc9b7224c 100644 --- a/src/tsdb/src/tsdbRead.c +++ b/src/tsdb/src/tsdbRead.c @@ -691,6 +691,18 @@ static STableGroupInfo* trimTableGroup(STimeWindow* window, STableGroupInfo* pGr TsdbQueryHandleT tsdbQueryRowsInExternalWindow(STsdbRepo *tsdb, STsdbQueryCond* pCond, STableGroupInfo *groupList, uint64_t qId, SMemRef* pRef) { STableGroupInfo* pNew = trimTableGroup(&pCond->twindow, groupList); + if (pNew->numOfTables == 0) { + tsdbDebug("update query time range to invalidate time window"); + + assert(taosArrayGetSize(pNew->pGroupList) == 0); + bool asc = ASCENDING_TRAVERSE(pCond->order); + if (asc) { + pCond->twindow.ekey = pCond->twindow.skey - 1; + } else { + pCond->twindow.skey = pCond->twindow.ekey - 1; + } + } + STsdbQueryHandle *pQueryHandle = (STsdbQueryHandle*) tsdbQueryTables(tsdb, pCond, pNew, qId, pRef); pQueryHandle->loadExternalRow = true; pQueryHandle->currentLoadExternalRows = true; diff --git a/tests/script/general/parser/interp.sim b/tests/script/general/parser/interp.sim index 55c5701985..4654913d1e 
100644 --- a/tests/script/general/parser/interp.sim +++ b/tests/script/general/parser/interp.sim @@ -68,4 +68,19 @@ print ================== server restart completed run general/parser/interp_test.sim -#system sh/exec.sh -n dnode1 -s stop -x SIGINT +print ================= TD-5931 +sql create stable st5931(ts timestamp, f int) tags(t int) +sql create table ct5931 using st5931 tags(1) +sql create table nt5931(ts timestamp, f int) + +sql select interp(*) from nt5931 where ts=now + +sql select interp(*) from st5931 where ts=now + +sql select interp(*) from ct5931 where ts=now + +if $rows != 0 then + return -1 +endi + +system sh/exec.sh -n dnode1 -s stop -x SIGINT From b874090eac8637cbfd8c03e4fbc15d590f830d2d Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 12 Aug 2021 14:22:49 +0800 Subject: [PATCH 017/165] [td-255] cherry pick order support. --- src/client/src/tscSQLParser.c | 85 ++++++++++++++++++++--------------- src/client/src/tscServer.c | 6 +-- src/query/inc/qExecutor.h | 2 +- src/query/src/qPlan.c | 15 ++++--- 4 files changed, 62 insertions(+), 46 deletions(-) diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index 6df724881e..46292ceb91 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -5482,14 +5482,19 @@ static void setDefaultOrderInfo(SQueryInfo* pQueryInfo) { pQueryInfo->order.order = TSDB_ORDER_ASC; if (isTopBottomQuery(pQueryInfo)) { pQueryInfo->order.orderColId = PRIMARYKEY_TIMESTAMP_COL_INDEX; - } else { // in case of select tbname from super_table, the defualt order column can not be the primary ts column - pQueryInfo->order.orderColId = INT32_MIN; + } else { // in case of select tbname from super_table, the default order column can not be the primary ts column + pQueryInfo->order.orderColId = INT32_MIN; // todo define a macro } /* for super table query, set default ascending order for group output */ if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) { pQueryInfo->groupbyExpr.orderType = TSDB_ORDER_ASC; } + + if (pQueryInfo->distinct) { + pQueryInfo->order.order = TSDB_ORDER_ASC; + pQueryInfo->order.orderColId = PRIMARYKEY_TIMESTAMP_COL_INDEX; + } } int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSqlNode, SSchema* pSchema) { @@ -5501,17 +5506,12 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq setDefaultOrderInfo(pQueryInfo); STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); - - if (pQueryInfo->distinct == true) { - pQueryInfo->order.order = TSDB_ORDER_ASC; - pQueryInfo->order.orderColId = 0; - return TSDB_CODE_SUCCESS; - } - if (pSqlNode->pSortOrder == NULL) { + if (pQueryInfo->distinct || pSqlNode->pSortOrder == NULL) { return TSDB_CODE_SUCCESS; } - SArray* pSortorder = pSqlNode->pSortOrder; + char* pMsgBuf = tscGetErrorMsgPayload(pCmd); + SArray* pSortOrder = pSqlNode->pSortOrder; /* * for table query, there is only one or none order option is allowed, which is the @@ -5519,19 +5519,19 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq * * for super table query, the order option must be less than 3. 
*/ - size_t size = taosArrayGetSize(pSortorder); - if (UTIL_TABLE_IS_NORMAL_TABLE(pTableMetaInfo)) { + size_t size = taosArrayGetSize(pSortOrder); + if (UTIL_TABLE_IS_NORMAL_TABLE(pTableMetaInfo) || UTIL_TABLE_IS_TMP_TABLE(pTableMetaInfo)) { if (size > 1) { - return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg0); + return invalidOperationMsg(pMsgBuf, msg0); } } else { if (size > 2) { - return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3); + return invalidOperationMsg(pMsgBuf, msg3); } } // handle the first part of order by - tVariant* pVar = taosArrayGet(pSortorder, 0); + tVariant* pVar = taosArrayGet(pSortOrder, 0); // e.g., order by 1 asc, return directly with out further check. if (pVar->nType >= TSDB_DATA_TYPE_TINYINT && pVar->nType <= TSDB_DATA_TYPE_BIGINT) { @@ -5543,7 +5543,7 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) { // super table query if (getColumnIndexByName(&columnName, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) { - return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1); + return invalidOperationMsg(pMsgBuf, msg1); } bool orderByTags = false; @@ -5555,7 +5555,7 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq // it is a tag column if (pQueryInfo->groupbyExpr.columnInfo == NULL) { - return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2); + return invalidOperationMsg(pMsgBuf, msg2); } SColIndex* pColIndex = taosArrayGet(pQueryInfo->groupbyExpr.columnInfo, 0); if (relTagIndex == pColIndex->colIndex) { @@ -5576,13 +5576,14 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq orderByGroupbyCol = true; } } + if (!(orderByTags || orderByTS || orderByGroupbyCol) && !isTopBottomQuery(pQueryInfo)) { - return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3); + return invalidOperationMsg(pMsgBuf, msg3); } else { // order by top/bottom result value column is not supported in case of interval query. 
assert(!(orderByTags && orderByTS && orderByGroupbyCol)); } - size_t s = taosArrayGetSize(pSortorder); + size_t s = taosArrayGetSize(pSortOrder); if (s == 1) { if (orderByTags) { pQueryInfo->groupbyExpr.orderIndex = index.columnIndex - tscGetNumOfColumns(pTableMetaInfo->pTableMeta); @@ -5601,7 +5602,7 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq pExpr = tscExprGet(pQueryInfo, 1); if (pExpr->base.colInfo.colIndex != index.columnIndex && index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX) { - return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2); + return invalidOperationMsg(pMsgBuf, msg2); } tVariantListItem* p1 = taosArrayGet(pSqlNode->pSortOrder, 0); @@ -5619,9 +5620,7 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq addPrimaryTsColIntoResult(pQueryInfo, pCmd); } } - } - - if (s == 2) { + } else { tVariantListItem *pItem = taosArrayGet(pSqlNode->pSortOrder, 0); if (orderByTags) { pQueryInfo->groupbyExpr.orderIndex = index.columnIndex - tscGetNumOfColumns(pTableMetaInfo->pTableMeta); @@ -5638,22 +5637,23 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq tVariant* pVar2 = &pItem->pVar; SStrToken cname = {pVar2->nLen, pVar2->nType, pVar2->pz}; if (getColumnIndexByName(&cname, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) { - return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1); + return invalidOperationMsg(pMsgBuf, msg1); } if (index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX) { - return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2); + return invalidOperationMsg(pMsgBuf, msg2); } else { - tVariantListItem* p1 = taosArrayGet(pSortorder, 1); + tVariantListItem* p1 = taosArrayGet(pSortOrder, 1); pQueryInfo->order.order = p1->sortOrder; pQueryInfo->order.orderColId = PRIMARYKEY_TIMESTAMP_COL_INDEX; } } - } else { // meter query - if (getColumnIndexByName(&columnName, pQueryInfo, &index, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) { - return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1); + } else if (UTIL_TABLE_IS_NORMAL_TABLE(pTableMetaInfo) || UTIL_TABLE_IS_CHILD_TABLE(pTableMetaInfo)) { // check order by clause for normal table & temp table + if (getColumnIndexByName(&columnName, pQueryInfo, &index, pMsgBuf) != TSDB_CODE_SUCCESS) { + return invalidOperationMsg(pMsgBuf, msg1); } + if (index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX && !isTopBottomQuery(pQueryInfo)) { bool validOrder = false; SArray *columnInfo = pQueryInfo->groupbyExpr.columnInfo; @@ -5661,13 +5661,14 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq SColIndex* pColIndex = taosArrayGet(columnInfo, 0); validOrder = (pColIndex->colIndex == index.columnIndex); } + if (!validOrder) { - return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2); + return invalidOperationMsg(pMsgBuf, msg2); } + tVariantListItem* p1 = taosArrayGet(pSqlNode->pSortOrder, 0); pQueryInfo->groupbyExpr.orderIndex = pSchema[index.columnIndex].colId; pQueryInfo->groupbyExpr.orderType = p1->sortOrder; - } if (isTopBottomQuery(pQueryInfo)) { @@ -5683,13 +5684,14 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq pExpr = tscExprGet(pQueryInfo, 1); if (pExpr->base.colInfo.colIndex != index.columnIndex && index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX) { - return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2); + return invalidOperationMsg(pMsgBuf, msg2); } + validOrder = true; } if (!validOrder) { - 
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2); + return invalidOperationMsg(pMsgBuf, msg2); } tVariantListItem* pItem = taosArrayGet(pSqlNode->pSortOrder, 0); @@ -5699,6 +5701,18 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq return TSDB_CODE_SUCCESS; } + tVariantListItem* pItem = taosArrayGet(pSqlNode->pSortOrder, 0); + pQueryInfo->order.order = pItem->sortOrder; + pQueryInfo->order.orderColId = pSchema[index.columnIndex].colId; + } else { + // handle the temp table order by clause. You can order by any single column in case of the temp table, created by + // inner subquery. + assert(UTIL_TABLE_IS_TMP_TABLE(pTableMetaInfo) && taosArrayGetSize(pSqlNode->pSortOrder) == 1); + + if (getColumnIndexByName(&columnName, pQueryInfo, &index, pMsgBuf) != TSDB_CODE_SUCCESS) { + return invalidOperationMsg(pMsgBuf, msg1); + } + tVariantListItem* pItem = taosArrayGet(pSqlNode->pSortOrder, 0); pQueryInfo->order.order = pItem->sortOrder; pQueryInfo->order.orderColId = pSchema[index.columnIndex].colId; @@ -8458,8 +8472,7 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf const char* msg8 = "condition missing for join query"; const char* msg9 = "not support 3 level select"; - int32_t code = TSDB_CODE_SUCCESS; - + int32_t code = TSDB_CODE_SUCCESS; SSqlCmd* pCmd = &pSql->cmd; STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); @@ -8754,8 +8767,6 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf pQueryInfo->simpleAgg = isSimpleAggregateRv(pQueryInfo); pQueryInfo->onlyTagQuery = onlyTagPrjFunction(pQueryInfo); pQueryInfo->groupbyColumn = tscGroupbyColumn(pQueryInfo); - //pQueryInfo->globalMerge = tscIsTwoStageSTableQuery(pQueryInfo, 0); - pQueryInfo->arithmeticOnAgg = tsIsArithmeticQueryOnAggResult(pQueryInfo); pQueryInfo->orderProjectQuery = tscOrderedProjectionQueryOnSTable(pQueryInfo, 0); diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c index 3e8dfac1da..e809fe3137 100644 --- a/src/client/src/tscServer.c +++ b/src/client/src/tscServer.c @@ -880,16 +880,16 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) { } SQueryInfo *pQueryInfo = tscGetQueryInfo(pCmd); + STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); + STableMeta * pTableMeta = pTableMetaInfo->pTableMeta; SQueryAttr query = {{0}}; tscCreateQueryFromQueryInfo(pQueryInfo, &query, pSql); + query.vgId = pTableMeta->vgId; SArray* tableScanOperator = createTableScanPlan(&query); SArray* queryOperator = createExecOperatorPlan(&query); - STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); - STableMeta * pTableMeta = pTableMetaInfo->pTableMeta; - SQueryTableMsg *pQueryMsg = (SQueryTableMsg *)pCmd->payload; tstrncpy(pQueryMsg->version, version, tListLen(pQueryMsg->version)); diff --git a/src/query/inc/qExecutor.h b/src/query/inc/qExecutor.h index 5fe68e456f..29cfa5edf1 100644 --- a/src/query/inc/qExecutor.h +++ b/src/query/inc/qExecutor.h @@ -417,7 +417,6 @@ typedef struct STableScanInfo { int32_t *rowCellInfoOffset; SExprInfo *pExpr; SSDataBlock block; - bool loadExternalRows; // load external rows (prev & next rows) int32_t numOfOutput; int64_t elapsedTime; @@ -570,6 +569,7 @@ SOperatorInfo* createFilterOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperator int32_t numOfOutput, SColumnInfo* pCols, int32_t numOfFilter); SOperatorInfo* createJoinOperatorInfo(SOperatorInfo** pUpstream, int32_t numOfUpstream, SSchema* pSchema, int32_t numOfOutput); +SOperatorInfo 
*createOrderOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput); SSDataBlock* doGlobalAggregate(void* param, bool* newgroup); SSDataBlock* doMultiwayMergeSort(void* param, bool* newgroup); diff --git a/src/query/src/qPlan.c b/src/query/src/qPlan.c index b8a5ee7699..f72f70c911 100644 --- a/src/query/src/qPlan.c +++ b/src/query/src/qPlan.c @@ -557,10 +557,9 @@ SArray* createExecOperatorPlan(SQueryAttr* pQueryAttr) { int32_t op = 0; if (onlyQueryTags(pQueryAttr)) { // do nothing for tags query - if (onlyQueryTags(pQueryAttr)) { - op = OP_TagScan; - taosArrayPush(plan, &op); - } + op = OP_TagScan; + taosArrayPush(plan, &op); + if (pQueryAttr->distinct) { op = OP_Distinct; taosArrayPush(plan, &op); @@ -651,8 +650,14 @@ SArray* createExecOperatorPlan(SQueryAttr* pQueryAttr) { taosArrayPush(plan, &op); } } + + // outer query order by support + int32_t orderColId = pQueryAttr->order.orderColId; + if (pQueryAttr->vgId == 0 && orderColId != PRIMARYKEY_TIMESTAMP_COL_INDEX && orderColId != INT32_MIN) { + op = OP_Order; + taosArrayPush(plan, &op); + } } - if (pQueryAttr->limit.limit > 0 || pQueryAttr->limit.offset > 0) { op = OP_Limit; From 6fafa5b1bef6eee56417995f85c2ce7a1d3ab17a Mon Sep 17 00:00:00 2001 From: zhaoyanggh Date: Thu, 12 Aug 2021 14:59:57 +0800 Subject: [PATCH 018/165] [TD-6006] query where by column with no quotes --- tests/pytest/query/queryError.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tests/pytest/query/queryError.py b/tests/pytest/query/queryError.py index ac78c0518f..e5c468600b 100644 --- a/tests/pytest/query/queryError.py +++ b/tests/pytest/query/queryError.py @@ -65,6 +65,10 @@ class TDTestCase: # TD-2208 tdSql.error("select diff(tagtype),top(tagtype,1) from dev_001") + # TD-6006 + tdSql.error("select * from dev_001 where 'name' is not null") + tdSql.error("select * from dev_001 where \"name\" = 'first'") + def stop(self): tdSql.close() tdLog.success("%s successfully executed" % __file__) From f415a994fe7d6d1c0cc94ae3441b8a6709de888c Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 12 Aug 2021 15:18:25 +0800 Subject: [PATCH 019/165] [td-255] --- src/client/src/tscUtil.c | 1 - src/query/inc/qExecutor.h | 8 ++ src/query/inc/qExtbuffer.h | 2 + src/query/src/qExtbuffer.c | 54 +++++++ src/query/src/qPercentile.c | 2 +- src/util/inc/tcompare.h | 2 +- src/util/src/tcompare.c | 244 +++++++++++++++++++++----------- src/util/src/tskiplist.c | 2 +- src/util/tests/skiplistTest.cpp | 2 +- 9 files changed, 232 insertions(+), 85 deletions(-) diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index 3c35795b0d..b393459c52 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -1206,7 +1206,6 @@ void handleDownstreamOperator(SSqlObj** pSqlObjList, int32_t numOfUpstream, SQue createInputDataFilterInfo(px, numOfCol1, &numOfFilterCols, &pFilterInfo); SOperatorInfo* pSourceOperator = createDummyInputOperator(pSqlObjList[0], pSchema, numOfCol1, pFilterInfo, numOfFilterCols); - pOutput->precision = pSqlObjList[0]->res.precision; SSchema* schema = NULL; diff --git a/src/query/inc/qExecutor.h b/src/query/inc/qExecutor.h index 29cfa5edf1..6dca502838 100644 --- a/src/query/inc/qExecutor.h +++ b/src/query/inc/qExecutor.h @@ -333,6 +333,7 @@ enum OPERATOR_TYPE_E { OP_StateWindow = 22, OP_AllTimeWindow = 23, OP_AllMultiTableTimeInterval = 24, + OP_Order = 25, }; typedef struct SOperatorInfo { @@ -540,6 +541,13 @@ typedef struct SMultiwayMergeInfo { SArray *udfInfo; } SMultiwayMergeInfo; +// todo support 
the disk-based sort +typedef struct SOrderOperatorInfo { + int32_t colIndex; + int32_t order; + SSDataBlock *pDataBlock; +} SOrderOperatorInfo; + void appendUpstream(SOperatorInfo* p, SOperatorInfo* pUpstream); SOperatorInfo* createDataBlocksOptScanInfo(void* pTsdbQueryHandle, SQueryRuntimeEnv* pRuntimeEnv, int32_t repeatTime, int32_t reverseTime); diff --git a/src/query/inc/qExtbuffer.h b/src/query/inc/qExtbuffer.h index cf0e8ce31a..c06fae298c 100644 --- a/src/query/inc/qExtbuffer.h +++ b/src/query/inc/qExtbuffer.h @@ -220,6 +220,8 @@ tOrderDescriptor *tOrderDesCreate(const int32_t *orderColIdx, int32_t numOfOrder void tOrderDescDestroy(tOrderDescriptor *pDesc); +void taoscQSort(void** pCols, SSchema* pSchema, int32_t numOfCols, int32_t numOfRows, int32_t index, __compar_fn_t compareFn); + void tColModelAppend(SColumnModel *dstModel, tFilePage *dstPage, void *srcData, int32_t srcStartRows, int32_t numOfRowsToWrite, int32_t srcCapacity); diff --git a/src/query/src/qExtbuffer.c b/src/query/src/qExtbuffer.c index cc47cc824b..91004fe707 100644 --- a/src/query/src/qExtbuffer.c +++ b/src/query/src/qExtbuffer.c @@ -1102,3 +1102,57 @@ void tOrderDescDestroy(tOrderDescriptor *pDesc) { destroyColumnModel(pDesc->pColumnModel); tfree(pDesc); } + +void taoscQSort(void** pCols, SSchema* pSchema, int32_t numOfCols, int32_t numOfRows, int32_t index, __compar_fn_t compareFn) { + assert(numOfRows > 0 && numOfCols > 0 && index >= 0 && index < numOfCols); + + int32_t bytes = pSchema[index].bytes; + int32_t size = bytes + sizeof(int32_t); + + char* buf = calloc(1, size * numOfRows); + + for(int32_t i = 0; i < numOfRows; ++i) { + char* dest = buf + size * i; + memcpy(dest, pCols[index] + bytes * i, bytes); + *(int32_t*)(dest+bytes) = i; + } + + qsort(buf, numOfRows, size, compareFn); + + int32_t prevLength = 0; + char* p = NULL; + + for(int32_t i = 0; i < numOfCols; ++i) { + int32_t bytes1 = pSchema[i].bytes; + + if (i == index) { + for(int32_t j = 0; j < numOfRows; ++j){ + char* src = buf + (j * size); + char* dest = pCols[i] + (j * bytes1); + memcpy(dest, src, bytes1); + } + } else { + // make sure memory buffer is enough + if (prevLength < bytes1) { + char *tmp = realloc(p, bytes1 * numOfRows); + assert(tmp); + + p = tmp; + prevLength = bytes1; + } + + memcpy(p, pCols[i], bytes1 * numOfRows); + + for(int32_t j = 0; j < numOfRows; ++j){ + char* dest = pCols[i] + bytes1 * j; + + int32_t newPos = *(int32_t*)(buf + (j * size) + bytes); + char* src = p + (newPos * bytes1); + memcpy(dest, src, bytes1); + } + } + } + + tfree(buf); + tfree(p); +} \ No newline at end of file diff --git a/src/query/src/qPercentile.c b/src/query/src/qPercentile.c index e3326cc26b..e9022db503 100644 --- a/src/query/src/qPercentile.c +++ b/src/query/src/qPercentile.c @@ -237,7 +237,7 @@ tMemBucket *tMemBucketCreate(int16_t nElemSize, int16_t dataType, double minval, } pBucket->elemPerPage = (pBucket->bufPageSize - sizeof(tFilePage))/pBucket->bytes; - pBucket->comparFn = getKeyComparFunc(pBucket->type); + pBucket->comparFn = getKeyComparFunc(pBucket->type, TSDB_ORDER_ASC); pBucket->hashFunc = getHashFunc(pBucket->type); if (pBucket->hashFunc == NULL) { diff --git a/src/util/inc/tcompare.h b/src/util/inc/tcompare.h index 612ce7ede0..4861779acd 100644 --- a/src/util/inc/tcompare.h +++ b/src/util/inc/tcompare.h @@ -47,7 +47,7 @@ int WCSPatternMatch(const wchar_t *pattern, const wchar_t *str, size_t size, con int32_t doCompare(const char* a, const char* b, int32_t type, size_t size); -__compar_fn_t getKeyComparFunc(int32_t keyType); 
+__compar_fn_t getKeyComparFunc(int32_t keyType, int32_t order); __compar_fn_t getComparFunc(int32_t type, int32_t optr); diff --git a/src/util/src/tcompare.c b/src/util/src/tcompare.c index e953f4c464..6e135878c1 100644 --- a/src/util/src/tcompare.c +++ b/src/util/src/tcompare.c @@ -16,28 +16,22 @@ #include "os.h" #include "ttype.h" #include "tcompare.h" -#include "tarray.h" #include "hash.h" -int32_t compareInt32Val(const void *pLeft, const void *pRight) { - int32_t left = GET_INT32_VAL(pLeft), right = GET_INT32_VAL(pRight); - if (left > right) return 1; - if (left < right) return -1; - return 0; +int32_t setCompareBytes1(const void *pLeft, const void *pRight) { + return NULL != taosHashGet((SHashObj *)pRight, pLeft, 1) ? 1 : 0; } -int32_t compareInt64Val(const void *pLeft, const void *pRight) { - int64_t left = GET_INT64_VAL(pLeft), right = GET_INT64_VAL(pRight); - if (left > right) return 1; - if (left < right) return -1; - return 0; +int32_t setCompareBytes2(const void *pLeft, const void *pRight) { + return NULL != taosHashGet((SHashObj *)pRight, pLeft, 2) ? 1 : 0; } -int32_t compareInt16Val(const void *pLeft, const void *pRight) { - int16_t left = GET_INT16_VAL(pLeft), right = GET_INT16_VAL(pRight); - if (left > right) return 1; - if (left < right) return -1; - return 0; +int32_t setCompareBytes4(const void *pLeft, const void *pRight) { + return NULL != taosHashGet((SHashObj *)pRight, pLeft, 4) ? 1 : 0; +} + +int32_t setCompareBytes8(const void *pLeft, const void *pRight) { + return NULL != taosHashGet((SHashObj *)pRight, pLeft, 8) ? 1 : 0; } int32_t compareInt8Val(const void *pLeft, const void *pRight) { @@ -47,27 +41,76 @@ int32_t compareInt8Val(const void *pLeft, const void *pRight) { return 0; } -int32_t compareUint32Val(const void *pLeft, const void *pRight) { - int32_t left = GET_UINT32_VAL(pLeft), right = GET_UINT32_VAL(pRight); +int32_t compareInt8ValDesc(const void *pLeft, const void *pRight) { + return compareInt8Val(pRight, pLeft); +} + +int32_t compareInt16Val(const void *pLeft, const void *pRight) { + int16_t left = GET_INT16_VAL(pLeft), right = GET_INT16_VAL(pRight); if (left > right) return 1; if (left < right) return -1; return 0; } +int32_t compareInt16ValDesc(const void* pLeft, const void* pRight) { + return compareInt16Val(pRight, pLeft); +} + +int32_t compareInt32Val(const void *pLeft, const void *pRight) { + int32_t left = GET_INT32_VAL(pLeft), right = GET_INT32_VAL(pRight); + if (left > right) return 1; + if (left < right) return -1; + return 0; +} + +int32_t compareInt32ValDesc(const void* pLeft, const void* pRight) { + return compareInt32Val(pRight, pLeft); +} + +int32_t compareInt64Val(const void *pLeft, const void *pRight) { + int64_t left = GET_INT64_VAL(pLeft), right = GET_INT64_VAL(pRight); + if (left > right) return 1; + if (left < right) return -1; + return 0; +} + +int32_t compareInt64ValDesc(const void* pLeft, const void* pRight) { + return compareInt64Val(pRight, pLeft); +} + +int32_t compareUint32Val(const void *pLeft, const void *pRight) { + uint32_t left = GET_UINT32_VAL(pLeft), right = GET_UINT32_VAL(pRight); + if (left > right) return 1; + if (left < right) return -1; + return 0; +} + +int32_t compareUint32ValDesc(const void* pLeft, const void* pRight) { + return compareUint32Val(pRight, pLeft); +} + int32_t compareUint64Val(const void *pLeft, const void *pRight) { - int64_t left = GET_UINT64_VAL(pLeft), right = GET_UINT64_VAL(pRight); + uint64_t left = GET_UINT64_VAL(pLeft), right = GET_UINT64_VAL(pRight); if (left > right) return 1; if (left < 
right) return -1; return 0; } +int32_t compareUint64ValDesc(const void* pLeft, const void* pRight) { + return compareUint64Val(pRight, pLeft); +} + int32_t compareUint16Val(const void *pLeft, const void *pRight) { - int16_t left = GET_UINT16_VAL(pLeft), right = GET_UINT16_VAL(pRight); + uint16_t left = GET_UINT16_VAL(pLeft), right = GET_UINT16_VAL(pRight); if (left > right) return 1; if (left < right) return -1; return 0; } +int32_t compareUint16ValDesc(const void* pLeft, const void* pRight) { + return compareUint16Val(pRight, pLeft); +} + int32_t compareUint8Val(const void* pLeft, const void* pRight) { uint8_t left = GET_UINT8_VAL(pLeft), right = GET_UINT8_VAL(pRight); if (left > right) return 1; @@ -75,6 +118,10 @@ int32_t compareUint8Val(const void* pLeft, const void* pRight) { return 0; } +int32_t compareUint8ValDesc(const void* pLeft, const void* pRight) { + return compareUint8Val(pRight, pLeft); +} + int32_t compareFloatVal(const void *pLeft, const void *pRight) { float p1 = GET_FLOAT_VAL(pLeft); float p2 = GET_FLOAT_VAL(pRight); @@ -92,8 +139,12 @@ int32_t compareFloatVal(const void *pLeft, const void *pRight) { } if (FLT_EQUAL(p1, p2)) { return 0; - } - return FLT_GREATER(p1, p2) ? 1: -1; + } + return FLT_GREATER(p1, p2) ? 1: -1; +} + +int32_t compareFloatValDesc(const void* pLeft, const void* pRight) { + return compareFloatVal(pRight, pLeft); } int32_t compareDoubleVal(const void *pLeft, const void *pRight) { @@ -113,14 +164,18 @@ int32_t compareDoubleVal(const void *pLeft, const void *pRight) { } if (FLT_EQUAL(p1, p2)) { return 0; - } - return FLT_GREATER(p1, p2) ? 1: -1; + } + return FLT_GREATER(p1, p2) ? 1: -1; +} + +int32_t compareDoubleValDesc(const void* pLeft, const void* pRight) { + return compareDoubleVal(pRight, pLeft); } int32_t compareLenPrefixedStr(const void *pLeft, const void *pRight) { int32_t len1 = varDataLen(pLeft); int32_t len2 = varDataLen(pRight); - + if (len1 != len2) { return len1 > len2? 1:-1; } else { @@ -133,10 +188,14 @@ int32_t compareLenPrefixedStr(const void *pLeft, const void *pRight) { } } +int32_t compareLenPrefixedStrDesc(const void* pLeft, const void* pRight) { + return compareLenPrefixedStr(pRight, pLeft); +} + int32_t compareLenPrefixedWStr(const void *pLeft, const void *pRight) { int32_t len1 = varDataLen(pLeft); int32_t len2 = varDataLen(pRight); - + if (len1 != len2) { return len1 > len2? 
1:-1; } else { @@ -149,6 +208,10 @@ int32_t compareLenPrefixedWStr(const void *pLeft, const void *pRight) { } } +int32_t compareLenPrefixedWStrDesc(const void* pLeft, const void* pRight) { + return compareLenPrefixedWStr(pRight, pLeft); +} + /* * Compare two strings * TSDB_MATCH: Match @@ -161,33 +224,33 @@ int32_t compareLenPrefixedWStr(const void *pLeft, const void *pRight) { */ int patternMatch(const char *patterStr, const char *str, size_t size, const SPatternCompareInfo *pInfo) { char c, c1; - + int32_t i = 0; int32_t j = 0; - + while ((c = patterStr[i++]) != 0) { if (c == pInfo->matchAll) { /* Match "*" */ - + while ((c = patterStr[i++]) == pInfo->matchAll || c == pInfo->matchOne) { if (c == pInfo->matchOne && (j > size || str[j++] == 0)) { // empty string, return not match return TSDB_PATTERN_NOWILDCARDMATCH; } } - + if (c == 0) { return TSDB_PATTERN_MATCH; /* "*" at the end of the pattern matches */ } - + char next[3] = {toupper(c), tolower(c), 0}; while (1) { size_t n = strcspn(str, next); str += n; - + if (str[0] == 0 || (n >= size)) { break; } - + int32_t ret = patternMatch(&patterStr[i], ++str, size - n - 1, pInfo); if (ret != TSDB_PATTERN_NOMATCH) { return ret; @@ -195,18 +258,18 @@ int patternMatch(const char *patterStr, const char *str, size_t size, const SPat } return TSDB_PATTERN_NOWILDCARDMATCH; } - + c1 = str[j++]; - + if (j <= size) { if (c == c1 || tolower(c) == tolower(c1) || (c == pInfo->matchOne && c1 != 0)) { continue; } } - + return TSDB_PATTERN_NOMATCH; } - + return (str[j] == 0 || j >= size) ? TSDB_PATTERN_MATCH : TSDB_PATTERN_NOMATCH; } @@ -214,13 +277,13 @@ int WCSPatternMatch(const wchar_t *patterStr, const wchar_t *str, size_t size, c wchar_t c, c1; wchar_t matchOne = L'_'; // "_" wchar_t matchAll = L'%'; // "%" - + int32_t i = 0; int32_t j = 0; - + while ((c = patterStr[i++]) != 0) { if (c == matchAll) { /* Match "%" */ - + while ((c = patterStr[i++]) == matchAll || c == matchOne) { if (c == matchOne && (j > size || str[j++] == 0)) { return TSDB_PATTERN_NOWILDCARDMATCH; @@ -229,40 +292,40 @@ int WCSPatternMatch(const wchar_t *patterStr, const wchar_t *str, size_t size, c if (c == 0) { return TSDB_PATTERN_MATCH; } - + wchar_t accept[3] = {towupper(c), towlower(c), 0}; while (1) { size_t n = wcscspn(str, accept); - + str += n; if (str[0] == 0 || (n >= size)) { break; } - + int32_t ret = WCSPatternMatch(&patterStr[i], ++str, size - n - 1, pInfo); if (ret != TSDB_PATTERN_NOMATCH) { return ret; } } - + return TSDB_PATTERN_NOWILDCARDMATCH; } - + c1 = str[j++]; - + if (j <= size) { if (c == c1 || towlower(c) == towlower(c1) || (c == matchOne && c1 != 0)) { continue; } } - + return TSDB_PATTERN_NOMATCH; } return (str[j] == 0 || j >= size) ? 
TSDB_PATTERN_MATCH : TSDB_PATTERN_NOMATCH; } -static int32_t compareStrPatternComp(const void* pLeft, const void* pRight) { +int32_t compareStrPatternComp(const void* pLeft, const void* pRight) { SPatternCompareInfo pInfo = {'%', '_'}; char pattern[128] = {0}; @@ -270,8 +333,8 @@ static int32_t compareStrPatternComp(const void* pLeft, const void* pRight) { assert(varDataLen(pRight) < 128); size_t sz = varDataLen(pLeft); - char *buf = malloc(sz + 1); - memcpy(buf, varDataVal(pLeft), sz); + char *buf = malloc(sz + 1); + memcpy(buf, varDataVal(pLeft), sz); buf[sz] = 0; int32_t ret = patternMatch(pattern, buf, sz, &pInfo); @@ -282,19 +345,15 @@ static int32_t compareStrPatternComp(const void* pLeft, const void* pRight) { int32_t taosArrayCompareString(const void* a, const void* b) { const char* x = *(const char**)a; const char* y = *(const char**)b; - + return compareLenPrefixedStr(x, y); } -//static int32_t compareFindStrInArray(const void* pLeft, const void* pRight) { -// const SArray* arr = (const SArray*) pRight; -// return taosArraySearchString(arr, pLeft, taosArrayCompareString, TD_EQ) == NULL ? 0 : 1; -//} -static int32_t compareFindItemInSet(const void *pLeft, const void* pRight) { - return NULL != taosHashGet((SHashObj *)pRight, varDataVal(pLeft), varDataLen(pLeft)) ? 1 : 0; +int32_t compareFindItemInSet(const void *pLeft, const void* pRight) { + return NULL != taosHashGet((SHashObj *)pRight, varDataVal(pLeft), varDataLen(pLeft)) ? 1 : 0; } -static int32_t compareWStrPatternComp(const void* pLeft, const void* pRight) { +int32_t compareWStrPatternComp(const void* pLeft, const void* pRight) { SPatternCompareInfo pInfo = {'%', '_'}; wchar_t pattern[128] = {0}; @@ -302,14 +361,37 @@ static int32_t compareWStrPatternComp(const void* pLeft, const void* pRight) { memcpy(pattern, varDataVal(pRight), varDataLen(pRight)); assert(varDataLen(pRight) < 128); - + int32_t ret = WCSPatternMatch(pattern, varDataVal(pLeft), varDataLen(pLeft)/TSDB_NCHAR_SIZE, &pInfo); return (ret == TSDB_PATTERN_MATCH) ? 
0 : 1; } __compar_fn_t getComparFunc(int32_t type, int32_t optr) { __compar_fn_t comparFn = NULL; - + + if (optr == TSDB_RELATION_IN && (type != TSDB_DATA_TYPE_BINARY && type != TSDB_DATA_TYPE_NCHAR)) { + switch (type) { + case TSDB_DATA_TYPE_BOOL: + case TSDB_DATA_TYPE_TINYINT: + case TSDB_DATA_TYPE_UTINYINT: + return setCompareBytes1; + case TSDB_DATA_TYPE_SMALLINT: + case TSDB_DATA_TYPE_USMALLINT: + return setCompareBytes2; + case TSDB_DATA_TYPE_INT: + case TSDB_DATA_TYPE_UINT: + case TSDB_DATA_TYPE_FLOAT: + return setCompareBytes4; + case TSDB_DATA_TYPE_BIGINT: + case TSDB_DATA_TYPE_UBIGINT: + case TSDB_DATA_TYPE_DOUBLE: + case TSDB_DATA_TYPE_TIMESTAMP: + return setCompareBytes8; + default: + assert(0); + } + } + switch (type) { case TSDB_DATA_TYPE_BOOL: case TSDB_DATA_TYPE_TINYINT: comparFn = compareInt8Val; break; @@ -327,13 +409,15 @@ __compar_fn_t getComparFunc(int32_t type, int32_t optr) { } else { /* normal relational comparFn */ comparFn = compareLenPrefixedStr; } - + break; } - + case TSDB_DATA_TYPE_NCHAR: { if (optr == TSDB_RELATION_LIKE) { comparFn = compareWStrPatternComp; + } else if (optr == TSDB_RELATION_IN) { + comparFn = compareFindItemInSet; } else { comparFn = compareLenPrefixedWStr; } @@ -349,57 +433,57 @@ __compar_fn_t getComparFunc(int32_t type, int32_t optr) { comparFn = compareInt32Val; break; } - + return comparFn; } -__compar_fn_t getKeyComparFunc(int32_t keyType) { +__compar_fn_t getKeyComparFunc(int32_t keyType, int32_t order) { __compar_fn_t comparFn = NULL; - + switch (keyType) { case TSDB_DATA_TYPE_TINYINT: case TSDB_DATA_TYPE_BOOL: - comparFn = compareInt8Val; + comparFn = (order == TSDB_ORDER_ASC)? compareInt8Val:compareInt8ValDesc; break; case TSDB_DATA_TYPE_SMALLINT: - comparFn = compareInt16Val; + comparFn = (order == TSDB_ORDER_ASC)? compareInt16Val:compareInt16ValDesc; break; case TSDB_DATA_TYPE_INT: - comparFn = compareInt32Val; + comparFn = (order == TSDB_ORDER_ASC)? compareInt32Val:compareInt32ValDesc; break; case TSDB_DATA_TYPE_BIGINT: case TSDB_DATA_TYPE_TIMESTAMP: - comparFn = compareInt64Val; + comparFn = (order == TSDB_ORDER_ASC)? compareInt64Val:compareInt64ValDesc; break; case TSDB_DATA_TYPE_FLOAT: - comparFn = compareFloatVal; + comparFn = (order == TSDB_ORDER_ASC)? compareFloatVal:compareFloatValDesc; break; case TSDB_DATA_TYPE_DOUBLE: - comparFn = compareDoubleVal; + comparFn = (order == TSDB_ORDER_ASC)? compareDoubleVal:compareDoubleValDesc; break; case TSDB_DATA_TYPE_UTINYINT: - comparFn = compareUint8Val; + comparFn = (order == TSDB_ORDER_ASC)? compareUint8Val:compareUint8ValDesc; break; case TSDB_DATA_TYPE_USMALLINT: - comparFn = compareUint16Val; + comparFn = (order == TSDB_ORDER_ASC)? compareUint16Val:compareUint16ValDesc; break; case TSDB_DATA_TYPE_UINT: - comparFn = compareUint32Val; + comparFn = (order == TSDB_ORDER_ASC)? compareUint32Val:compareUint32ValDesc; break; case TSDB_DATA_TYPE_UBIGINT: - comparFn = compareUint64Val; + comparFn = (order == TSDB_ORDER_ASC)? compareUint64Val:compareUint64ValDesc; break; case TSDB_DATA_TYPE_BINARY: - comparFn = compareLenPrefixedStr; + comparFn = (order == TSDB_ORDER_ASC)? compareLenPrefixedStr:compareLenPrefixedStrDesc; break; case TSDB_DATA_TYPE_NCHAR: - comparFn = compareLenPrefixedWStr; + comparFn = (order == TSDB_ORDER_ASC)? compareLenPrefixedWStr:compareLenPrefixedWStrDesc; break; default: - comparFn = compareInt32Val; + comparFn = (order == TSDB_ORDER_ASC)? 
compareInt32Val:compareInt32ValDesc; break; } - + return comparFn; } @@ -433,7 +517,7 @@ int32_t doCompare(const char* f1, const char* f2, int32_t type, size_t size) { default: { // todo refactor tstr* t1 = (tstr*) f1; tstr* t2 = (tstr*) f2; - + if (t1->len != t2->len) { return t1->len > t2->len? 1:-1; } else { diff --git a/src/util/src/tskiplist.c b/src/util/src/tskiplist.c index b464519ba6..98fd9c094c 100644 --- a/src/util/src/tskiplist.c +++ b/src/util/src/tskiplist.c @@ -54,7 +54,7 @@ SSkipList *tSkipListCreate(uint8_t maxLevel, uint8_t keyType, uint16_t keyLen, _ pSkipList->keyFn = fn; pSkipList->seed = rand(); if (comparFn == NULL) { - pSkipList->comparFn = getKeyComparFunc(keyType); + pSkipList->comparFn = getKeyComparFunc(keyType, TSDB_ORDER_ASC); } else { pSkipList->comparFn = comparFn; } diff --git a/src/util/tests/skiplistTest.cpp b/src/util/tests/skiplistTest.cpp index dfbe0f6716..df4c5af5e2 100644 --- a/src/util/tests/skiplistTest.cpp +++ b/src/util/tests/skiplistTest.cpp @@ -70,7 +70,7 @@ void doubleSkipListTest() { } void randKeyTest() { - SSkipList* pSkipList = tSkipListCreate(10, TSDB_DATA_TYPE_INT, sizeof(int32_t), getKeyComparFunc(TSDB_DATA_TYPE_INT), + SSkipList* pSkipList = tSkipListCreate(10, TSDB_DATA_TYPE_INT, sizeof(int32_t), getKeyComparFunc(TSDB_DATA_TYPE_INT, TSDB_ORDER_ASC), false, getkey); int32_t size = 200000; From 799c642cecdcc1a660f5c6c82384806a5c51f45c Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 12 Aug 2021 13:50:29 +0800 Subject: [PATCH 020/165] [td-5881]: Sort the result according to any single column in the outer query result is allowed. --- src/client/inc/tscUtil.h | 11 +-- src/query/src/qExecutor.c | 158 ++++++++++++++++++++++++++++++++------ 2 files changed, 140 insertions(+), 29 deletions(-) diff --git a/src/client/inc/tscUtil.h b/src/client/inc/tscUtil.h index b3674a7bf5..0b2d4fd115 100644 --- a/src/client/inc/tscUtil.h +++ b/src/client/inc/tscUtil.h @@ -29,15 +29,16 @@ extern "C" { #include "tsched.h" #include "tsclient.h" -#define UTIL_TABLE_IS_SUPER_TABLE(metaInfo) \ +#define UTIL_TABLE_IS_SUPER_TABLE(metaInfo) \ (((metaInfo)->pTableMeta != NULL) && ((metaInfo)->pTableMeta->tableType == TSDB_SUPER_TABLE)) + #define UTIL_TABLE_IS_CHILD_TABLE(metaInfo) \ (((metaInfo)->pTableMeta != NULL) && ((metaInfo)->pTableMeta->tableType == TSDB_CHILD_TABLE)) - -#define UTIL_TABLE_IS_NORMAL_TABLE(metaInfo)\ - (!(UTIL_TABLE_IS_SUPER_TABLE(metaInfo) || UTIL_TABLE_IS_CHILD_TABLE(metaInfo))) -#define UTIL_TABLE_IS_TMP_TABLE(metaInfo) \ +#define UTIL_TABLE_IS_NORMAL_TABLE(metaInfo) \ + (!(UTIL_TABLE_IS_SUPER_TABLE(metaInfo) || UTIL_TABLE_IS_CHILD_TABLE(metaInfo) || UTIL_TABLE_IS_TMP_TABLE(metaInfo))) + +#define UTIL_TABLE_IS_TMP_TABLE(metaInfo) \ (((metaInfo)->pTableMeta != NULL) && ((metaInfo)->pTableMeta->tableType == TSDB_TEMP_TABLE)) #pragma pack(push,1) diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index e31792398d..c124bd20fc 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -224,6 +224,7 @@ static void destroySFillOperatorInfo(void* param, int32_t numOfOutput); static void destroyGroupbyOperatorInfo(void* param, int32_t numOfOutput); static void destroyProjectOperatorInfo(void* param, int32_t numOfOutput); static void destroyTagScanOperatorInfo(void* param, int32_t numOfOutput); +static void destroyOrderOperatorInfo(void* param, int32_t numOfOutput); static void destroySWindowOperatorInfo(void* param, int32_t numOfOutput); static void destroyStateWindowOperatorInfo(void* param, int32_t numOfOutput); 
static void destroyAggOperatorInfo(void* param, int32_t numOfOutput); @@ -1622,12 +1623,12 @@ static void hashAllIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pRe } startPos = pSDataBlock->info.rows - 1; - + // window start(end) key interpolation doWindowBorderInterpolation(pOperatorInfo, pSDataBlock, pInfo->pCtx, pResult, &win, startPos, forwardStep); doApplyFunctions(pRuntimeEnv, pInfo->pCtx, &win, startPos, forwardStep, tsCols, pSDataBlock->info.rows, numOfOutput); } - + break; } setResultRowInterpo(pResult, RESULT_ROW_END_INTERP); @@ -2213,6 +2214,7 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int32_t numOf } break; } + case OP_StateWindow: { pRuntimeEnv->proot = createStatewindowOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pQueryAttr->pExpr1, pQueryAttr->numOfOutput); int32_t opType = pRuntimeEnv->proot->upstream[0]->operatorType; @@ -2229,24 +2231,20 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int32_t numOf case OP_Filter: { // todo refactor int32_t numOfFilterCols = 0; -// if (pQueryAttr->numOfFilterCols > 0) { -// pRuntimeEnv->proot = createFilterOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pQueryAttr->pExpr1, -// pQueryAttr->numOfOutput, pQueryAttr->tableCols, pQueryAttr->numOfFilterCols); -// } else { - if (pQueryAttr->stableQuery) { - SColumnInfo* pColInfo = - extractColumnFilterInfo(pQueryAttr->pExpr3, pQueryAttr->numOfExpr3, &numOfFilterCols); - pRuntimeEnv->proot = createFilterOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pQueryAttr->pExpr3, - pQueryAttr->numOfExpr3, pColInfo, numOfFilterCols); - freeColumnInfo(pColInfo, pQueryAttr->numOfExpr3); - } else { - SColumnInfo* pColInfo = - extractColumnFilterInfo(pQueryAttr->pExpr1, pQueryAttr->numOfOutput, &numOfFilterCols); - pRuntimeEnv->proot = createFilterOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pQueryAttr->pExpr1, - pQueryAttr->numOfOutput, pColInfo, numOfFilterCols); - freeColumnInfo(pColInfo, pQueryAttr->numOfOutput); - } -// } + if (pQueryAttr->stableQuery) { + SColumnInfo* pColInfo = + extractColumnFilterInfo(pQueryAttr->pExpr3, pQueryAttr->numOfExpr3, &numOfFilterCols); + pRuntimeEnv->proot = createFilterOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pQueryAttr->pExpr3, + pQueryAttr->numOfExpr3, pColInfo, numOfFilterCols); + freeColumnInfo(pColInfo, pQueryAttr->numOfExpr3); + } else { + SColumnInfo* pColInfo = + extractColumnFilterInfo(pQueryAttr->pExpr1, pQueryAttr->numOfOutput, &numOfFilterCols); + pRuntimeEnv->proot = createFilterOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pQueryAttr->pExpr1, + pQueryAttr->numOfOutput, pColInfo, numOfFilterCols); + freeColumnInfo(pColInfo, pQueryAttr->numOfOutput); + } + break; } @@ -2258,11 +2256,12 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int32_t numOf case OP_MultiwayMergeSort: { bool groupMix = true; - if(pQueryAttr->slimit.offset != 0 || pQueryAttr->slimit.limit != -1) { + if (pQueryAttr->slimit.offset != 0 || pQueryAttr->slimit.limit != -1) { groupMix = false; } + pRuntimeEnv->proot = createMultiwaySortOperatorInfo(pRuntimeEnv, pQueryAttr->pExpr1, pQueryAttr->numOfOutput, - 4096, merger, groupMix); // TODO hack it + 4096, merger, groupMix); // TODO hack it break; } @@ -2283,6 +2282,11 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int32_t numOf break; } + case OP_Order: { + pRuntimeEnv->proot = createOrderOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pQueryAttr->pExpr1, pQueryAttr->numOfOutput); + break; + } + default: { assert(0); } @@ -3092,7 +3096,7 @@ 
int32_t loadDataBlockOnDemand(SQueryRuntimeEnv* pRuntimeEnv, STableScanInfo* pTa // check if this data block is required to load if ((*status) != BLK_DATA_ALL_NEEDED) { bool needFilter = true; - + // the pCtx[i] result is belonged to previous time window since the outputBuf has not been set yet, // the filter result may be incorrect. So in case of interval query, we need to set the correct time output buffer if (QUERY_IS_INTERVAL_QUERY(pQueryAttr)) { @@ -5404,6 +5408,108 @@ SOperatorInfo *createMultiwaySortOperatorInfo(SQueryRuntimeEnv *pRuntimeEnv, SEx return pOperator; } +static int32_t doMergeSDatablock(SSDataBlock* pDest, SSDataBlock* pSrc) { + assert(pSrc != NULL && pDest != NULL && pDest->info.numOfCols == pSrc->info.numOfCols); + + int32_t numOfCols = pSrc->info.numOfCols; + for(int32_t i = 0; i < numOfCols; ++i) { + SColumnInfoData* pCol2 = taosArrayGet(pDest->pDataBlock, i); + SColumnInfoData* pCol1 = taosArrayGet(pSrc->pDataBlock, i); + + int32_t newSize = (pDest->info.rows + pSrc->info.rows) * pCol2->info.bytes; + char* tmp = realloc(pCol2->pData, newSize); + if (tmp != NULL) { + pCol2->pData = tmp; + int32_t offset = pCol2->info.bytes * pDest->info.rows; + memcpy(pCol2->pData + offset, pCol1->pData, pSrc->info.rows * pCol2->info.bytes); + } else { + return TSDB_CODE_VND_OUT_OF_MEMORY; + } + } + + pDest->info.rows += pSrc->info.rows; + return TSDB_CODE_SUCCESS; +} + +static SSDataBlock* doSort(void* param, bool* newgroup) { + SOperatorInfo* pOperator = (SOperatorInfo*) param; + if (pOperator->status == OP_EXEC_DONE) { + return NULL; + } + + SOrderOperatorInfo* pInfo = pOperator->info; + + SSDataBlock* pBlock = NULL; + while(1) { + publishOperatorProfEvent(pOperator->upstream[0], QUERY_PROF_BEFORE_OPERATOR_EXEC); + pBlock = pOperator->upstream[0]->exec(pOperator->upstream[0], newgroup); + publishOperatorProfEvent(pOperator->upstream[0], QUERY_PROF_AFTER_OPERATOR_EXEC); + + // start to flush data into disk and try do multiway merge sort + if (pBlock == NULL) { + setQueryStatus(pOperator->pRuntimeEnv, QUERY_COMPLETED); + pOperator->status = OP_EXEC_DONE; + break; + } + + int32_t code = doMergeSDatablock(pInfo->pDataBlock, pBlock); + if (code != TSDB_CODE_SUCCESS) { + // todo handle error + } + } + + int32_t numOfCols = pInfo->pDataBlock->info.numOfCols; + void** pCols = calloc(numOfCols, POINTER_BYTES); + SSchema* pSchema = calloc(numOfCols, sizeof(SSchema)); + + for(int32_t i = 0; i < numOfCols; ++i) { + SColumnInfoData* p1 = taosArrayGet(pInfo->pDataBlock->pDataBlock, i); + pCols[i] = p1->pData; + pSchema[i].colId = p1->info.colId; + pSchema[i].bytes = p1->info.bytes; + pSchema[i].type = (uint8_t) p1->info.type; + } + + __compar_fn_t comp = getKeyComparFunc(pSchema[pInfo->colIndex].type, pInfo->order); + taoscQSort(pCols, pSchema, numOfCols, pInfo->pDataBlock->info.rows, pInfo->colIndex, comp); + + tfree(pCols); + tfree(pSchema); + return (pInfo->pDataBlock->info.rows > 0)? 
pInfo->pDataBlock:NULL; +} + +SOperatorInfo *createOrderOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput) { + SOrderOperatorInfo* pInfo = calloc(1, sizeof(SOrderOperatorInfo)); + + { + SSDataBlock* pDataBlock = calloc(1, sizeof(SSDataBlock)); + pDataBlock->pDataBlock = taosArrayInit(numOfOutput, sizeof(SColumnInfoData)); + for(int32_t i = 0; i < numOfOutput; ++i) { + SColumnInfoData col = {{0}}; + col.info.bytes = pExpr->base.resBytes; + col.info.colId = pExpr->base.resColId; + col.info.type = pExpr->base.resType; + taosArrayPush(pDataBlock->pDataBlock, &col); + } + + pDataBlock->info.numOfCols = numOfOutput; + pInfo->pDataBlock = pDataBlock; + } + + SOperatorInfo* pOperator = calloc(1, sizeof(SOperatorInfo)); + pOperator->name = "InMemoryOrder"; + pOperator->operatorType = OP_Order; + pOperator->blockingOptr = true; + pOperator->status = OP_IN_EXECUTING; + pOperator->info = pInfo; + pOperator->exec = doSort; + pOperator->cleanup = destroyOrderOperatorInfo; + pOperator->pRuntimeEnv = pRuntimeEnv; + + appendUpstream(pOperator, upstream); + return pOperator; +} + static int32_t getTableScanOrder(STableScanInfo* pTableScanInfo) { return pTableScanInfo->order; } @@ -6404,6 +6510,11 @@ static void destroyTagScanOperatorInfo(void* param, int32_t numOfOutput) { pInfo->pRes = destroyOutputBuf(pInfo->pRes); } +static void destroyOrderOperatorInfo(void* param, int32_t numOfOutput) { + SOrderOperatorInfo* pInfo = (SOrderOperatorInfo*) param; + pInfo->pDataBlock = destroyOutputBuf(pInfo->pDataBlock); +} + static void destroyConditionOperatorInfo(void* param, int32_t numOfOutput) { SFilterOperatorInfo* pInfo = (SFilterOperatorInfo*) param; doDestroyFilterInfo(pInfo->pFilterInfo, pInfo->numOfFilterCols); @@ -6752,7 +6863,6 @@ SOperatorInfo* createFillOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorIn pOperator->numOfOutput = numOfOutput; pOperator->info = pInfo; pOperator->pRuntimeEnv = pRuntimeEnv; - pOperator->exec = doFill; pOperator->cleanup = destroySFillOperatorInfo; From c0e81449bfc86d24b2d77cef25c5a7a417c7924e Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Thu, 12 Aug 2021 15:36:29 +0800 Subject: [PATCH 021/165] [TD-6013]: taosdemo buffer overflow. (#7317) --- src/kit/taosdemo/taosdemo.c | 49 +++++++++++++++++++++++-------------- 1 file changed, 31 insertions(+), 18 deletions(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 27b26e7364..bd0feaeb92 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -5101,21 +5101,27 @@ static int64_t generateStbRowData( int64_t dataLen = 0; char *pstr = recBuf; int64_t maxLen = MAX_DATA_SIZE; + int tmpLen; dataLen += snprintf(pstr + dataLen, maxLen - dataLen, "(%" PRId64 ",", timestamp); for (int i = 0; i < stbInfo->columnCount; i++) { if ((0 == strncasecmp(stbInfo->columns[i].dataType, - "BINARY", strlen("BINARY"))) + "BINARY", 6)) || (0 == strncasecmp(stbInfo->columns[i].dataType, - "NCHAR", strlen("NCHAR")))) { + "NCHAR", 5))) { if (stbInfo->columns[i].dataLen > TSDB_MAX_BINARY_LEN) { errorPrint( "binary or nchar length overflow, max size:%u\n", (uint32_t)TSDB_MAX_BINARY_LEN); return -1; } + if ((stbInfo->columns[i].dataLen + 1) > + /* need count 3 extra chars \', \', and , */ + (remainderBufLen - dataLen - 3)) { + return 0; + } char* buf = (char*)calloc(stbInfo->columns[i].dataLen+1, 1); if (NULL == buf) { errorPrint( "calloc failed! 
size:%d\n", stbInfo->columns[i].dataLen); @@ -5129,19 +5135,20 @@ static int64_t generateStbRowData( char *tmp; if (0 == strncasecmp(stbInfo->columns[i].dataType, - "INT", strlen("INT"))) { + "INT", 3)) { if ((g_args.demo_mode) && (i == 1)) { tmp = demo_voltage_int_str(); } else { tmp = rand_int_str(); } - tstrncpy(pstr + dataLen, tmp, INT_BUFF_LEN); + tmpLen = strlen(tmp); + tstrncpy(pstr + dataLen, tmp, min(tmpLen + 1, INT_BUFF_LEN)); } else if (0 == strncasecmp(stbInfo->columns[i].dataType, - "BIGINT", strlen("BIGINT"))) { + "BIGINT", 6)) { tmp = rand_bigint_str(); tstrncpy(pstr + dataLen, tmp, BIGINT_BUFF_LEN); } else if (0 == strncasecmp(stbInfo->columns[i].dataType, - "FLOAT", strlen("FLOAT"))) { + "FLOAT", 5)) { if (g_args.demo_mode) { if (i == 0) { tmp = demo_current_float_str(); @@ -5151,27 +5158,33 @@ static int64_t generateStbRowData( } else { tmp = rand_float_str(); } - tstrncpy(pstr + dataLen, tmp, FLOAT_BUFF_LEN); + tmpLen = strlen(tmp); + tstrncpy(pstr + dataLen, tmp, min(tmpLen +1, FLOAT_BUFF_LEN)); } else if (0 == strncasecmp(stbInfo->columns[i].dataType, - "DOUBLE", strlen("DOUBLE"))) { + "DOUBLE", 6)) { tmp = rand_double_str(); - tstrncpy(pstr + dataLen, tmp, DOUBLE_BUFF_LEN); + tmpLen = strlen(tmp); + tstrncpy(pstr + dataLen, tmp, min(tmpLen +1, DOUBLE_BUFF_LEN)); } else if (0 == strncasecmp(stbInfo->columns[i].dataType, - "SMALLINT", strlen("SMALLINT"))) { + "SMALLINT", 8)) { tmp = rand_smallint_str(); - tstrncpy(pstr + dataLen, tmp, SMALLINT_BUFF_LEN); + tmpLen = strlen(tmp); + tstrncpy(pstr + dataLen, tmp, min(tmpLen + 1, SMALLINT_BUFF_LEN)); } else if (0 == strncasecmp(stbInfo->columns[i].dataType, - "TINYINT", strlen("TINYINT"))) { + "TINYINT", 7)) { tmp = rand_tinyint_str(); - tstrncpy(pstr + dataLen, tmp, TINYINT_BUFF_LEN); + tmpLen = strlen(tmp); + tstrncpy(pstr + dataLen, tmp, min(tmpLen +1, TINYINT_BUFF_LEN)); } else if (0 == strncasecmp(stbInfo->columns[i].dataType, - "BOOL", strlen("BOOL"))) { + "BOOL", 4)) { tmp = rand_bool_str(); - tstrncpy(pstr + dataLen, tmp, BOOL_BUFF_LEN); + tmpLen = strlen(tmp); + tstrncpy(pstr + dataLen, tmp, min(tmpLen +1, BOOL_BUFF_LEN)); } else if (0 == strncasecmp(stbInfo->columns[i].dataType, - "TIMESTAMP", strlen("TIMESTAMP"))) { + "TIMESTAMP", 9)) { tmp = rand_int_str(); - tstrncpy(pstr + dataLen, tmp, INT_BUFF_LEN); + tmpLen = strlen(tmp); + tstrncpy(pstr + dataLen, tmp, min(tmpLen +1, INT_BUFF_LEN)); } else { errorPrint( "Not support data type: %s\n", stbInfo->columns[i].dataType); return -1; @@ -5182,7 +5195,7 @@ static int64_t generateStbRowData( dataLen += 1; } - if (dataLen > (remainderBufLen - (DOUBLE_BUFF_LEN + 1))) + if (dataLen > (remainderBufLen - (128))) return 0; } From bf3477e7c3d16eda61785080124498fe47297f24 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 12 Aug 2021 16:03:49 +0800 Subject: [PATCH 022/165] [td-5881]: Sort the result according to any single column in the outer query result is allowed. 
--- src/query/inc/qExecutor.h | 2 +- src/query/src/qExecutor.c | 15 ++++++++++----- 2 files changed, 11 insertions(+), 6 deletions(-) diff --git a/src/query/inc/qExecutor.h b/src/query/inc/qExecutor.h index 6dca502838..d30971ab47 100644 --- a/src/query/inc/qExecutor.h +++ b/src/query/inc/qExecutor.h @@ -577,7 +577,7 @@ SOperatorInfo* createFilterOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperator int32_t numOfOutput, SColumnInfo* pCols, int32_t numOfFilter); SOperatorInfo* createJoinOperatorInfo(SOperatorInfo** pUpstream, int32_t numOfUpstream, SSchema* pSchema, int32_t numOfOutput); -SOperatorInfo *createOrderOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput); +SOperatorInfo* createOrderOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput, SOrderVal* pOrderVal); SSDataBlock* doGlobalAggregate(void* param, bool* newgroup); SSDataBlock* doMultiwayMergeSort(void* param, bool* newgroup); diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index c124bd20fc..55a762d809 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -2283,7 +2283,7 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int32_t numOf } case OP_Order: { - pRuntimeEnv->proot = createOrderOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pQueryAttr->pExpr1, pQueryAttr->numOfOutput); + pRuntimeEnv->proot = createOrderOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pQueryAttr->pExpr1, pQueryAttr->numOfOutput, &pQueryAttr->order); break; } @@ -5428,6 +5428,7 @@ static int32_t doMergeSDatablock(SSDataBlock* pDest, SSDataBlock* pSrc) { } pDest->info.rows += pSrc->info.rows; + return TSDB_CODE_SUCCESS; } @@ -5478,7 +5479,7 @@ static SSDataBlock* doSort(void* param, bool* newgroup) { return (pInfo->pDataBlock->info.rows > 0)? 
pInfo->pDataBlock:NULL; } -SOperatorInfo *createOrderOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput) { +SOperatorInfo *createOrderOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput, SOrderVal* pOrderVal) { SOrderOperatorInfo* pInfo = calloc(1, sizeof(SOrderOperatorInfo)); { @@ -5486,10 +5487,14 @@ SOperatorInfo *createOrderOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorI pDataBlock->pDataBlock = taosArrayInit(numOfOutput, sizeof(SColumnInfoData)); for(int32_t i = 0; i < numOfOutput; ++i) { SColumnInfoData col = {{0}}; - col.info.bytes = pExpr->base.resBytes; - col.info.colId = pExpr->base.resColId; - col.info.type = pExpr->base.resType; + col.info.colId = pExpr[i].base.colInfo.colId; + col.info.bytes = pExpr[i].base.colBytes; + col.info.type = pExpr[i].base.colType; taosArrayPush(pDataBlock->pDataBlock, &col); + + if (col.info.colId == pOrderVal->orderColId) { + pInfo->colIndex = i; + } } pDataBlock->info.numOfCols = numOfOutput; From 977b2202eaf775b0d33d95b0ea9f33887b14028e Mon Sep 17 00:00:00 2001 From: cpwu Date: Thu, 12 Aug 2021 16:21:14 +0800 Subject: [PATCH 023/165] [TD-5935] add case for TD-5935 --- tests/pytest/functions/queryTestCases.py | 394 ++++++++++++++++++++++- 1 file changed, 392 insertions(+), 2 deletions(-) diff --git a/tests/pytest/functions/queryTestCases.py b/tests/pytest/functions/queryTestCases.py index b7480fdbd5..ae2bbc4a81 100644 --- a/tests/pytest/functions/queryTestCases.py +++ b/tests/pytest/functions/queryTestCases.py @@ -13,6 +13,8 @@ import sys import subprocess +import random +import math from util.log import * from util.cases import * @@ -106,6 +108,9 @@ class TDTestCase: tdSql.execute("drop database if exists db1") tdSql.execute("create database if not exists db keep 3650") tdSql.execute("create database if not exists db1 keep 3650") + tdSql.execute("create database if not exists new keep 3650") + tdSql.execute("create database if not exists private keep 3650") + tdSql.execute("create database if not exists db2 keep 3650") tdSql.execute("create stable db.stb1 (ts timestamp, c1 int) tags(t1 int)") tdSql.execute("create stable db.stb2 (ts timestamp, c1 int) tags(t1 int)") @@ -122,6 +127,14 @@ class TDTestCase: # p1 不进入指定数据库 tdSql.query("show create database db") tdSql.checkRows(1) + tdSql.query("show create database db1") + tdSql.checkRows(1) + tdSql.query("show create database db2") + tdSql.checkRows(1) + tdSql.query("show create database new") + tdSql.checkRows(1) + tdSql.query("show create database private") + tdSql.checkRows(1) tdSql.error("show create database ") tdSql.error("show create databases db ") tdSql.error("show create database db.stb1") @@ -340,17 +353,394 @@ class TDTestCase: pass + def td4889(self): + tdLog.printNoPrefix("==========TD-4889==========") + tdSql.execute("drop database if exists db") + tdSql.execute("create database if not exists db keep 3650") + + tdSql.execute("use db") + tdSql.execute("create stable db.stb1 (ts timestamp, c1 int) tags(t1 int)") + + for i in range(1000): + tdSql.execute(f"create table db.t1{i} using db.stb1 tags({i})") + for j in range(100): + tdSql.execute(f"insert into db.t1{i} values (now-100d, {i+j})") + + tdSql.query("show vgroups") + index = tdSql.getData(0,0) + tdSql.checkData(0, 6, 0) + tdSql.execute(f"compact vnodes in({index})") + for i in range(3): + tdSql.query("show vgroups") + if tdSql.getData(0, 6) == 1: + tdLog.printNoPrefix("show vgroups row:0 col:6 data:1 == expect:1") + 
break + if i == 3: + tdLog.exit("compacting not occured") + time.sleep(0.5) + + pass + + def td5168insert(self): + tdSql.execute("drop database if exists db") + tdSql.execute("create database if not exists db keep 3650") + + tdSql.execute("use db") + tdSql.execute("create stable db.stb1 (ts timestamp, c1 float, c2 float, c3 double, c4 double) tags(t1 int)") + tdSql.execute("create table db.t1 using db.stb1 tags(1)") + + for i in range(5): + c1 = 1001.11 + i*0.1 + c2 = 1001.11 + i*0.1 + 1*0.01 + c3 = 1001.11 + i*0.1 + 2*0.01 + c4 = 1001.11 + i*0.1 + 3*0.01 + tdSql.execute(f"insert into db.t1 values ('2021-07-01 08:00:0{i}.000', {c1}, {c2}, {c3}, {c4})") + + # tdSql.execute("insert into db.t1 values ('2021-07-01 08:00:00.000', 1001.11, 1001.12, 1001.13, 1001.14)") + # tdSql.execute("insert into db.t1 values ('2021-07-01 08:00:01.000', 1001.21, 1001.22, 1001.23, 1001.24)") + # tdSql.execute("insert into db.t1 values ('2021-07-01 08:00:02.000', 1001.31, 1001.32, 1001.33, 1001.34)") + # tdSql.execute("insert into db.t1 values ('2021-07-01 08:00:03.000', 1001.41, 1001.42, 1001.43, 1001.44)") + # tdSql.execute("insert into db.t1 values ('2021-07-01 08:00:04.000', 1001.51, 1001.52, 1001.53, 1001.54)") + + # for i in range(1000000): + for i in range(1000000): + random1 = random.uniform(1000,1001) + random2 = random.uniform(1000,1001) + random3 = random.uniform(1000,1001) + random4 = random.uniform(1000,1001) + tdSql.execute(f"insert into db.t1 values (now+{i}a, {random1}, {random2},{random3}, {random4})") + + pass + + def td5168(self): + tdLog.printNoPrefix("==========TD-5168==========") + # 插入小范围内的随机数 + tdLog.printNoPrefix("=====step0: 默认情况下插入数据========") + self.td5168insert() + + # 获取五个时间点的数据作为基准数值,未压缩情况下精准匹配 + for i in range(5): + tdSql.query(f"select * from db.t1 where ts='2021-07-01 08:00:0{i}.000' ") + # c1, c2, c3, c4 = tdSql.getData(0, 1), tdSql.getData(0, 2), tdSql.getData(0, 3), tdSql.getData(0, 4) + for j in range(4): + locals()["f" + str(j) + str(i)] = tdSql.getData(0, j+1) + print(f"f{j}{i}:", locals()["f" + str(j) + str(i)]) + tdSql.checkData(0, j+1, locals()["f" + str(j) + str(i)]) + + # tdSql.query("select * from db.t1 limit 100,1") + # f10, f11, f12, f13 = tdSql.getData(0,1), tdSql.getData(0,2), tdSql.getData(0,3), tdSql.getData(0,4) + # + # tdSql.query("select * from db.t1 limit 1000,1") + # f20, f21, f22, f23 = tdSql.getData(0,1), tdSql.getData(0,2), tdSql.getData(0,3), tdSql.getData(0,4) + # + # tdSql.query("select * from db.t1 limit 10000,1") + # f30, f31, f32, f33 = tdSql.getData(0,1), tdSql.getData(0,2), tdSql.getData(0,3), tdSql.getData(0,4) + # + # tdSql.query("select * from db.t1 limit 100000,1") + # f40, f41, f42, f43 = tdSql.getData(0,1), tdSql.getData(0,2), tdSql.getData(0,3), tdSql.getData(0,4) + # + # tdSql.query("select * from db.t1 limit 1000000,1") + # f50, f51, f52, f53 = tdSql.getData(0,1), tdSql.getData(0,2), tdSql.getData(0,3), tdSql.getData(0,4) + + # 关闭服务并获取未开启压缩情况下的数据容量 + tdSql.query("show dnodes") + index = tdSql.getData(0, 0) + tdDnodes.stop(index) + + cfgdir = self.getCfgDir() + cfgfile = self.getCfgFile() + + lossy_cfg_cmd=f"grep lossyColumns {cfgfile}|awk '{{print $2}}'" + data_size_cmd = f"du -s {cfgdir}/../data/vnode/ | awk '{{print $1}}'" + dsize_init = int(subprocess.check_output(data_size_cmd,shell=True).decode("utf-8")) + lossy_args = subprocess.check_output(lossy_cfg_cmd, shell=True).decode("utf-8") + tdLog.printNoPrefix(f"close the lossyColumns,data size is: {dsize_init};the lossyColumns line is: {lossy_args}") + + 
################################################### + float_lossy = "float" + double_lossy = "double" + float_double_lossy = "float|double" + no_loosy = "" + + double_precision_cmd = f"sed -i '$a dPrecision 0.000001' {cfgfile}" + _ = subprocess.check_output(double_precision_cmd, shell=True).decode("utf-8") + + lossy_float_cmd = f"sed -i '$a lossyColumns {float_lossy}' {cfgfile} " + lossy_double_cmd = f"sed -i '$d' {cfgfile} && sed -i '$a lossyColumns {double_lossy}' {cfgfile} " + lossy_float_double_cmd = f"sed -i '$d' {cfgfile} && sed -i '$a lossyColumns {float_double_lossy}' {cfgfile} " + lossy_no_cmd = f"sed -i '$a lossyColumns {no_loosy}' {cfgfile} " + + ################################################### + + # 开启有损压缩,参数float,并启动服务插入数据 + tdLog.printNoPrefix("=====step1: lossyColumns设置为float========") + lossy_float = subprocess.check_output(lossy_float_cmd, shell=True).decode("utf-8") + tdDnodes.start(index) + self.td5168insert() + + # 查询前面所述5个时间数据并与基准数值进行比较 + for i in range(5): + tdSql.query(f"select * from db.t1 where ts='2021-07-01 08:00:0{i}.000' ") + # c1, c2, c3, c4 = tdSql.getData(0, 1), tdSql.getData(0, 2), tdSql.getData(0, 3), tdSql.getData(0, 4) + for j in range(4): + # locals()["f" + str(j) + str(i)] = tdSql.getData(0, j+1) + # print(f"f{j}{i}:", locals()["f" + str(j) + str(i)]) + tdSql.checkData(0, j+1, locals()["f" + str(j) + str(i)]) + + # 关闭服务并获取压缩参数为float情况下的数据容量 + tdDnodes.stop(index) + dsize_float = int(subprocess.check_output(data_size_cmd,shell=True).decode("utf-8")) + lossy_args = subprocess.check_output(lossy_cfg_cmd, shell=True).decode("utf-8") + tdLog.printNoPrefix(f"open the lossyColumns, data size is:{dsize_float};the lossyColumns line is: {lossy_args}") + + # 修改有损压缩,参数double,并启动服务 + tdLog.printNoPrefix("=====step2: lossyColumns设置为double========") + lossy_double = subprocess.check_output(lossy_double_cmd, shell=True).decode("utf-8") + tdDnodes.start(index) + self.td5168insert() + + # 查询前面所述5个时间数据并与基准数值进行比较 + for i in range(5): + tdSql.query(f"select * from db.t1 where ts='2021-07-01 08:00:0{i}.000' ") + for j in range(4): + tdSql.checkData(0, j+1, locals()["f" + str(j) + str(i)]) + + # 关闭服务并获取压缩参数为double情况下的数据容量 + tdDnodes.stop(index) + dsize_double = int(subprocess.check_output(data_size_cmd, shell=True).decode("utf-8")) + lossy_args = subprocess.check_output(lossy_cfg_cmd, shell=True).decode("utf-8") + tdLog.printNoPrefix(f"open the lossyColumns, data size is:{dsize_double};the lossyColumns line is: {lossy_args}") + + # 修改有损压缩,参数 float&&double ,并启动服务 + tdLog.printNoPrefix("=====step3: lossyColumns设置为 float&&double ========") + lossy_float_double = subprocess.check_output(lossy_float_double_cmd, shell=True).decode("utf-8") + tdDnodes.start(index) + self.td5168insert() + + # 查询前面所述5个时间数据并与基准数值进行比较 + for i in range(5): + tdSql.query(f"select * from db.t1 where ts='2021-07-01 08:00:0{i}.000' ") + for j in range(4): + tdSql.checkData(0, j+1, locals()["f" + str(j) + str(i)]) + + # 关闭服务并获取压缩参数为 float&&double 情况下的数据容量 + tdDnodes.stop(index) + dsize_float_double = int(subprocess.check_output(data_size_cmd, shell=True).decode("utf-8")) + lossy_args = subprocess.check_output(lossy_cfg_cmd, shell=True).decode("utf-8") + tdLog.printNoPrefix(f"open the lossyColumns, data size is:{dsize_float_double};the lossyColumns line is: {lossy_args}") + + if not ((dsize_float_double < dsize_init) and (dsize_double < dsize_init) and (dsize_float < dsize_init)) : + tdLog.printNoPrefix(f"When lossyColumns value is float, data size is: {dsize_float}") + tdLog.printNoPrefix(f"When 
lossyColumns value is double, data size is: {dsize_double}") + tdLog.printNoPrefix(f"When lossyColumns value is float and double, data size is: {dsize_float_double}") + tdLog.printNoPrefix(f"When lossyColumns is closed, data size is: {dsize_init}") + tdLog.exit("压缩未生效") + else: + tdLog.printNoPrefix(f"When lossyColumns value is float, data size is: {dsize_float}") + tdLog.printNoPrefix(f"When lossyColumns value is double, data size is: {dsize_double}") + tdLog.printNoPrefix(f"When lossyColumns value is float and double, data size is: {dsize_float_double}") + tdLog.printNoPrefix(f"When lossyColumns is closed, data size is: {dsize_init}") + tdLog.printNoPrefix("压缩生效") + + pass + + def td5433(self): + tdLog.printNoPrefix("==========TD-5433==========") + tdSql.execute("drop database if exists db") + tdSql.execute("create database if not exists db keep 3650") + + tdSql.execute("use db") + tdSql.execute("create stable db.stb1 (ts timestamp, c1 int) tags(t0 tinyint, t1 int)") + tdSql.execute("create stable db.stb2 (ts timestamp, c1 int) tags(t0 binary(16), t1 binary(16))") + numtab=2000000 + for i in range(numtab): + sql = f"create table db.t{i} using db.stb1 tags({i%128}, {100+i})" + tdSql.execute(sql) + tdSql.execute(f"insert into db.t{i} values (now-10d, {i})") + tdSql.execute(f"insert into db.t{i} values (now-9d, {i*2})") + tdSql.execute(f"insert into db.t{i} values (now-8d, {i*3})") + + tdSql.execute("create table db.t01 using db.stb2 tags('1', '100')") + tdSql.execute("create table db.t02 using db.stb2 tags('2', '200')") + tdSql.execute("create table db.t03 using db.stb2 tags('3', '300')") + tdSql.execute("create table db.t04 using db.stb2 tags('4', '400')") + tdSql.execute("create table db.t05 using db.stb2 tags('5', '500')") + + tdSql.query("select distinct t1 from stb1 where t1 != '150'") + tdSql.checkRows(numtab-1) + tdSql.query("select distinct t1 from stb1 where t1 != 150") + tdSql.checkRows(numtab-1) + tdSql.query("select distinct t1 from stb1 where t1 = 150") + tdSql.checkRows(1) + tdSql.query("select distinct t1 from stb1 where t1 = '150'") + tdSql.checkRows(1) + tdSql.query("select distinct t1 from stb1") + tdSql.checkRows(numtab) + + tdSql.query("select distinct t0 from stb1 where t0 != '2'") + tdSql.checkRows(127) + tdSql.query("select distinct t0 from stb1 where t0 != 2") + tdSql.checkRows(127) + tdSql.query("select distinct t0 from stb1 where t0 = 2") + tdSql.checkRows(1) + tdSql.query("select distinct t0 from stb1 where t0 = '2'") + tdSql.checkRows(1) + tdSql.query("select distinct t0 from stb1") + tdSql.checkRows(128) + + tdSql.query("select distinct t1 from stb2 where t1 != '200'") + tdSql.checkRows(4) + tdSql.query("select distinct t1 from stb2 where t1 != 200") + tdSql.checkRows(4) + tdSql.query("select distinct t1 from stb2 where t1 = 200") + tdSql.checkRows(1) + tdSql.query("select distinct t1 from stb2 where t1 = '200'") + tdSql.checkRows(1) + tdSql.query("select distinct t1 from stb2") + tdSql.checkRows(5) + + tdSql.query("select distinct t0 from stb2 where t0 != '2'") + tdSql.checkRows(4) + tdSql.query("select distinct t0 from stb2 where t0 != 2") + tdSql.checkRows(4) + tdSql.query("select distinct t0 from stb2 where t0 = 2") + tdSql.checkRows(1) + tdSql.query("select distinct t0 from stb2 where t0 = '2'") + tdSql.checkRows(1) + tdSql.query("select distinct t0 from stb2") + tdSql.checkRows(5) + + pass + + def td5798(self): + tdLog.printNoPrefix("==========TD-5798==========") + tdSql.execute("drop database if exists db") + tdSql.execute("create database if not exists 
db keep 3650") + + tdSql.execute("use db") + tdSql.execute("create stable db.stb1 (ts timestamp, c1 int, c2 int) tags(t0 tinyint, t1 int, t2 int)") + tdSql.execute("create stable db.stb2 (ts timestamp, c2 int, c3 int) tags(t2 binary(16), t3 binary(16), t4 int)") + for i in range(100): + sql = f"create table db.t{i} using db.stb1 tags({i%7}, {(i-1)%7}, {i%2})" + tdSql.execute(sql) + tdSql.execute(f"insert into db.t{i} values (now-10d, {i}, {i%3})") + tdSql.execute(f"insert into db.t{i} values (now-9d, {i*2}, {(i-1)%3})") + tdSql.execute(f"insert into db.t{i} values (now-8d, {i*3}, {(i-2)%3})") + + tdSql.execute(f"create table db.t0{i} using db.stb2 tags('{i}', '{100-i}', {i%3})") + tdSql.execute(f"insert into db.t0{i} values (now-10d, {i}, {(i+1)%3})") + tdSql.execute(f"insert into db.t0{i} values (now-9d, {i*2}, {(i+2)%3})") + tdSql.execute(f"insert into db.t0{i} values (now-8d, {i*3}, {(i)%3})") + + tdSql.query("select distinct t1 from stb1") + tdSql.checkRows(7) + tdSql.query("select distinct t0, t1 from stb1") + tdSql.checkRows(7) + tdSql.query("select distinct t1, t0 from stb1") + tdSql.checkRows(7) + tdSql.query("select distinct t1, t2 from stb1") + tdSql.checkRows(14) + tdSql.query("select distinct t0, t1, t2 from stb1") + tdSql.checkRows(14) + tdSql.query("select distinct t0 t1, t1 t2 from stb1") + tdSql.checkRows(7) + tdSql.query("select distinct t0, t0, t0 from stb1") + tdSql.checkRows(7) + tdSql.query("select distinct t0, t1 from t1") + tdSql.checkRows(1) + + + ########## should be error ######### + tdSql.error("select distinct from stb1") + tdSql.error("select distinct t2 from ") + tdSql.error("distinct t2 from stb1") + tdSql.error("select distinct stb1") + tdSql.error("select distinct t0, t1, t2, t3 from stb1") + tdSql.error("select distinct stb1.t0, stb1.t1, stb2.t2, stb2.t3 from stb1") + + tdSql.error("select dist t0 from stb1") + tdSql.error("select distinct stb2.t2, stb2.t3 from stb1") + tdSql.error("select distinct stb2.t2 t1, stb2.t3 t2 from stb1") + + tdSql.error("select distinct t0, t1 from t1 where t0 < 7") + + ########## add where condition ########## + tdSql.query("select distinct t0, t1 from stb1 where t1 > 3") + tdSql.checkRows(3) + tdSql.query("select distinct t0, t1 from stb1 where t1 > 3 limit 2") + tdSql.checkRows(2) + tdSql.query("select distinct t0, t1 from stb1 where t1 > 3 limit 2 offset 2") + tdSql.checkRows(1) + tdSql.query("select distinct t0, t1 from stb1 where t1 > 3 slimit 2") + tdSql.checkRows(3) + tdSql.error("select distinct t0, t1 from stb1 where c1 > 2") + tdSql.query("select distinct t0, t1 from stb1 where t1 > 3 and t1 < 5") + tdSql.checkRows(1) + tdSql.error("select distinct stb1.t0, stb1.t1 from stb1, stb2 where stb1.t2=stb2.t4") + tdSql.error("select distinct t0, t1 from stb1 where stb2.t4 > 2") + # tdSql.error("select max(c1), distinct t0 from stb1 where t0 > 2") + tdSql.error("select distinct t0 from stb1 where t0 in (select t0 from stb1 where t0 > 2)") + tdSql.query("select distinct t0, t1 from stb1 where t0 in (1,2,3,4,5)") + tdSql.checkRows(5) + tdSql.query("select distinct t1 from (select t0, t1 from stb1 where t0 > 2) ") + tdSql.checkRows(4) + tdSql.query("select distinct t1 from (select distinct t0, t1 from stb1 where t0 > 2 and t1 < 3) ") + tdSql.checkRows(1) + tdSql.query("select distinct t1 from (select distinct t0, t1 from stb1 where t0 > 2 ) where t1 < 3") + tdSql.checkRows(1) + tdSql.query("select distinct t1 from (select t0, t1 from stb1 where t0 > 2 ) where t1 < 3") + tdSql.checkRows(1) + tdSql.error("select distinct t1, t0 
from (select t1 from stb1 where t0 > 2 ) where t1 < 3") + tdSql.query("select distinct t1, t0 from (select t1,t0 from stb1 where t0 > 2 ) where t1 < 3") + tdSql.checkRows(1) + # tdSql.error(" select distinct t1, t0 from (select t1,t0 from stb1 where t0 > 2 order by ts) where t1 < 3") + tdSql.query("select t1, t0 from (select distinct t1,t0 from stb1 where t0 > 2 ) where t1 < 3") + + + pass + + def td5935(self): + tdLog.printNoPrefix("==========TD-5798==========") + tdSql.execute("drop database if exists db") + tdSql.execute("create database if not exists db keep 3650") + + tdSql.execute("use db") + tdSql.execute("create stable db.stb1 (ts timestamp, c1 int, c2 float) tags(t1 int, t2 int)") + nowtime=int(round((time.time()*1000))) + for i in range(100): + sql = f"create table db.t{i} using db.stb1 tags({i % 7}, {i % 2})" + tdSql.execute(sql) + for j in range(1000): + tdSql.execute(f"insert into db.t{i} values ({nowtime-j*10}, {1000-j}, {round(random.random()*j,3)})") + tdSql.execute(f"insert into db.t{i} (ts) values ({nowtime-10000}) ") + + fillsql=f"select last(*) from t0 where ts>={nowtime-10000} and ts<{nowtime} interval(10a) fill(next) limit 10" + tdSql.query(fillsql) + fillResult=False + if (tdSql.getData(0,2) != 0) and (tdSql.getData(0, 2) is not None): + fillResult=True + if fillResult: + tdLog.success(f"sql is :{fillsql}, fill(next) is correct") + else: + tdLog.exit("fill(next) is wrong") + + + def run(self): # master branch # self.td3690() # self.td4082() # self.td4288() - self.td4724() + # self.td4724() + # self.td5798() + self.td5935() # develop branch # self.td4097() - + # self.td4889() + # self.td5168() + # self.td5433() def stop(self): tdSql.close() From 4943f4ebc47261f4a2f53812fcd01fdd152903d1 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 12 Aug 2021 16:21:48 +0800 Subject: [PATCH 024/165] [td-5881]fix bug in TD-5881 --- src/query/src/qExecutor.c | 1 + 1 file changed, 1 insertion(+) diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index 55a762d809..7088d58830 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -5498,6 +5498,7 @@ SOperatorInfo *createOrderOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorI } pDataBlock->info.numOfCols = numOfOutput; + pInfo->order = pOrderVal->order; pInfo->pDataBlock = pDataBlock; } From 932d065da5de3df129a02813852493dc56d50ac8 Mon Sep 17 00:00:00 2001 From: Linhe Huo Date: Thu, 12 Aug 2021 16:45:41 +0800 Subject: [PATCH 025/165] [TD-6032]: fix nanosecond 999999999 error for nodejs [ci skip] (#7329) --- src/connector/nodejs/nodetaos/taosobjects.js | 3 ++- src/connector/nodejs/package.json | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/src/connector/nodejs/nodetaos/taosobjects.js b/src/connector/nodejs/nodetaos/taosobjects.js index 0fc8dc8ef1..3bc0fe0aca 100644 --- a/src/connector/nodejs/nodetaos/taosobjects.js +++ b/src/connector/nodejs/nodetaos/taosobjects.js @@ -47,7 +47,8 @@ class TaosTimestamp extends Date { super(Math.floor(date / 1000)); this.precisionExtras = date % 1000; } else if (precision === 2) { - super(parseInt(date / 1000000)); + // use BigInt to fix: 1623254400999999999 / 1000000 = 1623254401000 which not expected + super(parseInt(BigInt(date) / 1000000n)); // use BigInt to fix: 1625801548423914405 % 1000000 = 914496 which not expected (914405) this.precisionExtras = parseInt(BigInt(date) % 1000000n); } else { diff --git a/src/connector/nodejs/package.json b/src/connector/nodejs/package.json index db37318a16..6a2c66100b 100644 --- 
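
The node.js change above ([TD-6032]) has to route the nanosecond timestamp through BigInt because a JavaScript Number is an IEEE-754 double and cannot hold a 19-digit int64 exactly, so both the millisecond part and the sub-millisecond remainder must be computed with exact integer arithmetic. Below is a minimal C sketch of the same split, reusing the example value quoted in the patch comment; everything else in it (names, the tiny main) is illustrative and not TDengine code.

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

int main(void) {
    int64_t ns = 1623254400999999999LL;   /* nanosecond epoch value from the patch comment */

    int64_t ms     = ns / 1000000LL;      /* 1623254400999 -> what the Date constructor receives */
    int64_t extras = ns % 1000000LL;      /* 999999        -> kept as the extra precision part */

    /* cast to double (what a plain JS Number does) and the value rounds to
       1623254401000000000, which is why the old `date / 1000000` gave 1623254401000 */
    printf("exact : ms=%" PRId64 ", extras=%" PRId64 "\n", ms, extras);
    printf("double: ms=%.0f\n", (double)ns / 1000000.0);
    return 0;
}
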
a/src/connector/nodejs/package.json +++ b/src/connector/nodejs/package.json @@ -1,6 +1,6 @@ { "name": "td2.0-connector", - "version": "2.0.9", + "version": "2.0.10", "description": "A Node.js connector for TDengine.", "main": "tdengine.js", "directories": { From ea16e9c73c1ed0e8038bbc6042e46e0d760602b7 Mon Sep 17 00:00:00 2001 From: Ping Xiao Date: Thu, 12 Aug 2021 17:11:29 +0800 Subject: [PATCH 026/165] [TD-5835]: add test case for daily performance test --- tests/perftest-scripts/perftest-query.sh | 7 +++++++ tests/pytest/tools/taosdemoPerformance.py | 3 +-- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/tests/perftest-scripts/perftest-query.sh b/tests/perftest-scripts/perftest-query.sh index 5b2c860122..d4853c0825 100755 --- a/tests/perftest-scripts/perftest-query.sh +++ b/tests/perftest-scripts/perftest-query.sh @@ -101,7 +101,14 @@ function runQueryPerfTest { python3 insert/insertFromCSVPerformance.py -c $LOCAL_COMMIT -b $branch -T $type | tee -a $PERFORMANCE_TEST_REPORT + echo "=========== taosdemo performance: 4 int columns, 10000 tables, 100000 recoreds per table ===========" | tee -a $PERFORMANCE_TEST_REPORT python3 tools/taosdemoPerformance.py -c $LOCAL_COMMIT -b $branch -T $type | tee -a $PERFORMANCE_TEST_REPORT + + echo "=========== taosdemo performance: 400 int columns, 400 double columns, 200 binary(128) columns, 10000 tables, 1000 recoreds per table ===========" | tee -a $PERFORMANCE_TEST_REPORT + python3 tools/taosdemoPerformance.py -c $LOCAL_COMMIT -b $branch -T $type -i 400 -D 400 -B 200 -t 10000 -r 1000 | tee -a $PERFORMANCE_TEST_REPORT + + echo "=========== taosdemo performance: 1900 int columns, 1900 double columns, 200 binary(128) columns, 10000 tables, 1000 recoreds per table ===========" | tee -a $PERFORMANCE_TEST_REPORT + python3 tools/taosdemoPerformance.py -c $LOCAL_COMMIT -b $branch -T $type -i 1900 -D 1900 -B 200 -t 10000 -r 1000 | tee -a $PERFORMANCE_TEST_REPORT } diff --git a/tests/pytest/tools/taosdemoPerformance.py b/tests/pytest/tools/taosdemoPerformance.py index c28f94b3db..4a5abd49d8 100644 --- a/tests/pytest/tools/taosdemoPerformance.py +++ b/tests/pytest/tools/taosdemoPerformance.py @@ -63,7 +63,7 @@ class taosdemoPerformace: "batch_create_tbl_num": 10, "insert_mode": "taosc", "insert_rows": self.numOfRows, - "interlace_rows": 100, + "interlace_rows": 0, "max_sql_len": 1024000, "disorder_ratio": 0, "disorder_range": 1000, @@ -172,7 +172,6 @@ class taosdemoPerformace: cursor.execute("create database if not exists %s" % self.dbName) cursor.execute("use %s" % self.dbName) cursor.execute("create table if not exists taosdemo_perf (ts timestamp, create_table_time float, insert_records_time float, records_per_second float, commit_id binary(50), avg_delay float, max_delay float, min_delay float, branch binary(50), type binary(20), numoftables int, numofrows int, numofint int, numofdouble int, numofbinary int)") - print("==================== taosdemo performance ====================") print("create tables time: %f" % float(self.createTableTime)) print("insert records time: %f" % float(self.insertRecordsTime)) print("records per second: %f" % float(self.recordsPerSecond)) From 7e9be7c5635d7c8d728b8796f609bdfab4b7309a Mon Sep 17 00:00:00 2001 From: tickduan <417921451@qq.com> Date: Thu, 12 Aug 2021 17:33:45 +0800 Subject: [PATCH 027/165] add version to trigger pr --- src/kit/taospack/taospack.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/kit/taospack/taospack.c b/src/kit/taospack/taospack.c index ad188c3010..aa0bf4d485 
100644 --- a/src/kit/taospack/taospack.c +++ b/src/kit/taospack/taospack.c @@ -732,7 +732,7 @@ extern char Compressor []; // ----------------- main ---------------------- // int main(int argc, char *argv[]) { - printf("welcome to use taospack tools v1.5\n"); + printf("welcome to use taospack tools v1.6\n"); //printf(" sizeof(int)=%d\n", (int)sizeof(int)); //printf(" sizeof(long)=%d\n", (int)sizeof(long)); From 1b7861f8fd30b3171e9ce1781aeab82e2eaabee5 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Thu, 12 Aug 2021 18:06:51 +0800 Subject: [PATCH 028/165] [TD-6013]: taosdemo buffer overflow. (#7319) --- src/kit/taosdemo/taosdemo.c | 49 +++++++++++++++++++++++-------------- 1 file changed, 31 insertions(+), 18 deletions(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index c895545b81..c255ea6841 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -5101,21 +5101,27 @@ static int64_t generateStbRowData( int64_t dataLen = 0; char *pstr = recBuf; int64_t maxLen = MAX_DATA_SIZE; + int tmpLen; dataLen += snprintf(pstr + dataLen, maxLen - dataLen, "(%" PRId64 ",", timestamp); for (int i = 0; i < stbInfo->columnCount; i++) { if ((0 == strncasecmp(stbInfo->columns[i].dataType, - "BINARY", strlen("BINARY"))) + "BINARY", 6)) || (0 == strncasecmp(stbInfo->columns[i].dataType, - "NCHAR", strlen("NCHAR")))) { + "NCHAR", 5))) { if (stbInfo->columns[i].dataLen > TSDB_MAX_BINARY_LEN) { errorPrint( "binary or nchar length overflow, max size:%u\n", (uint32_t)TSDB_MAX_BINARY_LEN); return -1; } + if ((stbInfo->columns[i].dataLen + 1) > + /* need count 3 extra chars \', \', and , */ + (remainderBufLen - dataLen - 3)) { + return 0; + } char* buf = (char*)calloc(stbInfo->columns[i].dataLen+1, 1); if (NULL == buf) { errorPrint( "calloc failed! 
size:%d\n", stbInfo->columns[i].dataLen); @@ -5129,19 +5135,20 @@ static int64_t generateStbRowData( char *tmp; if (0 == strncasecmp(stbInfo->columns[i].dataType, - "INT", strlen("INT"))) { + "INT", 3)) { if ((g_args.demo_mode) && (i == 1)) { tmp = demo_voltage_int_str(); } else { tmp = rand_int_str(); } - tstrncpy(pstr + dataLen, tmp, INT_BUFF_LEN); + tmpLen = strlen(tmp); + tstrncpy(pstr + dataLen, tmp, min(tmpLen + 1, INT_BUFF_LEN)); } else if (0 == strncasecmp(stbInfo->columns[i].dataType, - "BIGINT", strlen("BIGINT"))) { + "BIGINT", 6)) { tmp = rand_bigint_str(); tstrncpy(pstr + dataLen, tmp, BIGINT_BUFF_LEN); } else if (0 == strncasecmp(stbInfo->columns[i].dataType, - "FLOAT", strlen("FLOAT"))) { + "FLOAT", 5)) { if (g_args.demo_mode) { if (i == 0) { tmp = demo_current_float_str(); @@ -5151,27 +5158,33 @@ static int64_t generateStbRowData( } else { tmp = rand_float_str(); } - tstrncpy(pstr + dataLen, tmp, FLOAT_BUFF_LEN); + tmpLen = strlen(tmp); + tstrncpy(pstr + dataLen, tmp, min(tmpLen +1, FLOAT_BUFF_LEN)); } else if (0 == strncasecmp(stbInfo->columns[i].dataType, - "DOUBLE", strlen("DOUBLE"))) { + "DOUBLE", 6)) { tmp = rand_double_str(); - tstrncpy(pstr + dataLen, tmp, DOUBLE_BUFF_LEN); + tmpLen = strlen(tmp); + tstrncpy(pstr + dataLen, tmp, min(tmpLen +1, DOUBLE_BUFF_LEN)); } else if (0 == strncasecmp(stbInfo->columns[i].dataType, - "SMALLINT", strlen("SMALLINT"))) { + "SMALLINT", 8)) { tmp = rand_smallint_str(); - tstrncpy(pstr + dataLen, tmp, SMALLINT_BUFF_LEN); + tmpLen = strlen(tmp); + tstrncpy(pstr + dataLen, tmp, min(tmpLen + 1, SMALLINT_BUFF_LEN)); } else if (0 == strncasecmp(stbInfo->columns[i].dataType, - "TINYINT", strlen("TINYINT"))) { + "TINYINT", 7)) { tmp = rand_tinyint_str(); - tstrncpy(pstr + dataLen, tmp, TINYINT_BUFF_LEN); + tmpLen = strlen(tmp); + tstrncpy(pstr + dataLen, tmp, min(tmpLen +1, TINYINT_BUFF_LEN)); } else if (0 == strncasecmp(stbInfo->columns[i].dataType, - "BOOL", strlen("BOOL"))) { + "BOOL", 4)) { tmp = rand_bool_str(); - tstrncpy(pstr + dataLen, tmp, BOOL_BUFF_LEN); + tmpLen = strlen(tmp); + tstrncpy(pstr + dataLen, tmp, min(tmpLen +1, BOOL_BUFF_LEN)); } else if (0 == strncasecmp(stbInfo->columns[i].dataType, - "TIMESTAMP", strlen("TIMESTAMP"))) { + "TIMESTAMP", 9)) { tmp = rand_int_str(); - tstrncpy(pstr + dataLen, tmp, INT_BUFF_LEN); + tmpLen = strlen(tmp); + tstrncpy(pstr + dataLen, tmp, min(tmpLen +1, INT_BUFF_LEN)); } else { errorPrint( "Not support data type: %s\n", stbInfo->columns[i].dataType); return -1; @@ -5182,7 +5195,7 @@ static int64_t generateStbRowData( dataLen += 1; } - if (dataLen > (remainderBufLen - (DOUBLE_BUFF_LEN + 1))) + if (dataLen > (remainderBufLen - (128))) return 0; } From b61eaa10402dfb6a966ac63647914f04ec5ee7ec Mon Sep 17 00:00:00 2001 From: wpan Date: Fri, 13 Aug 2021 09:34:39 +0800 Subject: [PATCH 029/165] fix stddev query condition column issue and add test case --- src/client/src/tscSubquery.c | 4 ++++ tests/script/general/parser/function.sim | 22 ++++++++++++++++++---- 2 files changed, 22 insertions(+), 4 deletions(-) diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c index 2e7e02cfd5..293fe18c77 100644 --- a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -2397,6 +2397,10 @@ int32_t tscHandleFirstRoundStableQuery(SSqlObj *pSql) { } else { SSchema ss = {.type = (uint8_t)pCol->info.type, .bytes = pCol->info.bytes, .colId = (int16_t)pCol->columnIndex}; tscColumnListInsert(pNewQueryInfo->colList, pCol->columnIndex, pCol->tableUid, &ss); + int32_t ti = 
tscColumnExists(pNewQueryInfo->colList, pCol->columnIndex, pCol->tableUid); + assert(ti >= 0); + SColumn* x = taosArrayGetP(pNewQueryInfo->colList, ti); + tscColumnCopy(x, pCol); } } } diff --git a/tests/script/general/parser/function.sim b/tests/script/general/parser/function.sim index 0c93fe919a..e9e907f97a 100644 --- a/tests/script/general/parser/function.sim +++ b/tests/script/general/parser/function.sim @@ -1149,9 +1149,11 @@ endi sql select derivative(test_column_alias_name, 1s, 0) from (select avg(k) test_column_alias_name from t1 interval(1s)); -sql create table smeters (ts timestamp, current float, voltage int); -sql insert into smeters values ('2021-08-08 10:10:10', 10, 1); -sql insert into smeters values ('2021-08-08 10:10:12', 10, 2); +sql create table smeters (ts timestamp, current float, voltage int) tags (t1 int); +sql create table smeter1 using smeters tags (1); +sql insert into smeter1 values ('2021-08-08 10:10:10', 10, 2); +sql insert into smeter1 values ('2021-08-08 10:10:12', 10, 2); +sql insert into smeter1 values ('2021-08-08 10:10:14', 20, 1); sql select stddev(voltage) from smeters where ts>='2021-08-08 10:10:10.000' and ts < '2021-08-08 10:10:20.000' and current=10 interval(1000a); if $rows != 2 then @@ -1160,9 +1162,21 @@ endi if $data00 != @21-08-08 10:10:10.000@ then return -1 endi +if $data01 != 0.000000000 then + return -1 +endi if $data10 != @21-08-08 10:10:12.000@ then return -1 endi +if $data11 != 0.000000000 then + return -1 +endi - +sql select stddev(voltage) from smeters where ts>='2021-08-08 10:10:10.000' and ts < '2021-08-08 10:10:20.000' and current=10; +if $rows != 1 then + return -1 +endi +if $data00 != 0.000000000 then + return -1 +endi From 356018a99057a3deedfd3d4b366ce1f4e0a61e87 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Fri, 13 Aug 2021 09:38:01 +0800 Subject: [PATCH 030/165] Hotfix/sangshuduo/td 5844 cmdline parameters align for master (#7337) * [TD-5844]: make cmd line parameter similar. * fix test case align with taosdemo change. * fix windows stack overflow issue. * fix mac compile error. * fix taosdemo cmdline parameter in tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoInsert.py --- src/kit/taosdemo/taosdemo.c | 125 ++++++++++++------ .../taosdemoTestSupportNanoInsert.py | 73 +++++----- .../taosdemoTestSupportNanoInsert.py | 2 +- tests/pytest/tools/taosdemoTestInterlace.py | 2 +- 4 files changed, 126 insertions(+), 76 deletions(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index bd0feaeb92..aad2fe95fa 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -77,7 +77,7 @@ extern char configDir[]; #define COL_BUFFER_LEN ((TSDB_COL_NAME_LEN + 15) * TSDB_MAX_COLUMNS) #define MAX_USERNAME_SIZE 64 -#define MAX_PASSWORD_SIZE 64 +#define MAX_PASSWORD_SIZE 16 #define MAX_HOSTNAME_SIZE 253 // https://man7.org/linux/man-pages/man7/hostname.7.html #define MAX_TB_NAME_SIZE 64 #define MAX_DATA_SIZE (16*TSDB_MAX_COLUMNS)+20 // max record len: 16*MAX_COLUMNS, timestamp string and ,('') need extra space @@ -216,7 +216,7 @@ typedef struct SArguments_S { uint16_t port; uint16_t iface; char * user; - char * password; + char password[MAX_PASSWORD_SIZE]; char * database; int replica; char * tb_prefix; @@ -709,24 +709,24 @@ static void printHelp() { printf("%s%s%s%s\n", indent, "-u", indent, "The TDengine user name to use when connecting to the server. 
Default is 'root'."); #ifdef _TD_POWER_ - printf("%s%s%s%s\n", indent, "-P", indent, + printf("%s%s%s%s\n", indent, "-p", indent, "The password to use when connecting to the server. Default is 'powerdb'."); printf("%s%s%s%s\n", indent, "-c", indent, "Configuration directory. Default is '/etc/power/'."); #elif (_TD_TQ_ == true) - printf("%s%s%s%s\n", indent, "-P", indent, + printf("%s%s%s%s\n", indent, "-p", indent, "The password to use when connecting to the server. Default is 'tqueue'."); printf("%s%s%s%s\n", indent, "-c", indent, "Configuration directory. Default is '/etc/tq/'."); #else - printf("%s%s%s%s\n", indent, "-P", indent, + printf("%s%s%s%s\n", indent, "-p", indent, "The password to use when connecting to the server. Default is 'taosdata'."); printf("%s%s%s%s\n", indent, "-c", indent, "Configuration directory. Default is '/etc/taos/'."); #endif printf("%s%s%s%s\n", indent, "-h", indent, "The host to connect to TDengine. Default is localhost."); - printf("%s%s%s%s\n", indent, "-p", indent, + printf("%s%s%s%s\n", indent, "-P", indent, "The TCP/IP port number to use for the connection. Default is 0."); printf("%s%s%s%s\n", indent, "-I", indent, #if STMT_IFACE_ENABLED == 1 @@ -826,11 +826,11 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { exit(EXIT_FAILURE); } arguments->host = argv[++i]; - } else if (strcmp(argv[i], "-p") == 0) { + } else if (strcmp(argv[i], "-P") == 0) { if ((argc == i+1) || (!isStringNumber(argv[i+1]))) { printHelp(); - errorPrint("%s", "\n\t-p need a number following!\n"); + errorPrint("%s", "\n\t-P need a number following!\n"); exit(EXIT_FAILURE); } arguments->port = atoi(argv[++i]); @@ -860,13 +860,13 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { exit(EXIT_FAILURE); } arguments->user = argv[++i]; - } else if (strcmp(argv[i], "-P") == 0) { - if (argc == i+1) { - printHelp(); - errorPrint("%s", "\n\t-P need a valid string following!\n"); - exit(EXIT_FAILURE); + } else if (strncmp(argv[i], "-p", 2) == 0) { + if (strlen(argv[i]) == 2) { + printf("Enter password:"); + scanf("%s", arguments->password); + } else { + tstrncpy(arguments->password, (char *)(argv[i] + 2), MAX_PASSWORD_SIZE); } - arguments->password = argv[++i]; } else if (strcmp(argv[i], "-o") == 0) { if (argc == i+1) { printHelp(); @@ -1065,7 +1065,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { arguments->debug_print = true; } else if (strcmp(argv[i], "-gg") == 0) { arguments->verbose_print = true; - } else if (strcmp(argv[i], "-pp") == 0) { + } else if (strcmp(argv[i], "-PP") == 0) { arguments->performance_print = true; } else if (strcmp(argv[i], "-O") == 0) { if ((argc == i+1) || @@ -2319,15 +2319,15 @@ static void printfDbInfoForQueryToFile( } static void printfQuerySystemInfo(TAOS * taos) { - char filename[BUFFER_SIZE+1] = {0}; - char buffer[BUFFER_SIZE+1] = {0}; + char filename[MAX_FILE_NAME_LEN] = {0}; + char buffer[1024] = {0}; TAOS_RES* res; time_t t; struct tm* lt; time(&t); lt = localtime(&t); - snprintf(filename, BUFFER_SIZE, "querySystemInfo-%d-%d-%d %d:%d:%d", + snprintf(filename, MAX_FILE_NAME_LEN, "querySystemInfo-%d-%d-%d %d:%d:%d", lt->tm_year+1900, lt->tm_mon, lt->tm_mday, lt->tm_hour, lt->tm_min, lt->tm_sec); @@ -2359,12 +2359,12 @@ static void printfQuerySystemInfo(TAOS * taos) { printfDbInfoForQueryToFile(filename, dbInfos[i], i); // show db.vgroups - snprintf(buffer, BUFFER_SIZE, "show %s.vgroups;", dbInfos[i]->name); + snprintf(buffer, 1024, "show %s.vgroups;", dbInfos[i]->name); res = 
taos_query(taos, buffer); xDumpResultToFile(filename, res); // show db.stables - snprintf(buffer, BUFFER_SIZE, "show %s.stables;", dbInfos[i]->name); + snprintf(buffer, 1024, "show %s.stables;", dbInfos[i]->name); res = taos_query(taos, buffer); xDumpResultToFile(filename, res); free(dbInfos[i]); @@ -2713,7 +2713,7 @@ static int getChildNameOfSuperTableWithLimitAndOffset(TAOS * taos, char* dbName, char* sTblName, char** childTblNameOfSuperTbl, int64_t* childTblCountOfSuperTbl, int64_t limit, uint64_t offset) { - char command[BUFFER_SIZE] = "\0"; + char command[1024] = "\0"; char limitBuf[100] = "\0"; TAOS_RES * res; @@ -2727,7 +2727,7 @@ static int getChildNameOfSuperTableWithLimitAndOffset(TAOS * taos, } //get all child table name use cmd: select tbname from superTblName; - snprintf(command, BUFFER_SIZE, "select tbname from %s.%s %s", + snprintf(command, 1024, "select tbname from %s.%s %s", dbName, sTblName, limitBuf); res = taos_query(taos, command); @@ -2805,13 +2805,13 @@ static int getAllChildNameOfSuperTable(TAOS * taos, char* dbName, static int getSuperTableFromServer(TAOS * taos, char* dbName, SSuperTable* superTbls) { - char command[BUFFER_SIZE] = "\0"; + char command[1024] = "\0"; TAOS_RES * res; TAOS_ROW row = NULL; int count = 0; //get schema use cmd: describe superTblName; - snprintf(command, BUFFER_SIZE, "describe %s.%s", dbName, superTbls->sTblName); + snprintf(command, 1024, "describe %s.%s", dbName, superTbls->sTblName); res = taos_query(taos, command); int32_t code = taos_errno(res); if (code != 0) { @@ -2891,7 +2891,8 @@ static int createSuperTable( TAOS * taos, char* dbName, SSuperTable* superTbl) { - char command[BUFFER_SIZE] = "\0"; + char *command = calloc(1, BUFFER_SIZE); + assert(command); char cols[COL_BUFFER_LEN] = "\0"; int colIndex; @@ -2902,6 +2903,7 @@ static int createSuperTable( if (superTbl->columnCount == 0) { errorPrint("%s() LN%d, super table column count is %d\n", __func__, __LINE__, superTbl->columnCount); + free(command); return -1; } @@ -2964,6 +2966,7 @@ static int createSuperTable( taos_close(taos); errorPrint("%s() LN%d, config error data type : %s\n", __func__, __LINE__, dataType); + free(command); exit(-1); } } @@ -2976,6 +2979,7 @@ static int createSuperTable( errorPrint("%s() LN%d, Failed when calloc, size:%d", __func__, __LINE__, len+1); taos_close(taos); + free(command); exit(-1); } @@ -2986,6 +2990,7 @@ static int createSuperTable( if (superTbl->tagCount == 0) { errorPrint("%s() LN%d, super table tag count is %d\n", __func__, __LINE__, superTbl->tagCount); + free(command); return -1; } @@ -3051,6 +3056,7 @@ static int createSuperTable( taos_close(taos); errorPrint("%s() LN%d, config error tag type : %s\n", __func__, __LINE__, dataType); + free(command); exit(-1); } } @@ -3066,13 +3072,16 @@ static int createSuperTable( if (0 != queryDbExec(taos, command, NO_INSERT_TYPE, false)) { errorPrint( "create supertable %s failed!\n\n", superTbl->sTblName); + free(command); return -1; } + debugPrint("create supertable %s success!\n\n", superTbl->sTblName); + free(command); return 0; } -static int createDatabasesAndStables() { +int createDatabasesAndStables(char *command) { TAOS * taos = NULL; int ret = 0; taos = taos_connect(g_Dbs.host, g_Dbs.user, g_Dbs.password, NULL, g_Dbs.port); @@ -3080,8 +3089,7 @@ static int createDatabasesAndStables() { errorPrint( "Failed to connect to TDengine, reason:%s\n", taos_errstr(NULL)); return -1; } - char command[BUFFER_SIZE] = "\0"; - + for (int i = 0; i < g_Dbs.dbCount; i++) { if (g_Dbs.db[i].drop) { 
sprintf(command, "drop database if exists %s;", g_Dbs.db[i].dbName); @@ -7145,7 +7153,8 @@ static void startMultiThreadInsertData(int threads, char* db_name, exit(-1); } - char buffer[BUFFER_SIZE]; + char *buffer = calloc(1, BUFFER_SIZE); + assert(buffer); char *pstr = buffer; if ((superTblInfo) @@ -7174,8 +7183,11 @@ static void startMultiThreadInsertData(int threads, char* db_name, ret, taos_stmt_errstr(pThreadInfo->stmt)); free(pids); free(infos); + free(buffer); exit(-1); } + + free(buffer); } #endif } else { @@ -7310,12 +7322,15 @@ static void *readTable(void *sarg) { threadInfo *pThreadInfo = (threadInfo *)sarg; TAOS *taos = pThreadInfo->taos; setThreadName("readTable"); - char command[BUFFER_SIZE] = "\0"; + char *command = calloc(1, BUFFER_SIZE); + assert(command); + uint64_t sTime = pThreadInfo->start_time; char *tb_prefix = pThreadInfo->tb_prefix; FILE *fp = fopen(pThreadInfo->filePath, "a"); if (NULL == fp) { errorPrint( "fopen %s fail, reason:%s.\n", pThreadInfo->filePath, strerror(errno)); + free(command); return NULL; } @@ -7354,6 +7369,7 @@ static void *readTable(void *sarg) { taos_free_result(pSql); taos_close(taos); fclose(fp); + free(command); return NULL; } @@ -7374,6 +7390,7 @@ static void *readTable(void *sarg) { } fprintf(fp, "\n"); fclose(fp); + free(command); #endif return NULL; } @@ -7383,10 +7400,13 @@ static void *readMetric(void *sarg) { threadInfo *pThreadInfo = (threadInfo *)sarg; TAOS *taos = pThreadInfo->taos; setThreadName("readMetric"); - char command[BUFFER_SIZE] = "\0"; + char *command = calloc(1, BUFFER_SIZE); + assert(command); + FILE *fp = fopen(pThreadInfo->filePath, "a"); if (NULL == fp) { printf("fopen %s fail, reason:%s.\n", pThreadInfo->filePath, strerror(errno)); + free(command); return NULL; } @@ -7431,6 +7451,7 @@ static void *readMetric(void *sarg) { taos_free_result(pSql); taos_close(taos); fclose(fp); + free(command); return NULL; } int count = 0; @@ -7448,6 +7469,7 @@ static void *readMetric(void *sarg) { fprintf(fp, "\n"); } fclose(fp); + free(command); #endif return NULL; } @@ -7484,11 +7506,16 @@ static int insertTestProcess() { init_rand_data(); // create database and super tables - if(createDatabasesAndStables() != 0) { + char *cmdBuffer = calloc(1, BUFFER_SIZE); + assert(cmdBuffer); + + if(createDatabasesAndStables(cmdBuffer) != 0) { if (g_fpOfInsertResult) fclose(g_fpOfInsertResult); + free(cmdBuffer); return -1; } + free(cmdBuffer); // pretreatement if (prepareSampleData() != 0) { @@ -7657,7 +7684,9 @@ static void replaceChildTblName(char* inSql, char* outSql, int tblIndex) { } static void *superTableQuery(void *sarg) { - char sqlstr[BUFFER_SIZE]; + char *sqlstr = calloc(1, BUFFER_SIZE); + assert(sqlstr); + threadInfo *pThreadInfo = (threadInfo *)sarg; setThreadName("superTableQuery"); @@ -7672,6 +7701,7 @@ static void *superTableQuery(void *sarg) { if (taos == NULL) { errorPrint("[%d] Failed to connect to TDengine, reason:%s\n", pThreadInfo->threadID, taos_errstr(NULL)); + free(sqlstr); return NULL; } else { pThreadInfo->taos = taos; @@ -7696,7 +7726,7 @@ static void *superTableQuery(void *sarg) { st = taosGetTimestampMs(); for (int i = pThreadInfo->start_table_from; i <= pThreadInfo->end_table_to; i++) { for (int j = 0; j < g_queryInfo.superQueryInfo.sqlCount; j++) { - memset(sqlstr,0,sizeof(sqlstr)); + memset(sqlstr, 0, BUFFER_SIZE); replaceChildTblName(g_queryInfo.superQueryInfo.sql[j], sqlstr, i); if (g_queryInfo.superQueryInfo.result[j][0] != '\0') { sprintf(pThreadInfo->filePath, "%s-%d", @@ -7727,6 +7757,7 @@ static void 
*superTableQuery(void *sarg) { (double)(et - st)/1000.0); } + free(sqlstr); return NULL; } @@ -7960,7 +7991,9 @@ static TAOS_SUB* subscribeImpl( static void *superSubscribe(void *sarg) { threadInfo *pThreadInfo = (threadInfo *)sarg; - char subSqlstr[BUFFER_SIZE]; + char *subSqlStr = calloc(1, BUFFER_SIZE); + assert(subSqlStr); + TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT] = {0}; uint64_t tsubSeq; @@ -7969,6 +8002,7 @@ static void *superSubscribe(void *sarg) { if (pThreadInfo->ntables > MAX_QUERY_SQL_COUNT) { errorPrint("The table number(%"PRId64") of the thread is more than max query sql count: %d\n", pThreadInfo->ntables, MAX_QUERY_SQL_COUNT); + free(subSqlStr); exit(-1); } @@ -7981,6 +8015,7 @@ static void *superSubscribe(void *sarg) { if (pThreadInfo->taos == NULL) { errorPrint("[%d] Failed to connect to TDengine, reason:%s\n", pThreadInfo->threadID, taos_errstr(NULL)); + free(subSqlStr); return NULL; } } @@ -7991,6 +8026,7 @@ static void *superSubscribe(void *sarg) { taos_close(pThreadInfo->taos); errorPrint( "use database %s failed!\n\n", g_queryInfo.dbName); + free(subSqlStr); return NULL; } @@ -8005,25 +8041,26 @@ static void *superSubscribe(void *sarg) { pThreadInfo->end_table_to, i); sprintf(topic, "taosdemo-subscribe-%"PRIu64"-%"PRIu64"", i, pThreadInfo->querySeq); - memset(subSqlstr, 0, sizeof(subSqlstr)); + memset(subSqlStr, 0, BUFFER_SIZE); replaceChildTblName( g_queryInfo.superQueryInfo.sql[pThreadInfo->querySeq], - subSqlstr, i); + subSqlStr, i); if (g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq][0] != 0) { sprintf(pThreadInfo->filePath, "%s-%d", g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq], pThreadInfo->threadID); } - verbosePrint("%s() LN%d, [%d] subSqlstr: %s\n", - __func__, __LINE__, pThreadInfo->threadID, subSqlstr); + verbosePrint("%s() LN%d, [%d] subSqlStr: %s\n", + __func__, __LINE__, pThreadInfo->threadID, subSqlStr); tsub[tsubSeq] = subscribeImpl( STABLE_CLASS, - pThreadInfo, subSqlstr, topic, + pThreadInfo, subSqlStr, topic, g_queryInfo.superQueryInfo.subscribeRestart, g_queryInfo.superQueryInfo.subscribeInterval); if (NULL == tsub[tsubSeq]) { taos_close(pThreadInfo->taos); + free(subSqlStr); return NULL; } } @@ -8080,12 +8117,13 @@ static void *superSubscribe(void *sarg) { consumed[tsubSeq]= 0; tsub[tsubSeq] = subscribeImpl( STABLE_CLASS, - pThreadInfo, subSqlstr, topic, + pThreadInfo, subSqlStr, topic, g_queryInfo.superQueryInfo.subscribeRestart, g_queryInfo.superQueryInfo.subscribeInterval ); if (NULL == tsub[tsubSeq]) { taos_close(pThreadInfo->taos); + free(subSqlStr); return NULL; } } @@ -8105,6 +8143,7 @@ static void *superSubscribe(void *sarg) { } taos_close(pThreadInfo->taos); + free(subSqlStr); return NULL; } @@ -8407,9 +8446,7 @@ static void setParaFromArg() { tstrncpy(g_Dbs.user, g_args.user, MAX_USERNAME_SIZE); } - if (g_args.password) { - tstrncpy(g_Dbs.password, g_args.password, MAX_PASSWORD_SIZE); - } + tstrncpy(g_Dbs.password, g_args.password, MAX_PASSWORD_SIZE); if (g_args.port) { g_Dbs.port = g_args.port; diff --git a/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoInsert.py b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoInsert.py index 8fcb263125..f069bb8f70 100644 --- a/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoInsert.py +++ b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoInsert.py @@ -36,7 +36,7 @@ class TDTestCase: if ("taosd" in files): rootRealPath = os.path.dirname(os.path.realpath(root)) if ("packaging" not in 
rootRealPath): - buildPath = root[:len(root)-len("/build/bin")] + buildPath = root[:len(root) - len("/build/bin")] break return buildPath @@ -46,14 +46,15 @@ class TDTestCase: tdLog.exit("taosd not found!") else: tdLog.info("taosd found in %s" % buildPath) - binPath = buildPath+ "/build/bin/" + binPath = buildPath + "/build/bin/" - # insert: create one or mutiple tables per sql and insert multiple rows per sql # insert data from a special timestamp # check stable stb0 - os.system("%staosdemo -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabase.json -y " % binPath) + os.system( + "%staosdemo -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabase.json -y " % + binPath) tdSql.execute("use nsdb") tdSql.query("show stables") tdSql.checkData(0, 4, 100) @@ -64,9 +65,9 @@ class TDTestCase: tdSql.query("select count(*) from stb0") tdSql.checkData(0, 0, 10000) tdSql.query("describe stb0") - tdSql.checkDataType(9, 1,"TIMESTAMP") + tdSql.checkDataType(9, 1, "TIMESTAMP") tdSql.query("select last(ts) from stb0") - tdSql.checkData(0, 0,"2021-07-01 00:00:00.990000000") + tdSql.checkData(0, 0, "2021-07-01 00:00:00.990000000") # check stable stb1 which is insert with disord @@ -78,16 +79,18 @@ class TDTestCase: tdSql.checkData(0, 0, 10000) # check c8 is an nano timestamp tdSql.query("describe stb1") - tdSql.checkDataType(9, 1,"TIMESTAMP") + tdSql.checkDataType(9, 1, "TIMESTAMP") # check insert timestamp_step is nano_second tdSql.query("select last(ts) from stb1") - tdSql.checkData(0, 0,"2021-07-01 00:00:00.990000000") - + tdSql.checkData(0, 0, "2021-07-01 00:00:00.990000000") + # insert data from now time # check stable stb0 - os.system("%staosdemo -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabaseNow.json -y " % binPath) - + os.system( + "%staosdemo -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabaseNow.json -y " % + binPath) + tdSql.execute("use nsdb2") tdSql.query("show stables") tdSql.checkData(0, 4, 100) @@ -99,11 +102,14 @@ class TDTestCase: tdSql.checkData(0, 0, 10000) # check c8 is an nano timestamp tdSql.query("describe stb0") - tdSql.checkDataType(9,1,"TIMESTAMP") + tdSql.checkDataType(9, 1, "TIMESTAMP") - # insert by csv files and timetamp is long int , strings in ts and cols - - os.system("%staosdemo -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabasecsv.json -y " % binPath) + # insert by csv files and timetamp is long int , strings in ts and + # cols + + os.system( + "%staosdemo -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabasecsv.json -y " % + binPath) tdSql.execute("use nsdbcsv") tdSql.query("show stables") tdSql.checkData(0, 4, 100) @@ -111,29 +117,37 @@ class TDTestCase: tdSql.checkData(0, 0, 10000) tdSql.query("describe stb0") tdSql.checkDataType(3, 1, "TIMESTAMP") - tdSql.query("select count(*) from stb0 where ts > \"2021-07-01 00:00:00.490000000\"") + tdSql.query( + "select count(*) from stb0 where ts > \"2021-07-01 00:00:00.490000000\"") tdSql.checkData(0, 0, 5000) tdSql.query("select count(*) from stb0 where ts < 1626918583000000000") tdSql.checkData(0, 0, 10000) - - os.system("rm -rf ./insert_res.txt") - os.system("rm -rf tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNano*.py.sql") - # taosdemo test insert with command and parameter , detals show taosdemo --help - os.system("%staosdemo -u root -P taosdata -p 6030 -a 1 -m pre -n 10 -T 20 -t 60 -o res.txt -y " % binPath) + os.system("rm -rf ./insert_res.txt") + os.system( + "rm -rf tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNano*.py.sql") + + # taosdemo 
test insert with command and parameter , detals show + # taosdemo --help + os.system( + "%staosdemo -u root -ptaosdata -P 6030 -a 1 -m pre -n 10 -T 20 -t 60 -o res.txt -y " % + binPath) tdSql.query("select count(*) from test.meters") tdSql.checkData(0, 0, 600) # check taosdemo -s - sqls_ls = ['drop database if exists nsdbsql;','create database nsdbsql precision "ns" keep 3600 days 6 update 1;', - 'use nsdbsql;','CREATE STABLE meters (ts timestamp, current float, voltage int, phase float) TAGS (location binary(64), groupdId int);', - 'CREATE TABLE d1001 USING meters TAGS ("Beijing.Chaoyang", 2);', - 'INSERT INTO d1001 USING METERS TAGS ("Beijng.Chaoyang", 2) VALUES (now, 10.2, 219, 0.32);', - 'INSERT INTO d1001 USING METERS TAGS ("Beijng.Chaoyang", 2) VALUES (now, 85, 32, 0.76);'] + sqls_ls = [ + 'drop database if exists nsdbsql;', + 'create database nsdbsql precision "ns" keep 3600 days 6 update 1;', + 'use nsdbsql;', + 'CREATE STABLE meters (ts timestamp, current float, voltage int, phase float) TAGS (location binary(64), groupdId int);', + 'CREATE TABLE d1001 USING meters TAGS ("Beijing.Chaoyang", 2);', + 'INSERT INTO d1001 USING METERS TAGS ("Beijng.Chaoyang", 2) VALUES (now, 10.2, 219, 0.32);', + 'INSERT INTO d1001 USING METERS TAGS ("Beijng.Chaoyang", 2) VALUES (now, 85, 32, 0.76);'] - with open("./taosdemoTestNanoCreateDB.sql",mode ="a" ) as sql_files: + with open("./taosdemoTestNanoCreateDB.sql", mode="a") as sql_files: for sql in sqls_ls: - sql_files.write(sql+"\n") + sql_files.write(sql + "\n") sql_files.close() sleep(10) @@ -141,11 +155,10 @@ class TDTestCase: os.system("%staosdemo -s taosdemoTestNanoCreateDB.sql -y " % binPath) tdSql.query("select count(*) from nsdbsql.meters") tdSql.checkData(0, 0, 2) - + os.system("rm -rf ./res.txt") os.system("rm -rf ./*.py.sql") os.system("rm -rf ./taosdemoTestNanoCreateDB.sql") - def stop(self): tdSql.close() diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoInsert.py b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoInsert.py index 266a8fa712..c3fdff00ec 100644 --- a/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoInsert.py +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoInsert.py @@ -120,7 +120,7 @@ class TDTestCase: os.system("rm -rf tools/taosdemoAllTest/taosdemoTestSupportNano*.py.sql") # taosdemo test insert with command and parameter , detals show taosdemo --help - os.system("%staosdemo -u root -P taosdata -p 6030 -a 1 -m pre -n 10 -T 20 -t 60 -o res.txt -y " % binPath) + os.system("%staosdemo -u root -ptaosdata -P 6030 -a 1 -m pre -n 10 -T 20 -t 60 -o res.txt -y " % binPath) tdSql.query("select count(*) from test.meters") tdSql.checkData(0, 0, 600) # check taosdemo -s diff --git a/tests/pytest/tools/taosdemoTestInterlace.py b/tests/pytest/tools/taosdemoTestInterlace.py index 4c551f327a..30c04729b7 100644 --- a/tests/pytest/tools/taosdemoTestInterlace.py +++ b/tests/pytest/tools/taosdemoTestInterlace.py @@ -49,7 +49,7 @@ class TDTestCase: else: tdLog.info("taosd found in %s" % buildPath) binPath = buildPath + "/build/bin/" - taosdemoCmd = "%staosdemo -f tools/insert-interlace.json -pp 2>&1 | grep sleep | wc -l" % binPath + taosdemoCmd = "%staosdemo -f tools/insert-interlace.json -PP 2>&1 | grep sleep | wc -l" % binPath sleepTimes = subprocess.check_output( taosdemoCmd, shell=True).decode("utf-8") print("sleep times: %d" % int(sleepTimes)) From 0309fd44126e3ff6ce44cd516f6b87163a5b46ef Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 13 Aug 2021 10:17:47 +0800 Subject: 
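
The taosdemo command-line patch above ([TD-5844]) makes the password flag behave like the other TDengine tools: a bare -p prompts interactively, a value glued to the flag such as -ptaosdata (the form the updated test scripts use) is taken verbatim, and the capitalized -P now carries the server port. A small stand-alone C sketch of that parsing rule follows; parse_password, PASS_LEN and the defaults are illustrative names, not the actual taosdemo symbols.

#include <stdio.h>
#include <string.h>

#define PASS_LEN 16   /* the patch shrinks the maximum password size to 16 */

/* hypothetical helper, not the real taosdemo parser */
static void parse_password(int argc, char *argv[], char *password) {
    for (int i = 1; i < argc; i++) {
        if (strncmp(argv[i], "-p", 2) == 0) {          /* matches "-p" and "-pXXXX", not "-P" */
            if (strlen(argv[i]) == 2) {
                printf("Enter password:");
                if (scanf("%15s", password) != 1) {
                    password[0] = '\0';
                }
            } else {
                strncpy(password, argv[i] + 2, PASS_LEN - 1);
                password[PASS_LEN - 1] = '\0';
            }
        }
    }
}

int main(int argc, char *argv[]) {
    char password[PASS_LEN] = "taosdata";              /* default when no -p is given */
    parse_password(argc, argv, password);
    printf("password length: %zu\n", strlen(password));
    return 0;
}

Run it as "./a.out -ptaosdata" to pass the password inline, or with a bare "-p" to be prompted, which mirrors the invocations in the updated tests.
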
[PATCH 031/165] [td-255] fix compiler error on windows. --- src/query/src/qExtbuffer.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/query/src/qExtbuffer.c b/src/query/src/qExtbuffer.c index 91004fe707..5152d17264 100644 --- a/src/query/src/qExtbuffer.c +++ b/src/query/src/qExtbuffer.c @@ -1113,7 +1113,7 @@ void taoscQSort(void** pCols, SSchema* pSchema, int32_t numOfCols, int32_t numOf for(int32_t i = 0; i < numOfRows; ++i) { char* dest = buf + size * i; - memcpy(dest, pCols[index] + bytes * i, bytes); + memcpy(dest, ((char*) pCols[index]) + bytes * i, bytes); *(int32_t*)(dest+bytes) = i; } @@ -1128,7 +1128,7 @@ void taoscQSort(void** pCols, SSchema* pSchema, int32_t numOfCols, int32_t numOf if (i == index) { for(int32_t j = 0; j < numOfRows; ++j){ char* src = buf + (j * size); - char* dest = pCols[i] + (j * bytes1); + char* dest = ((char*)pCols[i]) + (j * bytes1); memcpy(dest, src, bytes1); } } else { @@ -1144,7 +1144,7 @@ void taoscQSort(void** pCols, SSchema* pSchema, int32_t numOfCols, int32_t numOf memcpy(p, pCols[i], bytes1 * numOfRows); for(int32_t j = 0; j < numOfRows; ++j){ - char* dest = pCols[i] + bytes1 * j; + char* dest = ((char*)pCols[i]) + bytes1 * j; int32_t newPos = *(int32_t*)(buf + (j * size) + bytes); char* src = p + (newPos * bytes1); From 7fc6dc17fec04d4fce2babe002ce865bee0aca89 Mon Sep 17 00:00:00 2001 From: cpwu Date: Fri, 13 Aug 2021 10:27:27 +0800 Subject: [PATCH 032/165] [TD-5933] add case for TD-5933 --- tests/pytest/functions/queryTestCases.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/tests/pytest/functions/queryTestCases.py b/tests/pytest/functions/queryTestCases.py index ae2bbc4a81..8523c6310e 100644 --- a/tests/pytest/functions/queryTestCases.py +++ b/tests/pytest/functions/queryTestCases.py @@ -714,6 +714,12 @@ class TDTestCase: tdSql.execute(f"insert into db.t{i} values ({nowtime-j*10}, {1000-j}, {round(random.random()*j,3)})") tdSql.execute(f"insert into db.t{i} (ts) values ({nowtime-10000}) ") + ########### TD-5933 verify the bug of "function stddev with interval return 0 rows" is fixed ########## + stddevAndIntervalSql=f"select last(*) from t0 where ts>={nowtime-10000} interval(10a) fill(next) limit 10" + tdSql.query(stddevAndIntervalSql) + tdSql.checkRows(10) + + ########## TD-5978 verify the bug of "when start row is null, result by fill(next) is 0 " is fixed ########## fillsql=f"select last(*) from t0 where ts>={nowtime-10000} and ts<{nowtime} interval(10a) fill(next) limit 10" tdSql.query(fillsql) fillResult=False @@ -724,7 +730,7 @@ class TDTestCase: else: tdLog.exit("fill(next) is wrong") - + pass def run(self): From ecea1ce747a00ef859e3ec3241eb803358dac1a8 Mon Sep 17 00:00:00 2001 From: tickduan <417921451@qq.com> Date: Fri, 13 Aug 2021 11:17:55 +0800 Subject: [PATCH 033/165] fixed realloc can cause memory leak --- deps/MsvcLibX/src/iconv.c | 6 ++++-- deps/MsvcLibX/src/main.c | 10 +++++++--- deps/MsvcLibX/src/realpath.c | 16 ++++++++++++---- src/balance/src/bnScore.c | 3 ++- src/client/src/tscPrepare.c | 5 +++-- src/client/src/tscSql.c | 4 +++- src/client/src/tscUtil.c | 12 +++++++++--- src/common/inc/tdataformat.h | 10 ++++++---- src/common/src/tdataformat.c | 5 +++-- src/kit/shell/src/shellCheck.c | 5 +++-- src/kit/taospack/taospack.c | 5 ++++- src/mnode/src/mnodeTable.c | 5 +++-- src/os/src/detail/osMemory.c | 5 +++-- src/os/src/windows/wGetline.c | 6 ++++-- src/query/src/qTsbuf.c | 7 +++++-- 15 files changed, 71 insertions(+), 33 deletions(-) diff --git a/deps/MsvcLibX/src/iconv.c 
b/deps/MsvcLibX/src/iconv.c index 40b6e6462d..1ec0dc7354 100644 --- a/deps/MsvcLibX/src/iconv.c +++ b/deps/MsvcLibX/src/iconv.c @@ -98,6 +98,7 @@ int ConvertString(char *buf, size_t nBytes, UINT cpFrom, UINT cpTo, LPCSTR lpDef char *DupAndConvert(const char *string, UINT cpFrom, UINT cpTo, LPCSTR lpDefaultChar) { int nBytes; char *pBuf; + char *pBuf1; nBytes = 4 * ((int)lstrlen(string) + 1); /* Worst case for the size needed */ pBuf = (char *)malloc(nBytes); if (!pBuf) { @@ -110,8 +111,9 @@ char *DupAndConvert(const char *string, UINT cpFrom, UINT cpTo, LPCSTR lpDefault free(pBuf); return NULL; } - pBuf = realloc(pBuf, nBytes+1); - return pBuf; + pBuf1 = realloc(pBuf, nBytes+1); + if(pBuf1 == NULL && pBuf != NULL) free(pBuf); + return pBuf1; } int CountCharacters(const char *string, UINT cp) { diff --git a/deps/MsvcLibX/src/main.c b/deps/MsvcLibX/src/main.c index f366b081ad..9b9b4a46af 100644 --- a/deps/MsvcLibX/src/main.c +++ b/deps/MsvcLibX/src/main.c @@ -68,6 +68,7 @@ int BreakArgLine(LPSTR pszCmdLine, char ***pppszArg) { int iString = FALSE; /* TRUE = string mode; FALSE = non-string mode */ int nBackslash = 0; char **ppszArg; + char **ppszArg1; int iArg = FALSE; /* TRUE = inside an argument; FALSE = between arguments */ ppszArg = (char **)malloc((argc+1)*sizeof(char *)); @@ -89,7 +90,10 @@ int BreakArgLine(LPSTR pszCmdLine, char ***pppszArg) { if ((!iArg) && (c != ' ') && (c != '\t')) { /* Beginning of a new argument */ iArg = TRUE; ppszArg[argc++] = pszCopy+j; - ppszArg = (char **)realloc(ppszArg, (argc+1)*sizeof(char *)); + ppszArg1 = (char **)realloc(ppszArg, (argc+1)*sizeof(char *)); + if(ppszArg1 == NULL && ppszArg != NULL) + free(ppszArg); + ppszArg = ppszArg1; if (!ppszArg) return -1; pszCopy[j] = c0 = '\0'; } @@ -212,14 +216,14 @@ int _initU(void) { fprintf(stderr, "Warning: Can't convert the argument line to UTF-8\n"); _acmdln[0] = '\0'; } - realloc(_acmdln, n+1); /* Resize the memory block to fit the UTF-8 line */ + //realloc(_acmdln, n+1); /* Resize the memory block to fit the UTF-8 line */ /* Should not fail since we make it smaller */ /* Record the console code page, to allow converting the output accordingly */ codePage = GetConsoleOutputCP(); return 0; -} +}ß #endif /* defined(_WIN32) */ diff --git a/deps/MsvcLibX/src/realpath.c b/deps/MsvcLibX/src/realpath.c index 5fbcf773a2..ec1b606bf6 100644 --- a/deps/MsvcLibX/src/realpath.c +++ b/deps/MsvcLibX/src/realpath.c @@ -196,6 +196,7 @@ not_compact_enough: /* Normally defined in stdlib.h. Output buf must contain PATH_MAX bytes */ char *realpath(const char *path, char *outbuf) { char *pOutbuf = outbuf; + char *pOutbuf1; int iErr; const char *pc; @@ -242,8 +243,11 @@ realpath_failed: return NULL; } - if (!outbuf) pOutbuf = realloc(pOutbuf, strlen(pOutbuf) + 1); - return pOutbuf; + if (!outbuf) { + pOutbuf1 = realloc(pOutbuf, strlen(pOutbuf) + 1); + if(pOutbuf1 == NULL && pOutbuf) free(pOutbuf); + } + return pOutbuf1; } #endif @@ -517,6 +521,7 @@ int ResolveLinksA(const char *path, char *buf, size_t bufsize) { /* Normally defined in stdlib.h. 
Output buf must contain PATH_MAX bytes */ char *realpathU(const char *path, char *outbuf) { char *pOutbuf = outbuf; + char *pOutbuf1; char *pPath1 = NULL; char *pPath2 = NULL; int iErr; @@ -590,10 +595,13 @@ realpathU_failed: } DEBUG_LEAVE(("return 0x%p; // \"%s\"\n", pOutbuf, pOutbuf)); - if (!outbuf) pOutbuf = realloc(pOutbuf, strlen(pOutbuf) + 1); + if (!outbuf) { + pOutbuf1 = realloc(pOutbuf, strlen(pOutbuf) + 1); + if(pOutbuf1 == NULL && pOutbuf) free(pOutbuf); + } free(pPath1); free(pPath2); - return pOutbuf; + return pOutbuf1; } #endif /* defined(_WIN32) */ diff --git a/src/balance/src/bnScore.c b/src/balance/src/bnScore.c index 7d94df1c23..a2b9ba8e09 100644 --- a/src/balance/src/bnScore.c +++ b/src/balance/src/bnScore.c @@ -117,7 +117,8 @@ void bnCleanupDnodes() { static void bnCheckDnodesSize(int32_t dnodesNum) { if (tsBnDnodes.maxSize <= dnodesNum) { tsBnDnodes.maxSize = dnodesNum * 2; - tsBnDnodes.list = realloc(tsBnDnodes.list, tsBnDnodes.maxSize * sizeof(SDnodeObj *)); + SDnodeObj** list1 = realloc(tsBnDnodes.list, tsBnDnodes.maxSize * sizeof(SDnodeObj *)); + if(list1) tsBnDnodes.list = list1; } } diff --git a/src/client/src/tscPrepare.c b/src/client/src/tscPrepare.c index 40664241c1..06a9505086 100644 --- a/src/client/src/tscPrepare.c +++ b/src/client/src/tscPrepare.c @@ -1527,8 +1527,9 @@ int taos_stmt_prepare(TAOS_STMT* stmt, const char* sql, unsigned long length) { pCmd->insertParam.insertType = TSDB_QUERY_TYPE_STMT_INSERT; pCmd->insertParam.objectId = pSql->self; - pSql->sqlstr = realloc(pSql->sqlstr, sqlLen + 1); - + char* sqlstr = realloc(pSql->sqlstr, sqlLen + 1); + if(sqlstr == NULL && pSql->sqlstr) free(pSql->sqlstr); + pSql->sqlstr = sqlstr; if (pSql->sqlstr == NULL) { tscError("%p failed to malloc sql string buffer", pSql); STMT_RET(TSDB_CODE_TSC_OUT_OF_MEMORY); diff --git a/src/client/src/tscSql.c b/src/client/src/tscSql.c index 6f3d5c3a63..026c65a595 100644 --- a/src/client/src/tscSql.c +++ b/src/client/src/tscSql.c @@ -887,7 +887,9 @@ int taos_validate_sql(TAOS *taos, const char *sql) { return TSDB_CODE_TSC_EXCEED_SQL_LIMIT; } - pSql->sqlstr = realloc(pSql->sqlstr, sqlLen + 1); + char* sqlstr = realloc(pSql->sqlstr, sqlLen + 1); + if(sqlstr == NULL && pSql->sqlstr) free(pSql->sqlstr); + pSql->sqlstr = sqlstr; if (pSql->sqlstr == NULL) { tscError("0x%"PRIx64" failed to malloc sql string buffer", pSql->self); tfree(pSql); diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index 3c35795b0d..ca7a5cbf77 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -625,8 +625,10 @@ static void setResRawPtrImpl(SSqlRes* pRes, SInternalField* pInfo, int32_t i, bo } else if (convertNchar && pInfo->field.type == TSDB_DATA_TYPE_NCHAR) { // convert unicode to native code in a temporary buffer extra one byte for terminated symbol - pRes->buffer[i] = realloc(pRes->buffer[i], pInfo->field.bytes * pRes->numOfRows); - + char* buffer = realloc(pRes->buffer[i], pInfo->field.bytes * pRes->numOfRows); + if(buffer == NULL) + return ; + pRes->buffer[i] = buffer; // string terminated char for binary data memset(pRes->buffer[i], 0, pInfo->field.bytes * pRes->numOfRows); @@ -4364,6 +4366,7 @@ int32_t tscCreateTableMetaFromSTableMeta(STableMeta** ppChild, const char* name, STableMeta* p = NULL; size_t sz = 0; STableMeta* pChild = *ppChild; + STableMeta* pChild1; taosHashGetCloneExt(tscTableMetaMap, pChild->sTableName, strnlen(pChild->sTableName, TSDB_TABLE_FNAME_LEN), NULL, (void **)&p, &sz); @@ -4374,7 +4377,10 @@ int32_t 
tscCreateTableMetaFromSTableMeta(STableMeta** ppChild, const char* name, int32_t totalBytes = (p->tableInfo.numOfColumns + p->tableInfo.numOfTags) * sizeof(SSchema); int32_t tableMetaSize = sizeof(STableMeta) + totalBytes; if (*tableMetaCapacity < tableMetaSize) { - pChild = realloc(pChild, tableMetaSize); + pChild1 = realloc(pChild, tableMetaSize); + if(pChild1 == NULL) + return -1; + pChild = pChild1; *tableMetaCapacity = (size_t)tableMetaSize; } diff --git a/src/common/inc/tdataformat.h b/src/common/inc/tdataformat.h index 46259c8488..a01c377539 100644 --- a/src/common/inc/tdataformat.h +++ b/src/common/inc/tdataformat.h @@ -547,8 +547,9 @@ SKVRow tdGetKVRowFromBuilder(SKVRowBuilder *pBuilder); static FORCE_INLINE int tdAddColToKVRow(SKVRowBuilder *pBuilder, int16_t colId, int8_t type, void *value) { if (pBuilder->nCols >= pBuilder->tCols) { pBuilder->tCols *= 2; - pBuilder->pColIdx = (SColIdx *)realloc((void *)(pBuilder->pColIdx), sizeof(SColIdx) * pBuilder->tCols); - if (pBuilder->pColIdx == NULL) return -1; + SColIdx* pColIdx = (SColIdx *)realloc((void *)(pBuilder->pColIdx), sizeof(SColIdx) * pBuilder->tCols); + if (pColIdx == NULL) return -1; + pBuilder->pColIdx = pColIdx; } pBuilder->pColIdx[pBuilder->nCols].colId = colId; @@ -561,8 +562,9 @@ static FORCE_INLINE int tdAddColToKVRow(SKVRowBuilder *pBuilder, int16_t colId, while (tlen > pBuilder->alloc - pBuilder->size) { pBuilder->alloc *= 2; } - pBuilder->buf = realloc(pBuilder->buf, pBuilder->alloc); - if (pBuilder->buf == NULL) return -1; + void* buf = realloc(pBuilder->buf, pBuilder->alloc); + if (buf == NULL) return -1; + pBuilder->buf = buf; } memcpy(POINTER_SHIFT(pBuilder->buf, pBuilder->size), value, tlen); diff --git a/src/common/src/tdataformat.c b/src/common/src/tdataformat.c index a3a6c0fed4..181afa225d 100644 --- a/src/common/src/tdataformat.c +++ b/src/common/src/tdataformat.c @@ -138,8 +138,9 @@ int tdAddColToSchema(STSchemaBuilder *pBuilder, int8_t type, int16_t colId, int1 if (pBuilder->nCols >= pBuilder->tCols) { pBuilder->tCols *= 2; - pBuilder->columns = (STColumn *)realloc(pBuilder->columns, sizeof(STColumn) * pBuilder->tCols); - if (pBuilder->columns == NULL) return -1; + STColumn* columns = (STColumn *)realloc(pBuilder->columns, sizeof(STColumn) * pBuilder->tCols); + if (columns == NULL) return -1; + pBuilder->columns = columns; } STColumn *pCol = &(pBuilder->columns[pBuilder->nCols]); diff --git a/src/kit/shell/src/shellCheck.c b/src/kit/shell/src/shellCheck.c index d78f1a6b99..7fc8b1409a 100644 --- a/src/kit/shell/src/shellCheck.c +++ b/src/kit/shell/src/shellCheck.c @@ -72,12 +72,13 @@ static int32_t shellShowTables(TAOS *con, char *db) { int32_t tbIndex = tbNum++; if (tbMallocNum < tbNum) { tbMallocNum = (tbMallocNum * 2 + 1); - tbNames = realloc(tbNames, tbMallocNum * sizeof(char *)); - if (tbNames == NULL) { + char** tbNames1 = realloc(tbNames, tbMallocNum * sizeof(char *)); + if (tbNames1 == NULL) { fprintf(stdout, "failed to malloc tablenames, num:%d\n", tbMallocNum); code = TSDB_CODE_TSC_OUT_OF_MEMORY; break; } + tbNames = tbNames1; } tbNames[tbIndex] = malloc(TSDB_TABLE_NAME_LEN); diff --git a/src/kit/taospack/taospack.c b/src/kit/taospack/taospack.c index 33d779dfcf..1b22c16ee0 100644 --- a/src/kit/taospack/taospack.c +++ b/src/kit/taospack/taospack.c @@ -148,7 +148,10 @@ float* read_float(const char* inFile, int* pcount){ //printf(" buff=%s float=%.50f \n ", buf, floats[fi]); if ( ++fi == malloc_cnt ) { malloc_cnt += 100000; - floats = realloc(floats, malloc_cnt*sizeof(float)); + float* floats1 = 
realloc(floats, malloc_cnt*sizeof(float)); + if(floats1 == NULL) + break; + floats = floats1; } memset(buf, 0, sizeof(buf)); } diff --git a/src/mnode/src/mnodeTable.c b/src/mnode/src/mnodeTable.c index 0bc114ffdf..a4ecf5d6f3 100644 --- a/src/mnode/src/mnodeTable.c +++ b/src/mnode/src/mnodeTable.c @@ -2921,10 +2921,11 @@ static SMultiTableMeta* ensureMsgBufferSpace(SMultiTableMeta *pMultiMeta, SArray (*totalMallocLen) *= 2; } - pMultiMeta = realloc(pMultiMeta, *totalMallocLen); - if (pMultiMeta == NULL) { + SMultiTableMeta* pMultiMeta1 = realloc(pMultiMeta, *totalMallocLen); + if (pMultiMeta1 == NULL) { return NULL; } + pMultiMeta = pMultiMeta1; } return pMultiMeta; diff --git a/src/os/src/detail/osMemory.c b/src/os/src/detail/osMemory.c index d8194feab4..22954f1523 100644 --- a/src/os/src/detail/osMemory.c +++ b/src/os/src/detail/osMemory.c @@ -504,8 +504,9 @@ void * taosTRealloc(void *ptr, size_t size) { void * tptr = (void *)((char *)ptr - sizeof(size_t)); size_t tsize = size + sizeof(size_t); - tptr = realloc(tptr, tsize); - if (tptr == NULL) return NULL; + void* tptr1 = realloc(tptr, tsize); + if (tptr1 == NULL) return NULL; + tptr = tptr1; *(size_t *)tptr = size; diff --git a/src/os/src/windows/wGetline.c b/src/os/src/windows/wGetline.c index 553aecaf0a..aa45854884 100644 --- a/src/os/src/windows/wGetline.c +++ b/src/os/src/windows/wGetline.c @@ -81,11 +81,13 @@ int32_t getstr(char **lineptr, size_t *n, FILE *stream, char terminator, int32_t *n += MIN_CHUNK; nchars_avail = (int32_t)(*n + *lineptr - read_pos); - *lineptr = realloc(*lineptr, *n); - if (!*lineptr) { + char* lineptr1 = realloc(*lineptr, *n); + if (!lineptr1) { errno = ENOMEM; return -1; } + *lineptr = lineptr1; + read_pos = *n - nchars_avail + *lineptr; assert((*lineptr + *n) == (read_pos + nchars_avail)); } diff --git a/src/query/src/qTsbuf.c b/src/query/src/qTsbuf.c index 825b7960de..4cf05dd2c7 100644 --- a/src/query/src/qTsbuf.c +++ b/src/query/src/qTsbuf.c @@ -223,8 +223,11 @@ static STSGroupBlockInfoEx* addOneGroupInfo(STSBuf* pTSBuf, int32_t id) { static void shrinkBuffer(STSList* ptsData) { // shrink tmp buffer size if it consumes too many memory compared to the pre-defined size if (ptsData->allocSize >= ptsData->threshold * 2) { - ptsData->rawBuf = realloc(ptsData->rawBuf, MEM_BUF_SIZE); - ptsData->allocSize = MEM_BUF_SIZE; + char* rawBuf = realloc(ptsData->rawBuf, MEM_BUF_SIZE); + if(rawBuf) { + ptsData->rawBuf = rawBuf; + ptsData->allocSize = MEM_BUF_SIZE; + } } } From 92652f449186e766e3306dbeba28d0e7d6d52501 Mon Sep 17 00:00:00 2001 From: tickduan <417921451@qq.com> Date: Fri, 13 Aug 2021 11:47:39 +0800 Subject: [PATCH 034/165] bnScore.c modify ok --- src/balance/src/bnScore.c | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/src/balance/src/bnScore.c b/src/balance/src/bnScore.c index a2b9ba8e09..04a14357c9 100644 --- a/src/balance/src/bnScore.c +++ b/src/balance/src/bnScore.c @@ -116,9 +116,17 @@ void bnCleanupDnodes() { static void bnCheckDnodesSize(int32_t dnodesNum) { if (tsBnDnodes.maxSize <= dnodesNum) { - tsBnDnodes.maxSize = dnodesNum * 2; - SDnodeObj** list1 = realloc(tsBnDnodes.list, tsBnDnodes.maxSize * sizeof(SDnodeObj *)); - if(list1) tsBnDnodes.list = list1; + int32_t maxSize = dnodesNum * 2; + SDnodeObj** list1 = NULL; + int32_t retry = 0; + + while(list1 == NULL && retry++ < 3) { + list1 = realloc(tsBnDnodes.list, maxSize * sizeof(SDnodeObj *)); + } + if(list1) { + tsBnDnodes.list = list1; + tsBnDnodes.maxSize = maxSize; + } } } From 
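
The two realloc patches above chase the same leak: writing p = realloc(p, n) discards the only pointer to the old block when realloc returns NULL. The fix applied at each call site is to capture the result in a temporary and only overwrite the original pointer on success; bnCheckDnodesSize additionally retries the allocation a few times. A minimal sketch of the idiom, with grow_buffer as a hypothetical helper name rather than anything in the TDengine source:

#include <stdlib.h>

/* grow *pbuf to newSize without leaking the old block when realloc fails;
   returns 0 on success, -1 on failure (the old buffer stays valid) */
static int grow_buffer(char **pbuf, size_t newSize) {
    char *tmp = realloc(*pbuf, newSize);
    if (tmp == NULL) {
        return -1;            /* caller keeps or frees the original buffer */
    }
    *pbuf = tmp;
    return 0;
}

Call sites then decide whether a failed grow is fatal (free the old buffer and return an error, as the tscPrepare.c and tscSql.c hunks do) or survivable (keep using the old, smaller buffer, as shrinkBuffer in qTsbuf.c does).
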
490702c941b0ebb29bd6471dce58d587eea37e23 Mon Sep 17 00:00:00 2001 From: tickduan <417921451@qq.com> Date: Fri, 13 Aug 2021 13:26:32 +0800 Subject: [PATCH 035/165] MsvcLibX main.c: remove a stray appended character --- deps/MsvcLibX/src/main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deps/MsvcLibX/src/main.c b/deps/MsvcLibX/src/main.c index 9b9b4a46af..85f4c83f24 100644 --- a/deps/MsvcLibX/src/main.c +++ b/deps/MsvcLibX/src/main.c @@ -223,7 +223,7 @@ int _initU(void) { codePage = GetConsoleOutputCP(); return 0; -}ß +} #endif /* defined(_WIN32) */ From 16c429760665d8486e9a3089617bb9b8de28fd5d Mon Sep 17 00:00:00 2001 From: wu champion Date: Fri, 13 Aug 2021 14:47:54 +0800 Subject: [PATCH 036/165] Update queryTestCases.py --- tests/pytest/functions/queryTestCases.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/pytest/functions/queryTestCases.py b/tests/pytest/functions/queryTestCases.py index 8523c6310e..8cd3ef6b3a 100644 --- a/tests/pytest/functions/queryTestCases.py +++ b/tests/pytest/functions/queryTestCases.py @@ -715,7 +715,7 @@ class TDTestCase: tdSql.execute(f"insert into db.t{i} (ts) values ({nowtime-10000}) ") ########### TD-5933 verify the bug of "function stddev with interval return 0 rows" is fixed ########## - stddevAndIntervalSql=f"select last(*) from t0 where ts>={nowtime-10000} interval(10a) fill(next) limit 10" + stddevAndIntervalSql=f"select last(*) from t0 where ts>={nowtime-10000} interval(10a) limit 10" tdSql.query(stddevAndIntervalSql) tdSql.checkRows(10) From 9867ea1c156ce8a46a1dadf3fcc4ec029a29cf11 Mon Sep 17 00:00:00 2001 From: Ping Xiao Date: Fri, 13 Aug 2021 14:55:56 +0800 Subject: [PATCH 037/165] [ci skip]: reduce data insertion time --- tests/perftest-scripts/perftest-query.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/perftest-scripts/perftest-query.sh b/tests/perftest-scripts/perftest-query.sh index d4853c0825..68b64fd4e0 100755 --- a/tests/perftest-scripts/perftest-query.sh +++ b/tests/perftest-scripts/perftest-query.sh @@ -105,10 +105,10 @@ function runQueryPerfTest { python3 tools/taosdemoPerformance.py -c $LOCAL_COMMIT -b $branch -T $type | tee -a $PERFORMANCE_TEST_REPORT echo "=========== taosdemo performance: 400 int columns, 400 double columns, 200 binary(128) columns, 10000 tables, 1000 recoreds per table ===========" | tee -a $PERFORMANCE_TEST_REPORT - python3 tools/taosdemoPerformance.py -c $LOCAL_COMMIT -b $branch -T $type -i 400 -D 400 -B 200 -t 10000 -r 1000 | tee -a $PERFORMANCE_TEST_REPORT + python3 tools/taosdemoPerformance.py -c $LOCAL_COMMIT -b $branch -T $type -i 400 -D 400 -B 200 -t 10000 -r 100 | tee -a $PERFORMANCE_TEST_REPORT echo "=========== taosdemo performance: 1900 int columns, 1900 double columns, 200 binary(128) columns, 10000 tables, 1000 recoreds per table ===========" | tee -a $PERFORMANCE_TEST_REPORT - python3 tools/taosdemoPerformance.py -c $LOCAL_COMMIT -b $branch -T $type -i 1900 -D 1900 -B 200 -t 10000 -r 1000 | tee -a $PERFORMANCE_TEST_REPORT + python3 tools/taosdemoPerformance.py -c $LOCAL_COMMIT -b $branch -T $type -i 1900 -D 1900 -B 200 -t 10000 -r 100 | tee -a $PERFORMANCE_TEST_REPORT } From 3948b97e85e19288859c857a3a8bbaee542ac4ad Mon Sep 17 00:00:00 2001 From: Cary Xu Date: Fri, 13 Aug 2021 15:14:17 +0800 Subject: [PATCH 038/165] [TD-6044]: WAL compatibility since v2.1.5.0 --- src/inc/taoserror.h | 1 + src/inc/twal.h | 2 +- src/util/src/terror.c | 1 + src/wal/src/walWrite.c | 97 +++++++++++++++++++++++++++++++++++++++--- 4 files changed, 94
insertions(+), 7 deletions(-) diff --git a/src/inc/taoserror.h b/src/inc/taoserror.h index 2214078f55..d7e1592911 100644 --- a/src/inc/taoserror.h +++ b/src/inc/taoserror.h @@ -306,6 +306,7 @@ int32_t* taosGetErrno(); #define TSDB_CODE_WAL_APP_ERROR TAOS_DEF_ERROR_CODE(0, 0x1000) //"Unexpected generic error in wal") #define TSDB_CODE_WAL_FILE_CORRUPTED TAOS_DEF_ERROR_CODE(0, 0x1001) //"WAL file is corrupted") #define TSDB_CODE_WAL_SIZE_LIMIT TAOS_DEF_ERROR_CODE(0, 0x1002) //"WAL size exceeds limit") +#define TSDB_CODE_WAL_OUT_OF_MEMORY TAOS_DEF_ERROR_CODE(0, 0x1003) //"WAL out of memory") // http #define TSDB_CODE_HTTP_SERVER_OFFLINE TAOS_DEF_ERROR_CODE(0, 0x1100) //"http server is not onlin") diff --git a/src/inc/twal.h b/src/inc/twal.h index bce398d6f9..868a1fbd78 100644 --- a/src/inc/twal.h +++ b/src/inc/twal.h @@ -32,7 +32,7 @@ typedef enum { typedef struct { int8_t msgType; - int8_t sver; + int8_t sver; // sver 2 for WAL SDataRow/SMemRow compatibility int8_t reserved[2]; int32_t len; uint64_t version; diff --git a/src/util/src/terror.c b/src/util/src/terror.c index 42fc76e6c9..8d2ef29c8c 100644 --- a/src/util/src/terror.c +++ b/src/util/src/terror.c @@ -314,6 +314,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_SYN_INVALID_MSGTYPE, "Invalid msg type") TAOS_DEFINE_ERROR(TSDB_CODE_WAL_APP_ERROR, "Unexpected generic error in wal") TAOS_DEFINE_ERROR(TSDB_CODE_WAL_FILE_CORRUPTED, "WAL file is corrupted") TAOS_DEFINE_ERROR(TSDB_CODE_WAL_SIZE_LIMIT, "WAL size exceeds limit") +TAOS_DEFINE_ERROR(TSDB_CODE_WAL_OUT_OF_MEMORY, "WAL out of memory") // http TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_SERVER_OFFLINE, "http server is not onlin") diff --git a/src/wal/src/walWrite.c b/src/wal/src/walWrite.c index 4774998799..c1b3c1ac3f 100644 --- a/src/wal/src/walWrite.c +++ b/src/wal/src/walWrite.c @@ -17,6 +17,7 @@ #define TAOS_RANDOM_FILE_FAIL_TEST #include "os.h" #include "taoserror.h" +#include "taosmsg.h" #include "tchecksum.h" #include "tfile.h" #include "twal.h" @@ -114,7 +115,7 @@ void walRemoveAllOldFiles(void *handle) { #if defined(WAL_CHECKSUM_WHOLE) static void walUpdateChecksum(SWalHead *pHead) { - pHead->sver = 1; + pHead->sver = 2; pHead->cksum = 0; pHead->cksum = taosCalcChecksum(0, (uint8_t *)pHead, sizeof(*pHead) + pHead->len); } @@ -122,7 +123,7 @@ static void walUpdateChecksum(SWalHead *pHead) { static int walValidateChecksum(SWalHead *pHead) { if (pHead->sver == 0) { // for compatible with wal before sver 1 return taosCheckChecksumWhole((uint8_t *)pHead, sizeof(*pHead)); - } else if (pHead->sver == 1) { + } else if (pHead->sver == 2 || pHead->sver == 1) { uint32_t cksum = pHead->cksum; pHead->cksum = 0; return taosCheckChecksum((uint8_t *)pHead, sizeof(*pHead) + pHead->len, cksum); @@ -281,7 +282,7 @@ static int32_t walSkipCorruptedRecord(SWal *pWal, SWalHead *pHead, int64_t tfd, return TSDB_CODE_SUCCESS; } - if (pHead->sver == 1) { + if (pHead->sver == 2 || pHead->sver == 1) { if (tfRead(tfd, pHead->cont, pHead->len) < pHead->len) { wError("vgId:%d, read to end of corrupted wal file, offset:%" PRId64, pWal->vgId, pos); return TSDB_CODE_WAL_FILE_CORRUPTED; @@ -306,7 +307,85 @@ static int32_t walSkipCorruptedRecord(SWal *pWal, SWalHead *pHead, int64_t tfd, return TSDB_CODE_WAL_FILE_CORRUPTED; } +// Add SMemRowType ahead of SDataRow +static void expandSubmitBlk(SSubmitBlk *pDest, SSubmitBlk *pSrc, int32_t *lenExpand) { + memcpy(pDest, pSrc, sizeof(SSubmitBlk)); + int nRows = htons(pSrc->numOfRows); + if (nRows <= 0) { + return; + } + char *pDestData = pDest->data; + char *pSrcData = pSrc->data; + for (int i 
= 0; i < nRows; ++i) { + memRowSetType(pDestData, SMEM_ROW_DATA); + memcpy(memRowDataBody(pDestData), pSrcData, dataRowLen(pSrcData)); + pDestData = POINTER_SHIFT(pDestData, memRowTLen(pDestData)); + pSrcData = POINTER_SHIFT(pSrcData, dataRowLen(pSrcData)); + ++(*lenExpand); + } + int32_t dataLen = htonl(pDest->dataLen); + pDest->dataLen = htonl(dataLen + nRows * sizeof(uint8_t)); +} +static bool walIsSDataRow(void *pBlkData, int nRows, int32_t dataLen) { + int32_t len = 0; + for (int i = 0; i < nRows; ++i) { + len += dataRowLen(pBlkData); + if (len > dataLen) { + return false; + } + pBlkData = POINTER_SHIFT(pBlkData, dataRowLen(pBlkData)); + } + if (len != dataLen) { + return false; + } + return true; +} +// for WAL SMemRow/SDataRow compatibility +static int walSMemRowCheck(SWalHead *pHead) { + if ((pHead->sver < 2) && (pHead->msgType == TSDB_MSG_TYPE_SUBMIT)) { + SSubmitMsg *pMsg = (SSubmitMsg *)pHead->cont; + int32_t numOfBlocks = htonl(pMsg->numOfBlocks); + if (numOfBlocks <= 0) { + return 0; + } + + int32_t nTotalRows = 0; + SSubmitBlk *pBlk = (SSubmitBlk *)pMsg->blocks; + for (int32_t i = 0; i < numOfBlocks; ++i) { + int32_t dataLen = htonl(pBlk->dataLen); + int32_t nRows = htons(pBlk->numOfRows); + nTotalRows += nRows; + if (!walIsSDataRow(pBlk->data, nRows, dataLen)) { + return 0; + } + pBlk = (SSubmitBlk *)POINTER_SHIFT(pBlk, sizeof(SSubmitBlk) + dataLen); + } + + SWalHead *pWalHead = (SWalHead *)calloc(sizeof(SWalHead) + pHead->len + nTotalRows * sizeof(uint8_t), 1); + if (pWalHead == NULL) { + return TSDB_CODE_WAL_OUT_OF_MEMORY; + } + // len should be updated + memcpy(pWalHead, pHead, sizeof(SWalHead) + sizeof(SSubmitMsg)); + + SSubmitMsg *pDestMsg = (SSubmitMsg *)pWalHead->cont; + SSubmitBlk *pDestBlks = (SSubmitBlk *)pDestMsg->blocks; + SSubmitBlk *pSrcBlks = (SSubmitBlk *)pMsg->blocks; + int32_t lenExpand = 0; + for (int32_t i = 0; i < numOfBlocks; ++i) { + expandSubmitBlk(pDestBlks, pSrcBlks, &lenExpand); + } + if (lenExpand > 0) { + pDestMsg->length = htonl(htonl(pDestMsg->length) + lenExpand); + pWalHead->len = pWalHead->len + lenExpand; + } + + memcpy(pHead, pWalHead, sizeof(SWalHead) + pWalHead->len); + tfree(pWalHead); + } + return 0; +} static int32_t walRestoreWalFile(SWal *pWal, void *pVnode, FWalWrite writeFp, char *name, int64_t fileId) { int32_t size = WAL_MAX_SIZE; @@ -346,7 +425,7 @@ static int32_t walRestoreWalFile(SWal *pWal, void *pVnode, FWalWrite writeFp, ch } #if defined(WAL_CHECKSUM_WHOLE) - if ((pHead->sver == 0 && !walValidateChecksum(pHead)) || pHead->sver < 0 || pHead->sver > 1) { + if ((pHead->sver == 0 && !walValidateChecksum(pHead)) || pHead->sver < 0 || pHead->sver > 2) { wError("vgId:%d, file:%s, wal head cksum is messed up, hver:%" PRIu64 " len:%d offset:%" PRId64, pWal->vgId, name, pHead->version, pHead->len, offset); code = walSkipCorruptedRecord(pWal, pHead, tfd, &offset); @@ -379,7 +458,7 @@ static int32_t walRestoreWalFile(SWal *pWal, void *pVnode, FWalWrite writeFp, ch continue; } - if (pHead->sver == 1 && !walValidateChecksum(pHead)) { + if ((pHead->sver == 2 || pHead->sver == 1) && !walValidateChecksum(pHead)) { wError("vgId:%d, file:%s, wal whole cksum is messed up, hver:%" PRIu64 " len:%d offset:%" PRId64, pWal->vgId, name, pHead->version, pHead->len, offset); code = walSkipCorruptedRecord(pWal, pHead, tfd, &offset); @@ -431,7 +510,13 @@ static int32_t walRestoreWalFile(SWal *pWal, void *pVnode, FWalWrite writeFp, ch pWal->version = pHead->version; - //wInfo("writeFp: %ld", offset); + // wInfo("writeFp: %ld", offset); + + if (0 != 
walSMemRowCheck(pHead)) { + wError("vgId:%d, restore wal, fileId:%" PRId64 " hver:%" PRIu64 " wver:%" PRIu64 " len:%d offset:%" PRId64, + pWal->vgId, fileId, pHead->version, pWal->version, pHead->len, offset); + } + (*writeFp)(pVnode, pHead, TAOS_QTYPE_WAL, NULL); } From 56d0d69a487bd91bfe5ce293bcbc8187e2aba048 Mon Sep 17 00:00:00 2001 From: Hongze Cheng Date: Fri, 13 Aug 2021 17:05:30 +0800 Subject: [PATCH 039/165] [TD-5538]: add --force-keep-file option --- src/common/src/tglobal.c | 3 +++ src/dnode/src/dnodeSystem.c | 2 ++ src/tsdb/inc/tsdbFS.h | 3 +++ src/tsdb/src/tsdbFS.c | 38 +++++++++++++++++++++++++++++++++++++ src/util/inc/tconfig.h | 1 + 5 files changed, 47 insertions(+) diff --git a/src/common/src/tglobal.c b/src/common/src/tglobal.c index ec7249cef5..198d57b6b9 100644 --- a/src/common/src/tglobal.c +++ b/src/common/src/tglobal.c @@ -26,6 +26,9 @@ #include "tlocale.h" #include "ttimezone.h" +// TSDB +bool tsdbForceKeepFile = false; + // cluster char tsFirst[TSDB_EP_LEN] = {0}; char tsSecond[TSDB_EP_LEN] = {0}; diff --git a/src/dnode/src/dnodeSystem.c b/src/dnode/src/dnodeSystem.c index e49b3eba99..c3e4dae206 100644 --- a/src/dnode/src/dnodeSystem.c +++ b/src/dnode/src/dnodeSystem.c @@ -42,6 +42,8 @@ int32_t main(int32_t argc, char *argv[]) { } } else if (strcmp(argv[i], "-C") == 0) { dump_config = 1; + } else if (strcmp(argv[i], "--force-keep-file") == 0) { + tsdbForceKeepFile = true; } else if (strcmp(argv[i], "-V") == 0) { #ifdef _ACCT char *versionStr = "enterprise"; diff --git a/src/tsdb/inc/tsdbFS.h b/src/tsdb/inc/tsdbFS.h index d63aeb14ac..367d905522 100644 --- a/src/tsdb/inc/tsdbFS.h +++ b/src/tsdb/inc/tsdbFS.h @@ -18,6 +18,9 @@ #define TSDB_FS_VERSION 0 +// ================== TSDB global config +extern bool tsdbForceKeepFile; + // ================== CURRENT file header info typedef struct { uint32_t version; // Current file system version (relating to code) diff --git a/src/tsdb/src/tsdbFS.c b/src/tsdb/src/tsdbFS.c index c9f087a5cf..37f9f0af98 100644 --- a/src/tsdb/src/tsdbFS.c +++ b/src/tsdb/src/tsdbFS.c @@ -982,6 +982,26 @@ static int tsdbRestoreMeta(STsdbRepo *pRepo) { return -1; } + if (tsdbForceKeepFile) { + struct stat tfstat; + + // Get real file size + if (fstat(pfs->cstatus->pmf->fd, &tfstat) < 0) { + terrno = TAOS_SYSTEM_ERROR(errno); + tsdbCloseMFile(pfs->cstatus->pmf); + tfsClosedir(tdir); + regfree(®ex); + return -1; + } + + if (pfs->cstatus->pmf->info.size != tfstat.st_size) { + int64_t tfsize = pfs->cstatus->pmf->info.size; + pfs->cstatus->pmf->info.size = tfstat.st_size; + tsdbInfo("vgId:%d file %s header size is changed from %" PRId64 " to %" PRId64, REPO_ID(pRepo), + TSDB_FILE_FULL_NAME(pfs->cstatus->pmf), tfsize, pfs->cstatus->pmf->info.size); + } + } + tsdbCloseMFile(pfs->cstatus->pmf); } } else if (code == REG_NOMATCH) { @@ -1141,6 +1161,24 @@ static int tsdbRestoreDFileSet(STsdbRepo *pRepo) { return -1; } + if (tsdbForceKeepFile) { + struct stat tfstat; + + // Get real file size + if (fstat(pDFile->fd, &tfstat) < 0) { + terrno = TAOS_SYSTEM_ERROR(errno); + taosArrayDestroy(fArray); + return -1; + } + + if (pDFile->info.size != tfstat.st_size) { + int64_t tfsize = pDFile->info.size; + pDFile->info.size = tfstat.st_size; + tsdbInfo("vgId:%d file %s header size is changed from %" PRId64 " to %" PRId64, REPO_ID(pRepo), + TSDB_FILE_FULL_NAME(pDFile), tfsize, pDFile->info.size); + } + } + tsdbCloseDFile(pDFile); index++; } diff --git a/src/util/inc/tconfig.h b/src/util/inc/tconfig.h index fdb2595fd8..d6e0f4ca46 100644 --- a/src/util/inc/tconfig.h +++ 
b/src/util/inc/tconfig.h @@ -77,6 +77,7 @@ typedef struct { extern SGlobalCfg tsGlobalConfig[]; extern int32_t tsGlobalConfigNum; extern char * tsCfgStatusStr[]; +extern bool tsdbForceKeepFile; void taosReadGlobalLogCfg(); bool taosReadGlobalCfg(); From 124be91c397bd2889961b4e8dd7d4475e7c9effd Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Fri, 13 Aug 2021 17:19:19 +0800 Subject: [PATCH 040/165] speed up drone in master --- .drone.yml | 28 ++++++++++++++++------------ 1 file changed, 16 insertions(+), 12 deletions(-) diff --git a/.drone.yml b/.drone.yml index b520f308ba..085a07acf9 100644 --- a/.drone.yml +++ b/.drone.yml @@ -15,7 +15,7 @@ steps: - mkdir debug - cd debug - cmake .. - - make + - make -j4 trigger: event: - pull_request @@ -23,6 +23,7 @@ steps: branch: - develop - master + - 2.0 --- kind: pipeline name: test_arm64_bionic @@ -39,7 +40,7 @@ steps: - mkdir debug - cd debug - cmake .. -DCPUTYPE=aarch64 > /dev/null - - make + - make -j4 trigger: event: - pull_request @@ -66,7 +67,7 @@ steps: - mkdir debug - cd debug - cmake .. -DCPUTYPE=aarch64 > /dev/null - - make + - make -j4 trigger: event: - pull_request @@ -91,7 +92,7 @@ steps: - mkdir debug - cd debug - cmake .. -DCPUTYPE=aarch64 > /dev/null - - make + - make -j4 trigger: event: - pull_request @@ -116,7 +117,7 @@ steps: - mkdir debug - cd debug - cmake .. -DCPUTYPE=aarch64 > /dev/null - - make + - make -j4 trigger: event: - pull_request @@ -142,7 +143,7 @@ steps: - mkdir debug - cd debug - cmake .. -DCPUTYPE=aarch32 > /dev/null - - make + - make -j4 trigger: event: - pull_request @@ -150,6 +151,7 @@ steps: branch: - develop - master + - 2.0 --- kind: pipeline name: build_trusty @@ -168,7 +170,7 @@ steps: - mkdir debug - cd debug - cmake .. - - make + - make -j4 trigger: event: - pull_request @@ -176,6 +178,7 @@ steps: branch: - develop - master + - 2.0 --- kind: pipeline name: build_xenial @@ -193,7 +196,7 @@ steps: - mkdir debug - cd debug - cmake .. - - make + - make -j4 trigger: event: - pull_request @@ -201,7 +204,7 @@ steps: branch: - develop - master - + - 2.0 --- kind: pipeline name: build_bionic @@ -218,7 +221,7 @@ steps: - mkdir debug - cd debug - cmake .. - - make + - make -j4 trigger: event: - pull_request @@ -226,6 +229,7 @@ steps: branch: - develop - master + - 2.0 --- kind: pipeline name: build_centos7 @@ -241,7 +245,7 @@ steps: - mkdir debug - cd debug - cmake .. - - make + - make -j4 trigger: event: - pull_request @@ -249,4 +253,4 @@ steps: branch: - develop - master - + - 2.0 \ No newline at end of file From 7d0e9c515e1902df66fb4a97f8234a100fae7883 Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Fri, 13 Aug 2021 17:21:36 +0800 Subject: [PATCH 041/165] speed up drone in 2.0 --- .drone.yml | 28 ++++++++++++++++------------ 1 file changed, 16 insertions(+), 12 deletions(-) diff --git a/.drone.yml b/.drone.yml index b520f308ba..085a07acf9 100644 --- a/.drone.yml +++ b/.drone.yml @@ -15,7 +15,7 @@ steps: - mkdir debug - cd debug - cmake .. - - make + - make -j4 trigger: event: - pull_request @@ -23,6 +23,7 @@ steps: branch: - develop - master + - 2.0 --- kind: pipeline name: test_arm64_bionic @@ -39,7 +40,7 @@ steps: - mkdir debug - cd debug - cmake .. -DCPUTYPE=aarch64 > /dev/null - - make + - make -j4 trigger: event: - pull_request @@ -66,7 +67,7 @@ steps: - mkdir debug - cd debug - cmake .. -DCPUTYPE=aarch64 > /dev/null - - make + - make -j4 trigger: event: - pull_request @@ -91,7 +92,7 @@ steps: - mkdir debug - cd debug - cmake .. 
-DCPUTYPE=aarch64 > /dev/null - - make + - make -j4 trigger: event: - pull_request @@ -116,7 +117,7 @@ steps: - mkdir debug - cd debug - cmake .. -DCPUTYPE=aarch64 > /dev/null - - make + - make -j4 trigger: event: - pull_request @@ -142,7 +143,7 @@ steps: - mkdir debug - cd debug - cmake .. -DCPUTYPE=aarch32 > /dev/null - - make + - make -j4 trigger: event: - pull_request @@ -150,6 +151,7 @@ steps: branch: - develop - master + - 2.0 --- kind: pipeline name: build_trusty @@ -168,7 +170,7 @@ steps: - mkdir debug - cd debug - cmake .. - - make + - make -j4 trigger: event: - pull_request @@ -176,6 +178,7 @@ steps: branch: - develop - master + - 2.0 --- kind: pipeline name: build_xenial @@ -193,7 +196,7 @@ steps: - mkdir debug - cd debug - cmake .. - - make + - make -j4 trigger: event: - pull_request @@ -201,7 +204,7 @@ steps: branch: - develop - master - + - 2.0 --- kind: pipeline name: build_bionic @@ -218,7 +221,7 @@ steps: - mkdir debug - cd debug - cmake .. - - make + - make -j4 trigger: event: - pull_request @@ -226,6 +229,7 @@ steps: branch: - develop - master + - 2.0 --- kind: pipeline name: build_centos7 @@ -241,7 +245,7 @@ steps: - mkdir debug - cd debug - cmake .. - - make + - make -j4 trigger: event: - pull_request @@ -249,4 +253,4 @@ steps: branch: - develop - master - + - 2.0 \ No newline at end of file From d8622364aafb84b2030e37f94b6266909ff195fa Mon Sep 17 00:00:00 2001 From: wpan Date: Fri, 13 Aug 2021 17:22:19 +0800 Subject: [PATCH 042/165] fix interp stable query issue --- src/client/src/tscSubquery.c | 1 - src/query/src/qAggMain.c | 18 +- src/query/src/qExecutor.c | 10 +- src/query/src/qPlan.c | 2 +- tests/script/general/parser/interp_test.sim | 246 ++++++++++++++++++++ 5 files changed, 263 insertions(+), 14 deletions(-) diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c index 2e7e02cfd5..1a138f2b48 100644 --- a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -2812,7 +2812,6 @@ static void tscAllDataRetrievedFromDnode(SRetrieveSupport *trsupport, SSqlObj* p pParentSql->self, pState->numOfSub, pState->numOfRetrievedRows); SQueryInfo *pPQueryInfo = tscGetQueryInfo(&pParentSql->cmd); - tscClearInterpInfo(pPQueryInfo); code = tscCreateGlobalMerger(trsupport->pExtMemBuffer, pState->numOfSub, pDesc, pPQueryInfo, &pParentSql->res.pMerger, pParentSql->self); pParentSql->res.code = code; diff --git a/src/query/src/qAggMain.c b/src/query/src/qAggMain.c index 6eadedcaf3..14d3f4e417 100644 --- a/src/query/src/qAggMain.c +++ b/src/query/src/qAggMain.c @@ -3670,6 +3670,8 @@ static void interp_function_impl(SQLFunctionCtx *pCtx) { return; } + bool ascQuery = (pCtx->order == TSDB_ORDER_ASC); + if (pCtx->inputType == TSDB_DATA_TYPE_TIMESTAMP) { *(TSKEY *)pCtx->pOutput = pCtx->startTs; } else if (type == TSDB_FILL_NULL) { @@ -3677,7 +3679,7 @@ static void interp_function_impl(SQLFunctionCtx *pCtx) { } else if (type == TSDB_FILL_SET_VALUE) { tVariantDump(&pCtx->param[1], pCtx->pOutput, pCtx->inputType, true); } else { - if (pCtx->start.key != INT64_MIN && pCtx->start.key < pCtx->startTs && pCtx->end.key > pCtx->startTs) { + if (pCtx->start.key != INT64_MIN && ((ascQuery && pCtx->start.key <= pCtx->startTs && pCtx->end.key >= pCtx->startTs) || ((!ascQuery) && pCtx->start.key >= pCtx->startTs && pCtx->end.key <= pCtx->startTs))) { if (type == TSDB_FILL_PREV) { if (IS_NUMERIC_TYPE(pCtx->inputType) || pCtx->inputType == TSDB_DATA_TYPE_BOOL) { SET_TYPED_DATA(pCtx->pOutput, pCtx->inputType, pCtx->start.val); @@ -3716,13 +3718,14 @@ static void 
interp_function_impl(SQLFunctionCtx *pCtx) { TSKEY skey = GET_TS_DATA(pCtx, 0); if (type == TSDB_FILL_PREV) { - if (skey > pCtx->startTs) { + if ((ascQuery && skey > pCtx->startTs) || ((!ascQuery) && skey < pCtx->startTs)) { return; } if (pCtx->size > 1) { TSKEY ekey = GET_TS_DATA(pCtx, 1); - if (ekey > skey && ekey <= pCtx->startTs) { + if ((ascQuery && ekey > skey && ekey <= pCtx->startTs) || + ((!ascQuery) && ekey < skey && ekey >= pCtx->startTs)){ skey = ekey; } } @@ -3731,10 +3734,10 @@ static void interp_function_impl(SQLFunctionCtx *pCtx) { TSKEY ekey = skey; char* val = NULL; - if (ekey < pCtx->startTs) { + if ((ascQuery && ekey < pCtx->startTs) || ((!ascQuery) && ekey > pCtx->startTs)) { if (pCtx->size > 1) { ekey = GET_TS_DATA(pCtx, 1); - if (ekey < pCtx->startTs) { + if ((ascQuery && ekey < pCtx->startTs) || ((!ascQuery) && ekey > pCtx->startTs)) { return; } @@ -3755,12 +3758,11 @@ static void interp_function_impl(SQLFunctionCtx *pCtx) { TSKEY ekey = GET_TS_DATA(pCtx, 1); // no data generated yet - if (!(skey < pCtx->startTs && ekey > pCtx->startTs)) { + if ((ascQuery && !(skey <= pCtx->startTs && ekey >= pCtx->startTs)) + || ((!ascQuery) && !(skey >= pCtx->startTs && ekey <= pCtx->startTs))) { return; } - assert(pCtx->start.key == INT64_MIN && skey < pCtx->startTs && ekey > pCtx->startTs); - char *start = GET_INPUT_DATA(pCtx, 0); char *end = GET_INPUT_DATA(pCtx, 1); diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index b1fb2add1b..bb16b74d3d 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -1595,6 +1595,7 @@ static void hashAllIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pRe SResultRow* pResult = NULL; int32_t forwardStep = 0; int32_t ret = 0; + STimeWindow preWin = win; while (1) { // null data, failed to allocate more memory buffer @@ -1609,12 +1610,13 @@ static void hashAllIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pRe // window start(end) key interpolation doWindowBorderInterpolation(pOperatorInfo, pSDataBlock, pInfo->pCtx, pResult, &win, startPos, forwardStep); - doApplyFunctions(pRuntimeEnv, pInfo->pCtx, &win, startPos, forwardStep, tsCols, pSDataBlock->info.rows, numOfOutput); - + doApplyFunctions(pRuntimeEnv, pInfo->pCtx, ascQuery ? &win : &preWin, startPos, forwardStep, tsCols, pSDataBlock->info.rows, numOfOutput); + preWin = win; + int32_t prevEndPos = (forwardStep - 1) * step + startPos; startPos = getNextQualifiedWindow(pQueryAttr, &win, &pSDataBlock->info, tsCols, binarySearchForKey, prevEndPos); if (startPos < 0) { - if (win.skey <= pQueryAttr->window.ekey) { + if ((ascQuery && win.skey <= pQueryAttr->window.ekey) || ((!ascQuery) && win.ekey >= pQueryAttr->window.ekey)) { int32_t code = setResultOutputBufByKey(pRuntimeEnv, pResultRowInfo, pSDataBlock->info.tid, &win, masterScan, &pResult, tableGroupId, pInfo->pCtx, numOfOutput, pInfo->rowCellInfoOffset); if (code != TSDB_CODE_SUCCESS || pResult == NULL) { @@ -1625,7 +1627,7 @@ static void hashAllIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pRe // window start(end) key interpolation doWindowBorderInterpolation(pOperatorInfo, pSDataBlock, pInfo->pCtx, pResult, &win, startPos, forwardStep); - doApplyFunctions(pRuntimeEnv, pInfo->pCtx, &win, startPos, forwardStep, tsCols, pSDataBlock->info.rows, numOfOutput); + doApplyFunctions(pRuntimeEnv, pInfo->pCtx, ascQuery ? 
&win : &preWin, startPos, forwardStep, tsCols, pSDataBlock->info.rows, numOfOutput); } break; diff --git a/src/query/src/qPlan.c b/src/query/src/qPlan.c index b8a5ee7699..ad286f7afa 100644 --- a/src/query/src/qPlan.c +++ b/src/query/src/qPlan.c @@ -693,7 +693,7 @@ SArray* createGlobalMergePlan(SQueryAttr* pQueryAttr) { } // fill operator - if (pQueryAttr->fillType != TSDB_FILL_NONE && (!pQueryAttr->pointInterpQuery)) { + if (pQueryAttr->fillType != TSDB_FILL_NONE && pQueryAttr->interval.interval > 0) { op = OP_Fill; taosArrayPush(plan, &op); } diff --git a/tests/script/general/parser/interp_test.sim b/tests/script/general/parser/interp_test.sim index 845afb0173..5a2021dcfc 100644 --- a/tests/script/general/parser/interp_test.sim +++ b/tests/script/general/parser/interp_test.sim @@ -930,8 +930,254 @@ if $data44 != @18-11-25 19:06:00.000@ then endi +sql select interp(c1) from intp_stb0 where ts >= '2018-09-17 20:35:00.000' and ts <= '2018-09-17 20:42:00.000' interval(1m) fill(linear); +if $rows != 8 then + return -1 +endi +if $data00 != @18-09-17 20:35:00.000@ then + return -1 +endi +if $data01 != NULL then + return -1 +endi +if $data10 != @18-09-17 20:36:00.000@ then + return -1 +endi +if $data11 != NULL then + return -1 +endi +if $data20 != @18-09-17 20:37:00.000@ then + return -1 +endi +if $data21 != NULL then + return -1 +endi +if $data30 != @18-09-17 20:38:00.000@ then + return -1 +endi +if $data31 != NULL then + return -1 +endi +if $data40 != @18-09-17 20:39:00.000@ then + return -1 +endi +if $data41 != NULL then + return -1 +endi +if $data50 != @18-09-17 20:40:00.000@ then + return -1 +endi +if $data51 != 0 then + return -1 +endi +if $data60 != @18-09-17 20:41:00.000@ then + return -1 +endi +if $data61 != NULL then + return -1 +endi +if $data70 != @18-09-17 20:42:00.000@ then + return -1 +endi +if $data71 != NULL then + return -1 +endi +sql select interp(c1) from intp_stb0 where ts >= '2018-09-17 20:35:00.000' and ts <= '2018-09-17 20:42:00.000' interval(1m) fill(linear) order by ts desc; +if $rows != 8 then + return -1 +endi +if $data00 != @18-09-17 20:42:00.000@ then + return -1 +endi +if $data01 != NULL then + return -1 +endi +if $data10 != @18-09-17 20:41:00.000@ then + return -1 +endi +if $data11 != NULL then + return -1 +endi +if $data20 != @18-09-17 20:40:00.000@ then + return -1 +endi +if $data21 != 0 then + return -1 +endi +if $data30 != @18-09-17 20:39:00.000@ then + return -1 +endi +if $data31 != NULL then + return -1 +endi +if $data40 != @18-09-17 20:38:00.000@ then + return -1 +endi +if $data41 != NULL then + return -1 +endi +if $data50 != @18-09-17 20:37:00.000@ then + return -1 +endi +if $data51 != NULL then + return -1 +endi +if $data60 != @18-09-17 20:36:00.000@ then + return -1 +endi +if $data61 != NULL then + return -1 +endi +if $data70 != @18-09-17 20:35:00.000@ then + return -1 +endi +if $data71 != NULL then + return -1 +endi + +sql select interp(c3) from intp_stb0 where ts >= '2018-09-17 20:35:00.000' and ts <= '2018-09-17 20:50:00.000' interval(2m) fill(linear) order by ts; +if $rows != 9 then + return -1 +endi +if $data00 != @18-09-17 20:34:00.000@ then + return -1 +endi +if $data01 != NULL then + return -1 +endi +if $data10 != @18-09-17 20:36:00.000@ then + return -1 +endi +if $data11 != NULL then + return -1 +endi +if $data20 != @18-09-17 20:38:00.000@ then + return -1 +endi +if $data21 != NULL then + return -1 +endi +if $data30 != @18-09-17 20:40:00.000@ then + return -1 +endi +if $data31 != 0.00000 then + return -1 +endi +if $data40 != @18-09-17 
20:42:00.000@ then + return -1 +endi +if $data41 != 0.20000 then + return -1 +endi +if $data50 != @18-09-17 20:44:00.000@ then + return -1 +endi +if $data51 != 0.40000 then + return -1 +endi +if $data60 != @18-09-17 20:46:00.000@ then + return -1 +endi +if $data61 != 0.60000 then + return -1 +endi +if $data70 != @18-09-17 20:48:00.000@ then + return -1 +endi +if $data71 != 0.80000 then + return -1 +endi +if $data80 != @18-09-17 20:50:00.000@ then + return -1 +endi +if $data81 != 1.00000 then + return -1 +endi + + +sql select interp(c3) from intp_stb0 where ts >= '2018-09-17 20:35:00.000' and ts <= '2018-09-17 20:50:00.000' interval(3m) fill(linear) order by ts; +if $rows != 6 then + return -1 +endi +if $data00 != @18-09-17 20:33:00.000@ then + return -1 +endi +if $data01 != NULL then + return -1 +endi +if $data10 != @18-09-17 20:36:00.000@ then + return -1 +endi +if $data11 != NULL then + return -1 +endi +if $data20 != @18-09-17 20:39:00.000@ then + return -1 +endi +if $data21 != NULL then + return -1 +endi +if $data30 != @18-09-17 20:42:00.000@ then + return -1 +endi +if $data31 != 0.20000 then + return -1 +endi +if $data40 != @18-09-17 20:45:00.000@ then + return -1 +endi +if $data41 != 0.50000 then + return -1 +endi +if $data50 != @18-09-17 20:48:00.000@ then + return -1 +endi +if $data51 != 0.80000 then + return -1 +endi + +sql select interp(c3) from intp_stb0 where ts >= '2018-09-17 20:35:00.000' and ts <= '2018-09-17 20:50:00.000' interval(3m) fill(linear) order by ts desc; +if $rows != 6 then + return -1 +endi +if $data00 != @18-09-17 20:48:00.000@ then + return -1 +endi +if $data01 != 0.80000 then + return -1 +endi +if $data10 != @18-09-17 20:45:00.000@ then + return -1 +endi +if $data11 != 0.50000 then + return -1 +endi +if $data20 != @18-09-17 20:42:00.000@ then + return -1 +endi +if $data21 != 0.20000 then + return -1 +endi +if $data30 != @18-09-17 20:39:00.000@ then + return -1 +endi +if $data31 != NULL then + return -1 +endi +if $data40 != @18-09-17 20:36:00.000@ then + return -1 +endi +if $data41 != NULL then + return -1 +endi +if $data50 != @18-09-17 20:33:00.000@ then + return -1 +endi +if $data51 != NULL then + return -1 +endi From eebc5d40000f793a6cf85778425b5d0f763c17c1 Mon Sep 17 00:00:00 2001 From: Cary Xu Date: Fri, 13 Aug 2021 18:04:43 +0800 Subject: [PATCH 043/165] bug fix --- src/wal/src/walWrite.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/src/wal/src/walWrite.c b/src/wal/src/walWrite.c index c1b3c1ac3f..3b1eb55a72 100644 --- a/src/wal/src/walWrite.c +++ b/src/wal/src/walWrite.c @@ -375,15 +375,18 @@ static int walSMemRowCheck(SWalHead *pHead) { int32_t lenExpand = 0; for (int32_t i = 0; i < numOfBlocks; ++i) { expandSubmitBlk(pDestBlks, pSrcBlks, &lenExpand); + pDestBlks = POINTER_SHIFT(pDestBlks, htonl(pDestBlks->dataLen) + sizeof(SSubmitBlk)); + pSrcBlks = POINTER_SHIFT(pSrcBlks, htonl(pSrcBlks->dataLen) + sizeof(SSubmitBlk)); } if (lenExpand > 0) { - pDestMsg->length = htonl(htonl(pDestMsg->length) + lenExpand); + pDestMsg->header.contLen = htonl(pDestMsg->length) + lenExpand; + pDestMsg->length = htonl(pDestMsg->header.contLen); pWalHead->len = pWalHead->len + lenExpand; } memcpy(pHead, pWalHead, sizeof(SWalHead) + pWalHead->len); tfree(pWalHead); - } + } return 0; } @@ -511,12 +514,10 @@ static int32_t walRestoreWalFile(SWal *pWal, void *pVnode, FWalWrite writeFp, ch pWal->version = pHead->version; // wInfo("writeFp: %ld", offset); - if (0 != walSMemRowCheck(pHead)) { wError("vgId:%d, restore wal, fileId:%" PRId64 " hver:%" 
PRIu64 " wver:%" PRIu64 " len:%d offset:%" PRId64, pWal->vgId, fileId, pHead->version, pWal->version, pHead->len, offset); } - (*writeFp)(pVnode, pHead, TAOS_QTYPE_WAL, NULL); } From c06123053faac12b3d8a5452d177e6b601c38668 Mon Sep 17 00:00:00 2001 From: Cary Xu Date: Fri, 13 Aug 2021 18:17:29 +0800 Subject: [PATCH 044/165] code optimization --- src/wal/src/walWrite.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/wal/src/walWrite.c b/src/wal/src/walWrite.c index 3b1eb55a72..8d311d5e3e 100644 --- a/src/wal/src/walWrite.c +++ b/src/wal/src/walWrite.c @@ -366,7 +366,7 @@ static int walSMemRowCheck(SWalHead *pHead) { if (pWalHead == NULL) { return TSDB_CODE_WAL_OUT_OF_MEMORY; } - // len should be updated + memcpy(pWalHead, pHead, sizeof(SWalHead) + sizeof(SSubmitMsg)); SSubmitMsg *pDestMsg = (SSubmitMsg *)pWalHead->cont; @@ -386,7 +386,7 @@ static int walSMemRowCheck(SWalHead *pHead) { memcpy(pHead, pWalHead, sizeof(SWalHead) + pWalHead->len); tfree(pWalHead); - } + } return 0; } From 9c73bb0dc5c84d022f9526acd6efcc62a6a1da9d Mon Sep 17 00:00:00 2001 From: Cary Xu Date: Fri, 13 Aug 2021 18:43:25 +0800 Subject: [PATCH 045/165] code optimization --- src/inc/taoserror.h | 1 - src/util/src/terror.c | 1 - src/wal/src/walWrite.c | 9 +++++---- 3 files changed, 5 insertions(+), 6 deletions(-) diff --git a/src/inc/taoserror.h b/src/inc/taoserror.h index d7e1592911..2214078f55 100644 --- a/src/inc/taoserror.h +++ b/src/inc/taoserror.h @@ -306,7 +306,6 @@ int32_t* taosGetErrno(); #define TSDB_CODE_WAL_APP_ERROR TAOS_DEF_ERROR_CODE(0, 0x1000) //"Unexpected generic error in wal") #define TSDB_CODE_WAL_FILE_CORRUPTED TAOS_DEF_ERROR_CODE(0, 0x1001) //"WAL file is corrupted") #define TSDB_CODE_WAL_SIZE_LIMIT TAOS_DEF_ERROR_CODE(0, 0x1002) //"WAL size exceeds limit") -#define TSDB_CODE_WAL_OUT_OF_MEMORY TAOS_DEF_ERROR_CODE(0, 0x1003) //"WAL out of memory") // http #define TSDB_CODE_HTTP_SERVER_OFFLINE TAOS_DEF_ERROR_CODE(0, 0x1100) //"http server is not onlin") diff --git a/src/util/src/terror.c b/src/util/src/terror.c index 8d2ef29c8c..42fc76e6c9 100644 --- a/src/util/src/terror.c +++ b/src/util/src/terror.c @@ -314,7 +314,6 @@ TAOS_DEFINE_ERROR(TSDB_CODE_SYN_INVALID_MSGTYPE, "Invalid msg type") TAOS_DEFINE_ERROR(TSDB_CODE_WAL_APP_ERROR, "Unexpected generic error in wal") TAOS_DEFINE_ERROR(TSDB_CODE_WAL_FILE_CORRUPTED, "WAL file is corrupted") TAOS_DEFINE_ERROR(TSDB_CODE_WAL_SIZE_LIMIT, "WAL size exceeds limit") -TAOS_DEFINE_ERROR(TSDB_CODE_WAL_OUT_OF_MEMORY, "WAL out of memory") // http TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_SERVER_OFFLINE, "http server is not onlin") diff --git a/src/wal/src/walWrite.c b/src/wal/src/walWrite.c index 8d311d5e3e..2dfdb84818 100644 --- a/src/wal/src/walWrite.c +++ b/src/wal/src/walWrite.c @@ -123,7 +123,7 @@ static void walUpdateChecksum(SWalHead *pHead) { static int walValidateChecksum(SWalHead *pHead) { if (pHead->sver == 0) { // for compatible with wal before sver 1 return taosCheckChecksumWhole((uint8_t *)pHead, sizeof(*pHead)); - } else if (pHead->sver == 2 || pHead->sver == 1) { + } else if (pHead->sver >= 1) { uint32_t cksum = pHead->cksum; pHead->cksum = 0; return taosCheckChecksum((uint8_t *)pHead, sizeof(*pHead) + pHead->len, cksum); @@ -282,7 +282,7 @@ static int32_t walSkipCorruptedRecord(SWal *pWal, SWalHead *pHead, int64_t tfd, return TSDB_CODE_SUCCESS; } - if (pHead->sver == 2 || pHead->sver == 1) { + if (pHead->sver >= 1) { if (tfRead(tfd, pHead->cont, pHead->len) < pHead->len) { wError("vgId:%d, read to end of corrupted wal file, offset:%" 
PRId64, pWal->vgId, pos); return TSDB_CODE_WAL_FILE_CORRUPTED; @@ -364,7 +364,7 @@ static int walSMemRowCheck(SWalHead *pHead) { SWalHead *pWalHead = (SWalHead *)calloc(sizeof(SWalHead) + pHead->len + nTotalRows * sizeof(uint8_t), 1); if (pWalHead == NULL) { - return TSDB_CODE_WAL_OUT_OF_MEMORY; + return -1; } memcpy(pWalHead, pHead, sizeof(SWalHead) + sizeof(SSubmitMsg)); @@ -461,7 +461,7 @@ static int32_t walRestoreWalFile(SWal *pWal, void *pVnode, FWalWrite writeFp, ch continue; } - if ((pHead->sver == 2 || pHead->sver == 1) && !walValidateChecksum(pHead)) { + if ((pHead->sver >= 1) && !walValidateChecksum(pHead)) { wError("vgId:%d, file:%s, wal whole cksum is messed up, hver:%" PRIu64 " len:%d offset:%" PRId64, pWal->vgId, name, pHead->version, pHead->len, offset); code = walSkipCorruptedRecord(pWal, pHead, tfd, &offset); @@ -517,6 +517,7 @@ static int32_t walRestoreWalFile(SWal *pWal, void *pVnode, FWalWrite writeFp, ch if (0 != walSMemRowCheck(pHead)) { wError("vgId:%d, restore wal, fileId:%" PRId64 " hver:%" PRIu64 " wver:%" PRIu64 " len:%d offset:%" PRId64, pWal->vgId, fileId, pHead->version, pWal->version, pHead->len, offset); + return TAOS_SYSTEM_ERROR(errno); } (*writeFp)(pVnode, pHead, TAOS_QTYPE_WAL, NULL); } From 57d5f22a13c5996a8489260c1f5b7329f391b698 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Fri, 13 Aug 2021 19:17:29 +0800 Subject: [PATCH 046/165] Hotfix/sangshuduo/td 5872 taosdemo stmt improve for master (#7338) * cherry pick from develop branch. * cherry pick 548131751fad2b0e11e15b75af6c254164f28eea * [TD-5872]: taosdemo stmt csv perf improve. * rand func back to early impl. * fix windows/mac compile error. * cherry pick from develop branch. Co-authored-by: Shuduo Sang --- src/kit/taosdemo/taosdemo.c | 1412 +++++++++++++++++++++++------------ 1 file changed, 932 insertions(+), 480 deletions(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index aad2fe95fa..3b91de32b0 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -295,6 +295,9 @@ typedef struct SSuperTable_S { uint64_t lenOfTagOfOneRow; char* sampleDataBuf; +#if STMT_IFACE_ENABLED == 1 + char* sampleBindArray; +#endif //int sampleRowCount; //int sampleUsePos; @@ -441,6 +444,7 @@ typedef struct SQueryMetaInfo_S { typedef struct SThreadInfo_S { TAOS * taos; TAOS_STMT *stmt; + int64_t *bind_ts; int threadID; char db_name[TSDB_DB_NAME_LEN]; uint32_t time_precision; @@ -454,7 +458,7 @@ typedef struct SThreadInfo_S { int64_t start_time; char* cols; bool use_metric; - SSuperTable* superTblInfo; + SSuperTable* stbInfo; char *buffer; // sql cmd buffer // for async insert @@ -674,7 +678,7 @@ static FILE * g_fpOfInsertResult = NULL; /////////////////////////////////////////////////// -static void ERROR_EXIT(const char *msg) { perror(msg); exit(-1); } +static void ERROR_EXIT(const char *msg) { errorPrint("%s", msg); exit(-1); } #ifndef TAOSDEMO_COMMIT_SHA1 #define TAOSDEMO_COMMIT_SHA1 "unknown" @@ -1136,8 +1140,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { } if (0 == columnCount) { - perror("data type error!"); - exit(-1); + ERROR_EXIT("data type error!"); } g_args.num_of_CPR = columnCount; @@ -2425,14 +2428,14 @@ static int postProceSql(char *host, struct sockaddr_in *pServAddr, uint16_t port #endif debugPrint("%s() LN%d, sockfd=%d\n", __func__, __LINE__, sockfd); free(request_buf); - ERROR_EXIT("ERROR opening socket"); + ERROR_EXIT("opening socket"); } int retConn = connect(sockfd, (struct sockaddr *)pServAddr, sizeof(struct sockaddr)); 
debugPrint("%s() LN%d connect() return %d\n", __func__, __LINE__, retConn); if (retConn < 0) { free(request_buf); - ERROR_EXIT("ERROR connecting"); + ERROR_EXIT("connecting"); } memset(base64_buf, 0, INPUT_BUF_LEN); @@ -2465,7 +2468,7 @@ static int postProceSql(char *host, struct sockaddr_in *pServAddr, uint16_t port auth, strlen(sqlstr), sqlstr); if (r >= req_buf_len) { free(request_buf); - ERROR_EXIT("ERROR too long request"); + ERROR_EXIT("too long request"); } verbosePrint("%s() LN%d: Request:\n%s\n", __func__, __LINE__, request_buf); @@ -2478,7 +2481,7 @@ static int postProceSql(char *host, struct sockaddr_in *pServAddr, uint16_t port bytes = write(sockfd, request_buf + sent, req_str_len - sent); #endif if (bytes < 0) - ERROR_EXIT("ERROR writing message to socket"); + ERROR_EXIT("writing message to socket"); if (bytes == 0) break; sent+=bytes; @@ -2495,7 +2498,7 @@ static int postProceSql(char *host, struct sockaddr_in *pServAddr, uint16_t port #endif if (bytes < 0) { free(request_buf); - ERROR_EXIT("ERROR reading response from socket"); + ERROR_EXIT("reading response from socket"); } if (bytes == 0) break; @@ -2504,7 +2507,7 @@ static int postProceSql(char *host, struct sockaddr_in *pServAddr, uint16_t port if (received == resp_len) { free(request_buf); - ERROR_EXIT("ERROR storing complete response from socket"); + ERROR_EXIT("storing complete response from socket"); } response_buf[RESP_BUF_LEN - 1] = '\0'; @@ -2667,8 +2670,8 @@ static int calcRowLen(SSuperTable* superTbls) { } else if (strcasecmp(dataType, "TIMESTAMP") == 0) { lenOfOneRow += TIMESTAMP_BUFF_LEN; } else { - printf("get error data type : %s\n", dataType); - exit(-1); + errorPrint("get error data type : %s\n", dataType); + exit(EXIT_FAILURE); } } @@ -2698,8 +2701,8 @@ static int calcRowLen(SSuperTable* superTbls) { } else if (strcasecmp(dataType, "DOUBLE") == 0) { lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + DOUBLE_BUFF_LEN; } else { - printf("get error tag type : %s\n", dataType); - exit(-1); + errorPrint("get error tag type : %s\n", dataType); + exit(EXIT_FAILURE); } } @@ -2737,7 +2740,7 @@ static int getChildNameOfSuperTableWithLimitAndOffset(TAOS * taos, taos_close(taos); errorPrint("%s() LN%d, failed to run command %s\n", __func__, __LINE__, command); - exit(-1); + exit(EXIT_FAILURE); } int64_t childTblCount = (limit < 0)?10000:limit; @@ -2748,7 +2751,7 @@ static int getChildNameOfSuperTableWithLimitAndOffset(TAOS * taos, taos_free_result(res); taos_close(taos); errorPrint("%s() LN%d, failed to allocate memory!\n", __func__, __LINE__); - exit(-1); + exit(EXIT_FAILURE); } } @@ -2759,7 +2762,7 @@ static int getChildNameOfSuperTableWithLimitAndOffset(TAOS * taos, if (0 == strlen((char *)row[0])) { errorPrint("%s() LN%d, No.%"PRId64" table return empty name\n", __func__, __LINE__, count); - exit(-1); + exit(EXIT_FAILURE); } tstrncpy(pTblName, (char *)row[0], len[0]+1); @@ -2775,12 +2778,12 @@ static int getChildNameOfSuperTableWithLimitAndOffset(TAOS * taos, (size_t)((childTblCount-count)*TSDB_TABLE_NAME_LEN)); } else { // exit, if allocate more memory failed - errorPrint("%s() LN%d, realloc fail for save child table name of %s.%s\n", - __func__, __LINE__, dbName, sTblName); tmfree(childTblName); taos_free_result(res); taos_close(taos); - exit(-1); + errorPrint("%s() LN%d, realloc fail for save child table name of %s.%s\n", + __func__, __LINE__, dbName, sTblName); + exit(EXIT_FAILURE); } } pTblName = childTblName + count * TSDB_TABLE_NAME_LEN; @@ -2964,10 +2967,10 @@ static int createSuperTable( lenOfOneRow 
+= TIMESTAMP_BUFF_LEN; } else { taos_close(taos); + free(command); errorPrint("%s() LN%d, config error data type : %s\n", __func__, __LINE__, dataType); - free(command); - exit(-1); + exit(EXIT_FAILURE); } } @@ -2976,11 +2979,11 @@ static int createSuperTable( // save for creating child table superTbl->colsOfCreateChildTable = (char*)calloc(len+20, 1); if (NULL == superTbl->colsOfCreateChildTable) { - errorPrint("%s() LN%d, Failed when calloc, size:%d", - __func__, __LINE__, len+1); taos_close(taos); free(command); - exit(-1); + errorPrint("%s() LN%d, Failed when calloc, size:%d", + __func__, __LINE__, len+1); + exit(EXIT_FAILURE); } snprintf(superTbl->colsOfCreateChildTable, len+20, "(ts timestamp%s)", cols); @@ -3054,10 +3057,10 @@ static int createSuperTable( lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + DOUBLE_BUFF_LEN; } else { taos_close(taos); + free(command); errorPrint("%s() LN%d, config error tag type : %s\n", __func__, __LINE__, dataType); - free(command); - exit(-1); + exit(EXIT_FAILURE); } } @@ -3089,7 +3092,7 @@ int createDatabasesAndStables(char *command) { errorPrint( "Failed to connect to TDengine, reason:%s\n", taos_errstr(NULL)); return -1; } - + for (int i = 0; i < g_Dbs.dbCount; i++) { if (g_Dbs.db[i].drop) { sprintf(command, "drop database if exists %s;", g_Dbs.db[i].dbName); @@ -3100,35 +3103,43 @@ int createDatabasesAndStables(char *command) { int dataLen = 0; dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, "create database if not exists %s", g_Dbs.db[i].dbName); + BUFFER_SIZE - dataLen, "create database if not exists %s", + g_Dbs.db[i].dbName); if (g_Dbs.db[i].dbCfg.blocks > 0) { dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, " blocks %d", g_Dbs.db[i].dbCfg.blocks); + BUFFER_SIZE - dataLen, " blocks %d", + g_Dbs.db[i].dbCfg.blocks); } if (g_Dbs.db[i].dbCfg.cache > 0) { dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, " cache %d", g_Dbs.db[i].dbCfg.cache); + BUFFER_SIZE - dataLen, " cache %d", + g_Dbs.db[i].dbCfg.cache); } if (g_Dbs.db[i].dbCfg.days > 0) { dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, " days %d", g_Dbs.db[i].dbCfg.days); + BUFFER_SIZE - dataLen, " days %d", + g_Dbs.db[i].dbCfg.days); } if (g_Dbs.db[i].dbCfg.keep > 0) { dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, " keep %d", g_Dbs.db[i].dbCfg.keep); + BUFFER_SIZE - dataLen, " keep %d", + g_Dbs.db[i].dbCfg.keep); } if (g_Dbs.db[i].dbCfg.quorum > 1) { dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, " quorum %d", g_Dbs.db[i].dbCfg.quorum); + BUFFER_SIZE - dataLen, " quorum %d", + g_Dbs.db[i].dbCfg.quorum); } if (g_Dbs.db[i].dbCfg.replica > 0) { dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, " replica %d", g_Dbs.db[i].dbCfg.replica); + BUFFER_SIZE - dataLen, " replica %d", + g_Dbs.db[i].dbCfg.replica); } if (g_Dbs.db[i].dbCfg.update > 0) { dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, " update %d", g_Dbs.db[i].dbCfg.update); + BUFFER_SIZE - dataLen, " update %d", + g_Dbs.db[i].dbCfg.update); } //if (g_Dbs.db[i].dbCfg.maxtablesPerVnode > 0) { // dataLen += snprintf(command + dataLen, @@ -3136,42 +3147,48 @@ int createDatabasesAndStables(char *command) { //} if (g_Dbs.db[i].dbCfg.minRows > 0) { dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, " minrows %d", g_Dbs.db[i].dbCfg.minRows); + BUFFER_SIZE - dataLen, " minrows %d", + g_Dbs.db[i].dbCfg.minRows); } if (g_Dbs.db[i].dbCfg.maxRows > 0) { dataLen += snprintf(command + dataLen, - BUFFER_SIZE - 
dataLen, " maxrows %d", g_Dbs.db[i].dbCfg.maxRows); + BUFFER_SIZE - dataLen, " maxrows %d", + g_Dbs.db[i].dbCfg.maxRows); } if (g_Dbs.db[i].dbCfg.comp > 0) { dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, " comp %d", g_Dbs.db[i].dbCfg.comp); + BUFFER_SIZE - dataLen, " comp %d", + g_Dbs.db[i].dbCfg.comp); } if (g_Dbs.db[i].dbCfg.walLevel > 0) { dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, " wal %d", g_Dbs.db[i].dbCfg.walLevel); + BUFFER_SIZE - dataLen, " wal %d", + g_Dbs.db[i].dbCfg.walLevel); } if (g_Dbs.db[i].dbCfg.cacheLast > 0) { dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, " cachelast %d", g_Dbs.db[i].dbCfg.cacheLast); + BUFFER_SIZE - dataLen, " cachelast %d", + g_Dbs.db[i].dbCfg.cacheLast); } if (g_Dbs.db[i].dbCfg.fsync > 0) { dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, " fsync %d", g_Dbs.db[i].dbCfg.fsync); } - if ((0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "ms", strlen("ms"))) + if ((0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "ms", 2)) #if NANO_SECOND_ENABLED == 1 || (0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, - "ns", strlen("ns"))) + "ns", 2)) #endif || (0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, - "us", strlen("us")))) { + "us", 2))) { dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, " precision \'%s\';", g_Dbs.db[i].dbCfg.precision); } if (0 != queryDbExec(taos, command, NO_INSERT_TYPE, false)) { taos_close(taos); - errorPrint( "\ncreate database %s failed!\n\n", g_Dbs.db[i].dbName); + errorPrint( "\ncreate database %s failed!\n\n", + g_Dbs.db[i].dbName); return -1; } printf("\ncreate database %s success!\n\n", g_Dbs.db[i].dbName); @@ -3218,7 +3235,7 @@ int createDatabasesAndStables(char *command) { static void* createTable(void *sarg) { threadInfo *pThreadInfo = (threadInfo *)sarg; - SSuperTable* superTblInfo = pThreadInfo->superTblInfo; + SSuperTable* stbInfo = pThreadInfo->stbInfo; setThreadName("createTable"); @@ -3229,7 +3246,7 @@ static void* createTable(void *sarg) pThreadInfo->buffer = calloc(buff_len, 1); if (pThreadInfo->buffer == NULL) { errorPrint("%s() LN%d, Memory allocated failed!\n", __func__, __LINE__); - exit(-1); + exit(EXIT_FAILURE); } int len = 0; @@ -3248,11 +3265,11 @@ static void* createTable(void *sarg) g_args.tb_prefix, i, pThreadInfo->cols); } else { - if (superTblInfo == NULL) { + if (stbInfo == NULL) { + free(pThreadInfo->buffer); errorPrint("%s() LN%d, use metric, but super table info is NULL\n", __func__, __LINE__); - free(pThreadInfo->buffer); - exit(-1); + exit(EXIT_FAILURE); } else { if (0 == len) { batchNum = 0; @@ -3260,29 +3277,35 @@ static void* createTable(void *sarg) len += snprintf(pThreadInfo->buffer + len, buff_len - len, "create table "); } + char* tagsValBuf = NULL; - if (0 == superTblInfo->tagSource) { - tagsValBuf = generateTagValuesForStb(superTblInfo, i); + if (0 == stbInfo->tagSource) { + tagsValBuf = generateTagValuesForStb(stbInfo, i); } else { + if (0 == stbInfo->tagSampleCount) { + free(pThreadInfo->buffer); + ERROR_EXIT("use sample file for tag, but has no content!\n"); + } tagsValBuf = getTagValueFromTagSample( - superTblInfo, - i % superTblInfo->tagSampleCount); + stbInfo, + i % stbInfo->tagSampleCount); } + if (NULL == tagsValBuf) { free(pThreadInfo->buffer); - return NULL; + ERROR_EXIT("use metric, but tag buffer is NULL\n"); } len += snprintf(pThreadInfo->buffer + len, buff_len - len, "if not exists %s.%s%"PRIu64" using %s.%s tags %s ", - pThreadInfo->db_name, superTblInfo->childTblPrefix, + pThreadInfo->db_name, 
stbInfo->childTblPrefix, i, pThreadInfo->db_name, - superTblInfo->sTblName, tagsValBuf); + stbInfo->sTblName, tagsValBuf); free(tagsValBuf); batchNum++; - if ((batchNum < superTblInfo->batchCreateTableNum) + if ((batchNum < stbInfo->batchCreateTableNum) && ((buff_len - len) - >= (superTblInfo->lenOfTagOfOneRow + 256))) { + >= (stbInfo->lenOfTagOfOneRow + 256))) { continue; } } @@ -3317,14 +3340,13 @@ static void* createTable(void *sarg) static int startMultiThreadCreateChildTable( char* cols, int threads, uint64_t tableFrom, int64_t ntables, - char* db_name, SSuperTable* superTblInfo) { + char* db_name, SSuperTable* stbInfo) { pthread_t *pids = calloc(1, threads * sizeof(pthread_t)); threadInfo *infos = calloc(1, threads * sizeof(threadInfo)); if ((NULL == pids) || (NULL == infos)) { - printf("malloc failed\n"); - exit(-1); + ERROR_EXIT("createChildTable malloc failed\n"); } if (threads < 1) { @@ -3344,7 +3366,7 @@ static int startMultiThreadCreateChildTable( threadInfo *pThreadInfo = infos + i; pThreadInfo->threadID = i; tstrncpy(pThreadInfo->db_name, db_name, TSDB_DB_NAME_LEN); - pThreadInfo->superTblInfo = superTblInfo; + pThreadInfo->stbInfo = stbInfo; verbosePrint("%s() %d db_name: %s\n", __func__, __LINE__, db_name); pThreadInfo->taos = taos_connect( g_Dbs.host, @@ -3451,26 +3473,26 @@ static void createChildTables() { /* Read 10000 lines at most. If more than 10000 lines, continue to read after using */ -static int readTagFromCsvFileToMem(SSuperTable * superTblInfo) { +static int readTagFromCsvFileToMem(SSuperTable * stbInfo) { size_t n = 0; ssize_t readLen = 0; char * line = NULL; - FILE *fp = fopen(superTblInfo->tagsFile, "r"); + FILE *fp = fopen(stbInfo->tagsFile, "r"); if (fp == NULL) { printf("Failed to open tags file: %s, reason:%s\n", - superTblInfo->tagsFile, strerror(errno)); + stbInfo->tagsFile, strerror(errno)); return -1; } - if (superTblInfo->tagDataBuf) { - free(superTblInfo->tagDataBuf); - superTblInfo->tagDataBuf = NULL; + if (stbInfo->tagDataBuf) { + free(stbInfo->tagDataBuf); + stbInfo->tagDataBuf = NULL; } int tagCount = 10000; int count = 0; - char* tagDataBuf = calloc(1, superTblInfo->lenOfTagOfOneRow * tagCount); + char* tagDataBuf = calloc(1, stbInfo->lenOfTagOfOneRow * tagCount); if (tagDataBuf == NULL) { printf("Failed to calloc, reason:%s\n", strerror(errno)); fclose(fp); @@ -3486,20 +3508,20 @@ static int readTagFromCsvFileToMem(SSuperTable * superTblInfo) { continue; } - memcpy(tagDataBuf + count * superTblInfo->lenOfTagOfOneRow, line, readLen); + memcpy(tagDataBuf + count * stbInfo->lenOfTagOfOneRow, line, readLen); count++; if (count >= tagCount - 1) { char *tmp = realloc(tagDataBuf, - (size_t)tagCount*1.5*superTblInfo->lenOfTagOfOneRow); + (size_t)tagCount*1.5*stbInfo->lenOfTagOfOneRow); if (tmp != NULL) { tagDataBuf = tmp; tagCount = (int)(tagCount*1.5); - memset(tagDataBuf + count*superTblInfo->lenOfTagOfOneRow, - 0, (size_t)((tagCount-count)*superTblInfo->lenOfTagOfOneRow)); + memset(tagDataBuf + count*stbInfo->lenOfTagOfOneRow, + 0, (size_t)((tagCount-count)*stbInfo->lenOfTagOfOneRow)); } else { // exit, if allocate more memory failed - printf("realloc fail for save tag val from %s\n", superTblInfo->tagsFile); + printf("realloc fail for save tag val from %s\n", stbInfo->tagsFile); tmfree(tagDataBuf); free(line); fclose(fp); @@ -3508,8 +3530,8 @@ static int readTagFromCsvFileToMem(SSuperTable * superTblInfo) { } } - superTblInfo->tagDataBuf = tagDataBuf; - superTblInfo->tagSampleCount = count; + stbInfo->tagDataBuf = tagDataBuf; + 
stbInfo->tagSampleCount = count; free(line); fclose(fp); @@ -3520,28 +3542,28 @@ static int readTagFromCsvFileToMem(SSuperTable * superTblInfo) { Read 10000 lines at most. If more than 10000 lines, continue to read after using */ static int readSampleFromCsvFileToMem( - SSuperTable* superTblInfo) { + SSuperTable* stbInfo) { size_t n = 0; ssize_t readLen = 0; char * line = NULL; int getRows = 0; - FILE* fp = fopen(superTblInfo->sampleFile, "r"); + FILE* fp = fopen(stbInfo->sampleFile, "r"); if (fp == NULL) { errorPrint( "Failed to open sample file: %s, reason:%s\n", - superTblInfo->sampleFile, strerror(errno)); + stbInfo->sampleFile, strerror(errno)); return -1; } - assert(superTblInfo->sampleDataBuf); - memset(superTblInfo->sampleDataBuf, 0, - MAX_SAMPLES_ONCE_FROM_FILE * superTblInfo->lenOfOneRow); + assert(stbInfo->sampleDataBuf); + memset(stbInfo->sampleDataBuf, 0, + MAX_SAMPLES_ONCE_FROM_FILE * stbInfo->lenOfOneRow); while(1) { readLen = tgetline(&line, &n, fp); if (-1 == readLen) { if(0 != fseek(fp, 0, SEEK_SET)) { errorPrint( "Failed to fseek file: %s, reason:%s\n", - superTblInfo->sampleFile, strerror(errno)); + stbInfo->sampleFile, strerror(errno)); fclose(fp); return -1; } @@ -3556,13 +3578,13 @@ static int readSampleFromCsvFileToMem( continue; } - if (readLen > superTblInfo->lenOfOneRow) { + if (readLen > stbInfo->lenOfOneRow) { printf("sample row len[%d] overflow define schema len[%"PRIu64"], so discard this row\n", - (int32_t)readLen, superTblInfo->lenOfOneRow); + (int32_t)readLen, stbInfo->lenOfOneRow); continue; } - memcpy(superTblInfo->sampleDataBuf + getRows * superTblInfo->lenOfOneRow, + memcpy(stbInfo->sampleDataBuf + getRows * stbInfo->lenOfOneRow, line, readLen); getRows++; @@ -5047,6 +5069,23 @@ static void postFreeResource() { free(g_Dbs.db[i].superTbls[j].sampleDataBuf); g_Dbs.db[i].superTbls[j].sampleDataBuf = NULL; } +#if STMT_IFACE_ENABLED == 1 + if (g_Dbs.db[i].superTbls[j].sampleBindArray) { + for (int k = 0; k < MAX_SAMPLES_ONCE_FROM_FILE; k++) { + uintptr_t *tmp = (uintptr_t *)(*(uintptr_t *)( + g_Dbs.db[i].superTbls[j].sampleBindArray + + sizeof(uintptr_t *) * k)); + for (int c = 1; c < g_Dbs.db[i].superTbls[j].columnCount + 1; c++) { + TAOS_BIND *bind = (TAOS_BIND *)((char *)tmp + (sizeof(TAOS_BIND) * c)); + if (bind) + tmfree(bind->buffer); + } + tmfree((char *)tmp); + } + } + tmfree((char *)g_Dbs.db[i].superTbls[j].sampleBindArray); +#endif + if (0 != g_Dbs.db[i].superTbls[j].tagDataBuf) { free(g_Dbs.db[i].superTbls[j].tagDataBuf); g_Dbs.db[i].superTbls[j].tagDataBuf = NULL; @@ -5067,21 +5106,14 @@ static void postFreeResource() { tmfree(g_randfloat_buff); tmfree(g_rand_current_buff); tmfree(g_rand_phase_buff); - tmfree(g_randdouble_buff); + } static int getRowDataFromSample( char* dataBuf, int64_t maxLen, int64_t timestamp, - SSuperTable* superTblInfo, int64_t* sampleUsePos) + SSuperTable* stbInfo, int64_t* sampleUsePos) { if ((*sampleUsePos) == MAX_SAMPLES_ONCE_FROM_FILE) { - /* int ret = readSampleFromCsvFileToMem(superTblInfo); - if (0 != ret) { - tmfree(superTblInfo->sampleDataBuf); - superTblInfo->sampleDataBuf = NULL; - return -1; - } - */ *sampleUsePos = 0; } @@ -5091,8 +5123,8 @@ static int getRowDataFromSample( "(%" PRId64 ", ", timestamp); dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, "%s", - superTblInfo->sampleDataBuf - + superTblInfo->lenOfOneRow * (*sampleUsePos)); + stbInfo->sampleDataBuf + + stbInfo->lenOfOneRow * (*sampleUsePos)); dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, ")"); (*sampleUsePos)++; @@ -5248,7 
+5280,7 @@ static int64_t generateData(char *recBuf, char **data_type, if (s == NULL) { errorPrint("%s() LN%d, memory allocation %d bytes failed\n", __func__, __LINE__, lenOfBinary + 1); - exit(-1); + exit(EXIT_FAILURE); } rand_string(s, lenOfBinary); pstr += sprintf(pstr, ",\"%s\"", s); @@ -5258,7 +5290,7 @@ static int64_t generateData(char *recBuf, char **data_type, if (s == NULL) { errorPrint("%s() LN%d, memory allocation %d bytes failed\n", __func__, __LINE__, lenOfBinary + 1); - exit(-1); + exit(EXIT_FAILURE); } rand_string(s, lenOfBinary); pstr += sprintf(pstr, ",\"%s\"", s); @@ -5266,8 +5298,7 @@ static int64_t generateData(char *recBuf, char **data_type, } if (strlen(recBuf) > MAX_DATA_SIZE) { - perror("column length too long, abort"); - exit(-1); + ERROR_EXIT("column length too long, abort"); } } @@ -5278,27 +5309,27 @@ static int64_t generateData(char *recBuf, char **data_type, return (int32_t)strlen(recBuf); } -static int prepareSampleDataForSTable(SSuperTable *superTblInfo) { +static int prepareSampleDataForSTable(SSuperTable *stbInfo) { char* sampleDataBuf = NULL; sampleDataBuf = calloc( - superTblInfo->lenOfOneRow * MAX_SAMPLES_ONCE_FROM_FILE, 1); + stbInfo->lenOfOneRow * MAX_SAMPLES_ONCE_FROM_FILE, 1); if (sampleDataBuf == NULL) { errorPrint("%s() LN%d, Failed to calloc %"PRIu64" Bytes, reason:%s\n", __func__, __LINE__, - superTblInfo->lenOfOneRow * MAX_SAMPLES_ONCE_FROM_FILE, + stbInfo->lenOfOneRow * MAX_SAMPLES_ONCE_FROM_FILE, strerror(errno)); return -1; } - superTblInfo->sampleDataBuf = sampleDataBuf; - int ret = readSampleFromCsvFileToMem(superTblInfo); + stbInfo->sampleDataBuf = sampleDataBuf; + int ret = readSampleFromCsvFileToMem(stbInfo); if (0 != ret) { errorPrint("%s() LN%d, read sample from csv file failed.\n", __func__, __LINE__); tmfree(sampleDataBuf); - superTblInfo->sampleDataBuf = NULL; + stbInfo->sampleDataBuf = NULL; return -1; } @@ -5308,14 +5339,14 @@ static int prepareSampleDataForSTable(SSuperTable *superTblInfo) { static int32_t execInsert(threadInfo *pThreadInfo, uint32_t k) { int32_t affectedRows; - SSuperTable* superTblInfo = pThreadInfo->superTblInfo; + SSuperTable* stbInfo = pThreadInfo->stbInfo; verbosePrint("[%d] %s() LN%d %s\n", pThreadInfo->threadID, __func__, __LINE__, pThreadInfo->buffer); uint16_t iface; - if (superTblInfo) - iface = superTblInfo->iface; + if (stbInfo) + iface = stbInfo->iface; else { if (g_args.iface == INTERFACE_BUT) iface = TAOSC_IFACE; @@ -5355,7 +5386,7 @@ static int32_t execInsert(threadInfo *pThreadInfo, uint32_t k) __func__, __LINE__, taos_stmt_errstr(pThreadInfo->stmt)); fprintf(stderr, "\n\033[31m === Please reduce batch number if WAL size exceeds limit. 
===\033[0m\n\n"); - exit(-1); + exit(EXIT_FAILURE); } affectedRows = k; break; @@ -5363,7 +5394,7 @@ static int32_t execInsert(threadInfo *pThreadInfo, uint32_t k) default: errorPrint("%s() LN%d: unknown insert mode: %d\n", - __func__, __LINE__, superTblInfo->iface); + __func__, __LINE__, stbInfo->iface); affectedRows = 0; } @@ -5373,24 +5404,24 @@ static int32_t execInsert(threadInfo *pThreadInfo, uint32_t k) static void getTableName(char *pTblName, threadInfo* pThreadInfo, uint64_t tableSeq) { - SSuperTable* superTblInfo = pThreadInfo->superTblInfo; - if (superTblInfo) { - if (AUTO_CREATE_SUBTBL != superTblInfo->autoCreateTable) { - if (superTblInfo->childTblLimit > 0) { + SSuperTable* stbInfo = pThreadInfo->stbInfo; + if (stbInfo) { + if (AUTO_CREATE_SUBTBL != stbInfo->autoCreateTable) { + if (stbInfo->childTblLimit > 0) { snprintf(pTblName, TSDB_TABLE_NAME_LEN, "%s", - superTblInfo->childTblName + - (tableSeq - superTblInfo->childTblOffset) * TSDB_TABLE_NAME_LEN); + stbInfo->childTblName + + (tableSeq - stbInfo->childTblOffset) * TSDB_TABLE_NAME_LEN); } else { verbosePrint("[%d] %s() LN%d: from=%"PRIu64" count=%"PRId64" seq=%"PRIu64"\n", pThreadInfo->threadID, __func__, __LINE__, pThreadInfo->start_table_from, pThreadInfo->ntables, tableSeq); snprintf(pTblName, TSDB_TABLE_NAME_LEN, "%s", - superTblInfo->childTblName + tableSeq * TSDB_TABLE_NAME_LEN); + stbInfo->childTblName + tableSeq * TSDB_TABLE_NAME_LEN); } } else { snprintf(pTblName, TSDB_TABLE_NAME_LEN, "%s%"PRIu64"", - superTblInfo->childTblPrefix, tableSeq); + stbInfo->childTblPrefix, tableSeq); } } else { snprintf(pTblName, TSDB_TABLE_NAME_LEN, "%s%"PRIu64"", @@ -5471,7 +5502,7 @@ static int64_t getTSRandTail(int64_t timeStampStep, int32_t seq, } static int32_t generateStbDataTail( - SSuperTable* superTblInfo, + SSuperTable* stbInfo, uint32_t batch, char* buffer, int64_t remainderBufLen, int64_t insertRows, uint64_t recordFrom, int64_t startTime, @@ -5481,7 +5512,7 @@ static int32_t generateStbDataTail( char *pstr = buffer; bool tsRand; - if (0 == strncasecmp(superTblInfo->dataSource, "rand", strlen("rand"))) { + if (0 == strncasecmp(stbInfo->dataSource, "rand", strlen("rand"))) { tsRand = true; } else { tsRand = false; @@ -5496,26 +5527,26 @@ static int32_t generateStbDataTail( int64_t lenOfRow = 0; if (tsRand) { - if (superTblInfo->disorderRatio > 0) { - lenOfRow = generateStbRowData(superTblInfo, data, + if (stbInfo->disorderRatio > 0) { + lenOfRow = generateStbRowData(stbInfo, data, remainderBufLen, startTime + getTSRandTail( - superTblInfo->timeStampStep, k, - superTblInfo->disorderRatio, - superTblInfo->disorderRange) + stbInfo->timeStampStep, k, + stbInfo->disorderRatio, + stbInfo->disorderRange) ); } else { - lenOfRow = generateStbRowData(superTblInfo, data, + lenOfRow = generateStbRowData(stbInfo, data, remainderBufLen, - startTime + superTblInfo->timeStampStep * k + startTime + stbInfo->timeStampStep * k ); } } else { lenOfRow = getRowDataFromSample( data, (remainderBufLen < MAX_DATA_SIZE)?remainderBufLen:MAX_DATA_SIZE, - startTime + superTblInfo->timeStampStep * k, - superTblInfo, + startTime + stbInfo->timeStampStep * k, + stbInfo, pSamplePos); } @@ -5571,7 +5602,7 @@ static int generateSQLHeadWithoutStb(char *tableName, } static int generateStbSQLHead( - SSuperTable* superTblInfo, + SSuperTable* stbInfo, char *tableName, int64_t tableSeq, char *dbName, char *buffer, int remainderBufLen) @@ -5580,14 +5611,14 @@ static int generateStbSQLHead( char headBuf[HEAD_BUFF_LEN]; - if (AUTO_CREATE_SUBTBL == 
superTblInfo->autoCreateTable) { + if (AUTO_CREATE_SUBTBL == stbInfo->autoCreateTable) { char* tagsValBuf = NULL; - if (0 == superTblInfo->tagSource) { - tagsValBuf = generateTagValuesForStb(superTblInfo, tableSeq); + if (0 == stbInfo->tagSource) { + tagsValBuf = generateTagValuesForStb(stbInfo, tableSeq); } else { tagsValBuf = getTagValueFromTagSample( - superTblInfo, - tableSeq % superTblInfo->tagSampleCount); + stbInfo, + tableSeq % stbInfo->tagSampleCount); } if (NULL == tagsValBuf) { errorPrint("%s() LN%d, tag buf failed to allocate memory\n", @@ -5602,10 +5633,10 @@ static int generateStbSQLHead( dbName, tableName, dbName, - superTblInfo->sTblName, + stbInfo->sTblName, tagsValBuf); tmfree(tagsValBuf); - } else if (TBL_ALREADY_EXISTS == superTblInfo->childTblExists) { + } else if (TBL_ALREADY_EXISTS == stbInfo->childTblExists) { len = snprintf( headBuf, HEAD_BUFF_LEN, @@ -5630,12 +5661,12 @@ static int generateStbSQLHead( } static int32_t generateStbInterlaceData( - SSuperTable *superTblInfo, + threadInfo *pThreadInfo, char *tableName, uint32_t batchPerTbl, uint64_t i, uint32_t batchPerTblTimes, uint64_t tableSeq, - threadInfo *pThreadInfo, char *buffer, + char *buffer, int64_t insertRows, int64_t startTime, uint64_t *pRemainderBufLen) @@ -5643,8 +5674,9 @@ static int32_t generateStbInterlaceData( assert(buffer); char *pstr = buffer; + SSuperTable *stbInfo = pThreadInfo->stbInfo; int headLen = generateStbSQLHead( - superTblInfo, + stbInfo, tableName, tableSeq, pThreadInfo->db_name, pstr, *pRemainderBufLen); @@ -5664,12 +5696,12 @@ static int32_t generateStbInterlaceData( pThreadInfo->threadID, __func__, __LINE__, i, batchPerTblTimes, batchPerTbl); - if (0 == strncasecmp(superTblInfo->startTimestamp, "now", 3)) { + if (0 == strncasecmp(stbInfo->startTimestamp, "now", 3)) { startTime = taosGetTimestamp(pThreadInfo->time_precision); } int32_t k = generateStbDataTail( - superTblInfo, + stbInfo, batchPerTbl, pstr, *pRemainderBufLen, insertRows, 0, startTime, &(pThreadInfo->samplePos), &dataLen); @@ -5732,8 +5764,206 @@ static int64_t generateInterlaceDataWithoutStb( } #if STMT_IFACE_ENABLED == 1 -static int32_t prepareStmtBindArrayByType(TAOS_BIND *bind, - char *dataType, int32_t dataLen, char **ptr, char *value) +static int32_t prepareStmtBindArrayByType( + TAOS_BIND *bind, + char *dataType, int32_t dataLen, + int32_t timePrec, + char *value) +{ + if (0 == strncasecmp(dataType, + "BINARY", strlen("BINARY"))) { + if (dataLen > TSDB_MAX_BINARY_LEN) { + errorPrint( "binary length overflow, max size:%u\n", + (uint32_t)TSDB_MAX_BINARY_LEN); + return -1; + } + char *bind_binary; + + bind->buffer_type = TSDB_DATA_TYPE_BINARY; + if (value) { + bind_binary = calloc(1, strlen(value) + 1); + strncpy(bind_binary, value, strlen(value)); + bind->buffer_length = strlen(bind_binary); + } else { + bind_binary = calloc(1, dataLen + 1); + rand_string(bind_binary, dataLen); + bind->buffer_length = dataLen; + } + + bind->length = &bind->buffer_length; + bind->buffer = bind_binary; + bind->is_null = NULL; + } else if (0 == strncasecmp(dataType, + "NCHAR", strlen("NCHAR"))) { + if (dataLen > TSDB_MAX_BINARY_LEN) { + errorPrint( "nchar length overflow, max size:%u\n", + (uint32_t)TSDB_MAX_BINARY_LEN); + return -1; + } + char *bind_nchar; + + bind->buffer_type = TSDB_DATA_TYPE_NCHAR; + if (value) { + bind_nchar = calloc(1, strlen(value) + 1); + strncpy(bind_nchar, value, strlen(value)); + } else { + bind_nchar = calloc(1, dataLen + 1); + rand_string(bind_nchar, dataLen); + } + + bind->buffer_length = 
strlen(bind_nchar); + bind->buffer = bind_nchar; + bind->length = &bind->buffer_length; + bind->is_null = NULL; + } else if (0 == strncasecmp(dataType, + "INT", strlen("INT"))) { + int32_t *bind_int = malloc(sizeof(int32_t)); + + if (value) { + *bind_int = atoi(value); + } else { + *bind_int = rand_int(); + } + bind->buffer_type = TSDB_DATA_TYPE_INT; + bind->buffer_length = sizeof(int32_t); + bind->buffer = bind_int; + bind->length = &bind->buffer_length; + bind->is_null = NULL; + } else if (0 == strncasecmp(dataType, + "BIGINT", strlen("BIGINT"))) { + int64_t *bind_bigint = malloc(sizeof(int64_t)); + + if (value) { + *bind_bigint = atoll(value); + } else { + *bind_bigint = rand_bigint(); + } + bind->buffer_type = TSDB_DATA_TYPE_BIGINT; + bind->buffer_length = sizeof(int64_t); + bind->buffer = bind_bigint; + bind->length = &bind->buffer_length; + bind->is_null = NULL; + } else if (0 == strncasecmp(dataType, + "FLOAT", strlen("FLOAT"))) { + float *bind_float = malloc(sizeof(float)); + + if (value) { + *bind_float = (float)atof(value); + } else { + *bind_float = rand_float(); + } + bind->buffer_type = TSDB_DATA_TYPE_FLOAT; + bind->buffer_length = sizeof(float); + bind->buffer = bind_float; + bind->length = &bind->buffer_length; + bind->is_null = NULL; + } else if (0 == strncasecmp(dataType, + "DOUBLE", strlen("DOUBLE"))) { + double *bind_double = malloc(sizeof(double)); + + if (value) { + *bind_double = atof(value); + } else { + *bind_double = rand_double(); + } + bind->buffer_type = TSDB_DATA_TYPE_DOUBLE; + bind->buffer_length = sizeof(double); + bind->buffer = bind_double; + bind->length = &bind->buffer_length; + bind->is_null = NULL; + } else if (0 == strncasecmp(dataType, + "SMALLINT", strlen("SMALLINT"))) { + int16_t *bind_smallint = malloc(sizeof(int16_t)); + + if (value) { + *bind_smallint = (int16_t)atoi(value); + } else { + *bind_smallint = rand_smallint(); + } + bind->buffer_type = TSDB_DATA_TYPE_SMALLINT; + bind->buffer_length = sizeof(int16_t); + bind->buffer = bind_smallint; + bind->length = &bind->buffer_length; + bind->is_null = NULL; + } else if (0 == strncasecmp(dataType, + "TINYINT", strlen("TINYINT"))) { + int8_t *bind_tinyint = malloc(sizeof(int8_t)); + + if (value) { + *bind_tinyint = (int8_t)atoi(value); + } else { + *bind_tinyint = rand_tinyint(); + } + bind->buffer_type = TSDB_DATA_TYPE_TINYINT; + bind->buffer_length = sizeof(int8_t); + bind->buffer = bind_tinyint; + bind->length = &bind->buffer_length; + bind->is_null = NULL; + } else if (0 == strncasecmp(dataType, + "BOOL", strlen("BOOL"))) { + int8_t *bind_bool = malloc(sizeof(int8_t)); + + if (value) { + if (strncasecmp(value, "true", 4)) { + *bind_bool = true; + } else { + *bind_bool = false; + } + } else { + *bind_bool = rand_bool(); + } + bind->buffer_type = TSDB_DATA_TYPE_BOOL; + bind->buffer_length = sizeof(int8_t); + bind->buffer = bind_bool; + bind->length = &bind->buffer_length; + bind->is_null = NULL; + + } else if (0 == strncasecmp(dataType, + "TIMESTAMP", strlen("TIMESTAMP"))) { + int64_t *bind_ts2 = malloc(sizeof(int64_t)); + + if (value) { + if (strchr(value, ':') && strchr(value, '-')) { + int i = 0; + while(value[i] != '\0') { + if (value[i] == '\"' || value[i] == '\'') { + value[i] = ' '; + } + i++; + } + int64_t tmpEpoch; + if (TSDB_CODE_SUCCESS != taosParseTime( + value, &tmpEpoch, strlen(value), + timePrec, 0)) { + errorPrint("Input %s, time format error!\n", value); + return -1; + } + *bind_ts2 = tmpEpoch; + } else { + *bind_ts2 = atoll(value); + } + } else { + *bind_ts2 = rand_bigint(); + } + 
bind->buffer_type = TSDB_DATA_TYPE_TIMESTAMP; + bind->buffer_length = sizeof(int64_t); + bind->buffer = bind_ts2; + bind->length = &bind->buffer_length; + bind->is_null = NULL; + } else { + errorPrint( "No support data type: %s\n", dataType); + return -1; + } + + return 0; +} + +static int32_t prepareStmtBindArrayByTypeForRand( + TAOS_BIND *bind, + char *dataType, int32_t dataLen, + int32_t timePrec, + char **ptr, + char *value) { if (0 == strncasecmp(dataType, "BINARY", strlen("BINARY"))) { @@ -5814,7 +6044,7 @@ static int32_t prepareStmtBindArrayByType(TAOS_BIND *bind, *ptr += bind->buffer_length; } else if (0 == strncasecmp(dataType, "FLOAT", strlen("FLOAT"))) { - float *bind_float = (float *) *ptr; + float *bind_float = (float *)*ptr; if (value) { *bind_float = (float)atof(value); @@ -5830,7 +6060,7 @@ static int32_t prepareStmtBindArrayByType(TAOS_BIND *bind, *ptr += bind->buffer_length; } else if (0 == strncasecmp(dataType, "DOUBLE", strlen("DOUBLE"))) { - double *bind_double = (double *)*ptr; + double *bind_double = (double *)*ptr; if (value) { *bind_double = atof(value); @@ -5862,7 +6092,7 @@ static int32_t prepareStmtBindArrayByType(TAOS_BIND *bind, *ptr += bind->buffer_length; } else if (0 == strncasecmp(dataType, "TINYINT", strlen("TINYINT"))) { - int8_t *bind_tinyint = (int8_t *)*ptr; + int8_t *bind_tinyint = (int8_t *)*ptr; if (value) { *bind_tinyint = (int8_t)atoi(value); @@ -5874,12 +6104,21 @@ static int32_t prepareStmtBindArrayByType(TAOS_BIND *bind, bind->buffer = bind_tinyint; bind->length = &bind->buffer_length; bind->is_null = NULL; + *ptr += bind->buffer_length; } else if (0 == strncasecmp(dataType, "BOOL", strlen("BOOL"))) { - int8_t *bind_bool = (int8_t *)*ptr; + int8_t *bind_bool = (int8_t *)*ptr; - *bind_bool = rand_bool(); + if (value) { + if (strncasecmp(value, "true", 4)) { + *bind_bool = true; + } else { + *bind_bool = false; + } + } else { + *bind_bool = rand_bool(); + } bind->buffer_type = TSDB_DATA_TYPE_BOOL; bind->buffer_length = sizeof(int8_t); bind->buffer = bind_bool; @@ -5889,10 +6128,28 @@ static int32_t prepareStmtBindArrayByType(TAOS_BIND *bind, *ptr += bind->buffer_length; } else if (0 == strncasecmp(dataType, "TIMESTAMP", strlen("TIMESTAMP"))) { - int64_t *bind_ts2 = (int64_t *) *ptr; + int64_t *bind_ts2 = (int64_t *)*ptr; if (value) { - *bind_ts2 = atoll(value); + if (strchr(value, ':') && strchr(value, '-')) { + int i = 0; + while(value[i] != '\0') { + if (value[i] == '\"' || value[i] == '\'') { + value[i] = ' '; + } + i++; + } + int64_t tmpEpoch; + if (TSDB_CODE_SUCCESS != taosParseTime( + value, &tmpEpoch, strlen(value), + timePrec, 0)) { + errorPrint("Input %s, time format error!\n", value); + return -1; + } + *bind_ts2 = tmpEpoch; + } else { + *bind_ts2 = atoll(value); + } } else { *bind_ts2 = rand_bigint(); } @@ -5912,13 +6169,14 @@ static int32_t prepareStmtBindArrayByType(TAOS_BIND *bind, } static int32_t prepareStmtWithoutStb( - TAOS_STMT *stmt, + threadInfo *pThreadInfo, char *tableName, uint32_t batch, int64_t insertRows, int64_t recordFrom, int64_t startTime) { + TAOS_STMT *stmt = pThreadInfo->stmt; int ret = taos_stmt_set_tbname(stmt, tableName); if (ret != 0) { errorPrint("failed to execute taos_stmt_set_tbname(%s). return 0x%x. 
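/*
 * Editor's note: a minimal illustrative sketch, not part of the patch.  It
 * restates the per-column TAOS_BIND pattern that prepareStmtBindArrayByType()
 * applies for every fixed-size type above: allocate a value buffer, fill it
 * either from the sample string or from the matching rand_*() helper, then
 * point buffer/length at it.  TAOS_BIND, TSDB_DATA_TYPE_INT and rand_int()
 * are the names used in the surrounding taosdemo.c code; the NULL check on
 * malloc() is an addition for the sketch only.
 */
static int32_t bindOneIntColumnSketch(TAOS_BIND *bind, char *value)
{
    int32_t *bind_int = malloc(sizeof(int32_t));
    if (bind_int == NULL) {
        return -1;
    }

    *bind_int = value ? atoi(value) : rand_int();   // sample value or prepared random

    bind->buffer_type   = TSDB_DATA_TYPE_INT;
    bind->buffer_length = sizeof(int32_t);
    bind->buffer        = bind_int;
    bind->length        = &bind->buffer_length;     // length points back at buffer_length
    bind->is_null       = NULL;                     // no NULL column values are generated here
    return 0;
}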
reason: %s\n", @@ -5938,15 +6196,11 @@ static int32_t prepareStmtWithoutStb( int32_t k = 0; for (k = 0; k < batch;) { /* columnCount + 1 (ts) */ - char data[MAX_DATA_SIZE]; - memset(data, 0, MAX_DATA_SIZE); - char *ptr = data; TAOS_BIND *bind = (TAOS_BIND *)(bindArray + 0); - int64_t *bind_ts; + int64_t *bind_ts = pThreadInfo->bind_ts; - bind_ts = (int64_t *)ptr; bind->buffer_type = TSDB_DATA_TYPE_TIMESTAMP; if (g_args.disorderRatio) { @@ -5962,8 +6216,6 @@ static int32_t prepareStmtWithoutStb( bind->length = &bind->buffer_length; bind->is_null = NULL; - ptr += bind->buffer_length; - for (int i = 0; i < g_args.num_of_CPR; i ++) { bind = (TAOS_BIND *)((char *)bindArray + (sizeof(TAOS_BIND) * (i + 1))); @@ -5971,7 +6223,8 @@ static int32_t prepareStmtWithoutStb( bind, data_type[i], g_args.len_of_binary, - &ptr, NULL)) { + pThreadInfo->time_precision, + NULL)) { return -1; } } @@ -5998,10 +6251,42 @@ static int32_t prepareStmtWithoutStb( return k; } -static int32_t prepareStbStmtBind( - char *bindArray, SSuperTable *stbInfo, bool sourceRand, +static int32_t prepareStbStmtBindTag( + char *bindArray, SSuperTable *stbInfo, + char *tagsVal, + int32_t timePrec) +{ + char *bindBuffer = calloc(1, DOUBLE_BUFF_LEN); // g_args.len_of_binary); + if (bindBuffer == NULL) { + errorPrint("%s() LN%d, Failed to allocate %d bind buffer\n", + __func__, __LINE__, g_args.len_of_binary); + return -1; + } + + TAOS_BIND *tag; + + for (int t = 0; t < stbInfo->tagCount; t ++) { + tag = (TAOS_BIND *)((char *)bindArray + (sizeof(TAOS_BIND) * t)); + if ( -1 == prepareStmtBindArrayByType( + tag, + stbInfo->tags[t].dataType, + stbInfo->tags[t].dataLen, + timePrec, + NULL)) { + free(bindBuffer); + return -1; + } + } + + free(bindBuffer); + return 0; +} + +static int32_t prepareStbStmtBindRand( + int64_t *ts, + char *bindArray, SSuperTable *stbInfo, int64_t startTime, int32_t recSeq, - bool isColumn) + int32_t timePrec) { char *bindBuffer = calloc(1, DOUBLE_BUFF_LEN); // g_args.len_of_binary); if (bindBuffer == NULL) { @@ -6016,121 +6301,92 @@ static int32_t prepareStbStmtBind( TAOS_BIND *bind; - if (isColumn) { - int cursor = 0; + for (int i = 0; i < stbInfo->columnCount + 1; i ++) { + bind = (TAOS_BIND *)((char *)bindArray + (sizeof(TAOS_BIND) * i)); - for (int i = 0; i < stbInfo->columnCount + 1; i ++) { - bind = (TAOS_BIND *)((char *)bindArray + (sizeof(TAOS_BIND) * i)); + if (i == 0) { + int64_t *bind_ts = ts; - if (i == 0) { - int64_t *bind_ts; - - bind_ts = (int64_t *)ptr; - bind->buffer_type = TSDB_DATA_TYPE_TIMESTAMP; - if (stbInfo->disorderRatio) { - *bind_ts = startTime + getTSRandTail( - stbInfo->timeStampStep, recSeq, - stbInfo->disorderRatio, - stbInfo->disorderRange); - } else { - *bind_ts = startTime + stbInfo->timeStampStep * recSeq; - } - bind->buffer_length = sizeof(int64_t); - bind->buffer = bind_ts; - bind->length = &bind->buffer_length; - bind->is_null = NULL; - - ptr += bind->buffer_length; + bind->buffer_type = TSDB_DATA_TYPE_TIMESTAMP; + if (stbInfo->disorderRatio) { + *bind_ts = startTime + getTSRandTail( + stbInfo->timeStampStep, recSeq, + stbInfo->disorderRatio, + stbInfo->disorderRange); } else { - - if (sourceRand) { - if ( -1 == prepareStmtBindArrayByType( - bind, - stbInfo->columns[i-1].dataType, - stbInfo->columns[i-1].dataLen, - &ptr, - NULL)) { - free(bindBuffer); - return -1; - } - } else { - char *restStr = stbInfo->sampleDataBuf + cursor; - int lengthOfRest = strlen(restStr); - - int index = 0; - for (index = 0; index < lengthOfRest; index ++) { - if (restStr[index] == ',') { - break; - 
} - } - - memset(bindBuffer, 0, g_args.len_of_binary); - strncpy(bindBuffer, restStr, index); - cursor += index + 1; // skip ',' too - - if ( -1 == prepareStmtBindArrayByType( - bind, - stbInfo->columns[i-1].dataType, - stbInfo->columns[i-1].dataLen, - &ptr, - bindBuffer)) { - free(bindBuffer); - return -1; - } - } + *bind_ts = startTime + stbInfo->timeStampStep * recSeq; } - } - } else { - TAOS_BIND *tag; + bind->buffer_length = sizeof(int64_t); + bind->buffer = bind_ts; + bind->length = &bind->buffer_length; + bind->is_null = NULL; - for (int t = 0; t < stbInfo->tagCount; t ++) { - tag = (TAOS_BIND *)((char *)bindArray + (sizeof(TAOS_BIND) * t)); - if ( -1 == prepareStmtBindArrayByType( - tag, - stbInfo->tags[t].dataType, - stbInfo->tags[t].dataLen, - &ptr, - NULL)) { - free(bindBuffer); - return -1; - } + ptr += bind->buffer_length; + } else if ( -1 == prepareStmtBindArrayByTypeForRand( + bind, + stbInfo->columns[i-1].dataType, + stbInfo->columns[i-1].dataLen, + timePrec, + &ptr, + NULL)) { + tmfree(bindBuffer); + return -1; } - } - free(bindBuffer); + tmfree(bindBuffer); return 0; } -static int32_t prepareStbStmt( - SSuperTable *stbInfo, - TAOS_STMT *stmt, +static int32_t prepareStbStmtBindWithSample( + int64_t *ts, + char *bindArray, SSuperTable *stbInfo, + int64_t startTime, int32_t recSeq, + int32_t timePrec, + int64_t samplePos) +{ + TAOS_BIND *bind; + + bind = (TAOS_BIND *)bindArray; + + int64_t *bind_ts = ts; + + bind->buffer_type = TSDB_DATA_TYPE_TIMESTAMP; + if (stbInfo->disorderRatio) { + *bind_ts = startTime + getTSRandTail( + stbInfo->timeStampStep, recSeq, + stbInfo->disorderRatio, + stbInfo->disorderRange); + } else { + *bind_ts = startTime + stbInfo->timeStampStep * recSeq; + } + bind->buffer_length = sizeof(int64_t); + bind->buffer = bind_ts; + bind->length = &bind->buffer_length; + bind->is_null = NULL; + + return 0; +} + +static int32_t prepareStbStmtRand( + threadInfo *pThreadInfo, char *tableName, int64_t tableSeq, uint32_t batch, uint64_t insertRows, uint64_t recordFrom, - int64_t startTime, - int64_t *pSamplePos) + int64_t startTime) { int ret; - - bool sourceRand; - if (0 == strncasecmp(stbInfo->dataSource, "rand", strlen("rand"))) { - sourceRand = true; - } else { - sourceRand = false; // from sample data file - } + SSuperTable *stbInfo = pThreadInfo->stbInfo; + TAOS_STMT *stmt = pThreadInfo->stmt; if (AUTO_CREATE_SUBTBL == stbInfo->autoCreateTable) { char* tagsValBuf = NULL; - bool tagRand; if (0 == stbInfo->tagSource) { - tagRand = true; tagsValBuf = generateTagValuesForStb(stbInfo, tableSeq); } else { - tagRand = false; tagsValBuf = getTagValueFromTagSample( stbInfo, tableSeq % stbInfo->tagSampleCount); @@ -6150,8 +6406,9 @@ static int32_t prepareStbStmt( return -1; } - if (-1 == prepareStbStmtBind( - tagsArray, stbInfo, tagRand, -1, -1, false /* is tag */)) { + if (-1 == prepareStbStmtBindTag( + tagsArray, stbInfo, tagsValBuf, pThreadInfo->time_precision + /* is tag */)) { tmfree(tagsValBuf); tmfree(tagsArray); return -1; @@ -6186,8 +6443,12 @@ static int32_t prepareStbStmt( uint32_t k; for (k = 0; k < batch;) { /* columnCount + 1 (ts) */ - if (-1 == prepareStbStmtBind(bindArray, stbInfo, sourceRand, - startTime, k, true /* is column */)) { + if (-1 == prepareStbStmtBindRand( + pThreadInfo->bind_ts, + bindArray, stbInfo, + startTime, k, + pThreadInfo->time_precision + /* is column */)) { free(bindArray); return -1; } @@ -6210,10 +6471,6 @@ static int32_t prepareStbStmt( k++; recordFrom ++; - if (!sourceRand) { - (*pSamplePos) ++; - } - if (recordFrom >= 
insertRows) { break; } @@ -6223,9 +6480,8 @@ static int32_t prepareStbStmt( return k; } -static int32_t prepareStbStmtInterlace( - SSuperTable *stbInfo, - TAOS_STMT *stmt, +static int32_t prepareStbStmtWithSample( + threadInfo *pThreadInfo, char *tableName, int64_t tableSeq, uint32_t batch, @@ -6234,41 +6490,109 @@ static int32_t prepareStbStmtInterlace( int64_t startTime, int64_t *pSamplePos) { - return prepareStbStmt( - stbInfo, - stmt, - tableName, - tableSeq, - batch, - insertRows, 0, startTime, - pSamplePos); -} + int ret; + SSuperTable *stbInfo = pThreadInfo->stbInfo; + TAOS_STMT *stmt = pThreadInfo->stmt; -static int32_t prepareStbStmtProgressive( - SSuperTable *stbInfo, - TAOS_STMT *stmt, - char *tableName, - int64_t tableSeq, - uint32_t batch, - uint64_t insertRows, - uint64_t recordFrom, - int64_t startTime, - int64_t *pSamplePos) -{ - return prepareStbStmt( - stbInfo, - stmt, - tableName, - tableSeq, - g_args.num_of_RPR, - insertRows, recordFrom, startTime, - pSamplePos); -} + if (AUTO_CREATE_SUBTBL == stbInfo->autoCreateTable) { + char* tagsValBuf = NULL; + if (0 == stbInfo->tagSource) { + tagsValBuf = generateTagValuesForStb(stbInfo, tableSeq); + } else { + tagsValBuf = getTagValueFromTagSample( + stbInfo, + tableSeq % stbInfo->tagSampleCount); + } + + if (NULL == tagsValBuf) { + errorPrint("%s() LN%d, tag buf failed to allocate memory\n", + __func__, __LINE__); + return -1; + } + + char *tagsArray = calloc(1, sizeof(TAOS_BIND) * stbInfo->tagCount); + if (NULL == tagsArray) { + tmfree(tagsValBuf); + errorPrint("%s() LN%d, tag buf failed to allocate memory\n", + __func__, __LINE__); + return -1; + } + + if (-1 == prepareStbStmtBindTag( + tagsArray, stbInfo, tagsValBuf, pThreadInfo->time_precision + /* is tag */)) { + tmfree(tagsValBuf); + tmfree(tagsArray); + return -1; + } + + ret = taos_stmt_set_tbname_tags(stmt, tableName, (TAOS_BIND *)tagsArray); + + tmfree(tagsValBuf); + tmfree(tagsArray); + + if (0 != ret) { + errorPrint("%s() LN%d, stmt_set_tbname_tags() failed! reason: %s\n", + __func__, __LINE__, taos_stmt_errstr(stmt)); + return -1; + } + } else { + ret = taos_stmt_set_tbname(stmt, tableName); + if (0 != ret) { + errorPrint("%s() LN%d, stmt_set_tbname() failed! reason: %s\n", + __func__, __LINE__, taos_stmt_errstr(stmt)); + return -1; + } + } + + uint32_t k; + for (k = 0; k < batch;) { + char *bindArray = (char *)(*((uintptr_t *) + (stbInfo->sampleBindArray + (sizeof(char *)) * (*pSamplePos)))); + /* columnCount + 1 (ts) */ + if (-1 == prepareStbStmtBindWithSample( + pThreadInfo->bind_ts, + bindArray, stbInfo, + startTime, k, + pThreadInfo->time_precision, + *pSamplePos + /* is column */)) { + return -1; + } + ret = taos_stmt_bind_param(stmt, (TAOS_BIND *)bindArray); + if (0 != ret) { + errorPrint("%s() LN%d, stmt_bind_param() failed! reason: %s\n", + __func__, __LINE__, taos_stmt_errstr(stmt)); + return -1; + } + // if msg > 3MB, break + ret = taos_stmt_add_batch(stmt); + if (0 != ret) { + errorPrint("%s() LN%d, stmt_add_batch() failed! 
reason: %s\n", + __func__, __LINE__, taos_stmt_errstr(stmt)); + return -1; + } + + k++; + recordFrom ++; + + (*pSamplePos) ++; + if ((*pSamplePos) == MAX_SAMPLES_ONCE_FROM_FILE) { + *pSamplePos = 0; + } + + if (recordFrom >= insertRows) { + break; + } + } + + return k; +} #endif static int32_t generateStbProgressiveData( - SSuperTable *superTblInfo, + SSuperTable *stbInfo, char *tableName, int64_t tableSeq, char *dbName, char *buffer, @@ -6282,7 +6606,7 @@ static int32_t generateStbProgressiveData( memset(pstr, 0, *pRemainderBufLen); int64_t headLen = generateStbSQLHead( - superTblInfo, + stbInfo, tableName, tableSeq, dbName, buffer, *pRemainderBufLen); @@ -6294,7 +6618,7 @@ static int32_t generateStbProgressiveData( int64_t dataLen; - return generateStbDataTail(superTblInfo, + return generateStbDataTail(stbInfo, g_args.num_of_RPR, pstr, *pRemainderBufLen, insertRows, recordFrom, startTime, @@ -6354,26 +6678,34 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { int64_t nTimeStampStep; uint64_t insert_interval; - SSuperTable* superTblInfo = pThreadInfo->superTblInfo; + bool sourceRand; - if (superTblInfo) { - insertRows = superTblInfo->insertRows; + SSuperTable* stbInfo = pThreadInfo->stbInfo; - if ((superTblInfo->interlaceRows == 0) + if (stbInfo) { + insertRows = stbInfo->insertRows; + + if ((stbInfo->interlaceRows == 0) && (g_args.interlace_rows > 0)) { interlaceRows = g_args.interlace_rows; } else { - interlaceRows = superTblInfo->interlaceRows; + interlaceRows = stbInfo->interlaceRows; + } + maxSqlLen = stbInfo->maxSqlLen; + nTimeStampStep = stbInfo->timeStampStep; + insert_interval = stbInfo->insertInterval; + if (0 == strncasecmp(stbInfo->dataSource, "rand", 4)) { + sourceRand = true; + } else { + sourceRand = false; // from sample data file } - maxSqlLen = superTblInfo->maxSqlLen; - nTimeStampStep = superTblInfo->timeStampStep; - insert_interval = superTblInfo->insertInterval; } else { insertRows = g_args.num_of_DPT; interlaceRows = g_args.interlace_rows; maxSqlLen = g_args.max_sql_len; nTimeStampStep = g_args.timestamp_step; insert_interval = g_args.insert_interval; + sourceRand = true; } debugPrint("[%d] %s() LN%d: start_table_from=%"PRIu64" ntables=%"PRId64" insertRows=%"PRIu64"\n", @@ -6456,28 +6788,38 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { uint64_t oldRemainderLen = remainderBufLen; int32_t generated; - if (superTblInfo) { - if (superTblInfo->iface == STMT_IFACE) { + if (stbInfo) { + if (stbInfo->iface == STMT_IFACE) { #if STMT_IFACE_ENABLED == 1 - generated = prepareStbStmtInterlace( - superTblInfo, - pThreadInfo->stmt, - tableName, - tableSeq, - batchPerTbl, - insertRows, i, - startTime, - &(pThreadInfo->samplePos)); + if (sourceRand) { + generated = prepareStbStmtRand( + pThreadInfo, + tableName, + tableSeq, + batchPerTbl, + insertRows, 0, + startTime + ); + } else { + generated = prepareStbStmtWithSample( + pThreadInfo, + tableName, + tableSeq, + batchPerTbl, + insertRows, 0, + startTime, + &(pThreadInfo->samplePos)); + } #else generated = -1; #endif } else { generated = generateStbInterlaceData( - superTblInfo, + pThreadInfo, tableName, batchPerTbl, i, batchPerTblTimes, tableSeq, - pThreadInfo, pstr, + pstr, insertRows, startTime, &remainderBufLen); @@ -6490,7 +6832,8 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { tableName, batchPerTbl, startTime); #if STMT_IFACE_ENABLED == 1 generated = prepareStmtWithoutStb( - pThreadInfo->stmt, tableName, + pThreadInfo, + tableName, batchPerTbl, insertRows, i, startTime); @@ -6639,12 
+6982,12 @@ free_of_interlace: static void* syncWriteProgressive(threadInfo *pThreadInfo) { debugPrint("%s() LN%d: ### progressive write\n", __func__, __LINE__); - SSuperTable* superTblInfo = pThreadInfo->superTblInfo; - uint64_t maxSqlLen = superTblInfo?superTblInfo->maxSqlLen:g_args.max_sql_len; + SSuperTable* stbInfo = pThreadInfo->stbInfo; + uint64_t maxSqlLen = stbInfo?stbInfo->maxSqlLen:g_args.max_sql_len; int64_t timeStampStep = - superTblInfo?superTblInfo->timeStampStep:g_args.timestamp_step; + stbInfo?stbInfo->timeStampStep:g_args.timestamp_step; int64_t insertRows = - (superTblInfo)?superTblInfo->insertRows:g_args.num_of_DPT; + (stbInfo)?stbInfo->insertRows:g_args.num_of_DPT; verbosePrint("%s() LN%d insertRows=%"PRId64"\n", __func__, __LINE__, insertRows); @@ -6663,6 +7006,17 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) { pThreadInfo->totalInsertRows = 0; pThreadInfo->totalAffectedRows = 0; + bool sourceRand; + if (stbInfo) { + if (0 == strncasecmp(stbInfo->dataSource, "rand", 4)) { + sourceRand = true; + } else { + sourceRand = false; // from sample data file + } + } else { + sourceRand = true; + } + pThreadInfo->samplePos = 0; int percentComplete = 0; @@ -6696,24 +7050,35 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) { remainderBufLen -= len; int32_t generated; - if (superTblInfo) { - if (superTblInfo->iface == STMT_IFACE) { + if (stbInfo) { + if (stbInfo->iface == STMT_IFACE) { #if STMT_IFACE_ENABLED == 1 - generated = prepareStbStmtProgressive( - superTblInfo, - pThreadInfo->stmt, - tableName, - tableSeq, - g_args.num_of_RPR, - insertRows, i, start_time, - &(pThreadInfo->samplePos)); + if (sourceRand) { + generated = prepareStbStmtRand( + pThreadInfo, + tableName, + tableSeq, + g_args.num_of_RPR, + insertRows, + i, start_time + ); + } else { + generated = prepareStbStmtWithSample( + pThreadInfo, + tableName, + tableSeq, + g_args.num_of_RPR, + insertRows, i, start_time, + &(pThreadInfo->samplePos)); + } #else generated = -1; #endif } else { generated = generateStbProgressiveData( - superTblInfo, - tableName, tableSeq, pThreadInfo->db_name, pstr, + stbInfo, + tableName, tableSeq, + pThreadInfo->db_name, pstr, insertRows, i, start_time, &(pThreadInfo->samplePos), &remainderBufLen); @@ -6722,7 +7087,7 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) { if (g_args.iface == STMT_IFACE) { #if STMT_IFACE_ENABLED == 1 generated = prepareStmtWithoutStb( - pThreadInfo->stmt, + pThreadInfo, tableName, g_args.num_of_RPR, insertRows, i, @@ -6792,9 +7157,9 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) { } // num_of_DPT if ((g_args.verbose_print) && - (tableSeq == pThreadInfo->ntables - 1) && (superTblInfo) + (tableSeq == pThreadInfo->ntables - 1) && (stbInfo) && (0 == strncasecmp( - superTblInfo->dataSource, + stbInfo->dataSource, "sample", strlen("sample")))) { verbosePrint("%s() LN%d samplePos=%"PRId64"\n", __func__, __LINE__, pThreadInfo->samplePos); @@ -6812,18 +7177,18 @@ free_of_progressive: static void* syncWrite(void *sarg) { threadInfo *pThreadInfo = (threadInfo *)sarg; - SSuperTable* superTblInfo = pThreadInfo->superTblInfo; + SSuperTable* stbInfo = pThreadInfo->stbInfo; setThreadName("syncWrite"); uint32_t interlaceRows; - if (superTblInfo) { - if ((superTblInfo->interlaceRows == 0) + if (stbInfo) { + if ((stbInfo->interlaceRows == 0) && (g_args.interlace_rows > 0)) { interlaceRows = g_args.interlace_rows; } else { - interlaceRows = superTblInfo->interlaceRows; + interlaceRows = stbInfo->interlaceRows; } } else { 
interlaceRows = g_args.interlace_rows; @@ -6840,10 +7205,10 @@ static void* syncWrite(void *sarg) { static void callBack(void *param, TAOS_RES *res, int code) { threadInfo* pThreadInfo = (threadInfo*)param; - SSuperTable* superTblInfo = pThreadInfo->superTblInfo; + SSuperTable* stbInfo = pThreadInfo->stbInfo; int insert_interval = - superTblInfo?superTblInfo->insertInterval:g_args.insert_interval; + stbInfo?stbInfo->insertInterval:g_args.insert_interval; if (insert_interval) { pThreadInfo->et = taosGetTimestampMs(); if ((pThreadInfo->et - pThreadInfo->st) < insert_interval) { @@ -6851,13 +7216,13 @@ static void callBack(void *param, TAOS_RES *res, int code) { } } - char *buffer = calloc(1, pThreadInfo->superTblInfo->maxSqlLen); + char *buffer = calloc(1, pThreadInfo->stbInfo->maxSqlLen); char data[MAX_DATA_SIZE]; char *pstr = buffer; pstr += sprintf(pstr, "insert into %s.%s%"PRId64" values", pThreadInfo->db_name, pThreadInfo->tb_prefix, pThreadInfo->start_table_from); - // if (pThreadInfo->counter >= pThreadInfo->superTblInfo->insertRows) { + // if (pThreadInfo->counter >= pThreadInfo->stbInfo->insertRows) { if (pThreadInfo->counter >= g_args.num_of_RPR) { pThreadInfo->start_table_from++; pThreadInfo->counter = 0; @@ -6871,15 +7236,15 @@ static void callBack(void *param, TAOS_RES *res, int code) { for (int i = 0; i < g_args.num_of_RPR; i++) { int rand_num = taosRandom() % 100; - if (0 != pThreadInfo->superTblInfo->disorderRatio - && rand_num < pThreadInfo->superTblInfo->disorderRatio) { + if (0 != pThreadInfo->stbInfo->disorderRatio + && rand_num < pThreadInfo->stbInfo->disorderRatio) { int64_t d = pThreadInfo->lastTs - - (taosRandom() % pThreadInfo->superTblInfo->disorderRange + 1); - generateStbRowData(pThreadInfo->superTblInfo, data, + - (taosRandom() % pThreadInfo->stbInfo->disorderRange + 1); + generateStbRowData(pThreadInfo->stbInfo, data, MAX_DATA_SIZE, d); } else { - generateStbRowData(pThreadInfo->superTblInfo, + generateStbRowData(pThreadInfo->stbInfo, data, MAX_DATA_SIZE, pThreadInfo->lastTs += 1000); @@ -6887,7 +7252,7 @@ static void callBack(void *param, TAOS_RES *res, int code) { pstr += sprintf(pstr, "%s", data); pThreadInfo->counter++; - if (pThreadInfo->counter >= pThreadInfo->superTblInfo->insertRows) { + if (pThreadInfo->counter >= pThreadInfo->stbInfo->insertRows) { break; } } @@ -6903,7 +7268,7 @@ static void callBack(void *param, TAOS_RES *res, int code) { static void *asyncWrite(void *sarg) { threadInfo *pThreadInfo = (threadInfo *)sarg; - SSuperTable* superTblInfo = pThreadInfo->superTblInfo; + SSuperTable* stbInfo = pThreadInfo->stbInfo; setThreadName("asyncWrite"); @@ -6912,7 +7277,7 @@ static void *asyncWrite(void *sarg) { pThreadInfo->lastTs = pThreadInfo->start_time; int insert_interval = - superTblInfo?superTblInfo->insertInterval:g_args.insert_interval; + stbInfo?stbInfo->insertInterval:g_args.insert_interval; if (insert_interval) { pThreadInfo->st = taosGetTimestampMs(); } @@ -6949,8 +7314,81 @@ static int convertHostToServAddr(char *host, uint16_t port, struct sockaddr_in * return 0; } +#if STMT_IFACE_ENABLED == 1 +static int parseSampleFileToStmt(SSuperTable *stbInfo, uint32_t timePrec) +{ + stbInfo->sampleBindArray = calloc(1, sizeof(char *) * MAX_SAMPLES_ONCE_FROM_FILE); + if (stbInfo->sampleBindArray == NULL) { + errorPrint("%s() LN%d, Failed to allocate %"PRIu64" bind array buffer\n", + __func__, __LINE__, (uint64_t)sizeof(char *) * MAX_SAMPLES_ONCE_FROM_FILE); + return -1; + } + + + for (int i=0; i < MAX_SAMPLES_ONCE_FROM_FILE; i++) { + char 
*bindArray = calloc(1, sizeof(TAOS_BIND) * (stbInfo->columnCount + 1)); + if (bindArray == NULL) { + errorPrint("%s() LN%d, Failed to allocate %d bind params\n", + __func__, __LINE__, (stbInfo->columnCount + 1)); + return -1; + } + + + TAOS_BIND *bind; + int cursor = 0; + + for (int c = 0; c < stbInfo->columnCount + 1; c++) { + bind = (TAOS_BIND *)((char *)bindArray + (sizeof(TAOS_BIND) * c)); + + if (c == 0) { + bind->buffer_type = TSDB_DATA_TYPE_TIMESTAMP; + bind->buffer_length = sizeof(int64_t); + bind->buffer = NULL; //bind_ts; + bind->length = &bind->buffer_length; + bind->is_null = NULL; + } else { + char *restStr = stbInfo->sampleDataBuf + + stbInfo->lenOfOneRow * i + cursor; + int lengthOfRest = strlen(restStr); + + int index = 0; + for (index = 0; index < lengthOfRest; index ++) { + if (restStr[index] == ',') { + break; + } + } + + char *bindBuffer = calloc(1, index + 1); + if (bindBuffer == NULL) { + errorPrint("%s() LN%d, Failed to allocate %d bind buffer\n", + __func__, __LINE__, DOUBLE_BUFF_LEN); + return -1; + } + + strncpy(bindBuffer, restStr, index); + cursor += index + 1; // skip ',' too + + if (-1 == prepareStmtBindArrayByType( + bind, + stbInfo->columns[c-1].dataType, + stbInfo->columns[c-1].dataLen, + timePrec, + bindBuffer)) { + free(bindBuffer); + return -1; + } + free(bindBuffer); + } + } + *((uintptr_t *)(stbInfo->sampleBindArray + (sizeof(char *)) * i)) = (uintptr_t)bindArray; + } + + return 0; +} +#endif + static void startMultiThreadInsertData(int threads, char* db_name, - char* precision, SSuperTable* superTblInfo) { + char* precision, SSuperTable* stbInfo) { int32_t timePrec = TSDB_TIME_PRECISION_MILLI; if (0 != precision[0]) { @@ -6964,19 +7402,19 @@ static void startMultiThreadInsertData(int threads, char* db_name, #endif } else { errorPrint("Not support precision: %s\n", precision); - exit(-1); + exit(EXIT_FAILURE); } } int64_t start_time; - if (superTblInfo) { - if (0 == strncasecmp(superTblInfo->startTimestamp, "now", 3)) { + if (stbInfo) { + if (0 == strncasecmp(stbInfo->startTimestamp, "now", 3)) { start_time = taosGetTimestamp(timePrec); } else { if (TSDB_CODE_SUCCESS != taosParseTime( - superTblInfo->startTimestamp, + stbInfo->startTimestamp, &start_time, - strlen(superTblInfo->startTimestamp), + strlen(stbInfo->startTimestamp), timePrec, 0)) { ERROR_EXIT("failed to parse time!\n"); } @@ -6990,12 +7428,12 @@ static void startMultiThreadInsertData(int threads, char* db_name, int64_t start = taosGetTimestampMs(); // read sample data from file first - if ((superTblInfo) && (0 == strncasecmp(superTblInfo->dataSource, + if ((stbInfo) && (0 == strncasecmp(stbInfo->dataSource, "sample", strlen("sample")))) { - if (0 != prepareSampleDataForSTable(superTblInfo)) { + if (0 != prepareSampleDataForSTable(stbInfo)) { errorPrint("%s() LN%d, prepare sample data for stable failed!\n", __func__, __LINE__); - exit(-1); + exit(EXIT_FAILURE); } } @@ -7005,68 +7443,68 @@ static void startMultiThreadInsertData(int threads, char* db_name, if (NULL == taos0) { errorPrint("%s() LN%d, connect to server fail , reason: %s\n", __func__, __LINE__, taos_errstr(NULL)); - exit(-1); + exit(EXIT_FAILURE); } int64_t ntables = 0; uint64_t tableFrom; - if (superTblInfo) { + if (stbInfo) { int64_t limit; uint64_t offset; if ((NULL != g_args.sqlFile) - && (superTblInfo->childTblExists == TBL_NO_EXISTS) - && ((superTblInfo->childTblOffset != 0) - || (superTblInfo->childTblLimit >= 0))) { + && (stbInfo->childTblExists == TBL_NO_EXISTS) + && ((stbInfo->childTblOffset != 0) + || 
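/*
 * Editor's note: illustrative sketch, not part of the patch.  The layout
 * parseSampleFileToStmt() builds is MAX_SAMPLES_ONCE_FROM_FILE pointer-sized
 * slots, each slot pointing at a heap-allocated TAOS_BIND[columnCount + 1]
 * pre-filled from one CSV sample row.  At insert time
 * prepareStbStmtWithSample() only fetches the slot for the current sample
 * position and refreshes the leading timestamp bind, as in the patch:
 *
 *     char *bindArray = (char *)(*((uintptr_t *)
 *             (stbInfo->sampleBindArray + sizeof(char *) * (*pSamplePos))));
 *     prepareStbStmtBindWithSample(pThreadInfo->bind_ts, bindArray, stbInfo,
 *             startTime, k, pThreadInfo->time_precision, *pSamplePos);
 *     taos_stmt_bind_param(stmt, (TAOS_BIND *)bindArray);
 *
 * so the per-row cost drops to one timestamp write plus one bind call.
 */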
(stbInfo->childTblLimit >= 0))) { printf("WARNING: offset and limit will not be used since the child tables not exists!\n"); } - if (superTblInfo->childTblExists == TBL_ALREADY_EXISTS) { - if ((superTblInfo->childTblLimit < 0) - || ((superTblInfo->childTblOffset - + superTblInfo->childTblLimit) - > (superTblInfo->childTblCount))) { - superTblInfo->childTblLimit = - superTblInfo->childTblCount - superTblInfo->childTblOffset; + if (stbInfo->childTblExists == TBL_ALREADY_EXISTS) { + if ((stbInfo->childTblLimit < 0) + || ((stbInfo->childTblOffset + + stbInfo->childTblLimit) + > (stbInfo->childTblCount))) { + stbInfo->childTblLimit = + stbInfo->childTblCount - stbInfo->childTblOffset; } - offset = superTblInfo->childTblOffset; - limit = superTblInfo->childTblLimit; + offset = stbInfo->childTblOffset; + limit = stbInfo->childTblLimit; } else { - limit = superTblInfo->childTblCount; + limit = stbInfo->childTblCount; offset = 0; } ntables = limit; tableFrom = offset; - if ((superTblInfo->childTblExists != TBL_NO_EXISTS) - && ((superTblInfo->childTblOffset + superTblInfo->childTblLimit ) - > superTblInfo->childTblCount)) { + if ((stbInfo->childTblExists != TBL_NO_EXISTS) + && ((stbInfo->childTblOffset + stbInfo->childTblLimit) + > stbInfo->childTblCount)) { printf("WARNING: specified offset + limit > child table count!\n"); prompt(); } - if ((superTblInfo->childTblExists != TBL_NO_EXISTS) - && (0 == superTblInfo->childTblLimit)) { + if ((stbInfo->childTblExists != TBL_NO_EXISTS) + && (0 == stbInfo->childTblLimit)) { printf("WARNING: specified limit = 0, which cannot find table name to insert or query! \n"); prompt(); } - superTblInfo->childTblName = (char*)calloc(1, + stbInfo->childTblName = (char*)calloc(1, limit * TSDB_TABLE_NAME_LEN); - if (superTblInfo->childTblName == NULL) { - errorPrint("%s() LN%d, alloc memory failed!\n", __func__, __LINE__); + if (stbInfo->childTblName == NULL) { taos_close(taos0); - exit(-1); + errorPrint("%s() LN%d, alloc memory failed!\n", __func__, __LINE__); + exit(EXIT_FAILURE); } int64_t childTblCount; getChildNameOfSuperTableWithLimitAndOffset( taos0, - db_name, superTblInfo->sTblName, - &superTblInfo->childTblName, &childTblCount, + db_name, stbInfo->sTblName, + &stbInfo->childTblName, &childTblCount, limit, offset); } else { @@ -7087,11 +7525,11 @@ static void startMultiThreadInsertData(int threads, char* db_name, b = ntables % threads; } - if ((superTblInfo) - && (superTblInfo->iface == REST_IFACE)) { + if ((stbInfo) + && (stbInfo->iface == REST_IFACE)) { if (convertHostToServAddr( g_Dbs.host, g_Dbs.port, &(g_Dbs.serv_addr)) != 0) { - exit(-1); + ERROR_EXIT("convert host to server address"); } } @@ -7104,98 +7542,110 @@ static void startMultiThreadInsertData(int threads, char* db_name, memset(pids, 0, threads * sizeof(pthread_t)); memset(infos, 0, threads * sizeof(threadInfo)); +#if STMT_IFACE_ENABLED == 1 + char *stmtBuffer = calloc(1, BUFFER_SIZE); + assert(stmtBuffer); + if ((g_args.iface == STMT_IFACE) + || ((stbInfo) + && (stbInfo->iface == STMT_IFACE))) { + char *pstr = stmtBuffer; + + if ((stbInfo) + && (AUTO_CREATE_SUBTBL + == stbInfo->autoCreateTable)) { + pstr += sprintf(pstr, "INSERT INTO ? USING %s TAGS(?", + stbInfo->sTblName); + for (int tag = 0; tag < (stbInfo->tagCount - 1); + tag ++ ) { + pstr += sprintf(pstr, ",?"); + } + pstr += sprintf(pstr, ") VALUES(?"); + } else { + pstr += sprintf(pstr, "INSERT INTO ? 
VALUES(?"); + } + + int columnCount; + if (stbInfo) { + columnCount = stbInfo->columnCount; + } else { + columnCount = g_args.num_of_CPR; + } + + for (int col = 0; col < columnCount; col ++) { + pstr += sprintf(pstr, ",?"); + } + pstr += sprintf(pstr, ")"); + + debugPrint("%s() LN%d, stmtBuffer: %s", __func__, __LINE__, stmtBuffer); + + if ((stbInfo) && (0 == strncasecmp(stbInfo->dataSource, + "sample", strlen("sample")))) { + parseSampleFileToStmt(stbInfo, timePrec); + } + } +#endif + for (int i = 0; i < threads; i++) { threadInfo *pThreadInfo = infos + i; pThreadInfo->threadID = i; tstrncpy(pThreadInfo->db_name, db_name, TSDB_DB_NAME_LEN); pThreadInfo->time_precision = timePrec; - pThreadInfo->superTblInfo = superTblInfo; + pThreadInfo->stbInfo = stbInfo; pThreadInfo->start_time = start_time; pThreadInfo->minDelay = UINT64_MAX; - if ((NULL == superTblInfo) || - (superTblInfo->iface != REST_IFACE)) { + if ((NULL == stbInfo) || + (stbInfo->iface != REST_IFACE)) { //t_info->taos = taos; pThreadInfo->taos = taos_connect( g_Dbs.host, g_Dbs.user, g_Dbs.password, db_name, g_Dbs.port); if (NULL == pThreadInfo->taos) { + free(infos); errorPrint( "%s() LN%d, connect to server fail from insert sub thread, reason: %s\n", __func__, __LINE__, taos_errstr(NULL)); - free(infos); - exit(-1); + exit(EXIT_FAILURE); } #if STMT_IFACE_ENABLED == 1 if ((g_args.iface == STMT_IFACE) - || ((superTblInfo) - && (superTblInfo->iface == STMT_IFACE))) { + || ((stbInfo) + && (stbInfo->iface == STMT_IFACE))) { - int columnCount; - if (superTblInfo) { - columnCount = superTblInfo->columnCount; - } else { - columnCount = g_args.num_of_CPR; - } pThreadInfo->stmt = taos_stmt_init(pThreadInfo->taos); if (NULL == pThreadInfo->stmt) { + free(pids); + free(infos); errorPrint( "%s() LN%d, failed init stmt, reason: %s\n", __func__, __LINE__, taos_errstr(NULL)); + exit(EXIT_FAILURE); + } + + int ret = taos_stmt_prepare(pThreadInfo->stmt, stmtBuffer, 0); + if (ret != 0){ free(pids); free(infos); - exit(-1); - } - - char *buffer = calloc(1, BUFFER_SIZE); - assert(buffer); - char *pstr = buffer; - - if ((superTblInfo) - && (AUTO_CREATE_SUBTBL - == superTblInfo->autoCreateTable)) { - pstr += sprintf(pstr, "INSERT INTO ? USING %s TAGS(?", - superTblInfo->sTblName); - for (int tag = 0; tag < (superTblInfo->tagCount - 1); - tag ++ ) { - pstr += sprintf(pstr, ",?"); - } - pstr += sprintf(pstr, ") VALUES(?"); - } else { - pstr += sprintf(pstr, "INSERT INTO ? VALUES(?"); - } - - for (int col = 0; col < columnCount; col ++) { - pstr += sprintf(pstr, ",?"); - } - pstr += sprintf(pstr, ")"); - - debugPrint("%s() LN%d, buffer: %s", __func__, __LINE__, buffer); - int ret = taos_stmt_prepare(pThreadInfo->stmt, buffer, 0); - if (ret != 0){ + free(stmtBuffer); errorPrint("failed to execute taos_stmt_prepare. return 0x%x. 
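/*
 * Editor's note: illustrative only, not part of the patch.  For a
 * hypothetical super table "meters" with 2 tags and 2 data columns the
 * template assembled into stmtBuffer above comes out as
 *
 *     INSERT INTO ? USING meters TAGS(?,?) VALUES(?,?,?)
 *
 * i.e. tagCount tag placeholders and columnCount + 1 value placeholders,
 * the first value being the timestamp.  Without auto-created sub tables the
 * same column loop simply yields
 *
 *     INSERT INTO ? VALUES(?,?,?)
 *
 * Each worker thread then runs taos_stmt_prepare(pThreadInfo->stmt,
 * stmtBuffer, 0) exactly once and afterwards only re-binds parameters and
 * adds batches for the rows it generates.
 */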
reason: %s\n", ret, taos_stmt_errstr(pThreadInfo->stmt)); - free(pids); - free(infos); - free(buffer); - exit(-1); + exit(EXIT_FAILURE); } - - free(buffer); + pThreadInfo->bind_ts = malloc(sizeof(int64_t)); } #endif } else { pThreadInfo->taos = NULL; } - /* if ((NULL == superTblInfo) - || (0 == superTblInfo->multiThreadWriteOneTbl)) { + /* if ((NULL == stbInfo) + || (0 == stbInfo->multiThreadWriteOneTbl)) { */ pThreadInfo->start_table_from = tableFrom; pThreadInfo->ntables = iend_table_to + 1; /* } else { pThreadInfo->start_table_from = 0; - pThreadInfo->ntables = superTblInfo->childTblCount; + pThreadInfo->ntables = stbInfo->childTblCount; pThreadInfo->start_time = pThreadInfo->start_time + rand_int() % 10000 - rand_tinyint(); } */ @@ -7215,6 +7665,10 @@ static void startMultiThreadInsertData(int threads, char* db_name, } } +#if STMT_IFACE_ENABLED == 1 + free(stmtBuffer); +#endif + for (int i = 0; i < threads; i++) { pthread_join(pids[i], NULL); } @@ -7228,11 +7682,10 @@ static void startMultiThreadInsertData(int threads, char* db_name, for (int i = 0; i < threads; i++) { threadInfo *pThreadInfo = infos + i; - tsem_destroy(&(pThreadInfo->lock_sem)); - #if STMT_IFACE_ENABLED == 1 if (pThreadInfo->stmt) { taos_stmt_close(pThreadInfo->stmt); + tmfree((char *)pThreadInfo->bind_ts); } #endif tsem_destroy(&(pThreadInfo->lock_sem)); @@ -7242,9 +7695,9 @@ static void startMultiThreadInsertData(int threads, char* db_name, __func__, __LINE__, pThreadInfo->threadID, pThreadInfo->totalInsertRows, pThreadInfo->totalAffectedRows); - if (superTblInfo) { - superTblInfo->totalAffectedRows += pThreadInfo->totalAffectedRows; - superTblInfo->totalInsertRows += pThreadInfo->totalInsertRows; + if (stbInfo) { + stbInfo->totalAffectedRows += pThreadInfo->totalAffectedRows; + stbInfo->totalInsertRows += pThreadInfo->totalInsertRows; } else { g_args.totalAffectedRows += pThreadInfo->totalAffectedRows; g_args.totalInsertRows += pThreadInfo->totalInsertRows; @@ -7265,22 +7718,22 @@ static void startMultiThreadInsertData(int threads, char* db_name, double tInMs = t/1000.0; - if (superTblInfo) { + if (stbInfo) { fprintf(stderr, "Spent %.2f seconds to insert rows: %"PRIu64", affected rows: %"PRIu64" with %d thread(s) into %s.%s. %.2f records/second\n\n", - tInMs, superTblInfo->totalInsertRows, - superTblInfo->totalAffectedRows, - threads, db_name, superTblInfo->sTblName, + tInMs, stbInfo->totalInsertRows, + stbInfo->totalAffectedRows, + threads, db_name, stbInfo->sTblName, (tInMs)? - (double)(superTblInfo->totalInsertRows/tInMs):FLT_MAX); + (double)(stbInfo->totalInsertRows/tInMs):FLT_MAX); if (g_fpOfInsertResult) { fprintf(g_fpOfInsertResult, "Spent %.2f seconds to insert rows: %"PRIu64", affected rows: %"PRIu64" with %d thread(s) into %s.%s. %.2f records/second\n\n", - tInMs, superTblInfo->totalInsertRows, - superTblInfo->totalAffectedRows, - threads, db_name, superTblInfo->sTblName, + tInMs, stbInfo->totalInsertRows, + stbInfo->totalAffectedRows, + threads, db_name, stbInfo->sTblName, (tInMs)? 
- (double)(superTblInfo->totalInsertRows/tInMs):FLT_MAX); + (double)(stbInfo->totalInsertRows/tInMs):FLT_MAX); } } else { fprintf(stderr, "Spent %.2f seconds to insert rows: %"PRIu64", affected rows: %"PRIu64" with %d thread(s) into %s %.2f records/second\n\n", @@ -7335,8 +7788,8 @@ static void *readTable(void *sarg) { } int64_t num_of_DPT; - /* if (pThreadInfo->superTblInfo) { - num_of_DPT = pThreadInfo->superTblInfo->insertRows; // nrecords_per_table; + /* if (pThreadInfo->stbInfo) { + num_of_DPT = pThreadInfo->stbInfo->insertRows; // nrecords_per_table; } else { */ num_of_DPT = g_args.num_of_DPT; @@ -7410,7 +7863,7 @@ static void *readMetric(void *sarg) { return NULL; } - int64_t num_of_DPT = pThreadInfo->superTblInfo->insertRows; + int64_t num_of_DPT = pThreadInfo->stbInfo->insertRows; int64_t num_of_tables = pThreadInfo->ntables; // rinfo->end_table_to - rinfo->start_table_from + 1; int64_t totalData = num_of_DPT * num_of_tables; bool do_aggreFunc = g_Dbs.do_aggreFunc; @@ -7549,14 +8002,14 @@ static int insertTestProcess() { if (g_Dbs.db[i].superTblCount > 0) { for (uint64_t j = 0; j < g_Dbs.db[i].superTblCount; j++) { - SSuperTable* superTblInfo = &g_Dbs.db[i].superTbls[j]; + SSuperTable* stbInfo = &g_Dbs.db[i].superTbls[j]; - if (superTblInfo && (superTblInfo->insertRows > 0)) { + if (stbInfo && (stbInfo->insertRows > 0)) { startMultiThreadInsertData( g_Dbs.threadCount, g_Dbs.db[i].dbName, g_Dbs.db[i].dbCfg.precision, - superTblInfo); + stbInfo); } } } @@ -7776,7 +8229,7 @@ static int queryTestProcess() { if (taos == NULL) { errorPrint( "Failed to connect to TDengine, reason:%s\n", taos_errstr(NULL)); - exit(-1); + exit(EXIT_FAILURE); } if (0 != g_queryInfo.superQueryInfo.sqlCount) { @@ -7796,7 +8249,7 @@ static int queryTestProcess() { if (0 == strncasecmp(g_queryInfo.queryMode, "rest", strlen("rest"))) { if (convertHostToServAddr( g_queryInfo.host, g_queryInfo.port, &g_queryInfo.serv_addr) != 0) - exit(-1); + ERROR_EXIT("convert host to server address"); } pthread_t *pids = NULL; @@ -8000,10 +8453,10 @@ static void *superSubscribe(void *sarg) { setThreadName("superSub"); if (pThreadInfo->ntables > MAX_QUERY_SQL_COUNT) { + free(subSqlStr); errorPrint("The table number(%"PRId64") of the thread is more than max query sql count: %d\n", pThreadInfo->ntables, MAX_QUERY_SQL_COUNT); - free(subSqlStr); - exit(-1); + exit(EXIT_FAILURE); } if (pThreadInfo->taos == NULL) { @@ -8269,7 +8722,7 @@ static int subscribeTestProcess() { if (taos == NULL) { errorPrint( "Failed to connect to TDengine, reason:%s\n", taos_errstr(NULL)); - exit(-1); + exit(EXIT_FAILURE); } if (0 != g_queryInfo.superQueryInfo.sqlCount) { @@ -8298,7 +8751,7 @@ static int subscribeTestProcess() { errorPrint("%s() LN%d, sepcified query sqlCount %d.\n", __func__, __LINE__, g_queryInfo.specifiedQueryInfo.sqlCount); - exit(-1); + exit(EXIT_FAILURE); } pids = calloc( @@ -8313,7 +8766,7 @@ static int subscribeTestProcess() { sizeof(threadInfo)); if ((NULL == pids) || (NULL == infos)) { errorPrint("%s() LN%d, malloc failed for create threads\n", __func__, __LINE__); - exit(-1); + exit(EXIT_FAILURE); } for (int i = 0; i < g_queryInfo.specifiedQueryInfo.sqlCount; i++) { @@ -8350,7 +8803,7 @@ static int subscribeTestProcess() { errorPrint("%s() LN%d, malloc failed for create threads\n", __func__, __LINE__); // taos_close(taos); - exit(-1); + exit(EXIT_FAILURE); } int64_t ntables = g_queryInfo.superQueryInfo.childTblCount; @@ -8553,8 +9006,7 @@ static int regexMatch(const char *s, const char *reg, int cflags) { /* Compile regular 
expression */ if (regcomp(®ex, reg, cflags) != 0) { - printf("Fail to compile regex\n"); - exit(-1); + ERROR_EXIT("Fail to compile regex\n"); } /* Execute regular expression */ @@ -8567,9 +9019,9 @@ static int regexMatch(const char *s, const char *reg, int cflags) { return 0; } else { regerror(reti, ®ex, msgbuf, sizeof(msgbuf)); - printf("Regex match failed: %s\n", msgbuf); regfree(®ex); - exit(-1); + printf("Regex match failed: %s\n", msgbuf); + exit(EXIT_FAILURE); } return 0; @@ -8671,7 +9123,7 @@ static void queryResult() { if (g_args.use_metric) { pThreadInfo->ntables = g_Dbs.db[0].superTbls[0].childTblCount; pThreadInfo->end_table_to = g_Dbs.db[0].superTbls[0].childTblCount - 1; - pThreadInfo->superTblInfo = &g_Dbs.db[0].superTbls[0]; + pThreadInfo->stbInfo = &g_Dbs.db[0].superTbls[0]; tstrncpy(pThreadInfo->tb_prefix, g_Dbs.db[0].superTbls[0].childTblPrefix, TBNAME_PREFIX_LEN); } else { @@ -8687,10 +9139,10 @@ static void queryResult() { g_Dbs.db[0].dbName, g_Dbs.port); if (pThreadInfo->taos == NULL) { + free(pThreadInfo); errorPrint( "Failed to connect to TDengine, reason:%s\n", taos_errstr(NULL)); - free(pThreadInfo); - exit(-1); + exit(EXIT_FAILURE); } tstrncpy(pThreadInfo->filePath, g_Dbs.resultFile, MAX_FILE_NAME_LEN); From d5f38ac9c92f452386771fa7bfefcf29b2c94520 Mon Sep 17 00:00:00 2001 From: Cary Xu Date: Sat, 14 Aug 2021 08:57:03 +0800 Subject: [PATCH 047/165] do further check for blk with len 0 and SKVRow --- src/wal/src/walWrite.c | 37 ++++++++++++++++++++++++++++++++----- 1 file changed, 32 insertions(+), 5 deletions(-) diff --git a/src/wal/src/walWrite.c b/src/wal/src/walWrite.c index 2dfdb84818..7523369dc2 100644 --- a/src/wal/src/walWrite.c +++ b/src/wal/src/walWrite.c @@ -309,36 +309,63 @@ static int32_t walSkipCorruptedRecord(SWal *pWal, SWalHead *pHead, int64_t tfd, } // Add SMemRowType ahead of SDataRow static void expandSubmitBlk(SSubmitBlk *pDest, SSubmitBlk *pSrc, int32_t *lenExpand) { + // copy the header firstly memcpy(pDest, pSrc, sizeof(SSubmitBlk)); - int nRows = htons(pSrc->numOfRows); - if (nRows <= 0) { + + int32_t nRows = htons(pDest->numOfRows); + int32_t dataLen = htonl(pDest->dataLen); + + if ((nRows <= 0) || (dataLen <= 0)) { return; } + char *pDestData = pDest->data; char *pSrcData = pSrc->data; - for (int i = 0; i < nRows; ++i) { + for (int32_t i = 0; i < nRows; ++i) { memRowSetType(pDestData, SMEM_ROW_DATA); memcpy(memRowDataBody(pDestData), pSrcData, dataRowLen(pSrcData)); pDestData = POINTER_SHIFT(pDestData, memRowTLen(pDestData)); pSrcData = POINTER_SHIFT(pSrcData, dataRowLen(pSrcData)); ++(*lenExpand); } - int32_t dataLen = htonl(pDest->dataLen); pDest->dataLen = htonl(dataLen + nRows * sizeof(uint8_t)); } +// Check SDataRow by comparing the SDataRow len and SSubmitBlk dataLen static bool walIsSDataRow(void *pBlkData, int nRows, int32_t dataLen) { - int32_t len = 0; + if ((nRows <= 0) || (dataLen <= 0)) { + return true; + } + int32_t len = 0, kvLen = 0; for (int i = 0; i < nRows; ++i) { len += dataRowLen(pBlkData); if (len > dataLen) { return false; } + + /** + * For SDataRow between version [2.1.5.0 and 2.1.6.X], it would never conflict. 
+ * For SKVRow between version [2.1.5.0 and 2.1.6.X], it may conflict in below scenario + * - with 1st type byte 0x01 and sversion 0x0101(257), thus do further check + */ + if (dataRowLen(pBlkData) == 257) { + SMemRow memRow = pBlkData; + SKVRow kvRow = memRowKvBody(memRow); + int nCols = kvRowNCols(kvRow); + uint16_t calcTsOffset = (uint16_t)(TD_MEM_ROW_KV_HEAD_SIZE + sizeof(SColIdx) * nCols); + uint16_t realTsOffset = (kvRowColIdx(kvRow))->offset; + if (calcTsOffset == realTsOffset) { + kvLen += memRowKvTLen(memRow); + } + } pBlkData = POINTER_SHIFT(pBlkData, dataRowLen(pBlkData)); } if (len != dataLen) { return false; } + if (kvLen == dataLen) { + return false; + } return true; } // for WAL SMemRow/SDataRow compatibility From 4227a8c19ca430e2fdfe840177d3c331b091bb4e Mon Sep 17 00:00:00 2001 From: Cary Xu Date: Sat, 14 Aug 2021 09:41:39 +0800 Subject: [PATCH 048/165] update header --- src/wal/src/walWrite.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/wal/src/walWrite.c b/src/wal/src/walWrite.c index 7523369dc2..9590aba224 100644 --- a/src/wal/src/walWrite.c +++ b/src/wal/src/walWrite.c @@ -352,7 +352,7 @@ static bool walIsSDataRow(void *pBlkData, int nRows, int32_t dataLen) { SMemRow memRow = pBlkData; SKVRow kvRow = memRowKvBody(memRow); int nCols = kvRowNCols(kvRow); - uint16_t calcTsOffset = (uint16_t)(TD_MEM_ROW_KV_HEAD_SIZE + sizeof(SColIdx) * nCols); + uint16_t calcTsOffset = (uint16_t)(TD_KV_ROW_HEAD_SIZE + sizeof(SColIdx) * nCols); uint16_t realTsOffset = (kvRowColIdx(kvRow))->offset; if (calcTsOffset == realTsOffset) { kvLen += memRowKvTLen(memRow); From 7ed460bd035031a5ebc84b236d5b974d705ff385 Mon Sep 17 00:00:00 2001 From: Cary Xu Date: Sat, 14 Aug 2021 11:09:14 +0800 Subject: [PATCH 049/165] code optimization --- src/wal/src/walWrite.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/wal/src/walWrite.c b/src/wal/src/walWrite.c index 9590aba224..e991bf02aa 100644 --- a/src/wal/src/walWrite.c +++ b/src/wal/src/walWrite.c @@ -388,7 +388,7 @@ static int walSMemRowCheck(SWalHead *pHead) { } pBlk = (SSubmitBlk *)POINTER_SHIFT(pBlk, sizeof(SSubmitBlk) + dataLen); } - + ASSERT(nTotalRows >= 0); SWalHead *pWalHead = (SWalHead *)calloc(sizeof(SWalHead) + pHead->len + nTotalRows * sizeof(uint8_t), 1); if (pWalHead == NULL) { return -1; @@ -544,6 +544,8 @@ static int32_t walRestoreWalFile(SWal *pWal, void *pVnode, FWalWrite writeFp, ch if (0 != walSMemRowCheck(pHead)) { wError("vgId:%d, restore wal, fileId:%" PRId64 " hver:%" PRIu64 " wver:%" PRIu64 " len:%d offset:%" PRId64, pWal->vgId, fileId, pHead->version, pWal->version, pHead->len, offset); + tfClose(tfd); + tfree(buffer); return TAOS_SYSTEM_ERROR(errno); } (*writeFp)(pVnode, pHead, TAOS_QTYPE_WAL, NULL); From 9976d7a0931a43e3ab7f2785acae24c09f6013a1 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Sat, 14 Aug 2021 11:10:36 +0800 Subject: [PATCH 050/165] Hotfix/sangshuduo/td 5702 taosdemo remove memop for master (#7359) * [TD-5702]: taosdemo remove memory operation. * [TD-5702]: taosdemo remove memory operation. * add remainderBufLen to check row data generation. * row data generation with remainder buffer length checking. 
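/*
 * Editor's note: illustrative sketch, not part of the patch.  The ambiguity
 * walIsSDataRow() resolves above: in WAL blocks written by the affected
 * versions, a row whose leading length bytes read as 257 (0x0101) when the
 * block is interpreted as SDataRow may in fact be an SKVRow memory row
 * (type byte 0x01), so such a row is only counted as a KV candidate when
 * the timestamp-column offset recorded in its first SColIdx matches what a
 * genuine SKVRow header would produce:
 *
 *     uint16_t calcTsOffset = (uint16_t)(TD_KV_ROW_HEAD_SIZE +
 *                                        sizeof(SColIdx) * nCols);
 *     bool     isKvCandidate = (calcTsOffset == (kvRowColIdx(kvRow))->offset);
 *
 * The block is treated as KV-encoded (the function returns false) only when
 * the accumulated KV length of those candidates equals the block's dataLen.
 */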
* git checkout --patch hotfix/sangshuduo/TD-5702-taosdemo-remove-memop taosdemo.c Co-authored-by: Shuduo Sang --- src/kit/taosdemo/taosdemo.c | 38 +++++++++++++++++++------------------ 1 file changed, 20 insertions(+), 18 deletions(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 3b91de32b0..f3270a22f2 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -1340,8 +1340,8 @@ static char *rand_bool_str(){ static int32_t rand_bool(){ static int cursor; cursor++; - cursor = cursor % MAX_PREPARED_RAND; - return g_randint[cursor] % 2; + if (cursor > (MAX_PREPARED_RAND - 1)) cursor = 0; + return g_randint[cursor % MAX_PREPARED_RAND] % 2; } static char *rand_tinyint_str() @@ -1356,8 +1356,8 @@ static int32_t rand_tinyint() { static int cursor; cursor++; - cursor = cursor % MAX_PREPARED_RAND; - return g_randint[cursor] % 128; + if (cursor > (MAX_PREPARED_RAND - 1)) cursor = 0; + return g_randint[cursor % MAX_PREPARED_RAND] % 128; } static char *rand_smallint_str() @@ -1372,8 +1372,8 @@ static int32_t rand_smallint() { static int cursor; cursor++; - cursor = cursor % MAX_PREPARED_RAND; - return g_randint[cursor] % 32767; + if (cursor > (MAX_PREPARED_RAND - 1)) cursor = 0; + return g_randint[cursor % MAX_PREPARED_RAND] % 32767; } static char *rand_int_str() @@ -1388,8 +1388,8 @@ static int32_t rand_int() { static int cursor; cursor++; - cursor = cursor % MAX_PREPARED_RAND; - return g_randint[cursor]; + if (cursor > (MAX_PREPARED_RAND - 1)) cursor = 0; + return g_randint[cursor % MAX_PREPARED_RAND]; } static char *rand_bigint_str() @@ -1404,8 +1404,8 @@ static int64_t rand_bigint() { static int cursor; cursor++; - cursor = cursor % MAX_PREPARED_RAND; - return g_randbigint[cursor]; + if (cursor > (MAX_PREPARED_RAND - 1)) cursor = 0; + return g_randbigint[cursor % MAX_PREPARED_RAND]; } static char *rand_float_str() @@ -1421,8 +1421,8 @@ static float rand_float() { static int cursor; cursor++; - cursor = cursor % MAX_PREPARED_RAND; - return g_randfloat[cursor]; + if (cursor > (MAX_PREPARED_RAND - 1)) cursor = 0; + return g_randfloat[cursor % MAX_PREPARED_RAND]; } static char *demo_current_float_str() @@ -1437,8 +1437,9 @@ static float UNUSED_FUNC demo_current_float() { static int cursor; cursor++; - cursor = cursor % MAX_PREPARED_RAND; - return (float)(9.8 + 0.04 * (g_randint[cursor] % 10) + g_randfloat[cursor]/1000000000); + if (cursor > (MAX_PREPARED_RAND - 1)) cursor = 0; + return (float)(9.8 + 0.04 * (g_randint[cursor % MAX_PREPARED_RAND] % 10) + + g_randfloat[cursor % MAX_PREPARED_RAND]/1000000000); } static char *demo_voltage_int_str() @@ -1453,8 +1454,8 @@ static int32_t UNUSED_FUNC demo_voltage_int() { static int cursor; cursor++; - cursor = cursor % MAX_PREPARED_RAND; - return 215 + g_randint[cursor] % 10; + if (cursor > (MAX_PREPARED_RAND - 1)) cursor = 0; + return 215 + g_randint[cursor % MAX_PREPARED_RAND] % 10; } static char *demo_phase_float_str() { @@ -1467,8 +1468,9 @@ static char *demo_phase_float_str() { static float UNUSED_FUNC demo_phase_float(){ static int cursor; cursor++; - cursor = cursor % MAX_PREPARED_RAND; - return (float)((115 + g_randint[cursor] % 10 + g_randfloat[cursor]/1000000000)/360); + if (cursor > (MAX_PREPARED_RAND - 1)) cursor = 0; + return (float)((115 + g_randint[cursor % MAX_PREPARED_RAND] % 10 + + g_randfloat[cursor % MAX_PREPARED_RAND]/1000000000)/360); } #if 0 From 9161244c22aa9584e3690c7158ac1ccce54e0d64 Mon Sep 17 00:00:00 2001 From: Linhe Huo Date: Sat, 14 Aug 2021 12:04:12 +0800 Subject: [PATCH 051/165] 
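/*
 * Editor's note: illustrative sketch, not part of the patch.  Every rand_*()
 * helper changed above now follows the same guard: the static cursor is
 * reset once it walks past the end of the pre-generated pool, and the array
 * index is additionally taken modulo MAX_PREPARED_RAND, so the read stays
 * inside g_randint[] even if the unsynchronized counter is bumped by several
 * insert threads at once (a likely motivation, though the commit message
 * does not spell it out).
 */
static int32_t nextPreparedRandSketch(void)
{
    static int cursor;
    cursor++;
    if (cursor > (MAX_PREPARED_RAND - 1)) cursor = 0;
    return g_randint[cursor % MAX_PREPARED_RAND];
}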
[TD-6078]: fix binary/nchar null error in node.js [ci skip] (#7366) --- src/connector/nodejs/nodetaos/cinterface.js | 26 ++++++++++++++-- src/connector/nodejs/test/testnchar.js | 33 +++++++++++++++++++++ 2 files changed, 57 insertions(+), 2 deletions(-) create mode 100644 src/connector/nodejs/test/testnchar.js diff --git a/src/connector/nodejs/nodetaos/cinterface.js b/src/connector/nodejs/nodetaos/cinterface.js index 03d27e5593..5ba2739c35 100644 --- a/src/connector/nodejs/nodetaos/cinterface.js +++ b/src/connector/nodejs/nodetaos/cinterface.js @@ -109,6 +109,24 @@ function convertDouble(data, num_of_rows, nbytes = 0, offset = 0, precision = 0) return res; } +function convertBinary(data, num_of_rows, nbytes = 0, offset = 0, precision = 0) { + data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset); + let res = []; + + let currOffset = 0; + while (currOffset < data.length) { + let len = data.readIntLE(currOffset, 2); + let dataEntry = data.slice(currOffset + 2, currOffset + len + 2); //one entry in a row under a column; + if (dataEntry[0] == 255) { + res.push(null) + } else { + res.push(dataEntry.toString("utf-8")); + } + currOffset += nbytes; + } + return res; +} + function convertNchar(data, num_of_rows, nbytes = 0, offset = 0, precision = 0) { data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset); let res = []; @@ -117,7 +135,11 @@ function convertNchar(data, num_of_rows, nbytes = 0, offset = 0, precision = 0) while (currOffset < data.length) { let len = data.readIntLE(currOffset, 2); let dataEntry = data.slice(currOffset + 2, currOffset + len + 2); //one entry in a row under a column; - res.push(dataEntry.toString("utf-8")); + if (dataEntry[0] == 255 && dataEntry[1] == 255) { + res.push(null) + } else { + res.push(dataEntry.toString("utf-8")); + } currOffset += nbytes; } return res; @@ -132,7 +154,7 @@ let convertFunctions = { [FieldTypes.C_BIGINT]: convertBigint, [FieldTypes.C_FLOAT]: convertFloat, [FieldTypes.C_DOUBLE]: convertDouble, - [FieldTypes.C_BINARY]: convertNchar, + [FieldTypes.C_BINARY]: convertBinary, [FieldTypes.C_TIMESTAMP]: convertTimestamp, [FieldTypes.C_NCHAR]: convertNchar } diff --git a/src/connector/nodejs/test/testnchar.js b/src/connector/nodejs/test/testnchar.js new file mode 100644 index 0000000000..68fad89c22 --- /dev/null +++ b/src/connector/nodejs/test/testnchar.js @@ -0,0 +1,33 @@ +const taos = require('../tdengine'); +var conn = taos.connect({ host: "localhost" }); +var c1 = conn.cursor(); + + +function checkData(data, row, col, expect) { + let checkdata = data[row][col]; + if (checkdata == expect) { + // console.log('check pass') + } + else { + console.log('check failed, expect ' + expect + ', but is ' + checkdata) + } +} + +c1.execute('drop database if exists testnodejsnchar') +c1.execute('create database testnodejsnchar') +c1.execute('use testnodejsnchar'); +c1.execute('create table tb (ts timestamp, value float, text binary(200))') +c1.execute("insert into tb values('2021-06-10 00:00:00', 24.7, '中文10000000000000000000000');") - +c1.execute('insert into tb values(1623254400150, 24.7, NULL);') +c1.execute('import into tb values(1623254400300, 24.7, "中文3中文10000000000000000000000中文10000000000000000000000中文10000000000000000000000中文10000000000000000000000");') +sql = 'select * from tb;' + +console.log('*******************************************') + +c1.execute(sql); +data = c1.fetchall(); +console.log(data) +//check data about insert data +checkData(data, 0, 2, '中文10000000000000000000000') +checkData(data, 1, 2, null) +checkData(data, 
2, 2, '中文3中文10000000000000000000000中文10000000000000000000000中文10000000000000000000000中文10000000000000000000000') \ No newline at end of file From f1c6cc11e64ce62ab17ca4a875dc218103e46ee6 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sat, 14 Aug 2021 13:45:45 +0800 Subject: [PATCH 052/165] [td-255] Fix error found by regression test, and rename a macro. --- src/common/src/tglobal.c | 2 +- src/util/inc/tcompare.h | 8 ++++---- src/util/src/tcompare.c | 4 ++-- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/src/common/src/tglobal.c b/src/common/src/tglobal.c index b4ccdf6e3b..d880a73e84 100644 --- a/src/common/src/tglobal.c +++ b/src/common/src/tglobal.c @@ -79,7 +79,7 @@ int32_t tsCompressMsgSize = -1; // client int32_t tsMaxSQLStringLen = TSDB_MAX_ALLOWED_SQL_LEN; -int32_t tsMaxWildCardsLen = TSDB_PATTERN_STRING_MAX_LEN; +int32_t tsMaxWildCardsLen = TSDB_PATTERN_STRING_DEFAULT_LEN; int8_t tsTscEnableRecordSql = 0; // the maximum number of results for projection query on super table that are returned from diff --git a/src/util/inc/tcompare.h b/src/util/inc/tcompare.h index 624d92e36a..84346cc79c 100644 --- a/src/util/inc/tcompare.h +++ b/src/util/inc/tcompare.h @@ -22,10 +22,10 @@ extern "C" { #include "os.h" -#define TSDB_PATTERN_MATCH 0 -#define TSDB_PATTERN_NOMATCH 1 -#define TSDB_PATTERN_NOWILDCARDMATCH 2 -#define TSDB_PATTERN_STRING_MAX_LEN 100 +#define TSDB_PATTERN_MATCH 0 +#define TSDB_PATTERN_NOMATCH 1 +#define TSDB_PATTERN_NOWILDCARDMATCH 2 +#define TSDB_PATTERN_STRING_DEFAULT_LEN 100 #define FLT_COMPAR_TOL_FACTOR 4 #define FLT_EQUAL(_x, _y) (fabs((_x) - (_y)) <= (FLT_COMPAR_TOL_FACTOR * FLT_EPSILON)) diff --git a/src/util/src/tcompare.c b/src/util/src/tcompare.c index 5461b4b689..309aa55925 100644 --- a/src/util/src/tcompare.c +++ b/src/util/src/tcompare.c @@ -358,13 +358,13 @@ int32_t compareWStrPatternComp(const void* pLeft, const void* pRight) { SPatternCompareInfo pInfo = {'%', '_'}; assert(varDataLen(pRight) <= TSDB_MAX_FIELD_LEN * TSDB_NCHAR_SIZE); - wchar_t *pattern = calloc(varDataLen(pRight) + 1, sizeof(wchar_t)); + wchar_t *pattern = calloc(varDataLen(pRight) + 1, sizeof(wchar_t)); memcpy(pattern, varDataVal(pRight), varDataLen(pRight)); - assert(varDataLen(pRight) < 128); int32_t ret = WCSPatternMatch(pattern, varDataVal(pLeft), varDataLen(pLeft)/TSDB_NCHAR_SIZE, &pInfo); free(pattern); + return (ret == TSDB_PATTERN_MATCH) ? 
0 : 1; } From 8b2b278fb02d844c830509c56126bd85f4227e13 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Sun, 15 Aug 2021 05:49:16 +0000 Subject: [PATCH 053/165] [TD-6087] merge distinct to master --- src/client/src/tscSQLParser.c | 54 ++++++++---- src/client/src/tscServer.c | 2 +- src/client/src/tscUtil.c | 4 +- src/common/inc/tglobal.h | 1 + src/common/src/tglobal.c | 13 +++ src/query/inc/qExecutor.h | 10 ++- src/query/src/qExecutor.c | 152 +++++++++++++++++++++------------- 7 files changed, 157 insertions(+), 79 deletions(-) diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index fab7e8fa3b..f25ea70f32 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -1940,20 +1940,6 @@ static void addPrimaryTsColIntoResult(SQueryInfo* pQueryInfo, SSqlCmd* pCmd) { pQueryInfo->type |= TSDB_QUERY_TYPE_PROJECTION_QUERY; } -bool isValidDistinctSql(SQueryInfo* pQueryInfo) { - if (pQueryInfo == NULL) { - return false; - } - if ((pQueryInfo->type & TSDB_QUERY_TYPE_STABLE_QUERY) != TSDB_QUERY_TYPE_STABLE_QUERY - && (pQueryInfo->type & TSDB_QUERY_TYPE_TABLE_QUERY) != TSDB_QUERY_TYPE_TABLE_QUERY) { - return false; - } - if (tscNumOfExprs(pQueryInfo) == 1){ - return true; - } - return false; -} - static bool hasNoneUserDefineExpr(SQueryInfo* pQueryInfo) { size_t numOfExprs = taosArrayGetSize(pQueryInfo->exprList); for (int32_t i = 0; i < numOfExprs; ++i) { @@ -2043,9 +2029,11 @@ int32_t validateSelectNodeList(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SArray* pS const char* msg1 = "too many items in selection clause"; const char* msg2 = "functions or others can not be mixed up"; const char* msg3 = "not support query expression"; - const char* msg4 = "only support distinct one column or tag"; + const char* msg4 = "not support distinct mixed with proj/agg func"; const char* msg5 = "invalid function name"; - const char* msg6 = "_block_dist not support subquery, only support stable/table"; + const char* msg6 = "not support distinct mixed with join"; + const char* msg7 = "not support distinct mixed with groupby"; + const char* msg8 = "not support distinct in nest query"; // too many result columns not support order by in query if (taosArrayGetSize(pSelNodeList) > TSDB_MAX_COLUMNS) { @@ -2056,18 +2044,25 @@ int32_t validateSelectNodeList(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SArray* pS pQueryInfo->colList = taosArrayInit(4, POINTER_BYTES); } + bool hasDistinct = false; + bool hasAgg = false; size_t numOfExpr = taosArrayGetSize(pSelNodeList); + int32_t distIdx = -1; for (int32_t i = 0; i < numOfExpr; ++i) { int32_t outputIndex = (int32_t)tscNumOfExprs(pQueryInfo); tSqlExprItem* pItem = taosArrayGet(pSelNodeList, i); if (hasDistinct == false) { hasDistinct = (pItem->distinct == true); + distIdx = hasDistinct ? 
i : -1; } int32_t type = pItem->pNode->type; if (type == SQL_NODE_SQLFUNCTION) { + hasAgg = true; + if (hasDistinct) break; + pItem->pNode->functionId = isValidFunction(pItem->pNode->Expr.operand.z, pItem->pNode->Expr.operand.n); if (pItem->pNode->functionId == TSDB_FUNC_BLKINFO && taosArrayGetSize(pQueryInfo->pUpstream) > 0) { @@ -2108,10 +2103,22 @@ int32_t validateSelectNodeList(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SArray* pS } } + //TODO(dengyihao), refactor as function + //handle distinct func mixed with other func if (hasDistinct == true) { - if (!isValidDistinctSql(pQueryInfo) ) { + if (distIdx != 0 || hasAgg) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4); + } + if (joinQuery) { + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg6); } + if (pQueryInfo->groupbyExpr.numOfGroupCols != 0) { + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg7); + } + if (pQueryInfo->pDownstream != NULL) { + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg8); + } + pQueryInfo->distinct = true; } @@ -5512,6 +5519,7 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq const char* msg1 = "invalid column name"; const char* msg2 = "order by primary timestamp, first tag or groupby column in groupby clause allowed"; const char* msg3 = "invalid column in order by clause, only primary timestamp or first tag in groupby clause allowed"; + const char* msg4 = "orderby column must projected in subquery"; setDefaultOrderInfo(pQueryInfo); STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); @@ -5627,6 +5635,17 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq // orderby ts query on super table if (tscOrderedProjectionQueryOnSTable(pQueryInfo, 0)) { + bool found = false; + for (int32_t i = 0; i < tscNumOfExprs(pQueryInfo); ++i) { + SExprInfo* pExpr = tscExprGet(pQueryInfo, i); + if (pExpr->base.functionId == TSDB_FUNC_PRJ && pExpr->base.colInfo.colId == PRIMARYKEY_TIMESTAMP_COL_INDEX) { + found = true; + break; + } + } + if (!found && pQueryInfo->pDownstream) { + return invalidOperationMsg(pMsgBuf, msg4); + } addPrimaryTsColIntoResult(pQueryInfo, pCmd); } } @@ -8418,6 +8437,7 @@ static int32_t doValidateSubquery(SSqlNode* pSqlNode, int32_t index, SSqlObj* pS pSub->pUdfInfo = pUdfInfo; pSub->udfCopy = true; + pSub->pDownstream = pQueryInfo; int32_t code = validateSqlNode(pSql, p, pSub); if (code != TSDB_CODE_SUCCESS) { return code; diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c index e809fe3137..c0723e210a 100644 --- a/src/client/src/tscServer.c +++ b/src/client/src/tscServer.c @@ -409,7 +409,7 @@ static void doProcessMsgFromServer(SSchedMsg* pSchedMsg) { if ((TSDB_QUERY_HAS_TYPE(pQueryInfo->type, (TSDB_QUERY_TYPE_STABLE_SUBQUERY | TSDB_QUERY_TYPE_SUBQUERY | TSDB_QUERY_TYPE_TAG_FILTER_QUERY)) && !TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_PROJECTION_QUERY)) || - (TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_NEST_SUBQUERY))) { + (TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_NEST_SUBQUERY)) || (TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_STABLE_SUBQUERY) && pQueryInfo->distinct)) { // do nothing in case of super table subquery } else { pSql->retry += 1; diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index f54827911a..f0a46d3e48 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -3533,8 +3533,10 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t 
pNewQueryInfo->numOfTables = 0; pNewQueryInfo->pTableMetaInfo = NULL; pNewQueryInfo->bufLen = pQueryInfo->bufLen; - pNewQueryInfo->buf = malloc(pQueryInfo->bufLen); + + + pNewQueryInfo->distinct = pQueryInfo->distinct; if (pNewQueryInfo->buf == NULL) { terrno = TSDB_CODE_TSC_OUT_OF_MEMORY; goto _error; diff --git a/src/common/inc/tglobal.h b/src/common/inc/tglobal.h index 25d1c90ec5..ed35168457 100644 --- a/src/common/inc/tglobal.h +++ b/src/common/inc/tglobal.h @@ -59,6 +59,7 @@ extern char tsLocale[]; extern char tsCharset[]; // default encode string extern int8_t tsEnableCoreFile; extern int32_t tsCompressMsgSize; +extern int32_t tsMaxNumOfDistinctResults; extern char tsTempDir[]; //query buffer management diff --git a/src/common/src/tglobal.c b/src/common/src/tglobal.c index d880a73e84..e4ff353787 100644 --- a/src/common/src/tglobal.c +++ b/src/common/src/tglobal.c @@ -89,6 +89,9 @@ int32_t tsMaxNumOfOrderedResults = 100000; // 10 ms for sliding time, the value will changed in case of time precision changed int32_t tsMinSlidingTime = 10; +// the maxinum number of distict query result +int32_t tsMaxNumOfDistinctResults = 1000 * 10000; + // 1 us for interval time range, changed accordingly int32_t tsMinIntervalTime = 1; @@ -546,6 +549,16 @@ static void doInitGlobalConfig(void) { cfg.unitType = TAOS_CFG_UTYPE_NONE; taosInitConfigOption(cfg); + cfg.option = "maxNumOfDistinctRes"; + cfg.ptr = &tsMaxNumOfDistinctResults; + cfg.valType = TAOS_CFG_VTYPE_INT32; + cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW | TSDB_CFG_CTYPE_B_CLIENT; + cfg.minValue = 10*10000; + cfg.maxValue = 10000*10000; + cfg.ptrLength = 0; + cfg.unitType = TAOS_CFG_UTYPE_NONE; + taosInitConfigOption(cfg); + cfg.option = "numOfMnodes"; cfg.ptr = &tsNumOfMnodes; cfg.valType = TAOS_CFG_VTYPE_INT32; diff --git a/src/query/inc/qExecutor.h b/src/query/inc/qExecutor.h index d30971ab47..e8b7855769 100644 --- a/src/query/inc/qExecutor.h +++ b/src/query/inc/qExecutor.h @@ -510,13 +510,21 @@ typedef struct SStateWindowOperatorInfo { bool reptScan; } SStateWindowOperatorInfo ; +typedef struct SDistinctDataInfo { + int32_t index; + int32_t type; + int32_t bytes; +} SDistinctDataInfo; + typedef struct SDistinctOperatorInfo { SHashObj *pSet; SSDataBlock *pRes; bool recordNullVal; //has already record the null value, no need to try again int64_t threshold; int64_t outputCapacity; - int32_t colIndex; + int32_t totalBytes; + char* buf; + SArray* pDistinctDataInfo; } SDistinctOperatorInfo; struct SGlobalMerger; diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index bae62f1c82..cf77820b1f 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -44,6 +44,10 @@ #define SDATA_BLOCK_INITIALIZER (SDataBlockInfo) {{0}, 0} +#define MULTI_KEY_DELIM "-" + +#define HASH_CAPACITY_LIMIT 10000000 + #define TIME_WINDOW_COPY(_dst, _src) do {\ (_dst).skey = (_src).skey;\ (_dst).ekey = (_src).ekey;\ @@ -3581,6 +3585,7 @@ void setDefaultOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, SOptrBasicInfo *pInfo, i SResultRowInfo* pResultRowInfo = &pInfo->resultRowInfo; int64_t tid = 0; + pRuntimeEnv->keyBuf = realloc(pRuntimeEnv->keyBuf, sizeof(tid) + sizeof(int64_t) + POINTER_BYTES); SResultRow* pRow = doSetResultOutBufByKey(pRuntimeEnv, pResultRowInfo, tid, (char *)&tid, sizeof(tid), true, uid); for (int32_t i = 0; i < pDataBlock->info.numOfCols; ++i) { @@ -6534,6 +6539,8 @@ static void destroyConditionOperatorInfo(void* param, int32_t numOfOutput) { static void destroyDistinctOperatorInfo(void* param, int32_t 
numOfOutput) { SDistinctOperatorInfo* pInfo = (SDistinctOperatorInfo*) param; taosHashCleanup(pInfo->pSet); + tfree(pInfo->buf); + taosArrayDestroy(pInfo->pDistinctDataInfo); pInfo->pRes = destroyOutputBuf(pInfo->pRes); } @@ -7075,6 +7082,52 @@ SOperatorInfo* createTagScanOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SExprInf return pOperator; } +static bool initMultiDistinctInfo(SDistinctOperatorInfo *pInfo, SOperatorInfo* pOperator, SSDataBlock *pBlock) { + if (taosArrayGetSize(pInfo->pDistinctDataInfo) == pOperator->numOfOutput) { + // distinct info already inited + return true; + } + for (int i = 0; i < pOperator->numOfOutput; i++) { + pInfo->totalBytes += pOperator->pExpr[i].base.colBytes; + } + for (int i = 0; i < pOperator->numOfOutput; i++) { + int numOfBlock = (int)taosArrayGetSize(pBlock->pDataBlock); + assert(i < numOfBlock); + for (int j = 0; j < numOfBlock; j++) { + SColumnInfoData* pColDataInfo = taosArrayGet(pBlock->pDataBlock, j); + if (pColDataInfo->info.colId == pOperator->pExpr[i].base.resColId) { + SDistinctDataInfo item = {.index = j, .type = pColDataInfo->info.type, .bytes = pColDataInfo->info.bytes}; + taosArrayInsert(pInfo->pDistinctDataInfo, i, &item); + } + } + } + pInfo->totalBytes += (int32_t)strlen(MULTI_KEY_DELIM) * (pOperator->numOfOutput); + pInfo->buf = calloc(1, pInfo->totalBytes); + return taosArrayGetSize(pInfo->pDistinctDataInfo) == pOperator->numOfOutput ? true : false; +} +static void buildMultiDistinctKey(SDistinctOperatorInfo *pInfo, SSDataBlock *pBlock, int32_t rowId) { + char *p = pInfo->buf; + memset(p, 0, pInfo->totalBytes); + + for (int i = 0; i < taosArrayGetSize(pInfo->pDistinctDataInfo); i++) { + SDistinctDataInfo* pDistDataInfo = (SDistinctDataInfo *)taosArrayGet(pInfo->pDistinctDataInfo, i); + SColumnInfoData* pColDataInfo = taosArrayGet(pBlock->pDataBlock, pDistDataInfo->index); + char *val = ((char *)pColDataInfo->pData) + pColDataInfo->info.bytes * rowId; + if (isNull(val, pDistDataInfo->type)) { + p += pDistDataInfo->bytes; + continue; + } + if (IS_VAR_DATA_TYPE(pDistDataInfo->type)) { + memcpy(p, varDataVal(val), varDataLen(val)); + p += varDataLen(val); + } else { + memcpy(p, val, pDistDataInfo->bytes); + p += pDistDataInfo->bytes; + } + memcpy(p, MULTI_KEY_DELIM, strlen(MULTI_KEY_DELIM)); + p += strlen(MULTI_KEY_DELIM); + } +} static SSDataBlock* hashDistinct(void* param, bool* newgroup) { SOperatorInfo* pOperator = (SOperatorInfo*) param; @@ -7082,11 +7135,9 @@ static SSDataBlock* hashDistinct(void* param, bool* newgroup) { return NULL; } - SDistinctOperatorInfo* pInfo = pOperator->info; SSDataBlock* pRes = pInfo->pRes; - pRes->info.rows = 0; SSDataBlock* pBlock = NULL; while(1) { @@ -7099,77 +7150,60 @@ static SSDataBlock* hashDistinct(void* param, bool* newgroup) { pOperator->status = OP_EXEC_DONE; break; } - if (pInfo->colIndex == -1) { - for (int i = 0; i < taosArrayGetSize(pBlock->pDataBlock); i++) { - SColumnInfoData* pColDataInfo = taosArrayGet(pBlock->pDataBlock, i); - if (pColDataInfo->info.colId == pOperator->pExpr[0].base.resColId) { - pInfo->colIndex = i; - break; - } - } - } - if (pInfo->colIndex == -1) { + if (!initMultiDistinctInfo(pInfo, pOperator, pBlock)) { setQueryStatus(pOperator->pRuntimeEnv, QUERY_COMPLETED); pOperator->status = OP_EXEC_DONE; - return NULL; - } - SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, pInfo->colIndex); - - int16_t bytes = pColInfoData->info.bytes; - int16_t type = pColInfoData->info.type; - - // ensure the output buffer size - SColumnInfoData* pResultColInfoData = 
taosArrayGet(pRes->pDataBlock, 0); - if (pRes->info.rows + pBlock->info.rows > pInfo->outputCapacity) { - int32_t newSize = pRes->info.rows + pBlock->info.rows; - char* tmp = realloc(pResultColInfoData->pData, newSize * bytes); - if (tmp == NULL) { - return NULL; - } else { - pResultColInfoData->pData = tmp; + break; + } + + // ensure result output buf + if (pRes->info.rows + pBlock->info.rows > pInfo->outputCapacity) { + int32_t newSize = pRes->info.rows + pBlock->info.rows; + for (int i = 0; i < taosArrayGetSize(pRes->pDataBlock); i++) { + SColumnInfoData* pResultColInfoData = taosArrayGet(pRes->pDataBlock, i); + SDistinctDataInfo* pDistDataInfo = taosArrayGet(pInfo->pDistinctDataInfo, i); + char* tmp = realloc(pResultColInfoData->pData, newSize * pDistDataInfo->bytes); + if (tmp == NULL) { + return NULL; + } else { + pResultColInfoData->pData = tmp; + } + } pInfo->outputCapacity = newSize; - } - } - - for(int32_t i = 0; i < pBlock->info.rows; ++i) { - char* val = ((char*)pColInfoData->pData) + bytes * i; - if (isNull(val, type)) { - continue; - } - char* p = val; - size_t keyLen = 0; - if (IS_VAR_DATA_TYPE(pOperator->pExpr->base.colType)) { - tstr* var = (tstr*)(val); - p = var->data; - keyLen = varDataLen(var); - } else { - keyLen = bytes; - } - - int dummy; - void* res = taosHashGet(pInfo->pSet, p, keyLen); - if (res == NULL) { - taosHashPut(pInfo->pSet, p, keyLen, &dummy, sizeof(dummy)); - char* start = pResultColInfoData->pData + bytes * pInfo->pRes->info.rows; - memcpy(start, val, bytes); + } + for (int32_t i = 0; i < pBlock->info.rows; i++) { + buildMultiDistinctKey(pInfo, pBlock, i); + if (taosHashGet(pInfo->pSet, pInfo->buf, pInfo->totalBytes) == NULL) { + int32_t dummy; + taosHashPut(pInfo->pSet, pInfo->buf, pInfo->totalBytes, &dummy, sizeof(dummy)); + for (int j = 0; j < taosArrayGetSize(pRes->pDataBlock); j++) { + SDistinctDataInfo* pDistDataInfo = taosArrayGet(pInfo->pDistinctDataInfo, j); // distinct meta info + SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, pDistDataInfo->index); //src + SColumnInfoData* pResultColInfoData = taosArrayGet(pRes->pDataBlock, j); // dist + char* val = ((char*)pColInfoData->pData) + pDistDataInfo->bytes * i; + char *start = pResultColInfoData->pData + pDistDataInfo->bytes * pInfo->pRes->info.rows; + memcpy(start, val, pDistDataInfo->bytes); + } pRes->info.rows += 1; - } - } + } + } if (pRes->info.rows >= pInfo->threshold) { break; } } - return (pInfo->pRes->info.rows > 0)? 
pInfo->pRes:NULL; } SOperatorInfo* createDistinctOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput) { SDistinctOperatorInfo* pInfo = calloc(1, sizeof(SDistinctOperatorInfo)); - pInfo->colIndex = -1; - pInfo->threshold = 10000000; // distinct result threshold - pInfo->outputCapacity = 4096; - pInfo->pSet = taosHashInit(64, taosGetDefaultHashFunction(pExpr->base.colType), false, HASH_NO_LOCK); + + pInfo->totalBytes = 0; + pInfo->buf = NULL; + pInfo->threshold = tsMaxNumOfDistinctResults; // distinct result threshold + pInfo->outputCapacity = 4096; + pInfo->pDistinctDataInfo = taosArrayInit(numOfOutput, sizeof(SDistinctDataInfo)); + pInfo->pSet = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK); pInfo->pRes = createOutputBuf(pExpr, numOfOutput, (int32_t) pInfo->outputCapacity); SOperatorInfo* pOperator = calloc(1, sizeof(SOperatorInfo)); From 3aa24b6b235c75627b7bf95cb0f4bfd6827bd6aa Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Sun, 15 Aug 2021 13:52:07 +0800 Subject: [PATCH 054/165] Hotfix/sangshuduo/td 3197 taosdemo coverity scan for master (#7375) * [TD-3197]: taosdemo and taosdump coverity scan issues. * exit if read sample file failed. * fix converity scan issue. * fix coverity scan issue. * fix coverity scan memory leak. * fix resource leak reported by coverity scan. * fix taosdemo coverity scan issue. Co-authored-by: Shuduo Sang --- src/kit/taosdemo/taosdemo.c | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index f3270a22f2..18f5877e09 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -5820,6 +5820,7 @@ static int32_t prepareStmtBindArrayByType( } else if (0 == strncasecmp(dataType, "INT", strlen("INT"))) { int32_t *bind_int = malloc(sizeof(int32_t)); + assert(bind_int); if (value) { *bind_int = atoi(value); @@ -5834,6 +5835,7 @@ static int32_t prepareStmtBindArrayByType( } else if (0 == strncasecmp(dataType, "BIGINT", strlen("BIGINT"))) { int64_t *bind_bigint = malloc(sizeof(int64_t)); + assert(bind_bigint); if (value) { *bind_bigint = atoll(value); @@ -5848,6 +5850,7 @@ static int32_t prepareStmtBindArrayByType( } else if (0 == strncasecmp(dataType, "FLOAT", strlen("FLOAT"))) { float *bind_float = malloc(sizeof(float)); + assert(bind_float); if (value) { *bind_float = (float)atof(value); @@ -5862,6 +5865,7 @@ static int32_t prepareStmtBindArrayByType( } else if (0 == strncasecmp(dataType, "DOUBLE", strlen("DOUBLE"))) { double *bind_double = malloc(sizeof(double)); + assert(bind_double); if (value) { *bind_double = atof(value); @@ -5876,6 +5880,7 @@ static int32_t prepareStmtBindArrayByType( } else if (0 == strncasecmp(dataType, "SMALLINT", strlen("SMALLINT"))) { int16_t *bind_smallint = malloc(sizeof(int16_t)); + assert(bind_smallint); if (value) { *bind_smallint = (int16_t)atoi(value); @@ -5890,6 +5895,7 @@ static int32_t prepareStmtBindArrayByType( } else if (0 == strncasecmp(dataType, "TINYINT", strlen("TINYINT"))) { int8_t *bind_tinyint = malloc(sizeof(int8_t)); + assert(bind_tinyint); if (value) { *bind_tinyint = (int8_t)atoi(value); @@ -5904,6 +5910,7 @@ static int32_t prepareStmtBindArrayByType( } else if (0 == strncasecmp(dataType, "BOOL", strlen("BOOL"))) { int8_t *bind_bool = malloc(sizeof(int8_t)); + assert(bind_bool); if (value) { if (strncasecmp(value, "true", 4)) { @@ -5923,6 +5930,7 @@ static int32_t prepareStmtBindArrayByType( } else if (0 == 
strncasecmp(dataType, "TIMESTAMP", strlen("TIMESTAMP"))) { int64_t *bind_ts2 = malloc(sizeof(int64_t)); + assert(bind_ts2); if (value) { if (strchr(value, ':') && strchr(value, '-')) { @@ -5937,6 +5945,7 @@ static int32_t prepareStmtBindArrayByType( if (TSDB_CODE_SUCCESS != taosParseTime( value, &tmpEpoch, strlen(value), timePrec, 0)) { + free(bind_ts2); errorPrint("Input %s, time format error!\n", value); return -1; } @@ -6261,7 +6270,7 @@ static int32_t prepareStbStmtBindTag( char *bindBuffer = calloc(1, DOUBLE_BUFF_LEN); // g_args.len_of_binary); if (bindBuffer == NULL) { errorPrint("%s() LN%d, Failed to allocate %d bind buffer\n", - __func__, __LINE__, g_args.len_of_binary); + __func__, __LINE__, DOUBLE_BUFF_LEN); return -1; } @@ -6293,7 +6302,7 @@ static int32_t prepareStbStmtBindRand( char *bindBuffer = calloc(1, DOUBLE_BUFF_LEN); // g_args.len_of_binary); if (bindBuffer == NULL) { errorPrint("%s() LN%d, Failed to allocate %d bind buffer\n", - __func__, __LINE__, g_args.len_of_binary); + __func__, __LINE__, DOUBLE_BUFF_LEN); return -1; } From 6a18e3d3d512ed6eea96f04b717abfb359139167 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Sun, 15 Aug 2021 15:45:30 +0000 Subject: [PATCH 055/165] [TD-6097] Crash occurs when the select function in the subquery is used with group by --- src/client/src/tscSQLParser.c | 9 +++------ src/client/src/tscUtil.c | 1 - 2 files changed, 3 insertions(+), 7 deletions(-) diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index c7db7b5d4b..4e5245742c 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -2643,7 +2643,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col tickPerSec /= TSDB_TICK_PER_SECOND(TSDB_TIME_PRECISION_MICRO); } else if (info.precision == TSDB_TIME_PRECISION_MICRO) { tickPerSec /= TSDB_TICK_PER_SECOND(TSDB_TIME_PRECISION_MILLI); - } + } if (tickPerSec <= 0 || tickPerSec < TSDB_TICK_PER_SECOND(info.precision)) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg10); @@ -2677,8 +2677,8 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col assert(ids.num == 1); tscColumnListInsert(pQueryInfo->colList, ids.ids[0].columnIndex, pExpr->base.uid, pSchema); } - tscInsertPrimaryTsSourceColumn(pQueryInfo, pExpr->base.uid); + return TSDB_CODE_SUCCESS; } @@ -3060,7 +3060,6 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col tscColumnListInsert(pQueryInfo->colList, index.columnIndex, uid, &s); } } - tscInsertPrimaryTsSourceColumn(pQueryInfo, pTableMetaInfo->pTableMeta->id.uid); return TSDB_CODE_SUCCESS; } @@ -8418,7 +8417,7 @@ static STableMeta* extractTempTableMetaFromSubquery(SQueryInfo* pUpstream) { n += 1; } - + info->numOfColumns = n; return meta; } @@ -8447,8 +8446,6 @@ static int32_t doValidateSubquery(SSqlNode* pSqlNode, int32_t index, SSqlObj* pS return code; } - pSub->pDownstream = pQueryInfo; - // create dummy table meta info STableMetaInfo* pTableMetaInfo1 = calloc(1, sizeof(STableMetaInfo)); if (pTableMetaInfo1 == NULL) { diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index f0a46d3e48..c2cf24520d 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -1503,7 +1503,6 @@ void tscFreeSqlObj(SSqlObj* pSql) { tscFreeSqlResult(pSql); tscResetSqlCmd(pCmd, false); - memset(pCmd->payload, 0, (size_t)pCmd->allocSize); tfree(pCmd->payload); pCmd->allocSize = 0; From d8897baa9cb3b9e8024889df16fdcbe6de697970 Mon Sep 17 00:00:00 2001 From: shenglian zhou Date: Mon, 16 
Aug 2021 08:51:02 +0800 Subject: [PATCH 056/165] [TD-5998]:_block_dist() only support tables, not subqueries --- src/client/src/tscSQLParser.c | 6 ++++++ tests/script/general/compute/block_dist.sim | 6 +++++- 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index 6df724881e..7b936487f0 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -2045,6 +2045,7 @@ int32_t validateSelectNodeList(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SArray* pS const char* msg3 = "not support query expression"; const char* msg4 = "only support distinct one column or tag"; const char* msg5 = "invalid function name"; + const char* msg6 = "_block_dist not support subquery, only support stable/table"; // too many result columns not support order by in query if (taosArrayGetSize(pSelNodeList) > TSDB_MAX_COLUMNS) { @@ -2068,6 +2069,11 @@ int32_t validateSelectNodeList(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SArray* pS int32_t type = pItem->pNode->type; if (type == SQL_NODE_SQLFUNCTION) { pItem->pNode->functionId = isValidFunction(pItem->pNode->Expr.operand.z, pItem->pNode->Expr.operand.n); + + if (pItem->pNode->functionId == TSDB_FUNC_BLKINFO && taosArrayGetSize(pQueryInfo->pUpstream) > 0) { + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg6); + } + SUdfInfo* pUdfInfo = NULL; if (pItem->pNode->functionId < 0) { pUdfInfo = isValidUdf(pQueryInfo->pUdfInfo, pItem->pNode->Expr.operand.z, pItem->pNode->Expr.operand.n); diff --git a/tests/script/general/compute/block_dist.sim b/tests/script/general/compute/block_dist.sim index 51cf903654..5343c1db28 100644 --- a/tests/script/general/compute/block_dist.sim +++ b/tests/script/general/compute/block_dist.sim @@ -84,6 +84,10 @@ if $rows != 1 then return -1 endi +print ============== TD-5998 +sql_error select _block_dist() from (select * from $nt) +sql_error select _block_dist() from (select * from $mt) + print =============== clear sql drop database $db sql show databases @@ -91,4 +95,4 @@ if $rows != 0 then return -1 endi -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode1 -s stop -x SIGINT From 43bd6c65871c498842b940386facd421b2335aac Mon Sep 17 00:00:00 2001 From: Ping Xiao Date: Mon, 16 Aug 2021 09:37:13 +0800 Subject: [PATCH 057/165] [ci skip] update daily performance script --- tests/pytest/tools/taosdemoPerformance.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/tests/pytest/tools/taosdemoPerformance.py b/tests/pytest/tools/taosdemoPerformance.py index 4a5abd49d8..1d28a2708f 100644 --- a/tests/pytest/tools/taosdemoPerformance.py +++ b/tests/pytest/tools/taosdemoPerformance.py @@ -145,26 +145,26 @@ class taosdemoPerformace: binPath = buildPath + "/build/bin/" os.system( - "%staosdemo -f %s > taosdemoperf.txt 2>&1" % + "%staosdemo -f %s > /dev/null 2>&1" % (binPath, self.generateJson())) self.createTableTime = self.getCMDOutput( - "grep 'Spent' taosdemoperf.txt | awk 'NR==1{print $2}'") + "grep 'Spent' insert_res.txt | awk 'NR==1{print $2}'") self.insertRecordsTime = self.getCMDOutput( - "grep 'Spent' taosdemoperf.txt | awk 'NR==2{print $2}'") + "grep 'Spent' insert_res.txt | awk 'NR==2{print $2}'") self.recordsPerSecond = self.getCMDOutput( - "grep 'Spent' taosdemoperf.txt | awk 'NR==2{print $16}'") + "grep 'Spent' insert_res.txt | awk 'NR==2{print $16}'") self.commitID = self.getCMDOutput("git rev-parse --short HEAD") delay = self.getCMDOutput( - "grep 'delay' 
taosdemoperf.txt | awk '{print $4}'") + "grep 'delay' insert_res.txt | awk '{print $4}'") self.avgDelay = delay[:-4] delay = self.getCMDOutput( - "grep 'delay' taosdemoperf.txt | awk '{print $6}'") + "grep 'delay' insert_res.txt | awk '{print $6}'") self.maxDelay = delay[:-4] delay = self.getCMDOutput( - "grep 'delay' taosdemoperf.txt | awk '{print $8}'") + "grep 'delay' insert_res.txt | awk '{print $8}'") self.minDelay = delay[:-3] - os.system("[ -f taosdemoperf.txt ] && rm taosdemoperf.txt") + os.system("[ -f insert_res.txt ] && rm insert_res.txt") def createTablesAndStoreData(self): cursor = self.conn2.cursor() @@ -185,7 +185,7 @@ class taosdemoPerformace: cursor.close() cursor1 = self.conn.cursor() - # cursor1.execute("drop database if exists %s" % self.insertDB) + cursor1.execute("drop database if exists %s" % self.insertDB) cursor1.close() if __name__ == '__main__': From 2db8feb7448e299e14a25f5247cf7f4942b1f4b9 Mon Sep 17 00:00:00 2001 From: wpan Date: Mon, 16 Aug 2021 13:23:54 +0800 Subject: [PATCH 058/165] fix binary interp function --- src/query/src/qAggMain.c | 34 ++++++++++++++++++++++++++++++---- src/query/src/qExecutor.c | 10 ++++++++++ 2 files changed, 40 insertions(+), 4 deletions(-) diff --git a/src/query/src/qAggMain.c b/src/query/src/qAggMain.c index 14d3f4e417..1587746587 100644 --- a/src/query/src/qAggMain.c +++ b/src/query/src/qAggMain.c @@ -3790,11 +3790,37 @@ static void interp_function_impl(SQLFunctionCtx *pCtx) { static void interp_function(SQLFunctionCtx *pCtx) { // at this point, the value is existed, return directly if (pCtx->size > 0) { - // impose the timestamp check - TSKEY key = GET_TS_DATA(pCtx, 0); + bool ascQuery = (pCtx->order == TSDB_ORDER_ASC); + TSKEY key; + char *pData; + int32_t typedData = 0; + + if (ascQuery) { + key = GET_TS_DATA(pCtx, 0); + pData = GET_INPUT_DATA(pCtx, 0); + } else { + key = pCtx->start.key; + if (key == INT64_MIN) { + key = GET_TS_DATA(pCtx, 0); + pData = GET_INPUT_DATA(pCtx, 0); + } else { + if (!(IS_NUMERIC_TYPE(pCtx->inputType) || pCtx->inputType == TSDB_DATA_TYPE_BOOL)) { + pData = pCtx->start.ptr; + } else { + typedData = 1; + pData = (char *)&pCtx->start.val; + } + } + } + + //if (key == pCtx->startTs && (ascQuery || !(IS_NUMERIC_TYPE(pCtx->inputType) || pCtx->inputType == TSDB_DATA_TYPE_BOOL))) { if (key == pCtx->startTs) { - char *pData = GET_INPUT_DATA(pCtx, 0); - assignVal(pCtx->pOutput, pData, pCtx->inputBytes, pCtx->inputType); + if (typedData) { + SET_TYPED_DATA(pCtx->pOutput, pCtx->inputType, *(double *)pData); + } else { + assignVal(pCtx->pOutput, pData, pCtx->inputBytes, pCtx->inputType); + } + SET_VAL(pCtx, 1, 1); } else { interp_function_impl(pCtx); diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index bae62f1c82..a6a85b370e 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -1327,6 +1327,16 @@ void doTimeWindowInterpolation(SOperatorInfo* pOperator, SOptrBasicInfo* pInfo, pCtx[k].end.key = curTs; pCtx[k].end.val = v2; + + if (pColInfo->info.type == TSDB_DATA_TYPE_BINARY || pColInfo->info.type == TSDB_DATA_TYPE_NCHAR) { + if (prevRowIndex == -1) { + pCtx[k].start.ptr = (char *)pRuntimeEnv->prevRow[index]; + } else { + pCtx[k].start.ptr = (char *)pColInfo->pData + prevRowIndex * pColInfo->info.bytes; + } + + pCtx[k].end.ptr = (char *)pColInfo->pData + curRowIndex * pColInfo->info.bytes; + } } } else if (functionId == TSDB_FUNC_TWA) { SPoint point1 = (SPoint){.key = prevTs, .val = &v1}; From 7e56ba09e9624a2530efd93ee59efc43c9fd4047 Mon Sep 17 00:00:00 2001 From: 
liuyq-617 Date: Mon, 16 Aug 2021 13:26:59 +0800 Subject: [PATCH 059/165] obtain coredump execfn in script[ci skip] --- tests/test-all.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/test-all.sh b/tests/test-all.sh index 6e7963e787..8dd1ade9be 100755 --- a/tests/test-all.sh +++ b/tests/test-all.sh @@ -24,9 +24,9 @@ function stopTaosd { function dohavecore(){ corefile=`find $corepath -mmin 1` - core_file=`echo $corefile|cut -d " " -f2` - proc=`echo $corefile|cut -d "_" -f3` if [ -n "$corefile" ];then + core_file=`echo $corefile|cut -d " " -f2` + proc=`file $core_file|awk -F "execfn:" '/execfn:/{print $2}'|tr -d \' |awk '{print $1}'|tr -d \,` echo 'taosd or taos has generated core' rm case.log if [[ "$tests_dir" == *"$IN_TDINTERNAL"* ]] && [[ $1 == 1 ]]; then @@ -46,7 +46,7 @@ function dohavecore(){ fi fi if [[ $1 == 1 ]];then - echo '\n'|gdb /usr/local/taos/bin/$proc $core_file -ex "bt 10" -ex quit + echo '\n'|gdb $proc $core_file -ex "bt 10" -ex quit exit 8 fi fi From 7759df3b0b5389857ce5461d4d804c2e7f70c6fc Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Mon, 16 Aug 2021 15:29:38 +0800 Subject: [PATCH 060/165] [TD-6054]: Filtered by tag with nchar value not as expected --- src/util/src/tcompare.c | 22 ++++++++- tests/pytest/query/filterWithinMultiNchar.py | 51 ++++++++++++++++++++ 2 files changed, 71 insertions(+), 2 deletions(-) create mode 100644 tests/pytest/query/filterWithinMultiNchar.py diff --git a/src/util/src/tcompare.c b/src/util/src/tcompare.c index 309aa55925..921da82d44 100644 --- a/src/util/src/tcompare.c +++ b/src/util/src/tcompare.c @@ -199,7 +199,16 @@ int32_t compareLenPrefixedWStr(const void *pLeft, const void *pRight) { if (len1 != len2) { return len1 > len2? 1:-1; } else { - int32_t ret = wcsncmp(varDataVal(pLeft), varDataVal(pRight), len1/TSDB_NCHAR_SIZE); + char *pLeftTerm = (char *)tcalloc(len1 + 1, sizeof(char)); + char *pRightTerm = (char *)tcalloc(len1 + 1, sizeof(char)); + memcpy(pLeftTerm, varDataVal(pLeft), len1); + memcpy(pRightTerm, varDataVal(pRight), len2); + + int32_t ret = wcsncmp((wchar_t*) pLeftTerm, (wchar_t*) pRightTerm, len1/TSDB_NCHAR_SIZE); + + tfree(pLeftTerm); + tfree(pRightTerm); + if (ret == 0) { return 0; } else { @@ -510,7 +519,16 @@ int32_t doCompare(const char* f1, const char* f2, int32_t type, size_t size) { return t1->len > t2->len? 1:-1; } - int32_t ret = wcsncmp((wchar_t*) t1->data, (wchar_t*) t2->data, t2->len/TSDB_NCHAR_SIZE); + char *t1_term = (char *)tcalloc(t1->len + 1, sizeof(char)); + char *t2_term = (char *)tcalloc(t2->len + 1, sizeof(char)); + memcpy(t1_term, t1->data, t1->len); + memcpy(t2_term, t2->data, t2->len); + + int32_t ret = wcsncmp((wchar_t*) t1_term, (wchar_t*) t2_term, t2->len/TSDB_NCHAR_SIZE); + + tfree(t1_term); + tfree(t2_term); + if (ret == 0) { return ret; } diff --git a/tests/pytest/query/filterWithinMultiNchar.py b/tests/pytest/query/filterWithinMultiNchar.py new file mode 100644 index 0000000000..15fcf4e24b --- /dev/null +++ b/tests/pytest/query/filterWithinMultiNchar.py @@ -0,0 +1,51 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import taos +from util.log import tdLog +from util.cases import tdCases +from util.sql import tdSql + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def run(self): + tdSql.prepare() + + print("==============step1") + tdSql.execute( + "create stable t6 (ts timestamp,val int,flow nchar(36)) tags(dev nchar(36),dev1 nchar(36),dev2 nchar(36))") + tdSql.execute("insert into t6004 using t6 (dev,dev1,dev2) tags ('b50c79bc-b102-48e6-bda1-4212263e46d0','b50c79bc-b102-48e6-bda1-4212263e46d0', 'b50c79bc-b102-48e6-bda1-4212263e46d0') values(now,1,'b50c79bc-b102-48e6-bda1-4212263e46d0')") + + + print("==============step2") + tdSql.query("select * from t6 where dev='b50c79bc-b102-48e6-bda1-4212263e46d0'") + tdSql.checkRows(1) + + tdSql.query("select * from t6 where dev1='b50c79bc-b102-48e6-bda1-4212263e46d0'") + tdSql.checkRows(1) + + tdSql.query("select * from t6 where dev2='b50c79bc-b102-48e6-bda1-4212263e46d0'") + tdSql.checkRows(1) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) From 28a50c47b96dcd3dcc684099050553349b443791 Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Mon, 16 Aug 2021 15:29:38 +0800 Subject: [PATCH 061/165] [TD-6054]: Filtered by tag with nchar value not as expected --- src/util/src/tcompare.c | 22 ++++++++- tests/pytest/query/filterWithinMultiNchar.py | 51 ++++++++++++++++++++ 2 files changed, 71 insertions(+), 2 deletions(-) create mode 100644 tests/pytest/query/filterWithinMultiNchar.py diff --git a/src/util/src/tcompare.c b/src/util/src/tcompare.c index 354e7899c2..3619dad83b 100644 --- a/src/util/src/tcompare.c +++ b/src/util/src/tcompare.c @@ -129,7 +129,16 @@ int32_t compareLenPrefixedWStr(const void *pLeft, const void *pRight) { if (len1 != len2) { return len1 > len2? 1:-1; } else { - int32_t ret = wcsncmp(varDataVal(pLeft), varDataVal(pRight), len1/TSDB_NCHAR_SIZE); + char *pLeftTerm = (char *)tcalloc(len1 + 1, sizeof(char)); + char *pRightTerm = (char *)tcalloc(len1 + 1, sizeof(char)); + memcpy(pLeftTerm, varDataVal(pLeft), len1); + memcpy(pRightTerm, varDataVal(pRight), len2); + + int32_t ret = wcsncmp((wchar_t*) pLeftTerm, (wchar_t*) pRightTerm, len1/TSDB_NCHAR_SIZE); + + tfree(pLeftTerm); + tfree(pRightTerm); + if (ret == 0) { return 0; } else { @@ -410,7 +419,16 @@ int32_t doCompare(const char* f1, const char* f2, int32_t type, size_t size) { return t1->len > t2->len? 
1:-1; } - int32_t ret = wcsncmp((wchar_t*) t1->data, (wchar_t*) t2->data, t2->len/TSDB_NCHAR_SIZE); + char *t1_term = (char *)tcalloc(t1->len + 1, sizeof(char)); + char *t2_term = (char *)tcalloc(t2->len + 1, sizeof(char)); + memcpy(t1_term, t1->data, t1->len); + memcpy(t2_term, t2->data, t2->len); + + int32_t ret = wcsncmp((wchar_t*) t1_term, (wchar_t*) t2_term, t2->len/TSDB_NCHAR_SIZE); + + tfree(t1_term); + tfree(t2_term); + if (ret == 0) { return ret; } diff --git a/tests/pytest/query/filterWithinMultiNchar.py b/tests/pytest/query/filterWithinMultiNchar.py new file mode 100644 index 0000000000..15fcf4e24b --- /dev/null +++ b/tests/pytest/query/filterWithinMultiNchar.py @@ -0,0 +1,51 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import taos +from util.log import tdLog +from util.cases import tdCases +from util.sql import tdSql + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def run(self): + tdSql.prepare() + + print("==============step1") + tdSql.execute( + "create stable t6 (ts timestamp,val int,flow nchar(36)) tags(dev nchar(36),dev1 nchar(36),dev2 nchar(36))") + tdSql.execute("insert into t6004 using t6 (dev,dev1,dev2) tags ('b50c79bc-b102-48e6-bda1-4212263e46d0','b50c79bc-b102-48e6-bda1-4212263e46d0', 'b50c79bc-b102-48e6-bda1-4212263e46d0') values(now,1,'b50c79bc-b102-48e6-bda1-4212263e46d0')") + + + print("==============step2") + tdSql.query("select * from t6 where dev='b50c79bc-b102-48e6-bda1-4212263e46d0'") + tdSql.checkRows(1) + + tdSql.query("select * from t6 where dev1='b50c79bc-b102-48e6-bda1-4212263e46d0'") + tdSql.checkRows(1) + + tdSql.query("select * from t6 where dev2='b50c79bc-b102-48e6-bda1-4212263e46d0'") + tdSql.checkRows(1) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) From 010d416f4a7ad5be907fa8672653f6a53414d99f Mon Sep 17 00:00:00 2001 From: wpan Date: Mon, 16 Aug 2021 16:07:43 +0800 Subject: [PATCH 062/165] fix crash issue --- src/client/src/tscUtil.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index f54827911a..a193921af2 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -3831,6 +3831,9 @@ void executeQuery(SSqlObj* pSql, SQueryInfo* pQueryInfo) { pNew->sqlstr = strdup(pSql->sqlstr); pNew->fp = tscSubqueryCompleteCallback; pNew->maxRetry = pSql->maxRetry; + + pNew->cmd.resColumnId = TSDB_RES_COL_ID; + tsem_init(&pNew->rspSem, 0, 0); SRetrieveSupport* ps = calloc(1, sizeof(SRetrieveSupport)); // todo use object id From d7eb5a22fec0e02884a8d1b93cb2f68a9dc7fa18 Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Mon, 16 Aug 2021 16:42:27 +0800 Subject: [PATCH 063/165] [TD-6035]add connector test in CI --- Jenkinsfile | 21 ++++++++++++++++++++- tests/gotest/case001/case001.sh | 5 +++-- tests/test-all.sh | 8 ++++---- 3 files changed, 27 insertions(+), 7 deletions(-) diff --git a/Jenkinsfile 
b/Jenkinsfile index 0eb9a1aa95..882d224407 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -160,7 +160,6 @@ pipeline { skipbuild='2' skipbuild=sh(script: "git log -2 --pretty=%B | fgrep -ie '[skip ci]' -e '[ci skip]' && echo 1 || echo 2", returnStdout:true) println skipbuild - } sh''' rm -rf ${WORKSPACE}.tes @@ -225,6 +224,26 @@ pipeline { steps { timeout(time: 55, unit: 'MINUTES'){ pre_test() + sh ''' + rm -rf /var/lib/taos/* + rm -rf /var/log/taos/* + nohup taosd >/dev/null & + sleep 10 + ''' + sh ''' + cd ${WKC}/tests/examples/nodejs + npm install td2.0-connector > /dev/null 2>&1 + node nodejsChecker.js host=localhost + ''' + sh ''' + cd ${WKC}/tests/examples/C#/taosdemo + mcs -out:taosdemo *.cs > /dev/null 2>&1 + echo '' |./taosdemo + ''' + sh ''' + cd ${WKC}/tests/gotest + bash batchtest.sh + ''' sh ''' cd ${WKC}/tests ./test-all.sh b1fq diff --git a/tests/gotest/case001/case001.sh b/tests/gotest/case001/case001.sh index 831e9f83ac..94e5bb44e0 100644 --- a/tests/gotest/case001/case001.sh +++ b/tests/gotest/case001/case001.sh @@ -15,7 +15,8 @@ script_dir="$(dirname $(readlink -f $0))" ###### step 3: start build cd $script_dir rm -f go.* -go mod init demotest -go build +go mod init demotest > /dev/null 2>&1 +go mod tidy > /dev/null 2>&1 +go build > /dev/null 2>&1 sleep 1s ./demotest -h $1 -p $2 diff --git a/tests/test-all.sh b/tests/test-all.sh index 6e7963e787..c18951b8ac 100755 --- a/tests/test-all.sh +++ b/tests/test-all.sh @@ -12,7 +12,7 @@ IN_TDINTERNAL="community" function stopTaosd { echo "Stop taosd" - sudo systemctl stop taosd + sudo systemctl stop taosd || echo 'no sudo or systemctl or stop fail' PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'` while [ -n "$PID" ] do @@ -24,9 +24,9 @@ function stopTaosd { function dohavecore(){ corefile=`find $corepath -mmin 1` - core_file=`echo $corefile|cut -d " " -f2` - proc=`echo $corefile|cut -d "_" -f3` if [ -n "$corefile" ];then + core_file=`echo $corefile|cut -d " " -f2` + proc=`file $core_file|awk -F "execfn:" '/execfn:/{print $2}'|tr -d \' |awk '{print $1}'|tr -d \,` echo 'taosd or taos has generated core' rm case.log if [[ "$tests_dir" == *"$IN_TDINTERNAL"* ]] && [[ $1 == 1 ]]; then @@ -46,7 +46,7 @@ function dohavecore(){ fi fi if [[ $1 == 1 ]];then - echo '\n'|gdb /usr/local/taos/bin/$proc $core_file -ex "bt 10" -ex quit + echo '\n'|gdb $proc $core_file -ex "bt 10" -ex quit exit 8 fi fi From 01f42064f26d4c4a4c2d470d1b43f1bbf893f5f8 Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Mon, 16 Aug 2021 16:58:43 +0800 Subject: [PATCH 064/165] make ci happy From 3f62a65b67c7723f6aee41eeb00917a342970670 Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Mon, 16 Aug 2021 17:06:05 +0800 Subject: [PATCH 065/165] test --- tests/test-all.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test-all.sh b/tests/test-all.sh index c18951b8ac..2578030165 100755 --- a/tests/test-all.sh +++ b/tests/test-all.sh @@ -12,7 +12,7 @@ IN_TDINTERNAL="community" function stopTaosd { echo "Stop taosd" - sudo systemctl stop taosd || echo 'no sudo or systemctl or stop fail' + sudo systemctl stop taosd || echo 'no sudo or systemctl or stop fail ' PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'` while [ -n "$PID" ] do From a6011d55a20fe9542480ccb66635a01d4d8f214c Mon Sep 17 00:00:00 2001 From: tomchon Date: Mon, 16 Aug 2021 18:03:17 +0800 Subject: [PATCH 066/165] change version number --- cmake/version.inc | 2 +- snap/snapcraft.yaml | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/cmake/version.inc 
b/cmake/version.inc index d34080aa43..84ef060196 100755 --- a/cmake/version.inc +++ b/cmake/version.inc @@ -4,7 +4,7 @@ PROJECT(TDengine) IF (DEFINED VERNUMBER) SET(TD_VER_NUMBER ${VERNUMBER}) ELSE () - SET(TD_VER_NUMBER "2.0.20.12") + SET(TD_VER_NUMBER "2.0.20.13") ENDIF () IF (DEFINED VERCOMPATIBLE) diff --git a/snap/snapcraft.yaml b/snap/snapcraft.yaml index 7538765d2c..b08ae53ecd 100644 --- a/snap/snapcraft.yaml +++ b/snap/snapcraft.yaml @@ -1,6 +1,6 @@ name: tdengine base: core18 -version: '2.0.20.12' +version: '2.0.20.13' icon: snap/gui/t-dengine.svg summary: an open-source big data platform designed and optimized for IoT. description: | @@ -72,7 +72,7 @@ parts: - usr/bin/taosd - usr/bin/taos - usr/bin/taosdemo - - usr/lib/libtaos.so.2.0.20.12 + - usr/lib/libtaos.so.2.0.20.13 - usr/lib/libtaos.so.1 - usr/lib/libtaos.so From 296376df1fb92f5bbcd137d759c00689d56215e4 Mon Sep 17 00:00:00 2001 From: shenglian zhou Date: Mon, 16 Aug 2021 18:36:09 +0800 Subject: [PATCH 067/165] [TD-6086]:num of tags taken from output cols instead of groupby expr --- src/client/src/tscLocalMerge.c | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/src/client/src/tscLocalMerge.c b/src/client/src/tscLocalMerge.c index 97d52cc684..5fe020bb33 100644 --- a/src/client/src/tscLocalMerge.c +++ b/src/client/src/tscLocalMerge.c @@ -396,7 +396,16 @@ void tscCreateLocalMerger(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrde if (pQueryInfo->fillType != TSDB_FILL_NONE) { SFillColInfo* pFillCol = createFillColInfo(pQueryInfo); - pReducer->pFillInfo = taosCreateFillInfo(pQueryInfo->order.order, revisedSTime, pQueryInfo->groupbyExpr.numOfGroupCols, + // support sql like: select selective_function, tag1... where ... group by tag3... fill(not fill none) + // the group by expr columns and select tags are different + int32_t numOfCols = tscNumOfFields(pQueryInfo); + int32_t numOfTags = 0; + for (int32_t i = 0; i < numOfCols; ++i) { + if (TSDB_COL_IS_TAG(pFillCol[i].flag)) { + numOfTags++; + } + } + pReducer->pFillInfo = taosCreateFillInfo(pQueryInfo->order.order, revisedSTime, numOfTags, 4096, (int32_t)pQueryInfo->fieldsInfo.numOfOutput, pQueryInfo->interval.sliding, pQueryInfo->interval.slidingUnit, tinfo.precision, pQueryInfo->fillType, pFillCol, pSql); } From abff364c1e204c9040447f53179023f3033e3178 Mon Sep 17 00:00:00 2001 From: tomchon Date: Mon, 16 Aug 2021 19:57:56 +0800 Subject: [PATCH 068/165] change version number --- cmake/version.inc | 2 +- snap/snapcraft.yaml | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/cmake/version.inc b/cmake/version.inc index ffceecf492..12dc2b1b16 100755 --- a/cmake/version.inc +++ b/cmake/version.inc @@ -4,7 +4,7 @@ PROJECT(TDengine) IF (DEFINED VERNUMBER) SET(TD_VER_NUMBER ${VERNUMBER}) ELSE () - SET(TD_VER_NUMBER "2.1.6.0") + SET(TD_VER_NUMBER "2.1.7.0") ENDIF () IF (DEFINED VERCOMPATIBLE) diff --git a/snap/snapcraft.yaml b/snap/snapcraft.yaml index c04fa3298b..260af6f21b 100644 --- a/snap/snapcraft.yaml +++ b/snap/snapcraft.yaml @@ -1,6 +1,6 @@ name: tdengine base: core18 -version: '2.1.6.0' +version: '2.1.7.0' icon: snap/gui/t-dengine.svg summary: an open-source big data platform designed and optimized for IoT. 
description: | @@ -72,7 +72,7 @@ parts: - usr/bin/taosd - usr/bin/taos - usr/bin/taosdemo - - usr/lib/libtaos.so.2.1.6.0 + - usr/lib/libtaos.so.2.1.7.0 - usr/lib/libtaos.so.1 - usr/lib/libtaos.so From 2bd11ebfd66bf9525124b4c316674b365b2a1c11 Mon Sep 17 00:00:00 2001 From: shenglian zhou Date: Tue, 17 Aug 2021 09:12:16 +0800 Subject: [PATCH 069/165] [TD-6086]:add test case --- tests/script/general/parser/function.sim | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/tests/script/general/parser/function.sim b/tests/script/general/parser/function.sim index 65058333fb..c00f6fc898 100644 --- a/tests/script/general/parser/function.sim +++ b/tests/script/general/parser/function.sim @@ -310,6 +310,12 @@ if $rows != 6 then return -1 endi +print =============================> TD-6086 +sql create stable td6086st(ts timestamp, d double) tags(t nchar(50)); +sql create table td6086ct1 using td6086st tags("ct1"); +sql create table td6086ct2 using td6086st tags("ct2"); +sql SELECT LAST(d),t FROM td6086st WHERE tbname in ('td6086ct1', 'td6086ct2') and ts>="2019-07-30 00:00:00" and ts<="2021-08-31 00:00:00" interval(1800s) fill(prev) GROUP BY tbname; + print ==================> td-2624 sql create table tm2(ts timestamp, k int, b binary(12)); sql insert into tm2 values('2011-01-02 18:42:45.326', -1,'abc'); From 497159f3aa53b605585378fc4d5804a573cf1a4f Mon Sep 17 00:00:00 2001 From: wpan Date: Tue, 17 Aug 2021 09:40:56 +0800 Subject: [PATCH 070/165] fix subquery reparse sql issue --- src/client/src/tscSubquery.c | 13 +++++++++---- src/client/src/tscUtil.c | 18 ++++++++++++------ 2 files changed, 21 insertions(+), 10 deletions(-) diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c index 553d2c0804..04b10caa4e 100644 --- a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -2735,18 +2735,23 @@ void tscHandleSubqueryError(SRetrieveSupport *trsupport, SSqlObj *pSql, int numO tscDebug("0x%"PRIx64" retry parse sql and send query, prev error: %s, retry:%d", pParentSql->self, tstrerror(code), pParentSql->retry); - code = tsParseSql(pParentSql, true); + SSqlObj *userSql = ((SRetrieveSupport*)pParentSql->param)->pParentSql; + + code = tsParseSql(userSql, true); if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) { return; } if (code != TSDB_CODE_SUCCESS) { - pParentSql->res.code = code; - tscAsyncResultOnError(pParentSql); + userSql->res.code = code; + tscAsyncResultOnError(userSql); return; } - executeQuery(pParentSql, pQueryInfo); + doCleanupSubqueries(userSql, userSql->subState.numOfSub); + + pQueryInfo = tscGetQueryInfo(&userSql->cmd); + executeQuery(userSql, pQueryInfo); } else { (*pParentSql->fp)(pParentSql->param, pParentSql, pParentSql->res.code); } diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index 8c2165cadf..ae78c7a81e 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -3771,19 +3771,24 @@ static void tscSubqueryCompleteCallback(void* param, TAOS_RES* tres, int code) { tscDebug("0x%"PRIx64" retry parse sql and send query, prev error: %s, retry:%d", pParentSql->self, tstrerror(code), pParentSql->retry); - code = tsParseSql(pParentSql, true); + SSqlObj *userSql = ((SRetrieveSupport*)pParentSql->param)->pParentSql; + + code = tsParseSql(userSql, true); if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) { return; } if (code != TSDB_CODE_SUCCESS) { - pParentSql->res.code = code; - tscAsyncResultOnError(pParentSql); + userSql->res.code = code; + tscAsyncResultOnError(userSql); return; } - SQueryInfo *pQueryInfo = tscGetQueryInfo(pParentCmd); 
- executeQuery(pParentSql, pQueryInfo); + SQueryInfo *pQueryInfo = tscGetQueryInfo(&userSql->cmd); + + doCleanupSubqueries(userSql, userSql->subState.numOfSub); + + executeQuery(userSql, pQueryInfo); return; } @@ -3805,8 +3810,9 @@ void executeQuery(SSqlObj* pSql, SQueryInfo* pQueryInfo) { } if (taosArrayGetSize(pQueryInfo->pUpstream) > 0) { // nest query. do execute it firstly + assert(pSql->subState.numOfSub == 0); pSql->subState.numOfSub = (int32_t) taosArrayGetSize(pQueryInfo->pUpstream); - + assert(pSql->pSubs == NULL); pSql->pSubs = calloc(pSql->subState.numOfSub, POINTER_BYTES); pSql->subState.states = calloc(pSql->subState.numOfSub, sizeof(int8_t)); code = pthread_mutex_init(&pSql->subState.mutex, NULL); From 762284c409276108e781c3599b3f55fa973eb00b Mon Sep 17 00:00:00 2001 From: wpan Date: Tue, 17 Aug 2021 09:52:39 +0800 Subject: [PATCH 071/165] fix bug --- src/client/inc/tscSubquery.h | 3 +++ src/client/src/tscSubquery.c | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/src/client/inc/tscSubquery.h b/src/client/inc/tscSubquery.h index f0349c2b3d..f21ce67e9e 100644 --- a/src/client/inc/tscSubquery.h +++ b/src/client/inc/tscSubquery.h @@ -50,6 +50,9 @@ void tscUnlockByThread(int64_t *lockedBy); int tsInsertInitialCheck(SSqlObj *pSql); +void doCleanupSubqueries(SSqlObj *pSql, int32_t numOfSubs); + + #ifdef __cplusplus } #endif diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c index 04b10caa4e..52bf5d9bfe 100644 --- a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -2034,7 +2034,7 @@ void tscHandleMasterJoinQuery(SSqlObj* pSql) { tscAsyncResultOnError(pSql); } -static void doCleanupSubqueries(SSqlObj *pSql, int32_t numOfSubs) { +void doCleanupSubqueries(SSqlObj *pSql, int32_t numOfSubs) { assert(numOfSubs <= pSql->subState.numOfSub && numOfSubs >= 0); for(int32_t i = 0; i < numOfSubs; ++i) { From 0a969d042c7dfaed35f130d2a77168b0a4870238 Mon Sep 17 00:00:00 2001 From: wpan Date: Tue, 17 Aug 2021 10:06:01 +0800 Subject: [PATCH 072/165] fix bug --- src/client/src/tscSubquery.c | 1 + src/client/src/tscUtil.c | 1 + 2 files changed, 2 insertions(+) diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c index 52bf5d9bfe..32955bef3c 100644 --- a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -2749,6 +2749,7 @@ void tscHandleSubqueryError(SRetrieveSupport *trsupport, SSqlObj *pSql, int numO } doCleanupSubqueries(userSql, userSql->subState.numOfSub); + userSql->subState.numOfSub = 0; pQueryInfo = tscGetQueryInfo(&userSql->cmd); executeQuery(userSql, pQueryInfo); diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index ae78c7a81e..098204605a 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -4934,3 +4934,4 @@ void tscRemoveTableMetaBuf(STableMetaInfo* pTableMetaInfo, uint64_t id) { taosHashRemove(tscTableMetaMap, fname, len); tscDebug("0x%"PRIx64" remove table meta %s, numOfRemain:%d", id, fname, (int32_t) taosHashGetSize(tscTableMetaMap)); } + \ No newline at end of file From af641899eb93157f586ab5e3bb47c602d8027822 Mon Sep 17 00:00:00 2001 From: wpan Date: Tue, 17 Aug 2021 10:07:24 +0800 Subject: [PATCH 073/165] fix bug --- src/client/src/tscUtil.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index 098204605a..75473c7baf 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -3787,6 +3787,7 @@ static void tscSubqueryCompleteCallback(void* param, TAOS_RES* tres, int code) { 
SQueryInfo *pQueryInfo = tscGetQueryInfo(&userSql->cmd); doCleanupSubqueries(userSql, userSql->subState.numOfSub); + userSql->subState.numOfSub = 0; executeQuery(userSql, pQueryInfo); return; @@ -4934,4 +4935,3 @@ void tscRemoveTableMetaBuf(STableMetaInfo* pTableMetaInfo, uint64_t id) { taosHashRemove(tscTableMetaMap, fname, len); tscDebug("0x%"PRIx64" remove table meta %s, numOfRemain:%d", id, fname, (int32_t) taosHashGetSize(tscTableMetaMap)); } - \ No newline at end of file From 44be3ee34df9b406f00cf1da458ce22649d14928 Mon Sep 17 00:00:00 2001 From: wpan Date: Tue, 17 Aug 2021 10:11:11 +0800 Subject: [PATCH 074/165] fix bug --- src/client/src/tscSubquery.c | 6 +++--- src/client/src/tscUtil.c | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c index 32955bef3c..07ea4bf77f 100644 --- a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -2737,6 +2737,9 @@ void tscHandleSubqueryError(SRetrieveSupport *trsupport, SSqlObj *pSql, int numO SSqlObj *userSql = ((SRetrieveSupport*)pParentSql->param)->pParentSql; + doCleanupSubqueries(userSql, userSql->subState.numOfSub); + userSql->subState.numOfSub = 0; + code = tsParseSql(userSql, true); if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) { return; @@ -2748,9 +2751,6 @@ void tscHandleSubqueryError(SRetrieveSupport *trsupport, SSqlObj *pSql, int numO return; } - doCleanupSubqueries(userSql, userSql->subState.numOfSub); - userSql->subState.numOfSub = 0; - pQueryInfo = tscGetQueryInfo(&userSql->cmd); executeQuery(userSql, pQueryInfo); } else { diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index 75473c7baf..9623b247a5 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -3773,6 +3773,9 @@ static void tscSubqueryCompleteCallback(void* param, TAOS_RES* tres, int code) { SSqlObj *userSql = ((SRetrieveSupport*)pParentSql->param)->pParentSql; + doCleanupSubqueries(userSql, userSql->subState.numOfSub); + userSql->subState.numOfSub = 0; + code = tsParseSql(userSql, true); if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) { return; @@ -3786,9 +3789,6 @@ static void tscSubqueryCompleteCallback(void* param, TAOS_RES* tres, int code) { SQueryInfo *pQueryInfo = tscGetQueryInfo(&userSql->cmd); - doCleanupSubqueries(userSql, userSql->subState.numOfSub); - userSql->subState.numOfSub = 0; - executeQuery(userSql, pQueryInfo); return; } From 6adcba9dd78f3d25cbde2f9a435d62e0d8600d07 Mon Sep 17 00:00:00 2001 From: wpan Date: Tue, 17 Aug 2021 10:23:56 +0800 Subject: [PATCH 075/165] fix bug --- src/client/src/tscSubquery.c | 10 +++++----- src/client/src/tscUtil.c | 10 +++++----- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c index 07ea4bf77f..f709be3767 100644 --- a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -2729,17 +2729,17 @@ void tscHandleSubqueryError(SRetrieveSupport *trsupport, SSqlObj *pSql, int numO pParentCmd->pTableMetaMap = tscCleanupTableMetaMap(pParentCmd->pTableMetaMap); pParentCmd->pTableMetaMap = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK); + SSqlObj *userSql = ((SRetrieveSupport*)pParentSql->param)->pParentSql; + + doCleanupSubqueries(userSql, userSql->subState.numOfSub); + userSql->subState.numOfSub = 0; + pParentSql->res.code = TSDB_CODE_SUCCESS; pParentSql->retry++; tscDebug("0x%"PRIx64" retry parse sql and send query, prev error: %s, retry:%d", pParentSql->self, tstrerror(code), 
pParentSql->retry); - SSqlObj *userSql = ((SRetrieveSupport*)pParentSql->param)->pParentSql; - - doCleanupSubqueries(userSql, userSql->subState.numOfSub); - userSql->subState.numOfSub = 0; - code = tsParseSql(userSql, true); if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) { return; diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index 9623b247a5..3583c8140c 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -3765,16 +3765,16 @@ static void tscSubqueryCompleteCallback(void* param, TAOS_RES* tres, int code) { pParentCmd->pTableMetaMap = tscCleanupTableMetaMap(pParentCmd->pTableMetaMap); pParentCmd->pTableMetaMap = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK); + SSqlObj *userSql = ((SRetrieveSupport*)pParentSql->param)->pParentSql; + + doCleanupSubqueries(userSql, userSql->subState.numOfSub); + userSql->subState.numOfSub = 0; + pParentSql->res.code = TSDB_CODE_SUCCESS; pParentSql->retry++; tscDebug("0x%"PRIx64" retry parse sql and send query, prev error: %s, retry:%d", pParentSql->self, tstrerror(code), pParentSql->retry); - - SSqlObj *userSql = ((SRetrieveSupport*)pParentSql->param)->pParentSql; - - doCleanupSubqueries(userSql, userSql->subState.numOfSub); - userSql->subState.numOfSub = 0; code = tsParseSql(userSql, true); if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) { From fa6f3c2fdb470452830d35c5008d87f27aa46d60 Mon Sep 17 00:00:00 2001 From: tomchon Date: Tue, 17 Aug 2021 11:13:03 +0800 Subject: [PATCH 076/165] change version number --- cmake/version.inc | 2 +- snap/snapcraft.yaml | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/cmake/version.inc b/cmake/version.inc index 12dc2b1b16..148c33106a 100755 --- a/cmake/version.inc +++ b/cmake/version.inc @@ -4,7 +4,7 @@ PROJECT(TDengine) IF (DEFINED VERNUMBER) SET(TD_VER_NUMBER ${VERNUMBER}) ELSE () - SET(TD_VER_NUMBER "2.1.7.0") + SET(TD_VER_NUMBER "2.1.7.1") ENDIF () IF (DEFINED VERCOMPATIBLE) diff --git a/snap/snapcraft.yaml b/snap/snapcraft.yaml index 260af6f21b..859d40cf69 100644 --- a/snap/snapcraft.yaml +++ b/snap/snapcraft.yaml @@ -1,6 +1,6 @@ name: tdengine base: core18 -version: '2.1.7.0' +version: '2.1.7.1' icon: snap/gui/t-dengine.svg summary: an open-source big data platform designed and optimized for IoT. 
description: | @@ -72,7 +72,7 @@ parts: - usr/bin/taosd - usr/bin/taos - usr/bin/taosdemo - - usr/lib/libtaos.so.2.1.7.0 + - usr/lib/libtaos.so.2.1.7.1 - usr/lib/libtaos.so.1 - usr/lib/libtaos.so From 6dac76a70d5de3c54832389a5ed57206f73cc9de Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Mon, 16 Aug 2021 16:53:42 +0800 Subject: [PATCH 077/165] stop taosd using kill -9 --- tests/pytest/util/dnodes.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/pytest/util/dnodes.py b/tests/pytest/util/dnodes.py index ae4ba97eb3..698c3caa55 100644 --- a/tests/pytest/util/dnodes.py +++ b/tests/pytest/util/dnodes.py @@ -556,7 +556,7 @@ class TDDnodes: psCmd = "ps -ef|grep -w taosd| grep -v grep| grep -v defunct | awk '{print $2}'" processID = subprocess.check_output(psCmd, shell=True).decode("utf-8") while(processID): - killCmd = "kill -TERM %s > /dev/null 2>&1" % processID + killCmd = "kill -9 %s > /dev/null 2>&1" % processID os.system(killCmd) time.sleep(1) processID = subprocess.check_output( From 9f0bbd0cb3a3148144a831306a2025b1a0e90b53 Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Mon, 16 Aug 2021 18:03:24 +0800 Subject: [PATCH 078/165] test --- tests/pytest/util/dnodes.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/pytest/util/dnodes.py b/tests/pytest/util/dnodes.py index 698c3caa55..0f4919ba96 100644 --- a/tests/pytest/util/dnodes.py +++ b/tests/pytest/util/dnodes.py @@ -436,7 +436,7 @@ class TDDnodes: psCmd = "ps -ef|grep -w taosd| grep -v grep| grep -v defunct | awk '{print $2}'" processID = subprocess.check_output(psCmd, shell=True).decode("utf-8") while(processID): - killCmd = "kill -TERM %s > /dev/null 2>&1" % processID + killCmd = "kill -9 %s > /dev/null 2>&1" % processID os.system(killCmd) time.sleep(1) processID = subprocess.check_output( @@ -445,7 +445,7 @@ class TDDnodes: psCmd = "ps -ef|grep -w valgrind.bin| grep -v grep | awk '{print $2}'" processID = subprocess.check_output(psCmd, shell=True).decode("utf-8") while(processID): - killCmd = "kill -TERM %s > /dev/null 2>&1" % processID + killCmd = "kill -9 %s > /dev/null 2>&1" % processID os.system(killCmd) time.sleep(1) processID = subprocess.check_output( From 81cf139d0eef263e3a312ec7d0079ccade23f2b1 Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Tue, 17 Aug 2021 11:32:03 +0800 Subject: [PATCH 079/165] [TD-6101]reduce python case execution time --- tests/pytest/fulltest.sh | 3 ++- tests/pytest/test.py | 3 +-- tests/script/jenkins/basic.txt | 14 ++++++++------ tests/test-all.sh | 3 +++ 4 files changed, 14 insertions(+), 9 deletions(-) diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh index 600582098e..e444a0a318 100755 --- a/tests/pytest/fulltest.sh +++ b/tests/pytest/fulltest.sh @@ -152,6 +152,7 @@ python3 ./test.py -f import_merge/importCSV.py python3 ./test.py -f import_merge/import_update_0.py python3 ./test.py -f import_merge/import_update_1.py python3 ./test.py -f import_merge/import_update_2.py +python3 ./test.py -f update/merge_commit_data.py #======================p1-end=============== #======================p2-start=============== # tools @@ -180,7 +181,7 @@ python3 ./test.py -f update/allow_update-0.py python3 ./test.py -f update/append_commit_data.py python3 ./test.py -f update/append_commit_last-0.py python3 ./test.py -f update/append_commit_last.py -python3 ./test.py -f update/merge_commit_data.py + python3 ./test.py -f update/merge_commit_data2.py python3 ./test.py -f update/merge_commit_data2_update0.py diff --git a/tests/pytest/test.py 
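The dnodes.py hunks above replace kill -TERM with kill -9, trading a clean taosd shutdown for a test run that can never hang on a wedged process. For contrast, a hypothetical helper (not part of the harness; the name, grep pattern and timeout are assumptions) sketching the escalating stop those lines give up:

# Hypothetical helper, not in the repository: ask politely with SIGTERM,
# then escalate to SIGKILL for anything still alive after `timeout` seconds.
import os
import signal
import subprocess
import time

def stop_all(pattern="taosd", timeout=5):
    def pids():
        out = subprocess.check_output(
            "ps -ef | grep -w %s | grep -v grep | awk '{print $2}'" % pattern,
            shell=True).decode("utf-8")
        return [int(p) for p in out.split()]

    for pid in pids():
        try:
            os.kill(pid, signal.SIGTERM)        # graceful request first
        except ProcessLookupError:
            pass
    deadline = time.time() + timeout
    while pids() and time.time() < deadline:    # wait for voluntary exit
        time.sleep(0.5)
    for pid in pids():                          # force the stragglers
        try:
            os.kill(pid, signal.SIGKILL)
        except ProcessLookupError:
            pass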
b/tests/pytest/test.py index 65abd3ef93..97dca6be18 100644 --- a/tests/pytest/test.py +++ b/tests/pytest/test.py @@ -87,7 +87,7 @@ if __name__ == "__main__": else: toBeKilled = "valgrind.bin" - killCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}' | xargs kill -HUP > /dev/null 2>&1" % toBeKilled + killCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}' | xargs kill -TERM > /dev/null 2>&1" % toBeKilled psCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}'" % toBeKilled processID = subprocess.check_output(psCmd, shell=True) @@ -110,7 +110,6 @@ if __name__ == "__main__": time.sleep(2) tdLog.info('stop All dnodes') - sys.exit(0) tdDnodes.init(deployPath) tdDnodes.setTestCluster(testCluster) diff --git a/tests/script/jenkins/basic.txt b/tests/script/jenkins/basic.txt index 6ad6a74eed..b4aad278d8 100644 --- a/tests/script/jenkins/basic.txt +++ b/tests/script/jenkins/basic.txt @@ -90,6 +90,14 @@ cd ../../../debug; make ./test.sh -f general/parser/function.sim ./test.sh -f unique/cluster/vgroup100.sim +./test.sh -f unique/http/admin.sim +./test.sh -f unique/http/opentsdb.sim + +./test.sh -f unique/import/replica2.sim +./test.sh -f unique/import/replica3.sim + +./test.sh -f general/alter/cached_schema_after_alter.sim + #======================b1-end=============== #======================b2-start=============== @@ -198,13 +206,7 @@ cd ../../../debug; make #======================b3-end=============== #======================b4-start=============== -./test.sh -f unique/http/admin.sim -./test.sh -f unique/http/opentsdb.sim -./test.sh -f unique/import/replica2.sim -./test.sh -f unique/import/replica3.sim - -./test.sh -f general/alter/cached_schema_after_alter.sim ./test.sh -f general/alter/count.sim ./test.sh -f general/alter/dnode.sim ./test.sh -f general/alter/import.sim diff --git a/tests/test-all.sh b/tests/test-all.sh index 2578030165..b54857cf88 100755 --- a/tests/test-all.sh +++ b/tests/test-all.sh @@ -179,6 +179,9 @@ function runPyCaseOneByOnefq() { start_time=`date +%s` date +%F\ %T | tee -a pytest-out.log echo -n $case + if [[ $1 =~ full ]] ; then + line=$line" -s" + fi $line > case.log 2>&1 && \ echo -e "${GREEN} success${NC}" | tee -a pytest-out.log || \ echo -e "${RED} failed${NC}" | tee -a pytest-out.log From b65bc4dc18ac8e346c61948a6a5d2d62600f36bd Mon Sep 17 00:00:00 2001 From: wpan Date: Tue, 17 Aug 2021 11:35:55 +0800 Subject: [PATCH 080/165] fix bug --- src/client/src/tscSubquery.c | 3 +-- src/client/src/tscUtil.c | 4 ++-- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c index f709be3767..dfdbe9fd0b 100644 --- a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -2731,8 +2731,7 @@ void tscHandleSubqueryError(SRetrieveSupport *trsupport, SSqlObj *pSql, int numO SSqlObj *userSql = ((SRetrieveSupport*)pParentSql->param)->pParentSql; - doCleanupSubqueries(userSql, userSql->subState.numOfSub); - userSql->subState.numOfSub = 0; + tscFreeSubobj(userSql); pParentSql->res.code = TSDB_CODE_SUCCESS; pParentSql->retry++; diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index 3583c8140c..bc55357ddf 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -3767,8 +3767,7 @@ static void tscSubqueryCompleteCallback(void* param, TAOS_RES* tres, int code) { SSqlObj *userSql = ((SRetrieveSupport*)pParentSql->param)->pParentSql; - doCleanupSubqueries(userSql, userSql->subState.numOfSub); - userSql->subState.numOfSub = 0; + tscFreeSubobj(userSql); pParentSql->res.code = 
TSDB_CODE_SUCCESS; pParentSql->retry++; @@ -3815,6 +3814,7 @@ void executeQuery(SSqlObj* pSql, SQueryInfo* pQueryInfo) { pSql->subState.numOfSub = (int32_t) taosArrayGetSize(pQueryInfo->pUpstream); assert(pSql->pSubs == NULL); pSql->pSubs = calloc(pSql->subState.numOfSub, POINTER_BYTES); + assert(pSql->subState.states == NULL); pSql->subState.states = calloc(pSql->subState.numOfSub, sizeof(int8_t)); code = pthread_mutex_init(&pSql->subState.mutex, NULL); From 53aa829a8629c0098261d2c54ffdec1ad3867af5 Mon Sep 17 00:00:00 2001 From: wpan Date: Tue, 17 Aug 2021 11:40:14 +0800 Subject: [PATCH 081/165] fix bug --- src/client/src/tscSubquery.c | 3 ++- src/client/src/tscUtil.c | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c index dfdbe9fd0b..4576af6e81 100644 --- a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -2731,7 +2731,8 @@ void tscHandleSubqueryError(SRetrieveSupport *trsupport, SSqlObj *pSql, int numO SSqlObj *userSql = ((SRetrieveSupport*)pParentSql->param)->pParentSql; - tscFreeSubobj(userSql); + tscFreeSubobj(userSql); + tfree(pSql->pSubs); pParentSql->res.code = TSDB_CODE_SUCCESS; pParentSql->retry++; diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index bc55357ddf..cb797b5ceb 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -3767,7 +3767,8 @@ static void tscSubqueryCompleteCallback(void* param, TAOS_RES* tres, int code) { SSqlObj *userSql = ((SRetrieveSupport*)pParentSql->param)->pParentSql; - tscFreeSubobj(userSql); + tscFreeSubobj(userSql); + tfree(pSql->pSubs); pParentSql->res.code = TSDB_CODE_SUCCESS; pParentSql->retry++; From 83ec49107996a241ff3bd234e0a303bbb603eac3 Mon Sep 17 00:00:00 2001 From: wpan Date: Tue, 17 Aug 2021 11:48:54 +0800 Subject: [PATCH 082/165] fix bug --- src/client/src/tscSubquery.c | 2 +- src/client/src/tscUtil.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c index 4576af6e81..a27a8a41a1 100644 --- a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -2732,7 +2732,7 @@ void tscHandleSubqueryError(SRetrieveSupport *trsupport, SSqlObj *pSql, int numO SSqlObj *userSql = ((SRetrieveSupport*)pParentSql->param)->pParentSql; tscFreeSubobj(userSql); - tfree(pSql->pSubs); + tfree(userSql->pSubs); pParentSql->res.code = TSDB_CODE_SUCCESS; pParentSql->retry++; diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index cb797b5ceb..ba533bb03b 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -3768,7 +3768,7 @@ static void tscSubqueryCompleteCallback(void* param, TAOS_RES* tres, int code) { SSqlObj *userSql = ((SRetrieveSupport*)pParentSql->param)->pParentSql; tscFreeSubobj(userSql); - tfree(pSql->pSubs); + tfree(userSql->pSubs); pParentSql->res.code = TSDB_CODE_SUCCESS; pParentSql->retry++; From f85b81e0560b4ba9e4c82b2c8ff30be3a89ee7d3 Mon Sep 17 00:00:00 2001 From: wpan Date: Tue, 17 Aug 2021 12:47:48 +0800 Subject: [PATCH 083/165] fix bug --- src/client/src/tscSubquery.c | 1 + src/client/src/tscUtil.c | 3 +++ 2 files changed, 4 insertions(+) diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c index a27a8a41a1..b4fb832b14 100644 --- a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -2740,6 +2740,7 @@ void tscHandleSubqueryError(SRetrieveSupport *trsupport, SSqlObj *pSql, int numO tscDebug("0x%"PRIx64" retry parse sql and send query, prev error: %s, retry:%d", 
pParentSql->self, tstrerror(code), pParentSql->retry); + tscResetSqlCmd(&userSql->cmd, false); code = tsParseSql(userSql, true); if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) { return; diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index ba533bb03b..d5323625da 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -3775,6 +3775,9 @@ static void tscSubqueryCompleteCallback(void* param, TAOS_RES* tres, int code) { tscDebug("0x%"PRIx64" retry parse sql and send query, prev error: %s, retry:%d", pParentSql->self, tstrerror(code), pParentSql->retry); + + + tscResetSqlCmd(&userSql->cmd, false); code = tsParseSql(userSql, true); if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) { From 176af30aecb94b3698e65211ae542fbab304d619 Mon Sep 17 00:00:00 2001 From: wpan Date: Tue, 17 Aug 2021 12:59:30 +0800 Subject: [PATCH 084/165] fix mem leak --- src/client/inc/tscSubquery.h | 2 ++ src/client/src/tscSubquery.c | 15 +++++++++++++++ src/client/src/tscUtil.c | 2 ++ 3 files changed, 19 insertions(+) diff --git a/src/client/inc/tscSubquery.h b/src/client/inc/tscSubquery.h index f21ce67e9e..9e16f3f900 100644 --- a/src/client/inc/tscSubquery.h +++ b/src/client/inc/tscSubquery.h @@ -52,6 +52,8 @@ int tsInsertInitialCheck(SSqlObj *pSql); void doCleanupSubqueries(SSqlObj *pSql, int32_t numOfSubs); +void tscFreeRetrieveSupporters(SSqlObj *pSql); + #ifdef __cplusplus } diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c index b4fb832b14..9761c5b4ca 100644 --- a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -2050,6 +2050,19 @@ void doCleanupSubqueries(SSqlObj *pSql, int32_t numOfSubs) { } } +void tscFreeRetrieveSupporters(SSqlObj *pSql) { + for(int32_t i = 0; i < pSql->subState.numOfSub; ++i) { + SSqlObj* pSub = pSql->pSubs[i]; + assert(pSub != NULL); + + SRetrieveSupport* pSupport = pSub->param; + + tfree(pSupport->localBuffer); + tfree(pSub->param); + } +} + + void tscLockByThread(int64_t *lockedBy) { int64_t tid = taosGetSelfPthreadId(); int i = 0; @@ -2731,6 +2744,8 @@ void tscHandleSubqueryError(SRetrieveSupport *trsupport, SSqlObj *pSql, int numO SSqlObj *userSql = ((SRetrieveSupport*)pParentSql->param)->pParentSql; + tscFreeRetrieveSupporters(pParentSql); + tscFreeSubobj(userSql); tfree(userSql->pSubs); diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index d5323625da..67b1a1c199 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -3767,6 +3767,8 @@ static void tscSubqueryCompleteCallback(void* param, TAOS_RES* tres, int code) { SSqlObj *userSql = ((SRetrieveSupport*)pParentSql->param)->pParentSql; + tscFreeRetrieveSupporters(pParentSql); + tscFreeSubobj(userSql); tfree(userSql->pSubs); From 3885f10f8e2c6a1a62610be172dd32dfa2f5f7e1 Mon Sep 17 00:00:00 2001 From: shenglian zhou Date: Mon, 16 Aug 2021 09:41:50 +0800 Subject: [PATCH 085/165] []:run all ci tests first --- src/client/inc/tscUtil.h | 1 + src/client/src/tscSQLParser.c | 12 ++++++++---- src/client/src/tscUtil.c | 21 +++++++++++++++++++++ 3 files changed, 30 insertions(+), 4 deletions(-) diff --git a/src/client/inc/tscUtil.h b/src/client/inc/tscUtil.h index 0b2d4fd115..e11748efbe 100644 --- a/src/client/inc/tscUtil.h +++ b/src/client/inc/tscUtil.h @@ -143,6 +143,7 @@ bool tscIsSessionWindowQuery(SQueryInfo* pQueryInfo); bool tscIsSecondStageQuery(SQueryInfo* pQueryInfo); bool tsIsArithmeticQueryOnAggResult(SQueryInfo* pQueryInfo); bool tscGroupbyColumn(SQueryInfo* pQueryInfo); +int32_t tscGetTopBotQueryExprIndex(SQueryInfo* pQueryInfo); bool 
tscIsTopBotQuery(SQueryInfo* pQueryInfo); bool hasTagValOutput(SQueryInfo* pQueryInfo); bool timeWindowInterpoRequired(SQueryInfo *pQueryInfo); diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index 4e5245742c..968d62d939 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -5613,11 +5613,13 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq pQueryInfo->groupbyExpr.orderType = p1->sortOrder; pQueryInfo->order.orderColId = pSchema[index.columnIndex].colId; } else if (isTopBottomQuery(pQueryInfo)) { + int32_t topBotIndex = tscGetTopBotQueryExprIndex(pQueryInfo); + assert(topBotIndex >= 1); /* order of top/bottom query in interval is not valid */ - SExprInfo* pExpr = tscExprGet(pQueryInfo, 0); + SExprInfo* pExpr = tscExprGet(pQueryInfo, topBotIndex-1); assert(pExpr->base.functionId == TSDB_FUNC_TS); - pExpr = tscExprGet(pQueryInfo, 1); + pExpr = tscExprGet(pQueryInfo, topBotIndex); if (pExpr->base.colInfo.colIndex != index.columnIndex && index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX) { return invalidOperationMsg(pMsgBuf, msg2); } @@ -5706,11 +5708,13 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq SColIndex* pColIndex = taosArrayGet(columnInfo, 0); validOrder = (pColIndex->colIndex == index.columnIndex); } else { + int32_t topBotIndex = tscGetTopBotQueryExprIndex(pQueryInfo); + assert(topBotIndex >= 1); /* order of top/bottom query in interval is not valid */ - SExprInfo* pExpr = tscExprGet(pQueryInfo, 0); + SExprInfo* pExpr = tscExprGet(pQueryInfo, topBotIndex-1); assert(pExpr->base.functionId == TSDB_FUNC_TS); - pExpr = tscExprGet(pQueryInfo, 1); + pExpr = tscExprGet(pQueryInfo, topBotIndex); if (pExpr->base.colInfo.colIndex != index.columnIndex && index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX) { return invalidOperationMsg(pMsgBuf, msg2); } diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index 612d6a5642..6e0bf6f7d2 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -369,6 +369,27 @@ bool tscGroupbyColumn(SQueryInfo* pQueryInfo) { return false; } +int32_t tscGetTopBotQueryExprIndex(SQueryInfo* pQueryInfo) { + size_t numOfExprs = tscNumOfExprs(pQueryInfo); + + for (int32_t i = 0; i < numOfExprs; ++i) { + SExprInfo* pExpr = tscExprGet(pQueryInfo, i); + if (pExpr == NULL) { + continue; + } + + if (pExpr->base.functionId == TSDB_FUNC_TS) { + continue; + } + + if (pExpr->base.functionId == TSDB_FUNC_TOP || pExpr->base.functionId == TSDB_FUNC_BOTTOM) { + return i; + } + } + + return -1; +} + bool tscIsTopBotQuery(SQueryInfo* pQueryInfo) { size_t numOfExprs = tscNumOfExprs(pQueryInfo); From e9ffa8b13a24f6136e7f1b4c104da471ab675db4 Mon Sep 17 00:00:00 2001 From: shenglian zhou Date: Tue, 17 Aug 2021 11:48:42 +0800 Subject: [PATCH 086/165] [TD-6017]:add test case that top/bottom expr is not first expression --- tests/script/general/parser/limit.sim | 5 +++++ tests/script/general/parser/limit_tb.sim | 4 ++++ 2 files changed, 9 insertions(+) diff --git a/tests/script/general/parser/limit.sim b/tests/script/general/parser/limit.sim index 23b85095c5..3af2cb3018 100644 --- a/tests/script/general/parser/limit.sim +++ b/tests/script/general/parser/limit.sim @@ -75,4 +75,9 @@ sleep 100 run general/parser/limit_tb.sim run general/parser/limit_stb.sim +print ========> TD-6017 +sql use $db +sql select * from (select ts, top(c1, 5) from $tb where ts >= $ts0 order by ts desc limit 3 offset 1) +sql select * from (select ts, top(c1, 5) from 
$stb where ts >= $ts0 order by ts desc limit 3 offset 1) + system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/general/parser/limit_tb.sim b/tests/script/general/parser/limit_tb.sim index 0c987d88c9..4a93797d40 100644 --- a/tests/script/general/parser/limit_tb.sim +++ b/tests/script/general/parser/limit_tb.sim @@ -355,6 +355,10 @@ sql select top(c1, 1) from $tb where ts >= $ts0 and ts <= $tsu limit 5 offset 1 if $rows != 0 then return -1 endi + +print ========> TD-6017 +sql select * from (select ts, top(c1, 5) from $tb where ts >= $ts0 and ts <= $tsu order by ts desc limit 3 offset 1) + sql select top(c1, 5) from $tb where ts >= $ts0 and ts <= $tsu order by ts desc limit 3 offset 1 print select top(c1, 5) from $tb where ts >= $ts0 and ts <= $tsu order by ts desc limit 3 offset 1 print $data00 $data01 From b9d5df6761cfedcf8a6dec148f5470840cb98c2d Mon Sep 17 00:00:00 2001 From: wpan Date: Tue, 17 Aug 2021 13:54:15 +0800 Subject: [PATCH 087/165] fix crash issue --- src/client/src/tscSubquery.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c index 9761c5b4ca..5464d4168a 100644 --- a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -2054,11 +2054,8 @@ void tscFreeRetrieveSupporters(SSqlObj *pSql) { for(int32_t i = 0; i < pSql->subState.numOfSub; ++i) { SSqlObj* pSub = pSql->pSubs[i]; assert(pSub != NULL); - - SRetrieveSupport* pSupport = pSub->param; - - tfree(pSupport->localBuffer); - tfree(pSub->param); + + tscFreeRetrieveSup(pSub); } } From e43b3c330ad47f447295e6ce789b3c84c3a4d214 Mon Sep 17 00:00:00 2001 From: wpan Date: Tue, 17 Aug 2021 13:56:11 +0800 Subject: [PATCH 088/165] fix issue --- src/client/src/tscSubquery.c | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c index 5464d4168a..04267421cf 100644 --- a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -2050,16 +2050,6 @@ void doCleanupSubqueries(SSqlObj *pSql, int32_t numOfSubs) { } } -void tscFreeRetrieveSupporters(SSqlObj *pSql) { - for(int32_t i = 0; i < pSql->subState.numOfSub; ++i) { - SSqlObj* pSub = pSql->pSubs[i]; - assert(pSub != NULL); - - tscFreeRetrieveSup(pSub); - } -} - - void tscLockByThread(int64_t *lockedBy) { int64_t tid = taosGetSelfPthreadId(); int i = 0; @@ -2586,6 +2576,18 @@ static void tscFreeRetrieveSup(SSqlObj *pSql) { tfree(trsupport); } +void tscFreeRetrieveSupporters(SSqlObj *pSql) { + for(int32_t i = 0; i < pSql->subState.numOfSub; ++i) { + SSqlObj* pSub = pSql->pSubs[i]; + assert(pSub != NULL); + + tscFreeRetrieveSup(pSub); + } +} + + + + static void tscRetrieveFromDnodeCallBack(void *param, TAOS_RES *tres, int numOfRows); static void tscHandleSubqueryError(SRetrieveSupport *trsupport, SSqlObj *pSql, int numOfRows); From 829055102fa12bce12cb33eb8ad2d97676fc9449 Mon Sep 17 00:00:00 2001 From: cpwu Date: Tue, 17 Aug 2021 13:57:47 +0800 Subject: [PATCH 089/165] [TD-5798] add case for td5798 --- tests/pytest/functions/queryTestCases.py | 214 +++++++++++++++++++---- 1 file changed, 183 insertions(+), 31 deletions(-) diff --git a/tests/pytest/functions/queryTestCases.py b/tests/pytest/functions/queryTestCases.py index 8cd3ef6b3a..f320db43af 100644 --- a/tests/pytest/functions/queryTestCases.py +++ b/tests/pytest/functions/queryTestCases.py @@ -58,7 +58,7 @@ class TDTestCase: def td3690(self): tdLog.printNoPrefix("==========TD-3690==========") 
tdSql.query("show variables") - tdSql.checkData(51, 1, 864000) + tdSql.checkData(53, 1, 864000) def td4082(self): tdLog.printNoPrefix("==========TD-4082==========") @@ -268,7 +268,7 @@ class TDTestCase: tdSql.execute("drop database if exists db") tdSql.execute("create database if not exists db") tdSql.query("show variables") - tdSql.checkData(36, 1, 3650) + tdSql.checkData(38, 1, 3650) tdSql.query("show databases") tdSql.checkData(0,7,"3650,3650,3650") @@ -296,7 +296,7 @@ class TDTestCase: tdSql.query("show databases") tdSql.checkData(0, 7, "3650,3650,3650") tdSql.query("show variables") - tdSql.checkData(36, 1, 3650) + tdSql.checkData(38, 1, 3650) tdSql.execute("alter database db1 keep 365") tdSql.execute("drop database if exists db1") @@ -613,45 +613,189 @@ class TDTestCase: pass def td5798(self): - tdLog.printNoPrefix("==========TD-5798==========") + tdLog.printNoPrefix("==========TD-5798 + TD-5810==========") tdSql.execute("drop database if exists db") tdSql.execute("create database if not exists db keep 3650") tdSql.execute("use db") tdSql.execute("create stable db.stb1 (ts timestamp, c1 int, c2 int) tags(t0 tinyint, t1 int, t2 int)") - tdSql.execute("create stable db.stb2 (ts timestamp, c2 int, c3 int) tags(t2 binary(16), t3 binary(16), t4 int)") - for i in range(100): - sql = f"create table db.t{i} using db.stb1 tags({i%7}, {(i-1)%7}, {i%2})" + tdSql.execute("create stable db.stb2 (ts timestamp, c2 int, c3 binary(16)) tags(t2 binary(16), t3 binary(16), t4 int)") + maxRemainderNum=7 + tbnum=101 + for i in range(tbnum-1): + sql = f"create table db.t{i} using db.stb1 tags({i%maxRemainderNum}, {(i-1)%maxRemainderNum}, {i%2})" tdSql.execute(sql) tdSql.execute(f"insert into db.t{i} values (now-10d, {i}, {i%3})") - tdSql.execute(f"insert into db.t{i} values (now-9d, {i*2}, {(i-1)%3})") - tdSql.execute(f"insert into db.t{i} values (now-8d, {i*3}, {(i-2)%3})") + tdSql.execute(f"insert into db.t{i} values (now-9d, {i}, {(i-1)%3})") + tdSql.execute(f"insert into db.t{i} values (now-8d, {i}, {(i-2)%3})") + tdSql.execute(f"insert into db.t{i} (ts )values (now-7d)") - tdSql.execute(f"create table db.t0{i} using db.stb2 tags('{i}', '{100-i}', {i%3})") - tdSql.execute(f"insert into db.t0{i} values (now-10d, {i}, {(i+1)%3})") - tdSql.execute(f"insert into db.t0{i} values (now-9d, {i*2}, {(i+2)%3})") - tdSql.execute(f"insert into db.t0{i} values (now-8d, {i*3}, {(i)%3})") + tdSql.execute(f"create table db.t0{i} using db.stb2 tags('{i%maxRemainderNum}', '{(i-1)%maxRemainderNum}', {i%3})") + tdSql.execute(f"insert into db.t0{i} values (now-10d, {i}, '{(i+1)%3}')") + tdSql.execute(f"insert into db.t0{i} values (now-9d, {i}, '{(i+2)%3}')") + tdSql.execute(f"insert into db.t0{i} values (now-8d, {i}, '{(i)%3}')") + tdSql.execute(f"insert into db.t0{i} (ts )values (now-7d)") + tdSql.execute("create table db.t100num using db.stb1 tags(null, null, null)") + tdSql.execute("create table db.t0100num using db.stb2 tags(null, null, null)") + tdSql.execute(f"insert into db.t100num values (now-10d, {tbnum-1}, 1)") + tdSql.execute(f"insert into db.t100num values (now-9d, {tbnum-1}, 0)") + tdSql.execute(f"insert into db.t100num values (now-8d, {tbnum-1}, 2)") + tdSql.execute(f"insert into db.t100num (ts )values (now-7d)") + tdSql.execute(f"insert into db.t0100num values (now-10d, {tbnum-1}, 1)") + tdSql.execute(f"insert into db.t0100num values (now-9d, {tbnum-1}, 0)") + tdSql.execute(f"insert into db.t0100num values (now-8d, {tbnum-1}, 2)") + tdSql.execute(f"insert into db.t0100num (ts )values (now-7d)") + #========== 
TD-5810 suport distinct multi-data-coloumn ========== + tdSql.query(f"select distinct c1 from stb1 where c1 <{tbnum}") + tdSql.checkRows(tbnum) + tdSql.query(f"select distinct c2 from stb1") + tdSql.checkRows(4) + tdSql.query(f"select distinct c1,c2 from stb1 where c1 <{tbnum}") + tdSql.checkRows(tbnum*3) + tdSql.query(f"select distinct c1,c1 from stb1 where c1 <{tbnum}") + tdSql.checkRows(tbnum) + tdSql.query(f"select distinct c1,c2 from stb1 where c1 <{tbnum} limit 3") + tdSql.checkRows(3) + tdSql.query(f"select distinct c1,c2 from stb1 where c1 <{tbnum} limit 3 offset {tbnum*3-2}") + tdSql.checkRows(2) + + tdSql.query(f"select distinct c1 from t1 where c1 <{tbnum}") + tdSql.checkRows(1) + tdSql.query(f"select distinct c2 from t1") + tdSql.checkRows(4) + tdSql.query(f"select distinct c1,c2 from t1 where c1 <{tbnum}") + tdSql.checkRows(3) + tdSql.query(f"select distinct c1,c1 from t1 ") + tdSql.checkRows(2) + tdSql.query(f"select distinct c1,c1 from t1 where c1 <{tbnum}") + tdSql.checkRows(1) + tdSql.query(f"select distinct c1,c2 from t1 where c1 <{tbnum} limit 3") + tdSql.checkRows(3) + tdSql.query(f"select distinct c1,c2 from t1 where c1 <{tbnum} limit 3 offset 2") + tdSql.checkRows(1) + + tdSql.query(f"select distinct c3 from stb2 where c2 <{tbnum} ") + tdSql.checkRows(3) + tdSql.query(f"select distinct c3, c2 from stb2 where c2 <{tbnum} limit 2") + tdSql.checkRows(2) + + tdSql.error("select distinct c5 from stb1") + tdSql.error("select distinct c5 from t1") + tdSql.error("select distinct c1 from db.*") + tdSql.error("select c2, distinct c1 from stb1") + tdSql.error("select c2, distinct c1 from t1") + tdSql.error("select distinct c2 from ") + tdSql.error("distinct c2 from stb1") + tdSql.error("distinct c2 from t1") + tdSql.error("select distinct c1, c2, c3 from stb1") + tdSql.error("select distinct c1, c2, c3 from t1") + tdSql.error("select distinct stb1.c1, stb1.c2, stb2.c2, stb2.c3 from stb1") + tdSql.error("select distinct stb1.c1, stb1.c2, stb2.c2, stb2.c3 from t1") + tdSql.error("select distinct t1.c1, t1.c2, t2.c1, t2.c2 from t1") + tdSql.query(f"select distinct c1 c2, c2 c3 from stb1 where c1 <{tbnum}") + tdSql.checkRows(tbnum*3) + tdSql.query(f"select distinct c1 c2, c2 c3 from t1 where c1 <{tbnum}") + tdSql.checkRows(3) + tdSql.query("select distinct c1, c2 from stb1 order by ts") + tdSql.checkRows(tbnum*3+1) + tdSql.query("select distinct c1, c2 from t1 order by ts") + tdSql.checkRows(4) + tdSql.error("select distinct c1, ts from stb1 group by c2") + tdSql.error("select distinct c1, ts from t1 group by c2") + tdSql.error("select distinct c1, max(c2) from stb1 ") + tdSql.error("select distinct c1, max(c2) from t1 ") + tdSql.error("select max(c2), distinct c1 from stb1 ") + tdSql.error("select max(c2), distinct c1 from t1 ") + tdSql.error("select distinct c1, c2 from stb1 where c1 > 3 group by t0") + tdSql.error("select distinct c1, c2 from t1 where c1 > 3 group by t0") + tdSql.error("select distinct c1, c2 from stb1 where c1 > 3 interval(1d) ") + tdSql.error("select distinct c1, c2 from t1 where c1 > 3 interval(1d) ") + tdSql.error("select distinct c1, c2 from stb1 where c1 > 3 interval(1d) fill(next)") + tdSql.error("select distinct c1, c2 from t1 where c1 > 3 interval(1d) fill(next)") + tdSql.error("select distinct c1, c2 from stb1 where ts > now-10d and ts < now interval(1d) fill(next)") + tdSql.error("select distinct c1, c2 from t1 where ts > now-10d and ts < now interval(1d) fill(next)") + tdSql.error("select distinct c1, c2 from stb1 where c1 > 3 slimit 1") + 
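The checkRows() values in the distinct-data-column block above follow directly from the insert loop at the top of the test. A standalone sketch, plain Python with no tdSql dependency, that rebuilds the inserted (c1, c2) pairs and confirms the expected counts:

# Rebuild the (c1, c2) pairs written by the insert loop; None stands in for
# the row inserted with only a timestamp (all data columns NULL).
tbnum = 101
rows = []
for i in range(tbnum - 1):                          # child tables t0 .. t99
    rows += [(i, (i - k) % 3) for k in range(3)]    # three data rows, c1 = i
    rows.append((None, None))                       # the "(ts) values (...)" row
rows += [(tbnum - 1, c2) for c2 in (1, 0, 2)] + [(None, None)]   # t100num

filtered = [p for p in rows if p[0] is not None and p[0] < tbnum]
assert len({c1 for c1, _ in filtered}) == tbnum          # distinct c1     -> 101
assert len({c2 for _, c2 in rows}) == 4                  # distinct c2     -> 0, 1, 2, NULL
assert len(set(filtered)) == tbnum * 3                   # distinct c1, c2 -> 303
assert len(set(rows)) == tbnum * 3 + 1                   # plus the all-NULL pair -> 304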
tdSql.error("select distinct c1, c2 from t1 where c1 > 3 slimit 1") + tdSql.query(f"select distinct c1, c2 from stb1 where c1 between {tbnum-2} and {tbnum} ") + tdSql.checkRows(6) + tdSql.query("select distinct c1, c2 from stb1 where c1 in (1,2,3,4,5)") + tdSql.checkRows(15) + tdSql.query("select distinct c1, c2 from stb1 where c1 in (100,1000,10000)") + tdSql.checkRows(3) + + tdSql.query(f"select distinct c1,c2 from (select * from stb1 where c1 > {tbnum-2}) ") + tdSql.checkRows(3) + tdSql.query(f"select distinct c1,c2 from (select * from t1 where c1 < {tbnum}) ") + tdSql.checkRows(3) + tdSql.query(f"select distinct c1,c2 from (select * from stb1 where t2 !=0 and t2 != 1) ") + tdSql.checkRows(4) + tdSql.error("select distinct c1, c2 from (select distinct c1, c2 from stb1 where t0 > 2 and t1 < 3) ") + tdSql.error("select c1, c2 from (select distinct c1, c2 from stb1 where t0 > 2 and t1 < 3) ") + tdSql.query("select distinct c1, c2 from (select c2, c1 from stb1 where c1 > 2 ) where c1 < 4") + tdSql.checkRows(3) + tdSql.error("select distinct c1, c2 from (select c1 from stb1 where t0 > 2 ) where t1 < 3") + tdSql.error("select distinct c1, c2 from (select c2, c1 from stb1 where c1 > 2 order by ts)") + # tdSql.error("select distinct c1, c2 from (select c2, c1 from t1 where c1 > 2 order by ts)") + tdSql.error("select distinct c1, c2 from (select c2, c1 from stb1 where c1 > 2 group by c1)") + # tdSql.error("select distinct c1, c2 from (select max(c1) c1, max(c2) c2 from stb1 group by c1)") + # tdSql.error("select distinct c1, c2 from (select max(c1) c1, max(c2) c2 from t1 group by c1)") + tdSql.query("select distinct c1, c2 from (select max(c1) c1, max(c2) c2 from stb1 )") + tdSql.checkRows(1) + tdSql.query("select distinct c1, c2 from (select max(c1) c1, max(c2) c2 from t1 )") + tdSql.checkRows(1) + tdSql.error("select distinct stb1.c1, stb1.c2 from stb1 , stb2 where stb1.ts=stb2.ts and stb1.t2=stb2.t4") + tdSql.error("select distinct t1.c1, t1.c2 from t1 , t2 where t1.ts=t2.ts ") + + # tdSql.error("select distinct c1, c2 from (select count(c1) c1, count(c2) c2 from stb1 group by ts)") + # tdSql.error("select distinct c1, c2 from (select count(c1) c1, count(c2) c2 from t1 group by ts)") + + + + #========== TD-5798 suport distinct multi-tags-coloumn ========== tdSql.query("select distinct t1 from stb1") - tdSql.checkRows(7) + tdSql.checkRows(maxRemainderNum+1) tdSql.query("select distinct t0, t1 from stb1") - tdSql.checkRows(7) + tdSql.checkRows(maxRemainderNum+1) tdSql.query("select distinct t1, t0 from stb1") - tdSql.checkRows(7) + tdSql.checkRows(maxRemainderNum+1) tdSql.query("select distinct t1, t2 from stb1") - tdSql.checkRows(14) + tdSql.checkRows(maxRemainderNum*2+1) tdSql.query("select distinct t0, t1, t2 from stb1") - tdSql.checkRows(14) + tdSql.checkRows(maxRemainderNum*2+1) tdSql.query("select distinct t0 t1, t1 t2 from stb1") - tdSql.checkRows(7) + tdSql.checkRows(maxRemainderNum+1) tdSql.query("select distinct t0, t0, t0 from stb1") - tdSql.checkRows(7) + tdSql.checkRows(maxRemainderNum+1) tdSql.query("select distinct t0, t1 from t1") tdSql.checkRows(1) + tdSql.query("select distinct t0, t1 from t100num") + tdSql.checkRows(1) + + tdSql.query("select distinct t3 from stb2") + tdSql.checkRows(maxRemainderNum+1) + tdSql.query("select distinct t2, t3 from stb2") + tdSql.checkRows(maxRemainderNum+1) + tdSql.query("select distinct t3, t2 from stb2") + tdSql.checkRows(maxRemainderNum+1) + tdSql.query("select distinct t4, t2 from stb2") + tdSql.checkRows(maxRemainderNum*3+1) + 
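The tag-side counts work the same way. A standalone sketch (again outside the test framework) showing why lock-step remainders give only 7 combinations while independent moduli multiply, and why every checkRows() carries a +1 for the child table created with NULL tags:

# Tuple layout per child table i: (stb1.t0, stb1.t1, stb1.t2, stb2.t4);
# stb2's binary tags carry the same remainders as strings, so the counting
# is identical.
m, tbnum = 7, 101                   # maxRemainderNum and table count from the test
tags = [(i % m, (i - 1) % m, i % 2, i % 3) for i in range(tbnum - 1)]
assert len({(t0, t1) for t0, t1, _, _ in tags}) == m        # lock-step: 7  -> checkRows(8)
assert len({(t1, t2) for _, t1, t2, _ in tags}) == m * 2    # 7 * 2 = 14    -> checkRows(15)
assert len({(t0, t4) for t0, _, _, t4 in tags}) == m * 3    # 7 * 3 = 21    -> checkRows(22)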
tdSql.query("select distinct t2, t3, t4 from stb2") + tdSql.checkRows(maxRemainderNum*3+1) + tdSql.query("select distinct t2 t1, t3 t2 from stb2") + tdSql.checkRows(maxRemainderNum+1) + tdSql.query("select distinct t3, t3, t3 from stb2") + tdSql.checkRows(maxRemainderNum+1) + tdSql.query("select distinct t2, t3 from t01") + tdSql.checkRows(1) + tdSql.query("select distinct t3, t4 from t0100num") + tdSql.checkRows(1) ########## should be error ######### tdSql.error("select distinct from stb1") + tdSql.error("select distinct t3 from stb1") + tdSql.error("select distinct t1 from db.*") tdSql.error("select distinct t2 from ") tdSql.error("distinct t2 from stb1") tdSql.error("select distinct stb1") @@ -678,29 +822,37 @@ class TDTestCase: tdSql.checkRows(1) tdSql.error("select distinct stb1.t0, stb1.t1 from stb1, stb2 where stb1.t2=stb2.t4") tdSql.error("select distinct t0, t1 from stb1 where stb2.t4 > 2") - # tdSql.error("select max(c1), distinct t0 from stb1 where t0 > 2") + tdSql.error("select distinct t0, t1 from stb1 where t1 > 3 group by t0") + tdSql.error("select distinct t0, t1 from stb1 where t1 > 3 interval(1d) ") + tdSql.error("select distinct t0, t1 from stb1 where t1 > 3 interval(1d) fill(next)") + tdSql.error("select distinct t0, t1 from stb1 where ts > now-10d and ts < now interval(1d) fill(next)") + + tdSql.error("select max(c1), distinct t0 from stb1 where t0 > 2") + tdSql.error("select distinct t0, max(c1) from stb1 where t0 > 2") tdSql.error("select distinct t0 from stb1 where t0 in (select t0 from stb1 where t0 > 2)") tdSql.query("select distinct t0, t1 from stb1 where t0 in (1,2,3,4,5)") tdSql.checkRows(5) tdSql.query("select distinct t1 from (select t0, t1 from stb1 where t0 > 2) ") tdSql.checkRows(4) - tdSql.query("select distinct t1 from (select distinct t0, t1 from stb1 where t0 > 2 and t1 < 3) ") - tdSql.checkRows(1) - tdSql.query("select distinct t1 from (select distinct t0, t1 from stb1 where t0 > 2 ) where t1 < 3") - tdSql.checkRows(1) + tdSql.error("select distinct t1 from (select distinct t0, t1 from stb1 where t0 > 2 and t1 < 3) ") + tdSql.error("select distinct t1 from (select distinct t0, t1 from stb1 where t0 > 2 ) where t1 < 3") tdSql.query("select distinct t1 from (select t0, t1 from stb1 where t0 > 2 ) where t1 < 3") tdSql.checkRows(1) tdSql.error("select distinct t1, t0 from (select t1 from stb1 where t0 > 2 ) where t1 < 3") + tdSql.error("select distinct t1, t0 from (select max(t1) t1, max(t0) t0 from stb1 group by t1)") + tdSql.error("select distinct t1, t0 from (select max(t1) t1, max(t0) t0 from stb1)") tdSql.query("select distinct t1, t0 from (select t1,t0 from stb1 where t0 > 2 ) where t1 < 3") tdSql.checkRows(1) - # tdSql.error(" select distinct t1, t0 from (select t1,t0 from stb1 where t0 > 2 order by ts) where t1 < 3") - tdSql.query("select t1, t0 from (select distinct t1,t0 from stb1 where t0 > 2 ) where t1 < 3") - + tdSql.error(" select distinct t1, t0 from (select t1,t0 from stb1 where t0 > 2 order by ts) where t1 < 3") + tdSql.error("select t1, t0 from (select distinct t1,t0 from stb1 where t0 > 2 ) where t1 < 3") + tdSql.error(" select distinct t1, t0 from (select t1,t0 from stb1 where t0 > 2 group by ts) where t1 < 3") + tdSql.error("select distinct stb1.t1, stb1.t2 from stb1 , stb2 where stb1.ts=stb2.ts and stb1.t2=stb2.t4") + tdSql.error("select distinct t1.t1, t1.t2 from t1 , t2 where t1.ts=t2.ts ") pass def td5935(self): - tdLog.printNoPrefix("==========TD-5798==========") + tdLog.printNoPrefix("==========TD-5935==========") 
tdSql.execute("drop database if exists db") tdSql.execute("create database if not exists db keep 3650") @@ -739,8 +891,8 @@ class TDTestCase: # self.td4082() # self.td4288() # self.td4724() - # self.td5798() - self.td5935() + self.td5798() + # self.td5935() # develop branch # self.td4097() From 2fde105908837521e34cc03b048d1276466df1b6 Mon Sep 17 00:00:00 2001 From: wpan Date: Tue, 17 Aug 2021 14:04:27 +0800 Subject: [PATCH 090/165] fix mem leak --- src/client/src/tscSubquery.c | 2 +- src/client/src/tscUtil.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c index 04267421cf..9ee735de45 100644 --- a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -2743,7 +2743,7 @@ void tscHandleSubqueryError(SRetrieveSupport *trsupport, SSqlObj *pSql, int numO SSqlObj *userSql = ((SRetrieveSupport*)pParentSql->param)->pParentSql; - tscFreeRetrieveSupporters(pParentSql); + tscFreeRetrieveSup(pParentSql); tscFreeSubobj(userSql); tfree(userSql->pSubs); diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index 67b1a1c199..ad04550e77 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -3767,7 +3767,7 @@ static void tscSubqueryCompleteCallback(void* param, TAOS_RES* tres, int code) { SSqlObj *userSql = ((SRetrieveSupport*)pParentSql->param)->pParentSql; - tscFreeRetrieveSupporters(pParentSql); + tscFreeRetrieveSup(pParentSql); tscFreeSubobj(userSql); tfree(userSql->pSubs); From a299c05c96c0e8cef4e150626dc364143ca03f7e Mon Sep 17 00:00:00 2001 From: wpan Date: Tue, 17 Aug 2021 14:07:04 +0800 Subject: [PATCH 091/165] fix bug --- src/client/inc/tscSubquery.h | 3 +++ src/client/src/tscSubquery.c | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/src/client/inc/tscSubquery.h b/src/client/inc/tscSubquery.h index 9e16f3f900..99215551c3 100644 --- a/src/client/inc/tscSubquery.h +++ b/src/client/inc/tscSubquery.h @@ -54,6 +54,9 @@ void doCleanupSubqueries(SSqlObj *pSql, int32_t numOfSubs); void tscFreeRetrieveSupporters(SSqlObj *pSql); +void tscFreeRetrieveSup(SSqlObj *pSql); + + #ifdef __cplusplus } diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c index 9ee735de45..56cee02f4d 100644 --- a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -2562,7 +2562,7 @@ int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) { return TSDB_CODE_SUCCESS; } -static void tscFreeRetrieveSup(SSqlObj *pSql) { +void tscFreeRetrieveSup(SSqlObj *pSql) { SRetrieveSupport *trsupport = pSql->param; void* p = atomic_val_compare_exchange_ptr(&pSql->param, trsupport, 0); From 010db435a49f6936b5768ae12af765b0f61ebd05 Mon Sep 17 00:00:00 2001 From: wpan Date: Tue, 17 Aug 2021 14:50:19 +0800 Subject: [PATCH 092/165] fix crash issue --- src/query/src/qExecutor.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index 844b8ec683..f59270d40a 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -1604,6 +1604,7 @@ static void hashAllIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pRe int32_t startPos = ascQuery? 
0 : (pSDataBlock->info.rows - 1); TSKEY ts = getStartTsKey(pQueryAttr, &pSDataBlock->info.window, tsCols, pSDataBlock->info.rows); + int32_t startInterp = (pResultRowInfo->curPos == -1); STimeWindow win = getCurrentActiveTimeWindow(pResultRowInfo, ts, pQueryAttr); bool masterScan = IS_MASTER_SCAN(pRuntimeEnv); @@ -1625,7 +1626,10 @@ static void hashAllIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pRe // window start(end) key interpolation doWindowBorderInterpolation(pOperatorInfo, pSDataBlock, pInfo->pCtx, pResult, &win, startPos, forwardStep); - doApplyFunctions(pRuntimeEnv, pInfo->pCtx, ascQuery ? &win : &preWin, startPos, forwardStep, tsCols, pSDataBlock->info.rows, numOfOutput); + if (!startInterp) { + doApplyFunctions(pRuntimeEnv, pInfo->pCtx, ascQuery ? &win : &preWin, startPos, forwardStep, tsCols, pSDataBlock->info.rows, numOfOutput); + } + startInterp = 0; preWin = win; int32_t prevEndPos = (forwardStep - 1) * step + startPos; From ecb66a1598a10c6ca11afaf73df0730009fd9adc Mon Sep 17 00:00:00 2001 From: wpan Date: Tue, 17 Aug 2021 15:18:37 +0800 Subject: [PATCH 093/165] fix bug --- src/client/src/tscSQLParser.c | 4 ++++ src/query/src/qExecutor.c | 6 +----- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index 4e5245742c..9bda1458f4 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -5382,6 +5382,7 @@ int32_t validateFillNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSqlNo const char* msg3 = "top/bottom not support fill"; const char* msg4 = "illegal value or data overflow"; const char* msg5 = "fill only available for interval query"; + const char* msg6 = "not supported function now"; if ((!isTimeWindowQuery(pQueryInfo)) && (!tscIsPointInterpQuery(pQueryInfo))) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg5); @@ -5420,6 +5421,9 @@ int32_t validateFillNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSqlNo } } else if (strncasecmp(pItem->pVar.pz, "prev", 4) == 0 && pItem->pVar.nLen == 4) { pQueryInfo->fillType = TSDB_FILL_PREV; + if (tscIsPointInterpQuery(pQueryInfo) && pQueryInfo->order.order == TSDB_ORDER_DESC) { + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg6); + } } else if (strncasecmp(pItem->pVar.pz, "next", 4) == 0 && pItem->pVar.nLen == 4) { pQueryInfo->fillType = TSDB_FILL_NEXT; } else if (strncasecmp(pItem->pVar.pz, "linear", 6) == 0 && pItem->pVar.nLen == 6) { diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index f59270d40a..844b8ec683 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -1604,7 +1604,6 @@ static void hashAllIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pRe int32_t startPos = ascQuery? 0 : (pSDataBlock->info.rows - 1); TSKEY ts = getStartTsKey(pQueryAttr, &pSDataBlock->info.window, tsCols, pSDataBlock->info.rows); - int32_t startInterp = (pResultRowInfo->curPos == -1); STimeWindow win = getCurrentActiveTimeWindow(pResultRowInfo, ts, pQueryAttr); bool masterScan = IS_MASTER_SCAN(pRuntimeEnv); @@ -1626,10 +1625,7 @@ static void hashAllIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pRe // window start(end) key interpolation doWindowBorderInterpolation(pOperatorInfo, pSDataBlock, pInfo->pCtx, pResult, &win, startPos, forwardStep); - if (!startInterp) { - doApplyFunctions(pRuntimeEnv, pInfo->pCtx, ascQuery ? 
&win : &preWin, startPos, forwardStep, tsCols, pSDataBlock->info.rows, numOfOutput); - } - startInterp = 0; + doApplyFunctions(pRuntimeEnv, pInfo->pCtx, ascQuery ? &win : &preWin, startPos, forwardStep, tsCols, pSDataBlock->info.rows, numOfOutput); preWin = win; int32_t prevEndPos = (forwardStep - 1) * step + startPos; From 89028123997938fa5ccf07280207feacea58f894 Mon Sep 17 00:00:00 2001 From: wpan Date: Tue, 17 Aug 2021 16:04:48 +0800 Subject: [PATCH 094/165] fix bug --- src/client/src/tscSubquery.c | 1 + src/client/src/tscUtil.c | 1 + 2 files changed, 2 insertions(+) diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c index 56cee02f4d..c5afbe6dba 100644 --- a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -2744,6 +2744,7 @@ void tscHandleSubqueryError(SRetrieveSupport *trsupport, SSqlObj *pSql, int numO SSqlObj *userSql = ((SRetrieveSupport*)pParentSql->param)->pParentSql; tscFreeRetrieveSup(pParentSql); + tscFreeRetrieveSup(userSql); tscFreeSubobj(userSql); tfree(userSql->pSubs); diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index ad04550e77..a79f50b63d 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -3768,6 +3768,7 @@ static void tscSubqueryCompleteCallback(void* param, TAOS_RES* tres, int code) { SSqlObj *userSql = ((SRetrieveSupport*)pParentSql->param)->pParentSql; tscFreeRetrieveSup(pParentSql); + tscFreeRetrieveSup(userSql); tscFreeSubobj(userSql); tfree(userSql->pSubs); From bd0b2e4b484da282f7b81e580c94ce327b3a5aa7 Mon Sep 17 00:00:00 2001 From: zhaoyanggh Date: Tue, 17 Aug 2021 16:17:39 +0800 Subject: [PATCH 095/165] [TD-6070] add test case --- tests/pytest/functions/function_interp.py | 70 ++++++++++++++++++++--- 1 file changed, 61 insertions(+), 9 deletions(-) diff --git a/tests/pytest/functions/function_interp.py b/tests/pytest/functions/function_interp.py index 810c90279c..41215f15eb 100644 --- a/tests/pytest/functions/function_interp.py +++ b/tests/pytest/functions/function_interp.py @@ -26,18 +26,70 @@ class TDTestCase: self.rowNum = 10 self.ts = 1537146000000 - + def run(self): tdSql.prepare() - tdSql.execute("create table t(ts timestamp, k int)") - tdSql.execute("insert into t values('2021-1-1 1:1:1', 12);") - - tdSql.query("select interp(*) from t where ts='2021-1-1 1:1:1'") - tdSql.checkRows(1) - tdSql.checkData(0, 1, 12) + tdSql.execute("create table ap1 (ts timestamp, pav float)") + tdSql.execute("insert into ap1 values ('2021-07-25 02:19:54.119', 2.90799)") + tdSql.execute("insert into ap1 values ('2021-07-25 02:19:54.317', 3.07399)") + tdSql.execute("insert into ap1 values ('2021-07-25 02:19:54.517', 0.58117)") + tdSql.execute("insert into ap1 values ('2021-07-25 02:19:54.717', 0.16150)") + tdSql.execute("insert into ap1 values ('2021-07-25 02:19:54.918', 1.47885)") + tdSql.execute("insert into ap1 values ('2021-07-25 02:19:56.569', 1.76472)") + tdSql.execute("insert into ap1 values ('2021-07-25 02:19:57.381', 2.13722)") + tdSql.execute("insert into ap1 values ('2021-07-25 02:19:57.574', 4.10256)") + tdSql.execute("insert into ap1 values ('2021-07-25 02:19:57.776', 3.55345)") + tdSql.execute("insert into ap1 values ('2021-07-25 02:19:57.976', 1.46624)") + tdSql.execute("insert into ap1 values ('2021-07-25 02:19:58.187', 0.17943)") + tdSql.execute("insert into ap1 values ('2021-07-25 02:19:58.372', 2.04101)") + tdSql.execute("insert into ap1 values ('2021-07-25 02:19:58.573', 3.20924)") + tdSql.execute("insert into ap1 values ('2021-07-25 02:19:58.768', 1.71807)") + 
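Before the interp queries that follow, a pure-Python illustration (independent of taosd) of what the three fill modes are expected to produce at the first interval boundary, 02:19:55.000, from the two ap1 samples that bracket it; this assumes interp emits one row per 1000a boundary filled from the bracketing raw points:

# Seconds within 02:19 and the recorded values around the boundary.
t_prev, v_prev = 54.918, 1.47885      # last sample before the boundary
t_next, v_next = 56.569, 1.76472      # first sample after the boundary
t_bound = 55.000

fill_prev = v_prev                                            # FILL (PREV)
fill_next = v_next                                            # FILL (NEXT)
fill_linear = v_prev + (v_next - v_prev) * (t_bound - t_prev) / (t_next - t_prev)
print(fill_prev, fill_next, round(fill_linear, 5))            # FILL (LINEAR)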
tdSql.execute("insert into ap1 values ('2021-07-25 02:19:58.964', 4.60900)") + tdSql.execute("insert into ap1 values ('2021-07-25 02:19:59.155', 4.33907)") + tdSql.execute("insert into ap1 values ('2021-07-25 02:19:59.359', 0.76940)") + tdSql.execute("insert into ap1 values ('2021-07-25 02:19:59.553', 0.06458)") + tdSql.execute("insert into ap1 values ('2021-07-25 02:19:59.742', 4.59857)") + tdSql.execute("insert into ap1 values ('2021-07-25 02:19:59.938', 1.55081)") + + tdSql.query("select interp(pav) from ap1 where ts = '2021-07-25 02:19:54' FILL (PREV)") + tdSql.checkRows(0) + tdSql.query("select interp(pav) from ap1 where ts = '2021-07-25 02:19:54' FILL (NEXT)") + tdSql.checkRows(0) + tdSql.query("select interp(pav) from ap1 where ts = '2021-07-25 02:19:54' FILL (LINEAR)") + tdSql.checkRows(0) + tdSql.query("select interp(pav) from ap1 where ts> '2021-07-25 02:19:54' and ts<'2021-07-25 02:20:00' INTERVAL(1000a) FILL (LINEAR)") + tdSql.checkRows(6) + tdSql.query("select interp(pav) from ap1 where ts>= '2021-07-25 02:19:54' and ts<'2021-07-25 02:20:00' INTERVAL(1000a) FILL (NEXT)") + tdSql.checkRows(6) + tdSql.checkData(0,1,2.90799) + tdSql.query("select interp(pav) from ap1 where ts> '2021-07-25 02:19:54' and ts <= '2021-07-25 02:20:00' INTERVAL(1000a) FILL (PREV)") + tdSql.checkRows(7) + tdSql.checkData(1,1,1.47885) + tdSql.query("select interp(pav) from ap1 where ts>= '2021-07-25 02:19:54' and ts <= '2021-07-25 02:20:00' INTERVAL(1000a) FILL (LINEAR)") + tdSql.checkRows(7) + + # check desc order + tdSql.error("select interp(pav) from ap1 where ts = '2021-07-25 02:19:54' FILL (PREV) order by ts desc") + tdSql.query("select interp(pav) from ap1 where ts = '2021-07-25 02:19:54' FILL (NEXT) order by ts desc") + tdSql.checkRows(0) + tdSql.query("select interp(pav) from ap1 where ts = '2021-07-25 02:19:54' FILL (LINEAR) order by ts desc") + tdSql.checkRows(0) + tdSql.query("select interp(pav) from ap1 where ts> '2021-07-25 02:19:54' and ts<'2021-07-25 02:20:00' INTERVAL(1000a) FILL (LINEAR) order by ts desc") + tdSql.checkRows(6) + tdSql.query("select interp(pav) from ap1 where ts>= '2021-07-25 02:19:54' and ts<'2021-07-25 02:20:00' INTERVAL(1000a) FILL (NEXT) order by ts desc") + tdSql.checkRows(6) + tdSql.checkData(0,1,4.60900) + tdSql.error("select interp(pav) from ap1 where ts> '2021-07-25 02:19:54' and ts <= '2021-07-25 02:20:00' INTERVAL(1000a) FILL (PREV) order by ts desc") + tdSql.query("select interp(pav) from ap1 where ts>= '2021-07-25 02:19:54' and ts <= '2021-07-25 02:20:00' INTERVAL(1000a) FILL (LINEAR) order by ts desc") + tdSql.checkRows(7) + + # check exception + tdSql.error("select interp(*) from ap1") + tdSql.error("select interp(*) from ap1 FILL(NEXT)") + tdSql.error("select interp(*) from ap1 ts >= '2021-07-25 02:19:54' FILL(NEXT)") + tdSql.error("select interp(*) from ap1 ts <= '2021-07-25 02:19:54' FILL(NEXT)") + tdSql.error("select interp(*) from ap1 where ts >'2021-07-25 02:19:59.938' and ts < now interval(1s) fill(next)") - tdSql.error("select interp(*) from t where ts >'2021-1-1 1:1:1' and ts < now interval(1s) fill(next)") - def stop(self): tdSql.close() tdLog.success("%s successfully executed" % __file__) From 55c8c15742aa29e09494bbfbf19b9ee4e52010e0 Mon Sep 17 00:00:00 2001 From: wpan Date: Tue, 17 Aug 2021 16:32:38 +0800 Subject: [PATCH 096/165] fix msg null issue --- src/client/src/tscSQLParser.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index 968d62d939..e580232f01 
100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -4524,7 +4524,7 @@ static int32_t handleExprInQueryCond(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSql */ tSqlExprDestroy(*pExpr); } else { - ret = setExprToCond(&pCondExpr->pTimewindow, *pExpr, msg3, parentOptr, pQueryInfo->msg); + ret = setExprToCond(&pCondExpr->pTimewindow, *pExpr, msg3, parentOptr, pCmd->payload); } *pExpr = NULL; // remove this expression @@ -4562,7 +4562,7 @@ static int32_t handleExprInQueryCond(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSql } pQueryInfo->type |= TSDB_QUERY_TYPE_JOIN_QUERY; - ret = setExprToCond(&pCondExpr->pJoinExpr, *pExpr, NULL, parentOptr, pQueryInfo->msg); + ret = setExprToCond(&pCondExpr->pJoinExpr, *pExpr, NULL, parentOptr, pCmd->payload); *pExpr = NULL; } else { // do nothing @@ -4580,7 +4580,7 @@ static int32_t handleExprInQueryCond(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSql return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg5); } - ret = setExprToCond(&pCondExpr->pColumnCond, *pExpr, NULL, parentOptr, pQueryInfo->msg); + ret = setExprToCond(&pCondExpr->pColumnCond, *pExpr, NULL, parentOptr, pCmd->payload); *pExpr = NULL; // remove it from expr tree } From f98b41bb4f07fe8ad2f66e02091dde9cdb73b6e3 Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Tue, 17 Aug 2021 17:04:10 +0800 Subject: [PATCH 097/165] [TD-6165]: use memcpy to replace wcsncmp for nchar type comparision --- src/util/src/tcompare.c | 23 ++--------------------- 1 file changed, 2 insertions(+), 21 deletions(-) diff --git a/src/util/src/tcompare.c b/src/util/src/tcompare.c index 921da82d44..1b980a4a1d 100644 --- a/src/util/src/tcompare.c +++ b/src/util/src/tcompare.c @@ -199,16 +199,7 @@ int32_t compareLenPrefixedWStr(const void *pLeft, const void *pRight) { if (len1 != len2) { return len1 > len2? 1:-1; } else { - char *pLeftTerm = (char *)tcalloc(len1 + 1, sizeof(char)); - char *pRightTerm = (char *)tcalloc(len1 + 1, sizeof(char)); - memcpy(pLeftTerm, varDataVal(pLeft), len1); - memcpy(pRightTerm, varDataVal(pRight), len2); - - int32_t ret = wcsncmp((wchar_t*) pLeftTerm, (wchar_t*) pRightTerm, len1/TSDB_NCHAR_SIZE); - - tfree(pLeftTerm); - tfree(pRightTerm); - + int32_t ret = memcmp((wchar_t*) pLeft, (wchar_t*) pRight, len1); if (ret == 0) { return 0; } else { @@ -518,17 +509,7 @@ int32_t doCompare(const char* f1, const char* f2, int32_t type, size_t size) { if (t1->len != t2->len) { return t1->len > t2->len? 1:-1; } - - char *t1_term = (char *)tcalloc(t1->len + 1, sizeof(char)); - char *t2_term = (char *)tcalloc(t2->len + 1, sizeof(char)); - memcpy(t1_term, t1->data, t1->len); - memcpy(t2_term, t2->data, t2->len); - - int32_t ret = wcsncmp((wchar_t*) t1_term, (wchar_t*) t2_term, t2->len/TSDB_NCHAR_SIZE); - - tfree(t1_term); - tfree(t2_term); - + int32_t ret = memcmp((wchar_t*) t1, (wchar_t*) t2, t2->len); if (ret == 0) { return ret; } From 7e7b67146a4b91b33f3590efe39d20dc4c21801b Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Tue, 17 Aug 2021 17:04:10 +0800 Subject: [PATCH 098/165] [TD-6165]: use memcpy to replace wcsncmp for nchar type comparision --- src/util/src/tcompare.c | 23 ++--------------------- 1 file changed, 2 insertions(+), 21 deletions(-) diff --git a/src/util/src/tcompare.c b/src/util/src/tcompare.c index 3619dad83b..f8deb65d48 100644 --- a/src/util/src/tcompare.c +++ b/src/util/src/tcompare.c @@ -129,16 +129,7 @@ int32_t compareLenPrefixedWStr(const void *pLeft, const void *pRight) { if (len1 != len2) { return len1 > len2? 
1:-1; } else { - char *pLeftTerm = (char *)tcalloc(len1 + 1, sizeof(char)); - char *pRightTerm = (char *)tcalloc(len1 + 1, sizeof(char)); - memcpy(pLeftTerm, varDataVal(pLeft), len1); - memcpy(pRightTerm, varDataVal(pRight), len2); - - int32_t ret = wcsncmp((wchar_t*) pLeftTerm, (wchar_t*) pRightTerm, len1/TSDB_NCHAR_SIZE); - - tfree(pLeftTerm); - tfree(pRightTerm); - + int32_t ret = memcmp((wchar_t*) pLeft, (wchar_t*) pRight, len1); if (ret == 0) { return 0; } else { @@ -418,17 +409,7 @@ int32_t doCompare(const char* f1, const char* f2, int32_t type, size_t size) { if (t1->len != t2->len) { return t1->len > t2->len? 1:-1; } - - char *t1_term = (char *)tcalloc(t1->len + 1, sizeof(char)); - char *t2_term = (char *)tcalloc(t2->len + 1, sizeof(char)); - memcpy(t1_term, t1->data, t1->len); - memcpy(t2_term, t2->data, t2->len); - - int32_t ret = wcsncmp((wchar_t*) t1_term, (wchar_t*) t2_term, t2->len/TSDB_NCHAR_SIZE); - - tfree(t1_term); - tfree(t2_term); - + int32_t ret = memcmp((wchar_t*) t1, (wchar_t*) t2, t2->len); if (ret == 0) { return ret; } From 2bed48b40274b9b69d494664a1f37ba3017575f9 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Tue, 17 Aug 2021 18:38:33 +0800 Subject: [PATCH 099/165] [TD-6166]: pointer not initialized led taos.exe exit on windows. (#7425) --- deps/MsvcLibX/src/realpath.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deps/MsvcLibX/src/realpath.c b/deps/MsvcLibX/src/realpath.c index ec1b606bf6..62e701bf3a 100644 --- a/deps/MsvcLibX/src/realpath.c +++ b/deps/MsvcLibX/src/realpath.c @@ -521,7 +521,7 @@ int ResolveLinksA(const char *path, char *buf, size_t bufsize) { /* Normally defined in stdlib.h. Output buf must contain PATH_MAX bytes */ char *realpathU(const char *path, char *outbuf) { char *pOutbuf = outbuf; - char *pOutbuf1; + char *pOutbuf1 = NULL; char *pPath1 = NULL; char *pPath2 = NULL; int iErr; From 0c7cf3a918f01c51b8d8992fb64def668db0b338 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Tue, 17 Aug 2021 18:39:02 +0800 Subject: [PATCH 100/165] [TD-6166]: pointer not initialized led taos.exe exit on windows. (#7424) --- deps/MsvcLibX/src/realpath.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deps/MsvcLibX/src/realpath.c b/deps/MsvcLibX/src/realpath.c index ec1b606bf6..62e701bf3a 100644 --- a/deps/MsvcLibX/src/realpath.c +++ b/deps/MsvcLibX/src/realpath.c @@ -521,7 +521,7 @@ int ResolveLinksA(const char *path, char *buf, size_t bufsize) { /* Normally defined in stdlib.h. Output buf must contain PATH_MAX bytes */ char *realpathU(const char *path, char *outbuf) { char *pOutbuf = outbuf; - char *pOutbuf1; + char *pOutbuf1 = NULL; char *pPath1 = NULL; char *pPath2 = NULL; int iErr; From 2d62d2fe2e7e092e8c7bddf3510b304e6275358b Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Tue, 17 Aug 2021 19:22:48 +0800 Subject: [PATCH 101/165] Hotfix/sangshuduo/td 6166 pointer not init for 2171 (#7428) * [TD-6166]: pointer not initialized led taos.exe exit on windows. * another line. --- deps/MsvcLibX/src/realpath.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deps/MsvcLibX/src/realpath.c b/deps/MsvcLibX/src/realpath.c index 62e701bf3a..e2ba755f2d 100644 --- a/deps/MsvcLibX/src/realpath.c +++ b/deps/MsvcLibX/src/realpath.c @@ -196,7 +196,7 @@ not_compact_enough: /* Normally defined in stdlib.h. 
Output buf must contain PATH_MAX bytes */ char *realpath(const char *path, char *outbuf) { char *pOutbuf = outbuf; - char *pOutbuf1; + char *pOutbuf1 = NULL; int iErr; const char *pc; From 5e15ac45fc1c9d341d3fcc18ed6c72b663872055 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Tue, 17 Aug 2021 19:24:37 +0800 Subject: [PATCH 102/165] Hotfix/sangshuduo/td 6166 pointer not init (#7427) * [TD-6166]: pointer not initialized led taos.exe exit on windows. * another line. --- deps/MsvcLibX/src/realpath.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deps/MsvcLibX/src/realpath.c b/deps/MsvcLibX/src/realpath.c index 62e701bf3a..e2ba755f2d 100644 --- a/deps/MsvcLibX/src/realpath.c +++ b/deps/MsvcLibX/src/realpath.c @@ -196,7 +196,7 @@ not_compact_enough: /* Normally defined in stdlib.h. Output buf must contain PATH_MAX bytes */ char *realpath(const char *path, char *outbuf) { char *pOutbuf = outbuf; - char *pOutbuf1; + char *pOutbuf1 = NULL; int iErr; const char *pc; From 19feac31c96a61210e592589b4f376d3ea4df710 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Wed, 18 Aug 2021 01:13:22 +0000 Subject: [PATCH 103/165] [TD-6198] fix bigint sum error --- src/common/src/ttypes.c | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/src/common/src/ttypes.c b/src/common/src/ttypes.c index eeffe49adc..ee940531e6 100644 --- a/src/common/src/ttypes.c +++ b/src/common/src/ttypes.c @@ -38,11 +38,7 @@ const int32_t TYPE_BYTES[15] = { #define DO_STATICS(__sum, __min, __max, __minIndex, __maxIndex, _list, _index) \ do { \ - if (_list[(_index)] >= (INT64_MAX - (__sum))) { \ - __sum = INT64_MAX; \ - } else { \ - (__sum) += (_list)[(_index)]; \ - } \ + (__sum) += (_list)[(_index)]; \ if ((__min) > (_list)[(_index)]) { \ (__min) = (_list)[(_index)]; \ (__minIndex) = (_index); \ From 35b82a644f559a240f78cde78af37bd778dd4626 Mon Sep 17 00:00:00 2001 From: wpan Date: Wed, 18 Aug 2021 09:40:18 +0800 Subject: [PATCH 104/165] fix nested query bug --- src/client/inc/tscSubquery.h | 2 -- src/client/src/tscSubquery.c | 42 ++++++++---------------------------- src/client/src/tscUtil.c | 23 +++++++++----------- 3 files changed, 19 insertions(+), 48 deletions(-) diff --git a/src/client/inc/tscSubquery.h b/src/client/inc/tscSubquery.h index 99215551c3..a012ca5a7f 100644 --- a/src/client/inc/tscSubquery.h +++ b/src/client/inc/tscSubquery.h @@ -52,8 +52,6 @@ int tsInsertInitialCheck(SSqlObj *pSql); void doCleanupSubqueries(SSqlObj *pSql, int32_t numOfSubs); -void tscFreeRetrieveSupporters(SSqlObj *pSql); - void tscFreeRetrieveSup(SSqlObj *pSql); diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c index c5afbe6dba..d16d744393 100644 --- a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -2040,11 +2040,8 @@ void doCleanupSubqueries(SSqlObj *pSql, int32_t numOfSubs) { for(int32_t i = 0; i < numOfSubs; ++i) { SSqlObj* pSub = pSql->pSubs[i]; assert(pSub != NULL); - - SRetrieveSupport* pSupport = pSub->param; - - tfree(pSupport->localBuffer); - tfree(pSupport); + + tscFreeRetrieveSup(pSub); taos_free_result(pSub); } @@ -2576,18 +2573,6 @@ void tscFreeRetrieveSup(SSqlObj *pSql) { tfree(trsupport); } -void tscFreeRetrieveSupporters(SSqlObj *pSql) { - for(int32_t i = 0; i < pSql->subState.numOfSub; ++i) { - SSqlObj* pSub = pSql->pSubs[i]; - assert(pSub != NULL); - - tscFreeRetrieveSup(pSub); - } -} - - - - static void tscRetrieveFromDnodeCallBack(void *param, TAOS_RES *tres, int numOfRows); static void tscHandleSubqueryError(SRetrieveSupport *trsupport, SSqlObj *pSql, 
int numOfRows); @@ -2732,30 +2717,21 @@ void tscHandleSubqueryError(SRetrieveSupport *trsupport, SSqlObj *pSql, int numO if (!TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_JOIN_SEC_STAGE)) { int32_t code = pParentSql->res.code; - if ((code == TSDB_CODE_TDB_INVALID_TABLE_ID || code == TSDB_CODE_VND_INVALID_VGROUP_ID) && pParentSql->retry < pParentSql->maxRetry) { - // remove the cached tableMeta and vgroup id list, and then parse the sql again - SSqlCmd* pParentCmd = &pParentSql->cmd; - STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(pParentCmd, 0); - tscRemoveTableMetaBuf(pTableMetaInfo, pParentSql->self); - - pParentCmd->pTableMetaMap = tscCleanupTableMetaMap(pParentCmd->pTableMetaMap); - pParentCmd->pTableMetaMap = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK); - - SSqlObj *userSql = ((SRetrieveSupport*)pParentSql->param)->pParentSql; + SSqlObj *userSql = ((SRetrieveSupport*)pParentSql->param)->pParentSql; + if ((code == TSDB_CODE_TDB_INVALID_TABLE_ID || code == TSDB_CODE_VND_INVALID_VGROUP_ID) && userSql->retry < userSql->maxRetry) { tscFreeRetrieveSup(pParentSql); - tscFreeRetrieveSup(userSql); tscFreeSubobj(userSql); tfree(userSql->pSubs); - pParentSql->res.code = TSDB_CODE_SUCCESS; - pParentSql->retry++; + userSql->res.code = TSDB_CODE_SUCCESS; + userSql->retry++; - tscDebug("0x%"PRIx64" retry parse sql and send query, prev error: %s, retry:%d", pParentSql->self, - tstrerror(code), pParentSql->retry); + tscDebug("0x%"PRIx64" retry parse sql and send query, prev error: %s, retry:%d", userSql->self, + tstrerror(code), userSql->retry); - tscResetSqlCmd(&userSql->cmd, false); + tscResetSqlCmd(&userSql->cmd, true); code = tsParseSql(userSql, true); if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) { return; diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index a79f50b63d..06b61d443d 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -3758,29 +3758,26 @@ static void tscSubqueryCompleteCallback(void* param, TAOS_RES* tres, int code) { // todo refactor tscDebug("0x%"PRIx64" all subquery response received, retry", pParentSql->self); - SSqlCmd* pParentCmd = &pParentSql->cmd; - STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(pParentCmd, 0); - tscRemoveTableMetaBuf(pTableMetaInfo, pParentSql->self); - - pParentCmd->pTableMetaMap = tscCleanupTableMetaMap(pParentCmd->pTableMetaMap); - pParentCmd->pTableMetaMap = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK); - SSqlObj *userSql = ((SRetrieveSupport*)pParentSql->param)->pParentSql; tscFreeRetrieveSup(pParentSql); - tscFreeRetrieveSup(userSql); + + if (code && !((code == TSDB_CODE_TDB_INVALID_TABLE_ID || code == TSDB_CODE_VND_INVALID_VGROUP_ID) && userSql->retry < userSql->maxRetry)) { + tscAsyncResultOnError(pParentSql); + return; + } tscFreeSubobj(userSql); tfree(userSql->pSubs); - pParentSql->res.code = TSDB_CODE_SUCCESS; - pParentSql->retry++; + userSql->res.code = TSDB_CODE_SUCCESS; + userSql->retry++; - tscDebug("0x%"PRIx64" retry parse sql and send query, prev error: %s, retry:%d", pParentSql->self, - tstrerror(code), pParentSql->retry); + tscDebug("0x%"PRIx64" retry parse sql and send query, prev error: %s, retry:%d", userSql->self, + tstrerror(code), userSql->retry); - tscResetSqlCmd(&userSql->cmd, false); + tscResetSqlCmd(&userSql->cmd, true); code = tsParseSql(userSql, true); if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) { From cdac4b6d4029f06e64506bfe76ebda79a75ca91e Mon Sep 17 00:00:00 2001 
From: yihaoDeng Date: Wed, 18 Aug 2021 02:53:55 +0000 Subject: [PATCH 105/165] [TD-6088] handel bigint sum --- src/common/src/ttypes.c | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/src/common/src/ttypes.c b/src/common/src/ttypes.c index eeffe49adc..ee940531e6 100644 --- a/src/common/src/ttypes.c +++ b/src/common/src/ttypes.c @@ -38,11 +38,7 @@ const int32_t TYPE_BYTES[15] = { #define DO_STATICS(__sum, __min, __max, __minIndex, __maxIndex, _list, _index) \ do { \ - if (_list[(_index)] >= (INT64_MAX - (__sum))) { \ - __sum = INT64_MAX; \ - } else { \ - (__sum) += (_list)[(_index)]; \ - } \ + (__sum) += (_list)[(_index)]; \ if ((__min) > (_list)[(_index)]) { \ (__min) = (_list)[(_index)]; \ (__minIndex) = (_index); \ From 1c0e89de2bbcdbcf0ecb2f388d2b35d4917bb93c Mon Sep 17 00:00:00 2001 From: Cary Xu Date: Wed, 18 Aug 2021 14:52:05 +0800 Subject: [PATCH 106/165] enhance performacne when inserting from csv --- src/client/inc/tsclient.h | 8 +- src/client/src/tscParseInsert.c | 384 ++++++++++++++++++++++++++++++-- src/client/src/tscUtil.c | 1 + 3 files changed, 375 insertions(+), 18 deletions(-) diff --git a/src/client/inc/tsclient.h b/src/client/inc/tsclient.h index 4ead7d4180..99ed082236 100644 --- a/src/client/inc/tsclient.h +++ b/src/client/inc/tsclient.h @@ -123,17 +123,15 @@ typedef struct { int32_t kvLen; // len of SKVRow } SMemRowInfo; typedef struct { - uint8_t memRowType; - uint8_t compareStat; // 0 unknown, 1 need compare, 2 no need - TDRowTLenT dataRowInitLen; + uint8_t memRowType; // default is 0, that is SDataRow + uint8_t compareStat; // 0 no need, 1 need compare TDRowTLenT kvRowInitLen; SMemRowInfo *rowInfo; } SMemRowBuilder; typedef enum { - ROW_COMPARE_UNKNOWN = 0, + ROW_COMPARE_NO_NEED = 0, ROW_COMPARE_NEED = 1, - ROW_COMPARE_NO_NEED = 2, } ERowCompareStat; int tsParseTime(SStrToken *pToken, int64_t *time, char **next, char *error, int16_t timePrec); diff --git a/src/client/src/tscParseInsert.c b/src/client/src/tscParseInsert.c index 89e3832007..dab0dff1fc 100644 --- a/src/client/src/tscParseInsert.c +++ b/src/client/src/tscParseInsert.c @@ -53,18 +53,18 @@ int initMemRowBuilder(SMemRowBuilder *pBuilder, uint32_t nRows, uint3 if (nBoundCols == 0) { // file input pBuilder->memRowType = SMEM_ROW_DATA; - pBuilder->compareStat = ROW_COMPARE_NO_NEED; + // pBuilder->compareStat = ROW_COMPARE_NO_NEED; return TSDB_CODE_SUCCESS; } else { float boundRatio = ((float)nBoundCols / (float)nCols); if (boundRatio < KVRatioKV) { pBuilder->memRowType = SMEM_ROW_KV; - pBuilder->compareStat = ROW_COMPARE_NO_NEED; + // pBuilder->compareStat = ROW_COMPARE_NO_NEED; return TSDB_CODE_SUCCESS; } else if (boundRatio > KVRatioData) { pBuilder->memRowType = SMEM_ROW_DATA; - pBuilder->compareStat = ROW_COMPARE_NO_NEED; + // pBuilder->compareStat = ROW_COMPARE_NO_NEED; return TSDB_CODE_SUCCESS; } pBuilder->compareStat = ROW_COMPARE_NEED; @@ -76,7 +76,6 @@ int initMemRowBuilder(SMemRowBuilder *pBuilder, uint32_t nRows, uint3 } } - pBuilder->dataRowInitLen = TD_MEM_ROW_DATA_HEAD_SIZE + allNullLen; pBuilder->kvRowInitLen = TD_MEM_ROW_KV_HEAD_SIZE + nBoundCols * sizeof(SColIdx); if (nRows > 0) { @@ -86,7 +85,7 @@ int initMemRowBuilder(SMemRowBuilder *pBuilder, uint32_t nRows, uint3 } for (int i = 0; i < nRows; ++i) { - (pBuilder->rowInfo + i)->dataLen = pBuilder->dataRowInitLen; + (pBuilder->rowInfo + i)->dataLen = TD_MEM_ROW_DATA_HEAD_SIZE + allNullLen; (pBuilder->rowInfo + i)->kvLen = pBuilder->kvRowInitLen; } } @@ -449,6 +448,370 @@ int32_t tsCheckTimestamp(STableDataBlocks 
*pDataBlocks, const char *start) { return TSDB_CODE_SUCCESS; } +static int32_t tsParseOneColumnOld(SSchema *pSchema, SStrToken *pToken, char *payload, char *msg, char **str, + bool primaryKey, int16_t timePrec) { + int64_t iv; + int32_t ret; + char * endptr = NULL; + + if (IS_NUMERIC_TYPE(pSchema->type) && pToken->n == 0) { + return tscInvalidOperationMsg(msg, "invalid numeric data", pToken->z); + } + + switch (pSchema->type) { + case TSDB_DATA_TYPE_BOOL: { // bool + if (isNullStr(pToken)) { + *((uint8_t *)payload) = TSDB_DATA_BOOL_NULL; + } else { + if ((pToken->type == TK_BOOL || pToken->type == TK_STRING) && (pToken->n != 0)) { + if (strncmp(pToken->z, "true", pToken->n) == 0) { + *(uint8_t *)payload = TSDB_TRUE; + } else if (strncmp(pToken->z, "false", pToken->n) == 0) { + *(uint8_t *)payload = TSDB_FALSE; + } else { + return tscSQLSyntaxErrMsg(msg, "invalid bool data", pToken->z); + } + } else if (pToken->type == TK_INTEGER) { + iv = strtoll(pToken->z, NULL, 10); + *(uint8_t *)payload = (int8_t)((iv == 0) ? TSDB_FALSE : TSDB_TRUE); + } else if (pToken->type == TK_FLOAT) { + double dv = strtod(pToken->z, NULL); + *(uint8_t *)payload = (int8_t)((dv == 0) ? TSDB_FALSE : TSDB_TRUE); + } else { + return tscInvalidOperationMsg(msg, "invalid bool data", pToken->z); + } + } + break; + } + + case TSDB_DATA_TYPE_TINYINT: + if (isNullStr(pToken)) { + *((uint8_t *)payload) = TSDB_DATA_TINYINT_NULL; + } else { + ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, true); + if (ret != TSDB_CODE_SUCCESS) { + return tscInvalidOperationMsg(msg, "invalid tinyint data", pToken->z); + } else if (!IS_VALID_TINYINT(iv)) { + return tscInvalidOperationMsg(msg, "data overflow", pToken->z); + } + + *((uint8_t *)payload) = (uint8_t)iv; + } + + break; + + case TSDB_DATA_TYPE_UTINYINT: + if (isNullStr(pToken)) { + *((uint8_t *)payload) = TSDB_DATA_UTINYINT_NULL; + } else { + ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, false); + if (ret != TSDB_CODE_SUCCESS) { + return tscInvalidOperationMsg(msg, "invalid unsigned tinyint data", pToken->z); + } else if (!IS_VALID_UTINYINT(iv)) { + return tscInvalidOperationMsg(msg, "unsigned tinyint data overflow", pToken->z); + } + + *((uint8_t *)payload) = (uint8_t)iv; + } + + break; + + case TSDB_DATA_TYPE_SMALLINT: + if (isNullStr(pToken)) { + *((int16_t *)payload) = TSDB_DATA_SMALLINT_NULL; + } else { + ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, true); + if (ret != TSDB_CODE_SUCCESS) { + return tscInvalidOperationMsg(msg, "invalid smallint data", pToken->z); + } else if (!IS_VALID_SMALLINT(iv)) { + return tscInvalidOperationMsg(msg, "smallint data overflow", pToken->z); + } + + *((int16_t *)payload) = (int16_t)iv; + } + + break; + + case TSDB_DATA_TYPE_USMALLINT: + if (isNullStr(pToken)) { + *((uint16_t *)payload) = TSDB_DATA_USMALLINT_NULL; + } else { + ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, false); + if (ret != TSDB_CODE_SUCCESS) { + return tscInvalidOperationMsg(msg, "invalid unsigned smallint data", pToken->z); + } else if (!IS_VALID_USMALLINT(iv)) { + return tscInvalidOperationMsg(msg, "unsigned smallint data overflow", pToken->z); + } + + *((uint16_t *)payload) = (uint16_t)iv; + } + + break; + + case TSDB_DATA_TYPE_INT: + if (isNullStr(pToken)) { + *((int32_t *)payload) = TSDB_DATA_INT_NULL; + } else { + ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, true); + if (ret != TSDB_CODE_SUCCESS) { + return tscInvalidOperationMsg(msg, "invalid int data", pToken->z); + } else if (!IS_VALID_INT(iv)) { 
+ return tscInvalidOperationMsg(msg, "int data overflow", pToken->z); + } + + *((int32_t *)payload) = (int32_t)iv; + } + + break; + + case TSDB_DATA_TYPE_UINT: + if (isNullStr(pToken)) { + *((uint32_t *)payload) = TSDB_DATA_UINT_NULL; + } else { + ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, false); + if (ret != TSDB_CODE_SUCCESS) { + return tscInvalidOperationMsg(msg, "invalid unsigned int data", pToken->z); + } else if (!IS_VALID_UINT(iv)) { + return tscInvalidOperationMsg(msg, "unsigned int data overflow", pToken->z); + } + + *((uint32_t *)payload) = (uint32_t)iv; + } + + break; + + case TSDB_DATA_TYPE_BIGINT: + if (isNullStr(pToken)) { + *((int64_t *)payload) = TSDB_DATA_BIGINT_NULL; + } else { + ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, true); + if (ret != TSDB_CODE_SUCCESS) { + return tscInvalidOperationMsg(msg, "invalid bigint data", pToken->z); + } else if (!IS_VALID_BIGINT(iv)) { + return tscInvalidOperationMsg(msg, "bigint data overflow", pToken->z); + } + + *((int64_t *)payload) = iv; + } + break; + + case TSDB_DATA_TYPE_UBIGINT: + if (isNullStr(pToken)) { + *((uint64_t *)payload) = TSDB_DATA_UBIGINT_NULL; + } else { + ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, false); + if (ret != TSDB_CODE_SUCCESS) { + return tscInvalidOperationMsg(msg, "invalid unsigned bigint data", pToken->z); + } else if (!IS_VALID_UBIGINT((uint64_t)iv)) { + return tscInvalidOperationMsg(msg, "unsigned bigint data overflow", pToken->z); + } + + *((uint64_t *)payload) = iv; + } + break; + + case TSDB_DATA_TYPE_FLOAT: + if (isNullStr(pToken)) { + *((int32_t *)payload) = TSDB_DATA_FLOAT_NULL; + } else { + double dv; + if (TK_ILLEGAL == tscToDouble(pToken, &dv, &endptr)) { + return tscInvalidOperationMsg(msg, "illegal float data", pToken->z); + } + + if (((dv == HUGE_VAL || dv == -HUGE_VAL) && errno == ERANGE) || dv > FLT_MAX || dv < -FLT_MAX || isinf(dv) || + isnan(dv)) { + return tscInvalidOperationMsg(msg, "illegal float data", pToken->z); + } + + // *((float *)payload) = (float)dv; + SET_FLOAT_VAL(payload, dv); + } + break; + + case TSDB_DATA_TYPE_DOUBLE: + if (isNullStr(pToken)) { + *((int64_t *)payload) = TSDB_DATA_DOUBLE_NULL; + } else { + double dv; + if (TK_ILLEGAL == tscToDouble(pToken, &dv, &endptr)) { + return tscInvalidOperationMsg(msg, "illegal double data", pToken->z); + } + + if (((dv == HUGE_VAL || dv == -HUGE_VAL) && errno == ERANGE) || isinf(dv) || isnan(dv)) { + return tscInvalidOperationMsg(msg, "illegal double data", pToken->z); + } + + *((double *)payload) = dv; + } + break; + + case TSDB_DATA_TYPE_BINARY: + // binary data cannot be null-terminated char string, otherwise the last char of the string is lost + if (pToken->type == TK_NULL) { + setVardataNull(payload, TSDB_DATA_TYPE_BINARY); + } else { // too long values will return invalid sql, not be truncated automatically + if (pToken->n + VARSTR_HEADER_SIZE > pSchema->bytes) { // todo refactor + return tscInvalidOperationMsg(msg, "string data overflow", pToken->z); + } + + STR_WITH_SIZE_TO_VARSTR(payload, pToken->z, pToken->n); + } + + break; + + case TSDB_DATA_TYPE_NCHAR: + if (pToken->type == TK_NULL) { + setVardataNull(payload, TSDB_DATA_TYPE_NCHAR); + } else { + // if the converted output len is over than pColumnModel->bytes, return error: 'Argument list too long' + int32_t output = 0; + if (!taosMbsToUcs4(pToken->z, pToken->n, varDataVal(payload), pSchema->bytes - VARSTR_HEADER_SIZE, &output)) { + char buf[512] = {0}; + snprintf(buf, tListLen(buf), "%s", strerror(errno)); + 
return tscInvalidOperationMsg(msg, buf, pToken->z); + } + + varDataSetLen(payload, output); + } + break; + + case TSDB_DATA_TYPE_TIMESTAMP: { + if (pToken->type == TK_NULL) { + if (primaryKey) { + *((int64_t *)payload) = 0; + } else { + *((int64_t *)payload) = TSDB_DATA_BIGINT_NULL; + } + } else { + int64_t temp; + if (tsParseTime(pToken, &temp, str, msg, timePrec) != TSDB_CODE_SUCCESS) { + return tscInvalidOperationMsg(msg, "invalid timestamp", pToken->z); + } + + *((int64_t *)payload) = temp; + } + + break; + } + } + + return TSDB_CODE_SUCCESS; +} + +static int tsParseOneRowOld(char **str, STableDataBlocks *pDataBlocks, int16_t timePrec, int32_t *len, + char *tmpTokenBuf, SInsertStatementParam *pInsertParam) { + int32_t index = 0; + SStrToken sToken = {0}; + char * payload = pDataBlocks->pData + pDataBlocks->size; + + SParsedDataColInfo *spd = &pDataBlocks->boundColumnInfo; + SSchema * schema = tscGetTableSchema(pDataBlocks->pTableMeta); + + // 1. set the parsed value from sql string + int32_t rowSize = 0; + for (int i = 0; i < spd->numOfBound; ++i) { + // the start position in data block buffer of current value in sql + int32_t colIndex = spd->boundedColumns[i]; + + char * start = payload + spd->cols[colIndex].offset; + SSchema *pSchema = &schema[colIndex]; + rowSize += pSchema->bytes; + + index = 0; + sToken = tStrGetToken(*str, &index, true); + *str += index; + + if (sToken.type == TK_QUESTION) { + if (pInsertParam->insertType != TSDB_QUERY_TYPE_STMT_INSERT) { + return tscSQLSyntaxErrMsg(pInsertParam->msg, "? only allowed in binding insertion", *str); + } + + uint32_t offset = (uint32_t)(start - pDataBlocks->pData); + if (tscAddParamToDataBlock(pDataBlocks, pSchema->type, (uint8_t)timePrec, pSchema->bytes, offset) != NULL) { + continue; + } + + strcpy(pInsertParam->msg, "client out of memory"); + return TSDB_CODE_TSC_OUT_OF_MEMORY; + } + + int16_t type = sToken.type; + if ((type != TK_NOW && type != TK_INTEGER && type != TK_STRING && type != TK_FLOAT && type != TK_BOOL && + type != TK_NULL && type != TK_HEX && type != TK_OCT && type != TK_BIN) || + (sToken.n == 0) || (type == TK_RP)) { + return tscSQLSyntaxErrMsg(pInsertParam->msg, "invalid data or symbol", sToken.z); + } + + // Remove quotation marks + if (TK_STRING == sToken.type) { + // delete escape character: \\, \', \" + char delim = sToken.z[0]; + + int32_t cnt = 0; + int32_t j = 0; + if (sToken.n >= TSDB_MAX_BYTES_PER_ROW) { + return tscSQLSyntaxErrMsg(pInsertParam->msg, "too long string", sToken.z); + } + + for (uint32_t k = 1; k < sToken.n - 1; ++k) { + if (sToken.z[k] == '\\' || (sToken.z[k] == delim && sToken.z[k + 1] == delim)) { + tmpTokenBuf[j] = sToken.z[k + 1]; + + cnt++; + j++; + k++; + continue; + } + + tmpTokenBuf[j] = sToken.z[k]; + j++; + } + + tmpTokenBuf[j] = 0; + sToken.z = tmpTokenBuf; + sToken.n -= 2 + cnt; + } + + bool isPrimaryKey = (colIndex == PRIMARYKEY_TIMESTAMP_COL_INDEX); + int32_t ret = tsParseOneColumnOld(pSchema, &sToken, start, pInsertParam->msg, str, isPrimaryKey, timePrec); + if (ret != TSDB_CODE_SUCCESS) { + return ret; + } + + if (isPrimaryKey && tsCheckTimestamp(pDataBlocks, start) != TSDB_CODE_SUCCESS) { + tscInvalidOperationMsg(pInsertParam->msg, "client time/server time can not be mixed up", sToken.z); + return TSDB_CODE_TSC_INVALID_TIME_STAMP; + } + } + + // 2. 
set the null value for the columns that do not assign values + if (spd->numOfBound < spd->numOfCols) { + char *ptr = payload; + + for (int32_t i = 0; i < spd->numOfCols; ++i) { + if (spd->cols[i].valStat == VAL_STAT_NONE) { // current column do not have any value to insert, set it to null + if (schema[i].type == TSDB_DATA_TYPE_BINARY) { + varDataSetLen(ptr, sizeof(int8_t)); + *(uint8_t *)varDataVal(ptr) = TSDB_DATA_BINARY_NULL; + } else if (schema[i].type == TSDB_DATA_TYPE_NCHAR) { + varDataSetLen(ptr, sizeof(int32_t)); + *(uint32_t *)varDataVal(ptr) = TSDB_DATA_NCHAR_NULL; + } else { + setNull(ptr, schema[i].type, schema[i].bytes); + } + } + + ptr += schema[i].bytes; + } + + rowSize = (int32_t)(ptr - payload); + } + + *len = rowSize; + return TSDB_CODE_SUCCESS; +} + int tsParseOneRow(char **str, STableDataBlocks *pDataBlocks, int16_t timePrec, int32_t *len, char *tmpTokenBuf, SInsertStatementParam *pInsertParam) { int32_t index = 0; @@ -460,7 +823,7 @@ int tsParseOneRow(char **str, STableDataBlocks *pDataBlocks, int16_t timePrec, i STableMeta * pTableMeta = pDataBlocks->pTableMeta; SSchema * schema = tscGetTableSchema(pTableMeta); SMemRowBuilder * pBuilder = &pDataBlocks->rowBuilder; - int32_t dataLen = pBuilder->dataRowInitLen; + int32_t dataLen = spd->allNullLen + TD_MEM_ROW_DATA_HEAD_SIZE; int32_t kvLen = pBuilder->kvRowInitLen; bool isParseBindParam = false; @@ -1698,6 +2061,7 @@ static void parseFileSendDataBlock(void *param, TAOS_RES *tres, int32_t numOfRow STableComInfo tinfo = tscGetTableInfo(pTableMeta); SInsertStatementParam* pInsertParam = &pCmd->insertParam; + pInsertParam->payloadType = PAYLOAD_TYPE_RAW; destroyTableNameList(pInsertParam); pInsertParam->pDataBlocks = tscDestroyBlockArrayList(pInsertParam->pDataBlocks); @@ -1726,12 +2090,6 @@ static void parseFileSendDataBlock(void *param, TAOS_RES *tres, int32_t numOfRow goto _error; } - if (TSDB_CODE_SUCCESS != - (ret = initMemRowBuilder(&pTableDataBlock->rowBuilder, 0, tinfo.numOfColumns, pTableDataBlock->numOfParams, - pTableDataBlock->boundColumnInfo.allNullLen))) { - goto _error; - } - while ((readLen = tgetline(&line, &n, fp)) != -1) { if (('\r' == line[readLen - 1]) || ('\n' == line[readLen - 1])) { line[--readLen] = 0; @@ -1745,7 +2103,7 @@ static void parseFileSendDataBlock(void *param, TAOS_RES *tres, int32_t numOfRow strtolower(line, line); int32_t len = 0; - code = tsParseOneRow(&lineptr, pTableDataBlock, tinfo.precision, &len, tokenBuf, pInsertParam); + code = tsParseOneRowOld(&lineptr, pTableDataBlock, tinfo.precision, &len, tokenBuf, pInsertParam); if (code != TSDB_CODE_SUCCESS || pTableDataBlock->numOfParams > 0) { pSql->res.code = code; break; diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index 612d6a5642..f3e30172ab 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -1744,6 +1744,7 @@ int32_t tscCreateDataBlock(size_t defaultSize, int32_t rowSize, int32_t startOff dataBuf->tsSource = -1; dataBuf->vgId = dataBuf->pTableMeta->vgId; + tNameAssign(&dataBuf->tableName, name); assert(defaultSize > 0 && pTableMeta != NULL && dataBuf->pTableMeta != NULL); From ffcc93016a6a46fb21001e213d5ac26e10172afc Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Wed, 18 Aug 2021 16:12:36 +0800 Subject: [PATCH 107/165] Hotfix/sangshuduo/td 5136 taosdemo rework for master (#7433) * cherry pick from develop branch. * [TD-5136]: taosdemo simulate real senario. * update test case according to taosdemo change * adjust range of semi-random data. * make demo mode use different tag name and value. 
* change malloc to calloc for pid allocation. * fix typo. * fix binary length default value and group id logic. Co-authored-by: Shuduo Sang --- src/kit/taosdemo/taosdemo.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 18f5877e09..016e27dc13 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -626,7 +626,7 @@ SArguments g_args = { "INT", // datatype "FLOAT", // datatype. DEFAULT_DATATYPE_NUM is 3 }, - 16, // len_of_binary + 64, // len_of_binary 4, // num_of_CPR 10, // num_of_connections/thread 0, // insert_interval @@ -2598,7 +2598,7 @@ static char* generateTagValuesForStb(SSuperTable* stbInfo, int64_t tableSeq) { if ((g_args.demo_mode) && (i == 0)) { dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, - "%"PRId64",", tableSeq % 10); + "%"PRId64",", (tableSeq % 10) + 1); } else { dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, From 53b398c08d904f3c97dd1790747782af08185135 Mon Sep 17 00:00:00 2001 From: fastjrun Date: Wed, 18 Aug 2021 16:19:25 +0800 Subject: [PATCH 108/165] =?UTF-8?q?=E6=96=87=E6=A1=A3=E8=BF=87=E6=97=B6?= =?UTF-8?q?=EF=BC=9A1=E3=80=81tdsdemo=E4=B8=AD=E5=88=9D=E5=A7=8B=E5=8C=96?= =?UTF-8?q?=E7=9A=84=E8=A1=A8=E5=90=8D=E5=89=8D=E7=BC=80=E5=B7=B2=E7=BB=8F?= =?UTF-8?q?=E7=94=B1t=E6=94=B9=E4=B8=BAd=EF=BC=8C2=E3=80=81=E7=9B=B8?= =?UTF-8?q?=E5=85=B3=E8=A1=A8=E5=AD=97=E6=AE=B5=E4=B9=9F=E6=9B=B4=E6=96=B0?= =?UTF-8?q?=EF=BC=9B3=E3=80=81=E7=9B=AE=E5=89=8D=E9=95=9C=E5=83=8F?= =?UTF-8?q?=E5=B7=B2=E7=BB=8F=E6=94=AF=E6=8C=81=E5=A4=9A=E5=B9=B3=E5=8F=B0?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../cn/02.getting-started/01.docker/docs.md | 170 +++++++++++------- 1 file changed, 101 insertions(+), 69 deletions(-) diff --git a/documentation20/cn/02.getting-started/01.docker/docs.md b/documentation20/cn/02.getting-started/01.docker/docs.md index 30803d9777..4ac6d96ec1 100644 --- a/documentation20/cn/02.getting-started/01.docker/docs.md +++ b/documentation20/cn/02.getting-started/01.docker/docs.md @@ -1,6 +1,6 @@ # 通过 Docker 快速体验 TDengine -虽然并不推荐在生产环境中通过 Docker 来部署 TDengine 服务,但 Docker 工具能够很好地屏蔽底层操作系统的环境差异,很适合在开发测试或初次体验时用于安装运行 TDengine 的工具集。特别是,借助 Docker,能够比较方便地在 Mac OSX 和 Windows 系统上尝试 TDengine,而无需安装虚拟机或额外租用 Linux 服务器。 +虽然并不推荐在生产环境中通过 Docker 来部署 TDengine 服务,但 Docker 工具能够很好地屏蔽底层操作系统的环境差异,很适合在开发测试或初次体验时用于安装运行 TDengine 的工具集。特别是,借助 Docker,能够比较方便地在 Mac OSX 和 Windows 系统上尝试 TDengine,而无需安装虚拟机或额外租用 Linux 服务器。另外,从2.0.14.0版本开始,TDengine提供的镜像已经可以同时支持X86-64、X86、arm64、arm32平台,像NAS、树莓派、嵌入式开发板之类可以运行docker的非主流计算机也可以基于本文档轻松体验TDengine。 下文通过 Step by Step 风格的介绍,讲解如何通过 Docker 快速建立 TDengine 的单节点运行环境,以支持开发和测试。 @@ -12,7 +12,7 @@ Docker 工具自身的下载请参考 [Docker官网文档](https://docs.docker.c ```bash $ docker -v -Docker version 20.10.5, build 55c4c88 +Docker version 20.10.3, build 48d30b5 ``` ## 在 Docker 容器中运行 TDengine @@ -20,21 +20,22 @@ Docker version 20.10.5, build 55c4c88 1,使用命令拉取 TDengine 镜像,并使它在后台运行。 ```bash -$ docker run -d tdengine/tdengine -cdf548465318c6fc2ad97813f89cc60006393392401cae58a27b15ca9171f316 +$ docker run -d --name tdengine tdengine/tdengine +7760c955f225d72e9c1ec5a4cef66149a7b94dae7598b11eb392138877e7d292 ``` -- **docker run**:通过 Docker 运行一个容器。 -- **-d**:让容器在后台运行。 -- **tdengine/tdengine**:拉取的 TDengine 官方发布的应用镜像。 -- **cdf548465318c6fc2ad97813f89cc60006393392401cae58a27b15ca9171f316**:这个返回的长字符是容器 ID,我们可以通过容器 ID 来查看对应的容器。 +- **docker run**:通过 Docker 运行一个容器 +- **--name tdengine**:设置容器名称,我们可以通过容器名称来查看对应的容器 +- 
**-d**:让容器在后台运行 +- **tdengine/tdengine**:拉取的 TDengine 官方发布的应用镜像 +- **7760c955f225d72e9c1ec5a4cef66149a7b94dae7598b11eb392138877e7d292**:这个返回的长字符是容器 ID,我们也可以通过容器 ID 来查看对应的容器 2,确认容器是否已经正确运行。 ```bash $ docker ps CONTAINER ID IMAGE COMMAND CREATED STATUS ··· -cdf548465318 tdengine/tdengine "taosd" 14 minutes ago Up 14 minutes ··· +c452519b0f9b tdengine/tdengine "taosd" 14 minutes ago Up 14 minutes ··· ``` - **docker ps**:列出所有正在运行状态的容器信息。 @@ -47,25 +48,25 @@ cdf548465318 tdengine/tdengine "taosd" 14 minutes ago Up 14 minutes · 3,进入 Docker 容器内,使用 TDengine。 ```bash -$ docker exec -it cdf548465318 /bin/bash -root@cdf548465318:~/TDengine-server-2.0.13.0# +$ docker exec -it tdengine /bin/bash +root@c452519b0f9b:~/TDengine-server-2.0.20.13# ``` - **docker exec**:通过 docker exec 命令进入容器,如果退出,容器不会停止。 - **-i**:进入交互模式。 - **-t**:指定一个终端。 -- **cdf548465318**:容器 ID,需要根据 docker ps 指令返回的值进行修改。 +- **c452519b0f9b**:容器 ID,需要根据 docker ps 指令返回的值进行修改。 - **/bin/bash**:载入容器后运行 bash 来进行交互。 4,进入容器后,执行 taos shell 客户端程序。 ```bash -$ root@cdf548465318:~/TDengine-server-2.0.13.0# taos +$ root@c452519b0f9b:~/TDengine-server-2.0.20.13# taos -Welcome to the TDengine shell from Linux, Client Version:2.0.13.0 +Welcome to the TDengine shell from Linux, Client Version:2.0.20.13 Copyright (c) 2020 by TAOS Data, Inc. All rights reserved. -taos> +taos> ``` TDengine 终端成功连接服务端,打印出了欢迎消息和版本信息。如果失败,会有错误信息打印出来。 @@ -78,45 +79,74 @@ TDengine 终端成功连接服务端,打印出了欢迎消息和版本信息 ```bash $ taos> q -root@cdf548465318:~/TDengine-server-2.0.13.0# +root@c452519b0f9b:~/TDengine-server-2.0.20.13# ``` 2,在命令行界面执行 taosdemo。 ```bash -$ root@cdf548465318:~/TDengine-server-2.0.13.0# taosdemo -################################################################### -# Server IP: localhost:0 -# User: root -# Password: taosdata -# Use metric: true -# Datatype of Columns: int int int int int int int float -# Binary Length(If applicable): -1 -# Number of Columns per record: 3 -# Number of Threads: 10 -# Number of Tables: 10000 -# Number of Data per Table: 100000 -# Records/Request: 1000 -# Database name: test -# Table prefix: t -# Delete method: 0 -# Test time: 2021-04-13 02:05:20 -################################################################### +root@c452519b0f9b:~/TDengine-server-2.0.20.13# taosdemo + +taosdemo is simulating data generated by power equipments monitoring... 
+ +host: 127.0.0.1:6030 +user: root +password: taosdata +configDir: +resultFile: ./output.txt +thread num of insert data: 10 +thread num of create table: 10 +top insert interval: 0 +number of records per req: 30000 +max sql length: 1048576 +database count: 1 +database[0]: + database[0] name: test + drop: yes + replica: 1 + precision: ms + super table count: 1 + super table[0]: + stbName: meters + autoCreateTable: no + childTblExists: no + childTblCount: 10000 + childTblPrefix: d + dataSource: rand + iface: taosc + insertRows: 10000 + interlaceRows: 0 + disorderRange: 1000 + disorderRatio: 0 + maxSqlLen: 1048576 + timeStampStep: 1 + startTimestamp: 2017-07-14 10:40:00.000 + sampleFormat: + sampleFile: + tagsFile: + columnCount: 3 +column[0]:FLOAT column[1]:INT column[2]:FLOAT + tagCount: 2 + tag[0]:INT tag[1]:BINARY(16) + + Press enter key to continue or Ctrl-C to stop ``` -回车后,该命令将新建一个数据库 test,并且自动创建一张超级表 meters,并以超级表 meters 为模版创建了 1 万张表,表名从 "t0" 到 "t9999"。每张表有 10 万条记录,每条记录有 f1,f2,f3 三个字段,时间戳 ts 字段从 "2017-07-14 10:40:00 000" 到 "2017-07-14 10:41:39 999"。每张表带有 areaid 和 loc 两个标签 TAG,areaid 被设置为 1 到 10,loc 被设置为 "beijing" 或 "shanghai"。 +回车后,该命令将在数据库 test 下面自动创建一张超级表 meters,该超级表下有 1 万张表,表名为 "d0" 到 "d9999",每张表有 1 万条记录,每条记录有 (ts, current, voltage, phase) 四个字段,时间戳从 "2017-07-14 10:40:00 000" 到 "2017-07-14 10:40:09 999",每张表带有标签 location 和 groupId,groupId 被设置为 1 到 10, location 被设置为 "beijing" 或者 "shanghai"。 + +执行这条命令大概需要几分钟,最后共插入 1 亿条记录。 3,进入 TDengine 终端,查看 taosdemo 生成的数据。 - **进入命令行。** ```bash -$ root@cdf548465318:~/TDengine-server-2.0.13.0# taos +$ root@c452519b0f9b:~/TDengine-server-2.0.20.13# taos -Welcome to the TDengine shell from Linux, Client Version:2.0.13.0 +Welcome to the TDengine shell from Linux, Client Version:2.0.20.13 Copyright (c) 2020 by TAOS Data, Inc. All rights reserved. -taos> +taos> ``` - **查看数据库。** @@ -124,8 +154,8 @@ taos> ```bash $ taos> show databases; name | created_time | ntables | vgroups | ··· - test | 2021-04-13 02:14:15.950 | 10000 | 6 | ··· - log | 2021-04-12 09:36:37.549 | 4 | 1 | ··· + test | 2021-08-18 06:01:11.021 | 10000 | 6 | ··· + log | 2021-08-18 05:51:51.065 | 4 | 1 | ··· ``` @@ -136,10 +166,10 @@ $ taos> use test; Database changed. 
$ taos> show stables; - name | created_time | columns | tags | tables | -===================================================================================== - meters | 2021-04-13 02:14:15.955 | 4 | 2 | 10000 | -Query OK, 1 row(s) in set (0.001737s) + name | created_time | columns | tags | tables | +============================================================================================ + meters | 2021-08-18 06:01:11.116 | 4 | 2 | 10000 | +Query OK, 1 row(s) in set (0.003259s) ``` @@ -147,42 +177,45 @@ Query OK, 1 row(s) in set (0.001737s) ```bash $ taos> select * from test.t0 limit 10; - ts | f1 | f2 | f3 | -==================================================================== - 2017-07-14 02:40:01.000 | 3 | 9 | 0 | - 2017-07-14 02:40:02.000 | 0 | 1 | 2 | - 2017-07-14 02:40:03.000 | 7 | 2 | 3 | - 2017-07-14 02:40:04.000 | 9 | 4 | 5 | - 2017-07-14 02:40:05.000 | 1 | 2 | 5 | - 2017-07-14 02:40:06.000 | 6 | 3 | 2 | - 2017-07-14 02:40:07.000 | 4 | 7 | 8 | - 2017-07-14 02:40:08.000 | 4 | 6 | 6 | - 2017-07-14 02:40:09.000 | 5 | 7 | 7 | - 2017-07-14 02:40:10.000 | 1 | 5 | 0 | -Query OK, 10 row(s) in set (0.003638s) + +DB error: Table does not exist (0.002857s) +taos> select * from test.d0 limit 10; + ts | current | voltage | phase | +====================================================================================== + 2017-07-14 10:40:00.000 | 10.12072 | 223 | 0.34167 | + 2017-07-14 10:40:00.001 | 10.16103 | 224 | 0.34445 | + 2017-07-14 10:40:00.002 | 10.00204 | 220 | 0.33334 | + 2017-07-14 10:40:00.003 | 10.00030 | 220 | 0.33333 | + 2017-07-14 10:40:00.004 | 9.84029 | 216 | 0.32222 | + 2017-07-14 10:40:00.005 | 9.88028 | 217 | 0.32500 | + 2017-07-14 10:40:00.006 | 9.88110 | 217 | 0.32500 | + 2017-07-14 10:40:00.007 | 10.08137 | 222 | 0.33889 | + 2017-07-14 10:40:00.008 | 10.12063 | 223 | 0.34167 | + 2017-07-14 10:40:00.009 | 10.16086 | 224 | 0.34445 | +Query OK, 10 row(s) in set (0.016791s) ``` -- **查看 t0 表的标签值。** +- **查看 d0 表的标签值。** ```bash -$ taos> select areaid, loc from test.t0; - areaid | loc | -=========================== - 10 | shanghai | -Query OK, 1 row(s) in set (0.002904s) +$ taos> select groupid, location from test.d0; + groupid | location | +================================= + 0 | shanghai | +Query OK, 1 row(s) in set (0.003490s) ``` ## 停止正在 Docker 中运行的 TDengine 服务 ```bash -$ docker stop cdf548465318 -cdf548465318 +$ docker stop tdengine +tdengine ``` - **docker stop**:通过 docker stop 停止指定的正在运行中的 docker 镜像。 -- **cdf548465318**:容器 ID,根据 docker ps 指令返回的结果进行修改。 +- **tdengine**:容器名称。 ## 编程开发时连接在 Docker 中的 TDengine @@ -195,7 +228,7 @@ $ docker run -d -v /etc/taos:/etc/taos -p 6041:6041 tdengine/tdengine 526aa188da767ae94b244226a2b2eec2b5f17dd8eff592893d9ec0cd0f3a1ccd $ curl -u root:taosdata -d 'show databases' 127.0.0.1:6041/rest/sql -{"status":"succ","head":["name","created_time","ntables","vgroups","replica","quorum","days","keep1,keep2,keep(D)","cache(MB)","blocks","minrows","maxrows","wallevel","fsync","comp","precision","status"],"data":[],"rows":0} 
+{"status":"succ","head":["name","created_time","ntables","vgroups","replica","quorum","days","keep0,keep1,keep(D)","cache(MB)","blocks","minrows","maxrows","wallevel","fsync","comp","cachelast","precision","update","status"],"column_meta":[["name",8,32],["created_time",9,8],["ntables",4,4],["vgroups",4,4],["replica",3,2],["quorum",3,2],["days",3,2],["keep0,keep1,keep(D)",8,24],["cache(MB)",4,4],["blocks",4,4],["minrows",4,4],["maxrows",4,4],["wallevel",2,1],["fsync",4,4],["comp",2,1],["cachelast",2,1],["precision",8,3],["update",2,1],["status",8,10]],"data":[["test","2021-08-18 06:01:11.021",10000,4,1,1,10,"3650,3650,3650",16,6,100,4096,1,3000,2,0,"ms",0,"ready"],["log","2021-08-18 05:51:51.065",4,1,1,1,10,"30,30,30",1,3,100,4096,1,3000,2,0,"us",0,"ready"]],"rows":2} ``` - 第一条命令,启动一个运行了 TDengine 的 docker 容器,并且将容器的 6041 端口映射到宿主机的 6041 端口上。 @@ -206,6 +239,5 @@ $ curl -u root:taosdata -d 'show databases' 127.0.0.1:6041/rest/sql 2,直接通过 exec 命令,进入到 docker 容器中去做开发。也即,把程序代码放在 TDengine 服务端所在的同一个 Docker 容器中,连接容器本地的 TDengine 服务。 ```bash -$ docker exec -it 526aa188da /bin/bash +$ docker exec -it tdengine /bin/bash ``` - From 16fe1e27bf5f5e62ab98527d36819e3818bcd102 Mon Sep 17 00:00:00 2001 From: wpan Date: Wed, 18 Aug 2021 16:45:59 +0800 Subject: [PATCH 109/165] fix nested query bug --- src/client/src/tscSubquery.c | 13 +++++++++++-- src/client/src/tscUtil.c | 33 ++++++++++++++------------------- 2 files changed, 25 insertions(+), 21 deletions(-) diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c index d16d744393..816347cecd 100644 --- a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -2717,10 +2717,19 @@ void tscHandleSubqueryError(SRetrieveSupport *trsupport, SSqlObj *pSql, int numO if (!TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_JOIN_SEC_STAGE)) { int32_t code = pParentSql->res.code; - SSqlObj *userSql = ((SRetrieveSupport*)pParentSql->param)->pParentSql; + SSqlObj *userSql = NULL; + if (pParentSql->param) { + userSql = ((SRetrieveSupport*)pParentSql->param)->pParentSql; + } + + if (userSql == NULL) { + userSql = pParentSql; + } if ((code == TSDB_CODE_TDB_INVALID_TABLE_ID || code == TSDB_CODE_VND_INVALID_VGROUP_ID) && userSql->retry < userSql->maxRetry) { - tscFreeRetrieveSup(pParentSql); + if (userSql != pParentSql) { + tscFreeRetrieveSup(pParentSql); + } tscFreeSubobj(userSql); tfree(userSql->pSubs); diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index 06b61d443d..2ee6a85431 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -3747,8 +3747,7 @@ static void tscSubqueryCompleteCallback(void* param, TAOS_RES* tres, int code) { int32_t index = ps->subqueryIndex; bool ret = subAndCheckDone(pSql, pParentSql, index); - tfree(ps); - pSql->param = NULL; + tscFreeRetrieveSup(pSql); if (!ret) { tscDebug("0x%"PRIx64" sub:0x%"PRIx64" orderOfSub:%d completed, not all subquery finished", pParentSql->self, pSql->self, index); @@ -3758,41 +3757,37 @@ static void tscSubqueryCompleteCallback(void* param, TAOS_RES* tres, int code) { // todo refactor tscDebug("0x%"PRIx64" all subquery response received, retry", pParentSql->self); - SSqlObj *userSql = ((SRetrieveSupport*)pParentSql->param)->pParentSql; - - tscFreeRetrieveSup(pParentSql); - - if (code && !((code == TSDB_CODE_TDB_INVALID_TABLE_ID || code == TSDB_CODE_VND_INVALID_VGROUP_ID) && userSql->retry < userSql->maxRetry)) { + if (code && !((code == TSDB_CODE_TDB_INVALID_TABLE_ID || code == TSDB_CODE_VND_INVALID_VGROUP_ID) && pParentSql->retry < pParentSql->maxRetry)) { 
tscAsyncResultOnError(pParentSql); return; } - tscFreeSubobj(userSql); - tfree(userSql->pSubs); + tscFreeSubobj(pParentSql); + tfree(pParentSql->pSubs); - userSql->res.code = TSDB_CODE_SUCCESS; - userSql->retry++; + pParentSql->res.code = TSDB_CODE_SUCCESS; + pParentSql->retry++; - tscDebug("0x%"PRIx64" retry parse sql and send query, prev error: %s, retry:%d", userSql->self, - tstrerror(code), userSql->retry); + tscDebug("0x%"PRIx64" retry parse sql and send query, prev error: %s, retry:%d", pParentSql->self, + tstrerror(code), pParentSql->retry); - tscResetSqlCmd(&userSql->cmd, true); + tscResetSqlCmd(&pParentSql->cmd, true); - code = tsParseSql(userSql, true); + code = tsParseSql(pParentSql, true); if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) { return; } if (code != TSDB_CODE_SUCCESS) { - userSql->res.code = code; - tscAsyncResultOnError(userSql); + pParentSql->res.code = code; + tscAsyncResultOnError(pParentSql); return; } - SQueryInfo *pQueryInfo = tscGetQueryInfo(&userSql->cmd); + SQueryInfo *pQueryInfo = tscGetQueryInfo(&pParentSql->cmd); - executeQuery(userSql, pQueryInfo); + executeQuery(pParentSql, pQueryInfo); return; } From f9b7b86eb964c82cf09844edb26110016910bacb Mon Sep 17 00:00:00 2001 From: afwerar <1296468573@qq.com> Date: Wed, 18 Aug 2021 17:08:02 +0800 Subject: [PATCH 110/165] [TD-6169]: windows dll client can not quit. --- src/client/src/tscSystem.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/client/src/tscSystem.c b/src/client/src/tscSystem.c index c04765b065..9fedb2dfe7 100644 --- a/src/client/src/tscSystem.c +++ b/src/client/src/tscSystem.c @@ -199,7 +199,10 @@ void taos_init_imp(void) { // In the APIs of other program language, taos_cleanup is not available yet. // So, to make sure taos_cleanup will be invoked to clean up the allocated resource to suppress the valgrind warning. + // But in the dll, the child thread will be killed before atexit takes effect.So taos_cleanup is not necessary. +#if defined(_UWIN) atexit(taos_cleanup); +#endif tscDebug("client is initialized successfully"); } From 97a3aeeb43de3c69572157a088f767bf0d29d946 Mon Sep 17 00:00:00 2001 From: afwerar <1296468573@qq.com> Date: Wed, 18 Aug 2021 17:25:16 +0800 Subject: [PATCH 111/165] [TD-6169]: windows dll client can not quit. --- src/client/src/tscSystem.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/client/src/tscSystem.c b/src/client/src/tscSystem.c index 9fedb2dfe7..8c8afc8d88 100644 --- a/src/client/src/tscSystem.c +++ b/src/client/src/tscSystem.c @@ -200,7 +200,7 @@ void taos_init_imp(void) { // In the APIs of other program language, taos_cleanup is not available yet. // So, to make sure taos_cleanup will be invoked to clean up the allocated resource to suppress the valgrind warning. // But in the dll, the child thread will be killed before atexit takes effect.So taos_cleanup is not necessary. 
-#if defined(_UWIN) +#if !defined(TD_WINDOWS) atexit(taos_cleanup); #endif From 3155e72501e69d7ff3936cdf4927cd5e9215b262 Mon Sep 17 00:00:00 2001 From: Cary Xu Date: Wed, 18 Aug 2021 19:43:44 +0800 Subject: [PATCH 112/165] enhance --- src/client/src/tscParseInsert.c | 398 +++----------------------------- 1 file changed, 29 insertions(+), 369 deletions(-) diff --git a/src/client/src/tscParseInsert.c b/src/client/src/tscParseInsert.c index dab0dff1fc..793cee4ca2 100644 --- a/src/client/src/tscParseInsert.c +++ b/src/client/src/tscParseInsert.c @@ -448,370 +448,6 @@ int32_t tsCheckTimestamp(STableDataBlocks *pDataBlocks, const char *start) { return TSDB_CODE_SUCCESS; } -static int32_t tsParseOneColumnOld(SSchema *pSchema, SStrToken *pToken, char *payload, char *msg, char **str, - bool primaryKey, int16_t timePrec) { - int64_t iv; - int32_t ret; - char * endptr = NULL; - - if (IS_NUMERIC_TYPE(pSchema->type) && pToken->n == 0) { - return tscInvalidOperationMsg(msg, "invalid numeric data", pToken->z); - } - - switch (pSchema->type) { - case TSDB_DATA_TYPE_BOOL: { // bool - if (isNullStr(pToken)) { - *((uint8_t *)payload) = TSDB_DATA_BOOL_NULL; - } else { - if ((pToken->type == TK_BOOL || pToken->type == TK_STRING) && (pToken->n != 0)) { - if (strncmp(pToken->z, "true", pToken->n) == 0) { - *(uint8_t *)payload = TSDB_TRUE; - } else if (strncmp(pToken->z, "false", pToken->n) == 0) { - *(uint8_t *)payload = TSDB_FALSE; - } else { - return tscSQLSyntaxErrMsg(msg, "invalid bool data", pToken->z); - } - } else if (pToken->type == TK_INTEGER) { - iv = strtoll(pToken->z, NULL, 10); - *(uint8_t *)payload = (int8_t)((iv == 0) ? TSDB_FALSE : TSDB_TRUE); - } else if (pToken->type == TK_FLOAT) { - double dv = strtod(pToken->z, NULL); - *(uint8_t *)payload = (int8_t)((dv == 0) ? 
TSDB_FALSE : TSDB_TRUE); - } else { - return tscInvalidOperationMsg(msg, "invalid bool data", pToken->z); - } - } - break; - } - - case TSDB_DATA_TYPE_TINYINT: - if (isNullStr(pToken)) { - *((uint8_t *)payload) = TSDB_DATA_TINYINT_NULL; - } else { - ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, true); - if (ret != TSDB_CODE_SUCCESS) { - return tscInvalidOperationMsg(msg, "invalid tinyint data", pToken->z); - } else if (!IS_VALID_TINYINT(iv)) { - return tscInvalidOperationMsg(msg, "data overflow", pToken->z); - } - - *((uint8_t *)payload) = (uint8_t)iv; - } - - break; - - case TSDB_DATA_TYPE_UTINYINT: - if (isNullStr(pToken)) { - *((uint8_t *)payload) = TSDB_DATA_UTINYINT_NULL; - } else { - ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, false); - if (ret != TSDB_CODE_SUCCESS) { - return tscInvalidOperationMsg(msg, "invalid unsigned tinyint data", pToken->z); - } else if (!IS_VALID_UTINYINT(iv)) { - return tscInvalidOperationMsg(msg, "unsigned tinyint data overflow", pToken->z); - } - - *((uint8_t *)payload) = (uint8_t)iv; - } - - break; - - case TSDB_DATA_TYPE_SMALLINT: - if (isNullStr(pToken)) { - *((int16_t *)payload) = TSDB_DATA_SMALLINT_NULL; - } else { - ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, true); - if (ret != TSDB_CODE_SUCCESS) { - return tscInvalidOperationMsg(msg, "invalid smallint data", pToken->z); - } else if (!IS_VALID_SMALLINT(iv)) { - return tscInvalidOperationMsg(msg, "smallint data overflow", pToken->z); - } - - *((int16_t *)payload) = (int16_t)iv; - } - - break; - - case TSDB_DATA_TYPE_USMALLINT: - if (isNullStr(pToken)) { - *((uint16_t *)payload) = TSDB_DATA_USMALLINT_NULL; - } else { - ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, false); - if (ret != TSDB_CODE_SUCCESS) { - return tscInvalidOperationMsg(msg, "invalid unsigned smallint data", pToken->z); - } else if (!IS_VALID_USMALLINT(iv)) { - return tscInvalidOperationMsg(msg, "unsigned smallint data overflow", pToken->z); - } - - *((uint16_t *)payload) = (uint16_t)iv; - } - - break; - - case TSDB_DATA_TYPE_INT: - if (isNullStr(pToken)) { - *((int32_t *)payload) = TSDB_DATA_INT_NULL; - } else { - ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, true); - if (ret != TSDB_CODE_SUCCESS) { - return tscInvalidOperationMsg(msg, "invalid int data", pToken->z); - } else if (!IS_VALID_INT(iv)) { - return tscInvalidOperationMsg(msg, "int data overflow", pToken->z); - } - - *((int32_t *)payload) = (int32_t)iv; - } - - break; - - case TSDB_DATA_TYPE_UINT: - if (isNullStr(pToken)) { - *((uint32_t *)payload) = TSDB_DATA_UINT_NULL; - } else { - ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, false); - if (ret != TSDB_CODE_SUCCESS) { - return tscInvalidOperationMsg(msg, "invalid unsigned int data", pToken->z); - } else if (!IS_VALID_UINT(iv)) { - return tscInvalidOperationMsg(msg, "unsigned int data overflow", pToken->z); - } - - *((uint32_t *)payload) = (uint32_t)iv; - } - - break; - - case TSDB_DATA_TYPE_BIGINT: - if (isNullStr(pToken)) { - *((int64_t *)payload) = TSDB_DATA_BIGINT_NULL; - } else { - ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, true); - if (ret != TSDB_CODE_SUCCESS) { - return tscInvalidOperationMsg(msg, "invalid bigint data", pToken->z); - } else if (!IS_VALID_BIGINT(iv)) { - return tscInvalidOperationMsg(msg, "bigint data overflow", pToken->z); - } - - *((int64_t *)payload) = iv; - } - break; - - case TSDB_DATA_TYPE_UBIGINT: - if (isNullStr(pToken)) { - *((uint64_t *)payload) = TSDB_DATA_UBIGINT_NULL; - } else 
{ - ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, false); - if (ret != TSDB_CODE_SUCCESS) { - return tscInvalidOperationMsg(msg, "invalid unsigned bigint data", pToken->z); - } else if (!IS_VALID_UBIGINT((uint64_t)iv)) { - return tscInvalidOperationMsg(msg, "unsigned bigint data overflow", pToken->z); - } - - *((uint64_t *)payload) = iv; - } - break; - - case TSDB_DATA_TYPE_FLOAT: - if (isNullStr(pToken)) { - *((int32_t *)payload) = TSDB_DATA_FLOAT_NULL; - } else { - double dv; - if (TK_ILLEGAL == tscToDouble(pToken, &dv, &endptr)) { - return tscInvalidOperationMsg(msg, "illegal float data", pToken->z); - } - - if (((dv == HUGE_VAL || dv == -HUGE_VAL) && errno == ERANGE) || dv > FLT_MAX || dv < -FLT_MAX || isinf(dv) || - isnan(dv)) { - return tscInvalidOperationMsg(msg, "illegal float data", pToken->z); - } - - // *((float *)payload) = (float)dv; - SET_FLOAT_VAL(payload, dv); - } - break; - - case TSDB_DATA_TYPE_DOUBLE: - if (isNullStr(pToken)) { - *((int64_t *)payload) = TSDB_DATA_DOUBLE_NULL; - } else { - double dv; - if (TK_ILLEGAL == tscToDouble(pToken, &dv, &endptr)) { - return tscInvalidOperationMsg(msg, "illegal double data", pToken->z); - } - - if (((dv == HUGE_VAL || dv == -HUGE_VAL) && errno == ERANGE) || isinf(dv) || isnan(dv)) { - return tscInvalidOperationMsg(msg, "illegal double data", pToken->z); - } - - *((double *)payload) = dv; - } - break; - - case TSDB_DATA_TYPE_BINARY: - // binary data cannot be null-terminated char string, otherwise the last char of the string is lost - if (pToken->type == TK_NULL) { - setVardataNull(payload, TSDB_DATA_TYPE_BINARY); - } else { // too long values will return invalid sql, not be truncated automatically - if (pToken->n + VARSTR_HEADER_SIZE > pSchema->bytes) { // todo refactor - return tscInvalidOperationMsg(msg, "string data overflow", pToken->z); - } - - STR_WITH_SIZE_TO_VARSTR(payload, pToken->z, pToken->n); - } - - break; - - case TSDB_DATA_TYPE_NCHAR: - if (pToken->type == TK_NULL) { - setVardataNull(payload, TSDB_DATA_TYPE_NCHAR); - } else { - // if the converted output len is over than pColumnModel->bytes, return error: 'Argument list too long' - int32_t output = 0; - if (!taosMbsToUcs4(pToken->z, pToken->n, varDataVal(payload), pSchema->bytes - VARSTR_HEADER_SIZE, &output)) { - char buf[512] = {0}; - snprintf(buf, tListLen(buf), "%s", strerror(errno)); - return tscInvalidOperationMsg(msg, buf, pToken->z); - } - - varDataSetLen(payload, output); - } - break; - - case TSDB_DATA_TYPE_TIMESTAMP: { - if (pToken->type == TK_NULL) { - if (primaryKey) { - *((int64_t *)payload) = 0; - } else { - *((int64_t *)payload) = TSDB_DATA_BIGINT_NULL; - } - } else { - int64_t temp; - if (tsParseTime(pToken, &temp, str, msg, timePrec) != TSDB_CODE_SUCCESS) { - return tscInvalidOperationMsg(msg, "invalid timestamp", pToken->z); - } - - *((int64_t *)payload) = temp; - } - - break; - } - } - - return TSDB_CODE_SUCCESS; -} - -static int tsParseOneRowOld(char **str, STableDataBlocks *pDataBlocks, int16_t timePrec, int32_t *len, - char *tmpTokenBuf, SInsertStatementParam *pInsertParam) { - int32_t index = 0; - SStrToken sToken = {0}; - char * payload = pDataBlocks->pData + pDataBlocks->size; - - SParsedDataColInfo *spd = &pDataBlocks->boundColumnInfo; - SSchema * schema = tscGetTableSchema(pDataBlocks->pTableMeta); - - // 1. 
set the parsed value from sql string - int32_t rowSize = 0; - for (int i = 0; i < spd->numOfBound; ++i) { - // the start position in data block buffer of current value in sql - int32_t colIndex = spd->boundedColumns[i]; - - char * start = payload + spd->cols[colIndex].offset; - SSchema *pSchema = &schema[colIndex]; - rowSize += pSchema->bytes; - - index = 0; - sToken = tStrGetToken(*str, &index, true); - *str += index; - - if (sToken.type == TK_QUESTION) { - if (pInsertParam->insertType != TSDB_QUERY_TYPE_STMT_INSERT) { - return tscSQLSyntaxErrMsg(pInsertParam->msg, "? only allowed in binding insertion", *str); - } - - uint32_t offset = (uint32_t)(start - pDataBlocks->pData); - if (tscAddParamToDataBlock(pDataBlocks, pSchema->type, (uint8_t)timePrec, pSchema->bytes, offset) != NULL) { - continue; - } - - strcpy(pInsertParam->msg, "client out of memory"); - return TSDB_CODE_TSC_OUT_OF_MEMORY; - } - - int16_t type = sToken.type; - if ((type != TK_NOW && type != TK_INTEGER && type != TK_STRING && type != TK_FLOAT && type != TK_BOOL && - type != TK_NULL && type != TK_HEX && type != TK_OCT && type != TK_BIN) || - (sToken.n == 0) || (type == TK_RP)) { - return tscSQLSyntaxErrMsg(pInsertParam->msg, "invalid data or symbol", sToken.z); - } - - // Remove quotation marks - if (TK_STRING == sToken.type) { - // delete escape character: \\, \', \" - char delim = sToken.z[0]; - - int32_t cnt = 0; - int32_t j = 0; - if (sToken.n >= TSDB_MAX_BYTES_PER_ROW) { - return tscSQLSyntaxErrMsg(pInsertParam->msg, "too long string", sToken.z); - } - - for (uint32_t k = 1; k < sToken.n - 1; ++k) { - if (sToken.z[k] == '\\' || (sToken.z[k] == delim && sToken.z[k + 1] == delim)) { - tmpTokenBuf[j] = sToken.z[k + 1]; - - cnt++; - j++; - k++; - continue; - } - - tmpTokenBuf[j] = sToken.z[k]; - j++; - } - - tmpTokenBuf[j] = 0; - sToken.z = tmpTokenBuf; - sToken.n -= 2 + cnt; - } - - bool isPrimaryKey = (colIndex == PRIMARYKEY_TIMESTAMP_COL_INDEX); - int32_t ret = tsParseOneColumnOld(pSchema, &sToken, start, pInsertParam->msg, str, isPrimaryKey, timePrec); - if (ret != TSDB_CODE_SUCCESS) { - return ret; - } - - if (isPrimaryKey && tsCheckTimestamp(pDataBlocks, start) != TSDB_CODE_SUCCESS) { - tscInvalidOperationMsg(pInsertParam->msg, "client time/server time can not be mixed up", sToken.z); - return TSDB_CODE_TSC_INVALID_TIME_STAMP; - } - } - - // 2. 
set the null value for the columns that do not assign values - if (spd->numOfBound < spd->numOfCols) { - char *ptr = payload; - - for (int32_t i = 0; i < spd->numOfCols; ++i) { - if (spd->cols[i].valStat == VAL_STAT_NONE) { // current column do not have any value to insert, set it to null - if (schema[i].type == TSDB_DATA_TYPE_BINARY) { - varDataSetLen(ptr, sizeof(int8_t)); - *(uint8_t *)varDataVal(ptr) = TSDB_DATA_BINARY_NULL; - } else if (schema[i].type == TSDB_DATA_TYPE_NCHAR) { - varDataSetLen(ptr, sizeof(int32_t)); - *(uint32_t *)varDataVal(ptr) = TSDB_DATA_NCHAR_NULL; - } else { - setNull(ptr, schema[i].type, schema[i].bytes); - } - } - - ptr += schema[i].bytes; - } - - rowSize = (int32_t)(ptr - payload); - } - - *len = rowSize; - return TSDB_CODE_SUCCESS; -} - int tsParseOneRow(char **str, STableDataBlocks *pDataBlocks, int16_t timePrec, int32_t *len, char *tmpTokenBuf, SInsertStatementParam *pInsertParam) { int32_t index = 0; @@ -1036,6 +672,7 @@ int32_t tsParseValues(char **str, STableDataBlocks *pDataBlock, int maxRows, SIn } } +#if 1 void tscSetBoundColumnInfo(SParsedDataColInfo *pColInfo, SSchema *pSchema, int32_t numOfCols) { pColInfo->numOfCols = numOfCols; pColInfo->numOfBound = numOfCols; @@ -1071,6 +708,30 @@ void tscSetBoundColumnInfo(SParsedDataColInfo *pColInfo, SSchema *pSchema, int32 pColInfo->allNullLen += pColInfo->flen; pColInfo->extendedVarLen = (uint16_t)(nVar * sizeof(VarDataOffsetT)); } +#endif + +#if 0 +void tscSetBoundColumnInfo(SParsedDataColInfo *pColInfo, SSchema *pSchema, int32_t numOfCols) { + pColInfo->numOfCols = numOfCols; + pColInfo->numOfBound = numOfCols; + pColInfo->orderStatus = ORDER_STATUS_ORDERED; // default is ORDERED for non-bound mode + + pColInfo->boundedColumns = calloc(pColInfo->numOfCols, sizeof(int32_t)); + pColInfo->cols = calloc(pColInfo->numOfCols, sizeof(SBoundColumn)); + pColInfo->colIdxInfo = NULL; + pColInfo->flen = 0; + pColInfo->allNullLen = 0; + + for (int32_t i = 0; i < pColInfo->numOfCols; ++i) { + if (i > 0) { + pColInfo->cols[i].offset = pSchema[i - 1].bytes + pColInfo->cols[i - 1].offset; + } + + pColInfo->cols[i].valStat = VAL_STAT_HAS; + pColInfo->boundedColumns[i] = i; + } +} +#endif int32_t tscAllocateMemIfNeed(STableDataBlocks *pDataBlock, int32_t rowSize, int32_t * numOfRows) { size_t remain = pDataBlock->nAllocSize - pDataBlock->size; @@ -1172,13 +833,12 @@ int tscSortRemoveDataBlockDupRows(STableDataBlocks *dataBuf, SBlockKeyInfo *pBlk // allocate memory size_t nAlloc = nRows * sizeof(SBlockKeyTuple); if (pBlkKeyInfo->pKeyTuple == NULL || pBlkKeyInfo->maxBytesAlloc < nAlloc) { - size_t nRealAlloc = nAlloc + 10 * sizeof(SBlockKeyTuple); - char * tmp = trealloc(pBlkKeyInfo->pKeyTuple, nRealAlloc); + char *tmp = trealloc(pBlkKeyInfo->pKeyTuple, nAlloc); if (tmp == NULL) { return TSDB_CODE_TSC_OUT_OF_MEMORY; } pBlkKeyInfo->pKeyTuple = (SBlockKeyTuple *)tmp; - pBlkKeyInfo->maxBytesAlloc = (int32_t)nRealAlloc; + pBlkKeyInfo->maxBytesAlloc = (int32_t)nAlloc; } memset(pBlkKeyInfo->pKeyTuple, 0, nAlloc); @@ -2061,7 +1721,7 @@ static void parseFileSendDataBlock(void *param, TAOS_RES *tres, int32_t numOfRow STableComInfo tinfo = tscGetTableInfo(pTableMeta); SInsertStatementParam* pInsertParam = &pCmd->insertParam; - pInsertParam->payloadType = PAYLOAD_TYPE_RAW; + // pInsertParam->payloadType = PAYLOAD_TYPE_RAW; destroyTableNameList(pInsertParam); pInsertParam->pDataBlocks = tscDestroyBlockArrayList(pInsertParam->pDataBlocks); @@ -2103,7 +1763,7 @@ static void parseFileSendDataBlock(void *param, TAOS_RES *tres, int32_t 
numOfRow strtolower(line, line); int32_t len = 0; - code = tsParseOneRowOld(&lineptr, pTableDataBlock, tinfo.precision, &len, tokenBuf, pInsertParam); + code = tsParseOneRow(&lineptr, pTableDataBlock, tinfo.precision, &len, tokenBuf, pInsertParam); if (code != TSDB_CODE_SUCCESS || pTableDataBlock->numOfParams > 0) { pSql->res.code = code; break; From 2cddc07d2b95f9dc189de49dee5f7bd5d8289ebf Mon Sep 17 00:00:00 2001 From: Cary Xu Date: Wed, 18 Aug 2021 19:48:30 +0800 Subject: [PATCH 113/165] [TD-6184]: optimize insert from imported file --- src/client/src/tscParseInsert.c | 32 ++------------------------------ src/client/src/tscUtil.c | 1 - 2 files changed, 2 insertions(+), 31 deletions(-) diff --git a/src/client/src/tscParseInsert.c b/src/client/src/tscParseInsert.c index 793cee4ca2..f5d9a6a17e 100644 --- a/src/client/src/tscParseInsert.c +++ b/src/client/src/tscParseInsert.c @@ -51,20 +51,18 @@ int initMemRowBuilder(SMemRowBuilder *pBuilder, uint32_t nRows, uint3 } } + // default compareStat is ROW_COMPARE_NO_NEED if (nBoundCols == 0) { // file input pBuilder->memRowType = SMEM_ROW_DATA; - // pBuilder->compareStat = ROW_COMPARE_NO_NEED; return TSDB_CODE_SUCCESS; } else { float boundRatio = ((float)nBoundCols / (float)nCols); if (boundRatio < KVRatioKV) { pBuilder->memRowType = SMEM_ROW_KV; - // pBuilder->compareStat = ROW_COMPARE_NO_NEED; return TSDB_CODE_SUCCESS; } else if (boundRatio > KVRatioData) { pBuilder->memRowType = SMEM_ROW_DATA; - // pBuilder->compareStat = ROW_COMPARE_NO_NEED; return TSDB_CODE_SUCCESS; } pBuilder->compareStat = ROW_COMPARE_NEED; @@ -672,7 +670,6 @@ int32_t tsParseValues(char **str, STableDataBlocks *pDataBlock, int maxRows, SIn } } -#if 1 void tscSetBoundColumnInfo(SParsedDataColInfo *pColInfo, SSchema *pSchema, int32_t numOfCols) { pColInfo->numOfCols = numOfCols; pColInfo->numOfBound = numOfCols; @@ -708,30 +705,6 @@ void tscSetBoundColumnInfo(SParsedDataColInfo *pColInfo, SSchema *pSchema, int32 pColInfo->allNullLen += pColInfo->flen; pColInfo->extendedVarLen = (uint16_t)(nVar * sizeof(VarDataOffsetT)); } -#endif - -#if 0 -void tscSetBoundColumnInfo(SParsedDataColInfo *pColInfo, SSchema *pSchema, int32_t numOfCols) { - pColInfo->numOfCols = numOfCols; - pColInfo->numOfBound = numOfCols; - pColInfo->orderStatus = ORDER_STATUS_ORDERED; // default is ORDERED for non-bound mode - - pColInfo->boundedColumns = calloc(pColInfo->numOfCols, sizeof(int32_t)); - pColInfo->cols = calloc(pColInfo->numOfCols, sizeof(SBoundColumn)); - pColInfo->colIdxInfo = NULL; - pColInfo->flen = 0; - pColInfo->allNullLen = 0; - - for (int32_t i = 0; i < pColInfo->numOfCols; ++i) { - if (i > 0) { - pColInfo->cols[i].offset = pSchema[i - 1].bytes + pColInfo->cols[i - 1].offset; - } - - pColInfo->cols[i].valStat = VAL_STAT_HAS; - pColInfo->boundedColumns[i] = i; - } -} -#endif int32_t tscAllocateMemIfNeed(STableDataBlocks *pDataBlock, int32_t rowSize, int32_t * numOfRows) { size_t remain = pDataBlock->nAllocSize - pDataBlock->size; @@ -1720,8 +1693,7 @@ static void parseFileSendDataBlock(void *param, TAOS_RES *tres, int32_t numOfRow STableMeta * pTableMeta = pTableMetaInfo->pTableMeta; STableComInfo tinfo = tscGetTableInfo(pTableMeta); - SInsertStatementParam* pInsertParam = &pCmd->insertParam; - // pInsertParam->payloadType = PAYLOAD_TYPE_RAW; + SInsertStatementParam *pInsertParam = &pCmd->insertParam; destroyTableNameList(pInsertParam); pInsertParam->pDataBlocks = tscDestroyBlockArrayList(pInsertParam->pDataBlocks); diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index 
f3e30172ab..612d6a5642 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -1744,7 +1744,6 @@ int32_t tscCreateDataBlock(size_t defaultSize, int32_t rowSize, int32_t startOff dataBuf->tsSource = -1; dataBuf->vgId = dataBuf->pTableMeta->vgId; - tNameAssign(&dataBuf->tableName, name); assert(defaultSize > 0 && pTableMeta != NULL && dataBuf->pTableMeta != NULL); From ffe99d41d9eede67d473380f84834ab68e829b6a Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Thu, 19 Aug 2021 10:02:12 +0800 Subject: [PATCH 114/165] Hotfix/sangshuduo/td 5844 cmdline parameters align for master (#7444) * [TD-5844]: make cmd line parameter similar. * fix test case align with taosdemo change. * fix windows stack overflow issue. * fix mac compile error. * fix taosdemo cmdline parameter in tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoInsert.py * make taos.exe use mysql style password input. * make taos shell and taosdump use mysql style password input. * determine scanf return value. * cherry pick from develop/feature branch. --- src/kit/shell/src/shellDarwin.c | 23 ++++++++++++++--- src/kit/shell/src/shellEngine.c | 42 ++++++++++++++++++-------------- src/kit/shell/src/shellLinux.c | 35 +++++++++++++++++++++++--- src/kit/shell/src/shellMain.c | 2 ++ src/kit/shell/src/shellWindows.c | 30 +++++++++++++++++------ src/kit/taosdemo/taosdemo.c | 8 ++++-- src/kit/taosdump/taosdump.c | 31 +++++++++++++++++++---- src/os/inc/osSystem.h | 2 ++ src/os/src/darwin/darwinSystem.c | 26 ++++++++++++++++++++ src/os/src/linux/osSystem.c | 24 ++++++++++++++++++ src/os/src/windows/wSystem.c | 14 +++++++++++ 11 files changed, 196 insertions(+), 41 deletions(-) diff --git a/src/kit/shell/src/shellDarwin.c b/src/kit/shell/src/shellDarwin.c index 4dcd8b3d50..5ca4537aeb 100644 --- a/src/kit/shell/src/shellDarwin.c +++ b/src/kit/shell/src/shellDarwin.c @@ -64,6 +64,10 @@ void printHelp() { exit(EXIT_SUCCESS); } +char DARWINCLIENT_VERSION[] = "Welcome to the TDengine shell from %s, Client Version:%s\n" + "Copyright (c) 2020 by TAOS Data, Inc. 
All rights reserved.\n\n"; +char g_password[MAX_PASSWORD_SIZE]; + void shellParseArgument(int argc, char *argv[], SShellArguments *arguments) { wordexp_t full_path; for (int i = 1; i < argc; i++) { @@ -77,10 +81,21 @@ void shellParseArgument(int argc, char *argv[], SShellArguments *arguments) { } } // for password - else if (strcmp(argv[i], "-p") == 0) { - arguments->is_use_passwd = true; + else if (strncmp(argv[i], "-p", 2) == 0) { + strcpy(tsOsName, "Darwin"); + printf(DARWINCLIENT_VERSION, tsOsName, taos_get_client_info()); + if (strlen(argv[i]) == 2) { + printf("Enter password: "); + if (scanf("%s", g_password) > 1) { + fprintf(stderr, "password read error\n"); + } + getchar(); + } else { + tstrncpy(g_password, (char *)(argv[i] + 2), MAX_PASSWORD_SIZE); + } + arguments->password = g_password; } - // for management port + // for management port else if (strcmp(argv[i], "-P") == 0) { if (i < argc - 1) { arguments->port = atoi(argv[++i]); @@ -98,7 +113,7 @@ void shellParseArgument(int argc, char *argv[], SShellArguments *arguments) { exit(EXIT_FAILURE); } } else if (strcmp(argv[i], "-c") == 0) { - if (i < argc - 1) { + if (i < argc - 1) { if (strlen(argv[++i]) >= TSDB_FILENAME_LEN) { fprintf(stderr, "config file path: %s overflow max len %d\n", argv[i], TSDB_FILENAME_LEN - 1); exit(EXIT_FAILURE); diff --git a/src/kit/shell/src/shellEngine.c b/src/kit/shell/src/shellEngine.c index 58f4b7ff02..51a25d59c4 100644 --- a/src/kit/shell/src/shellEngine.c +++ b/src/kit/shell/src/shellEngine.c @@ -65,7 +65,15 @@ extern TAOS *taos_connect_auth(const char *ip, const char *user, const char *aut */ TAOS *shellInit(SShellArguments *_args) { printf("\n"); - printf(CLIENT_VERSION, tsOsName, taos_get_client_info()); + if (!_args->is_use_passwd) { +#ifdef TD_WINDOWS + strcpy(tsOsName, "Windows"); +#elif defined(TD_DARWIN) + strcpy(tsOsName, "Darwin"); +#endif + printf(CLIENT_VERSION, tsOsName, taos_get_client_info()); + } + fflush(stdout); // set options before initializing @@ -73,9 +81,7 @@ TAOS *shellInit(SShellArguments *_args) { taos_options(TSDB_OPTION_TIMEZONE, _args->timezone); } - if (_args->is_use_passwd) { - if (_args->password == NULL) _args->password = getpass("Enter password: "); - } else { + if (!_args->is_use_passwd) { _args->password = TSDB_DEFAULT_PASS; } @@ -170,7 +176,7 @@ static int32_t shellRunSingleCommand(TAOS *con, char *command) { system("clear"); return 0; } - + if (regex_match(command, "^[\t ]*set[ \t]+max_binary_display_width[ \t]+(default|[1-9][0-9]*)[ \t;]*$", REG_EXTENDED | REG_ICASE)) { strtok(command, " \t"); strtok(NULL, " \t"); @@ -182,7 +188,7 @@ static int32_t shellRunSingleCommand(TAOS *con, char *command) { } return 0; } - + if (regex_match(command, "^[ \t]*source[\t ]+[^ ]+[ \t;]*$", REG_EXTENDED | REG_ICASE)) { /* If source file. 
*/ char *c_ptr = strtok(command, " ;"); @@ -247,7 +253,7 @@ int32_t shellRunCommand(TAOS* con, char* command) { esc = false; continue; } - + if (c == '\\') { esc = true; continue; @@ -336,8 +342,8 @@ void shellRunCommandOnServer(TAOS *con, char command[]) { } if (!tscIsUpdateQuery(pSql)) { // select and show kinds of commands - int error_no = 0; - + int error_no = 0; + int numOfRows = shellDumpResult(pSql, fname, &error_no, printMode); if (numOfRows < 0) { atomic_store_64(&result, 0); @@ -530,7 +536,7 @@ static int dumpResultToFile(const char* fname, TAOS_RES* tres) { fprintf(fp, "%s", fields[col].name); } fputc('\n', fp); - + int numOfRows = 0; do { int32_t* length = taos_fetch_lengths(tres); @@ -716,7 +722,7 @@ static int verticalPrintResult(TAOS_RES* tres) { int numOfRows = 0; int showMore = 1; - do { + do { if (numOfRows < resShowMaxNum) { printf("*************************** %d.row ***************************\n", numOfRows + 1); @@ -851,7 +857,7 @@ static int horizontalPrintResult(TAOS_RES* tres) { int numOfRows = 0; int showMore = 1; - + do { int32_t* length = taos_fetch_lengths(tres); if (numOfRows < resShowMaxNum) { @@ -867,7 +873,7 @@ static int horizontalPrintResult(TAOS_RES* tres) { printf("[You can add limit statement to show more or redirect results to specific file to get all.]\n"); showMore = 0; } - + numOfRows++; row = taos_fetch_row(tres); } while(row != NULL); @@ -909,7 +915,7 @@ void read_history() { if (errno != ENOENT) { fprintf(stderr, "Failed to open file %s, reason:%s\n", f_history, strerror(errno)); } -#endif +#endif return; } @@ -934,9 +940,9 @@ void write_history() { FILE *f = fopen(f_history, "w"); if (f == NULL) { -#ifndef WINDOWS +#ifndef WINDOWS fprintf(stderr, "Failed to open file %s for write, reason:%s\n", f_history, strerror(errno)); -#endif +#endif return; } @@ -982,13 +988,13 @@ void source_file(TAOS *con, char *fptr) { /* if (access(fname, F_OK) != 0) { fprintf(stderr, "ERROR: file %s is not exist\n", fptr); - + wordfree(&full_path); free(cmd); return; } */ - + FILE *f = fopen(fname, "r"); if (f == NULL) { fprintf(stderr, "ERROR: failed to open file %s\n", fname); diff --git a/src/kit/shell/src/shellLinux.c b/src/kit/shell/src/shellLinux.c index dc74f6fcaa..d051d3535e 100644 --- a/src/kit/shell/src/shellLinux.c +++ b/src/kit/shell/src/shellLinux.c @@ -34,7 +34,7 @@ static char doc[] = ""; static char args_doc[] = ""; static struct argp_option options[] = { {"host", 'h', "HOST", 0, "TDengine server FQDN to connect. The default host is localhost."}, - {"password", 'p', "PASSWORD", OPTION_ARG_OPTIONAL, "The password to use when connecting to the server."}, + {"password", 'p', 0, 0, "The password to use when connecting to the server."}, {"port", 'P', "PORT", 0, "The TCP/IP port number to use for the connection."}, {"user", 'u', "USER", 0, "The user name to use when connecting to the server."}, {"auth", 'A', "Auth", 0, "The auth string to use when connecting to the server."}, @@ -63,8 +63,6 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) { arguments->host = arg; break; case 'p': - arguments->is_use_passwd = true; - if (arg) arguments->password = arg; break; case 'P': if (arg) { @@ -160,12 +158,41 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) { /* Our argp parser. */ static struct argp argp = {options, parse_opt, args_doc, doc}; +char LINUXCLIENT_VERSION[] = "Welcome to the TDengine shell from %s, Client Version:%s\n" + "Copyright (c) 2020 by TAOS Data, Inc. 
All rights reserved.\n\n"; +char g_password[MAX_PASSWORD_SIZE]; + +static void parse_password( + int argc, char *argv[], SShellArguments *arguments) { + for (int i = 1; i < argc; i++) { + if (strncmp(argv[i], "-p", 2) == 0) { + strcpy(tsOsName, "Linux"); + printf(LINUXCLIENT_VERSION, tsOsName, taos_get_client_info()); + if (strlen(argv[i]) == 2) { + printf("Enter password: "); + if (scanf("%20s", g_password) > 1) { + fprintf(stderr, "password reading error\n"); + } + getchar(); + } else { + tstrncpy(g_password, (char *)(argv[i] + 2), MAX_PASSWORD_SIZE); + } + arguments->password = g_password; + arguments->is_use_passwd = true; + } + } +} + void shellParseArgument(int argc, char *argv[], SShellArguments *arguments) { static char verType[32] = {0}; sprintf(verType, "version: %s\n", version); argp_program_version = verType; - + + if (argc > 1) { + parse_password(argc, argv, arguments); + } + argp_parse(&argp, argc, argv, 0, 0, arguments); if (arguments->abort) { #ifndef _ALPINE diff --git a/src/kit/shell/src/shellMain.c b/src/kit/shell/src/shellMain.c index 0c70386061..4e00b0d8ff 100644 --- a/src/kit/shell/src/shellMain.c +++ b/src/kit/shell/src/shellMain.c @@ -71,7 +71,9 @@ int checkVersion() { // Global configurations SShellArguments args = { .host = NULL, +#ifndef TD_WINDOWS .password = NULL, +#endif .user = NULL, .database = NULL, .timezone = NULL, diff --git a/src/kit/shell/src/shellWindows.c b/src/kit/shell/src/shellWindows.c index 87d11a3516..bf9afe4b80 100644 --- a/src/kit/shell/src/shellWindows.c +++ b/src/kit/shell/src/shellWindows.c @@ -19,6 +19,9 @@ extern char configDir[]; +char WINCLIENT_VERSION[] = "Welcome to the TDengine shell from %s, Client Version:%s\n" + "Copyright (c) 2020 by TAOS Data, Inc. All rights reserved.\n\n"; + void printVersion() { printf("version: %s\n", version); } @@ -61,6 +64,8 @@ void printHelp() { exit(EXIT_SUCCESS); } +char g_password[MAX_PASSWORD_SIZE]; + void shellParseArgument(int argc, char *argv[], SShellArguments *arguments) { for (int i = 1; i < argc; i++) { // for host @@ -73,11 +78,20 @@ void shellParseArgument(int argc, char *argv[], SShellArguments *arguments) { } } // for password - else if (strcmp(argv[i], "-p") == 0) { - arguments->is_use_passwd = true; - if (i < argc - 1 && argv[i + 1][0] != '-') { - arguments->password = argv[++i]; - } + else if (strncmp(argv[i], "-p", 2) == 0) { + arguments->is_use_passwd = true; + strcpy(tsOsName, "Windows"); + printf(WINCLIENT_VERSION, tsOsName, taos_get_client_info()); + if (strlen(argv[i]) == 2) { + printf("Enter password: "); + if (scanf("%s", g_password) > 1) { + fprintf(stderr, "password read error!\n"); + } + getchar(); + } else { + tstrncpy(g_password, (char *)(argv[i] + 2), MAX_PASSWORD_SIZE); + } + arguments->password = g_password; } // for management port else if (strcmp(argv[i], "-P") == 0) { @@ -104,7 +118,7 @@ void shellParseArgument(int argc, char *argv[], SShellArguments *arguments) { exit(EXIT_FAILURE); } } else if (strcmp(argv[i], "-c") == 0) { - if (i < argc - 1) { + if (i < argc - 1) { char *tmp = argv[++i]; if (strlen(tmp) >= TSDB_FILENAME_LEN) { fprintf(stderr, "config file path: %s overflow max len %d\n", tmp, TSDB_FILENAME_LEN - 1); @@ -265,7 +279,7 @@ void *shellLoopQuery(void *arg) { if (command == NULL) return NULL; int32_t err = 0; - + do { memset(command, 0, MAX_COMMAND_SIZE); shellPrintPrompt(); @@ -274,7 +288,7 @@ void *shellLoopQuery(void *arg) { err = shellReadCommand(con, command); if (err) { break; - } + } } while (shellRunCommand(con, command) == 0); return NULL; diff 
--git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 016e27dc13..9a7d3cd25c 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -866,8 +866,12 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { arguments->user = argv[++i]; } else if (strncmp(argv[i], "-p", 2) == 0) { if (strlen(argv[i]) == 2) { - printf("Enter password:"); - scanf("%s", arguments->password); + printf("Enter password: "); + taosSetConsoleEcho(false); + if (scanf("%s", arguments->password) > 1) { + fprintf(stderr, "password read error!\n"); + } + taosSetConsoleEcho(true); } else { tstrncpy(arguments->password, (char *)(argv[i] + 2), MAX_PASSWORD_SIZE); } diff --git a/src/kit/taosdump/taosdump.c b/src/kit/taosdump/taosdump.c index bea6e65106..c54b8da1b7 100644 --- a/src/kit/taosdump/taosdump.c +++ b/src/kit/taosdump/taosdump.c @@ -206,9 +206,9 @@ static struct argp_option options[] = { {"host", 'h', "HOST", 0, "Server host dumping data from. Default is localhost.", 0}, {"user", 'u', "USER", 0, "User name used to connect to server. Default is root.", 0}, #ifdef _TD_POWER_ - {"password", 'p', "PASSWORD", 0, "User password to connect to server. Default is powerdb.", 0}, + {"password", 'p', 0, 0, "User password to connect to server. Default is powerdb.", 0}, #else - {"password", 'p', "PASSWORD", 0, "User password to connect to server. Default is taosdata.", 0}, + {"password", 'p', 0, 0, "User password to connect to server. Default is taosdata.", 0}, #endif {"port", 'P', "PORT", 0, "Port to connect", 0}, {"cversion", 'v', "CVERION", 0, "client version", 0}, @@ -248,12 +248,14 @@ static struct argp_option options[] = { {0} }; +#define MAX_PASSWORD_SIZE 20 + /* Used by main to communicate with parse_opt. */ typedef struct arguments { // connection option char *host; char *user; - char *password; + char password[MAX_PASSWORD_SIZE]; uint16_t port; char cversion[12]; uint16_t mysqlFlag; @@ -376,7 +378,6 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) { g_args.user = arg; break; case 'p': - g_args.password = arg; break; case 'P': g_args.port = atoi(arg); @@ -554,6 +555,25 @@ static void parse_precision_first( } } +static void parse_password( + int argc, char *argv[], SArguments *arguments) { + for (int i = 1; i < argc; i++) { + if (strncmp(argv[i], "-p", 2) == 0) { + if (strlen(argv[i]) == 2) { + printf("Enter password: "); + taosSetConsoleEcho(false); + if(scanf("%20s", arguments->password) > 1) { + errorPrint("%s() LN%d, password read error!\n", __func__, __LINE__); + } + taosSetConsoleEcho(true); + } else { + tstrncpy(arguments->password, (char *)(argv[i] + 2), MAX_PASSWORD_SIZE); + } + argv[i] = ""; + } + } +} + static void parse_timestamp( int argc, char *argv[], SArguments *arguments) { for (int i = 1; i < argc; i++) { @@ -616,9 +636,10 @@ int main(int argc, char *argv[]) { int ret = 0; /* Parse our arguments; every option seen by parse_opt will be reflected in arguments. 
*/ - if (argc > 2) { + if (argc > 1) { parse_precision_first(argc, argv, &g_args); parse_timestamp(argc, argv, &g_args); + parse_password(argc, argv, &g_args); } argp_parse(&argp, argc, argv, 0, 0, &g_args); diff --git a/src/os/inc/osSystem.h b/src/os/inc/osSystem.h index e7a3ec13ae..4b79250740 100644 --- a/src/os/inc/osSystem.h +++ b/src/os/inc/osSystem.h @@ -24,6 +24,8 @@ void* taosLoadDll(const char *filename); void* taosLoadSym(void* handle, char* name); void taosCloseDll(void *handle); +int taosSetConsoleEcho(bool on); + #ifdef __cplusplus } #endif diff --git a/src/os/src/darwin/darwinSystem.c b/src/os/src/darwin/darwinSystem.c index 17cafdd664..6f296c9fef 100644 --- a/src/os/src/darwin/darwinSystem.c +++ b/src/os/src/darwin/darwinSystem.c @@ -29,4 +29,30 @@ void* taosLoadSym(void* handle, char* name) { void taosCloseDll(void *handle) { } +int taosSetConsoleEcho(bool on) +{ +#if 0 +#define ECHOFLAGS (ECHO | ECHOE | ECHOK | ECHONL) + int err; + struct termios term; + + if (tcgetattr(STDIN_FILENO, &term) == -1) { + perror("Cannot get the attribution of the terminal"); + return -1; + } + + if (on) + term.c_lflag|=ECHOFLAGS; + else + term.c_lflag &=~ECHOFLAGS; + + err = tcsetattr(STDIN_FILENO,TCSAFLUSH,&term); + if (err == -1 && err == EINTR) { + perror("Cannot set the attribution of the terminal"); + return -1; + } + +#endif + return 0; +} diff --git a/src/os/src/linux/osSystem.c b/src/os/src/linux/osSystem.c index 052b7a22a8..0cdb20dbdb 100644 --- a/src/os/src/linux/osSystem.c +++ b/src/os/src/linux/osSystem.c @@ -51,4 +51,28 @@ void taosCloseDll(void *handle) { } } +int taosSetConsoleEcho(bool on) +{ +#define ECHOFLAGS (ECHO | ECHOE | ECHOK | ECHONL) + int err; + struct termios term; + + if (tcgetattr(STDIN_FILENO, &term) == -1) { + perror("Cannot get the attribution of the terminal"); + return -1; + } + + if (on) + term.c_lflag|=ECHOFLAGS; + else + term.c_lflag &=~ECHOFLAGS; + + err = tcsetattr(STDIN_FILENO,TCSAFLUSH,&term); + if (err == -1 && err == EINTR) { + perror("Cannot set the attribution of the terminal"); + return -1; + } + + return 0; +} diff --git a/src/os/src/windows/wSystem.c b/src/os/src/windows/wSystem.c index 17cafdd664..564005f79b 100644 --- a/src/os/src/windows/wSystem.c +++ b/src/os/src/windows/wSystem.c @@ -30,3 +30,17 @@ void taosCloseDll(void *handle) { } +int taosSetConsoleEcho(bool on) +{ + HANDLE hStdin = GetStdHandle(STD_INPUT_HANDLE); + DWORD mode = 0; + GetConsoleMode(hStdin, &mode ); + if (on) { + mode |= ENABLE_ECHO_INPUT; + } else { + mode &= ~ENABLE_ECHO_INPUT; + } + SetConsoleMode(hStdin, mode); + + return 0; +} From 9f0eb43164676fe8763dfeff90d63ced8416635e Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Thu, 19 Aug 2021 10:57:23 +0800 Subject: [PATCH 115/165] add topicBianryLen config parameters for cenc --- src/common/inc/tglobal.h | 1 + src/common/src/tglobal.c | 11 +++++++++++ 2 files changed, 12 insertions(+) diff --git a/src/common/inc/tglobal.h b/src/common/inc/tglobal.h index ed35168457..3c2069339a 100644 --- a/src/common/inc/tglobal.h +++ b/src/common/inc/tglobal.h @@ -158,6 +158,7 @@ extern char tsDataDir[]; extern char tsLogDir[]; extern char tsScriptDir[]; extern int64_t tsTickPerDay[3]; +extern int32_t tsTopicBianryLen; // system info extern char tsOsName[]; diff --git a/src/common/src/tglobal.c b/src/common/src/tglobal.c index e4ff353787..38ae56eb24 100644 --- a/src/common/src/tglobal.c +++ b/src/common/src/tglobal.c @@ -199,6 +199,7 @@ char tsScriptDir[PATH_MAX] = {0}; char tsTempDir[PATH_MAX] = "/tmp/"; int32_t tsDiskCfgNum = 0; 
+int32_t tsTopicBianryLen = 16000; #ifndef _STORAGE SDiskCfg tsDiskCfg[1]; @@ -1216,6 +1217,16 @@ static void doInitGlobalConfig(void) { cfg.unitType = TAOS_CFG_UTYPE_NONE; taosInitConfigOption(cfg); + cfg.option = "topicBianryLen"; + cfg.ptr = &tsTopicBianryLen; + cfg.valType = TAOS_CFG_VTYPE_INT32; + cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG; + cfg.minValue = 16; + cfg.maxValue = 16000; + cfg.ptrLength = 0; + cfg.unitType = TAOS_CFG_UTYPE_NONE; + taosInitConfigOption(cfg); + cfg.option = "httpEnableRecordSql"; cfg.ptr = &tsHttpEnableRecordSql; cfg.valType = TAOS_CFG_VTYPE_INT8; From eb4e9067376eac12fd930719a0eb5825c4283113 Mon Sep 17 00:00:00 2001 From: Ping Xiao Date: Thu, 19 Aug 2021 14:13:17 +0800 Subject: [PATCH 116/165] [ci skip] update scirpt for csv generator --- .../pytest/insert/insertFromCSVPerformance.py | 28 +++++++++++++------ 1 file changed, 19 insertions(+), 9 deletions(-) diff --git a/tests/pytest/insert/insertFromCSVPerformance.py b/tests/pytest/insert/insertFromCSVPerformance.py index f3b9c2734d..487497631a 100644 --- a/tests/pytest/insert/insertFromCSVPerformance.py +++ b/tests/pytest/insert/insertFromCSVPerformance.py @@ -28,7 +28,7 @@ class insertFromCSVPerformace: self.tbName = tbName self.branchName = branchName self.type = buildType - self.ts = 1500074556514 + self.ts = 1500000000000 self.host = "127.0.0.1" self.user = "root" self.password = "taosdata" @@ -46,13 +46,20 @@ class insertFromCSVPerformace: config = self.config) def writeCSV(self): - with open('test3.csv','w', encoding='utf-8', newline='') as csvFile: + tsset = set() + rows = 0 + with open('test4.csv','w', encoding='utf-8', newline='') as csvFile: writer = csv.writer(csvFile, dialect='excel') - for i in range(1000000): - newTimestamp = self.ts + random.randint(10000000, 10000000000) + random.randint(1000, 10000000) + random.randint(1, 1000) - d = datetime.datetime.fromtimestamp(newTimestamp / 1000) - dt = str(d.strftime("%Y-%m-%d %H:%M:%S.%f")) - writer.writerow(["'%s'" % dt, random.randint(1, 100), random.uniform(1, 100), random.randint(1, 100), random.randint(1, 100)]) + while True: + newTimestamp = self.ts + random.randint(1, 10) * 10000000000 + random.randint(1, 10) * 1000000000 + random.randint(1, 10) * 100000000 + random.randint(1, 10) * 10000000 + random.randint(1, 10) * 1000000 + random.randint(1, 10) * 100000 + random.randint(1, 10) * 10000 + random.randint(1, 10) * 1000 + random.randint(1, 10) * 100 + random.randint(1, 10) * 10 + random.randint(1, 10) + if newTimestamp not in tsset: + tsset.add(newTimestamp) + d = datetime.datetime.fromtimestamp(newTimestamp / 1000) + dt = str(d.strftime("%Y-%m-%d %H:%M:%S.%f")) + writer.writerow(["'%s'" % dt, random.randint(1, 100), random.uniform(1, 100), random.randint(1, 100), random.randint(1, 100)]) + rows += 1 + if rows == 2000000: + break def removCSVHeader(self): data = pd.read_csv("ordered.csv") @@ -71,7 +78,9 @@ class insertFromCSVPerformace: cursor.execute("create table if not exists t1(ts timestamp, c1 int, c2 float, c3 int, c4 int)") startTime = time.time() cursor.execute("insert into t1 file 'outoforder.csv'") - totalTime += time.time() - startTime + totalTime += time.time() - startTime + time.sleep(1) + out_of_order_time = (float) (totalTime / 10) print("Out of Order - Insert time: %f" % out_of_order_time) @@ -81,7 +90,8 @@ class insertFromCSVPerformace: cursor.execute("create table if not exists t2(ts timestamp, c1 int, c2 float, c3 int, c4 int)") startTime = time.time() cursor.execute("insert into t2 file 'ordered.csv'") - totalTime += 
time.time() - startTime + totalTime += time.time() - startTime + time.sleep(1) in_order_time = (float) (totalTime / 10) print("In order - Insert time: %f" % in_order_time) From e09367b01e379cb345a2e569df236de0776d4f21 Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Thu, 19 Aug 2021 14:18:18 +0800 Subject: [PATCH 117/165] [TD-6209]add case for TD-6027 --- tests/pytest/alter/alter_table.py | 20 ++++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) diff --git a/tests/pytest/alter/alter_table.py b/tests/pytest/alter/alter_table.py index a5acb7a73e..33e0aec727 100644 --- a/tests/pytest/alter/alter_table.py +++ b/tests/pytest/alter/alter_table.py @@ -102,6 +102,20 @@ class TDTestCase: print("check2: i=%d colIdx=%d" % (i, colIdx)) tdSql.checkData(0, i, self.rowNum * (colIdx - i + 3)) + def alter_table_255_times(self): # add case for TD-6207 + for i in range(255): + tdLog.info("alter table st add column cb%d int"%i) + tdSql.execute("alter table st add column cb%d int"%i) + tdSql.execute("insert into t0 (ts,c1) values(now,1)") + tdSql.execute("reset query cache") + tdSql.query("select * from st") + tdSql.execute("create table mt(ts timestamp, i int)") + tdSql.execute("insert into mt values(now,11)") + tdSql.query("select * from mt") + tdDnodes.stop(1) + tdDnodes.start(1) + tdSql.query("describe db.st") + def run(self): # Setup params db = "db" @@ -131,12 +145,14 @@ class TDTestCase: tdSql.checkData(0, i, self.rowNum * (size - i)) - tdSql.execute("create table st(ts timestamp, c1 int) tags(t1 float)") - tdSql.execute("create table t0 using st tags(null)") + tdSql.execute("create table st(ts timestamp, c1 int) tags(t1 float,t2 int,t3 double)") + tdSql.execute("create table t0 using st tags(null,1,2.3)") tdSql.execute("alter table t0 set tag t1=2.1") tdSql.query("show tables") tdSql.checkRows(2) + self.alter_table_255_times() + def stop(self): tdSql.close() From c5cf06d2c6e912cc52552094016b9b1a6d417e85 Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Fri, 20 Aug 2021 10:27:19 +0800 Subject: [PATCH 118/165] full test for feature/TD-6214 --- tests/pytest/query/nestedQuery/queryWithOrderLimit.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/pytest/query/nestedQuery/queryWithOrderLimit.py b/tests/pytest/query/nestedQuery/queryWithOrderLimit.py index 692b5b7d36..aa16e8cc76 100644 --- a/tests/pytest/query/nestedQuery/queryWithOrderLimit.py +++ b/tests/pytest/query/nestedQuery/queryWithOrderLimit.py @@ -29,7 +29,6 @@ class TDTestCase: self.tables = 10 self.rowsPerTable = 100 - def run(self): # tdSql.execute("drop database db ") tdSql.prepare() From 5053132cf4bdce14207cac6b8aa9bf6a07fd23d5 Mon Sep 17 00:00:00 2001 From: liuyq-617 Date: Fri, 20 Aug 2021 16:34:41 +0800 Subject: [PATCH 119/165] [TD-6223]concurrent query support nested query --- tests/pytest/concurrent_inquiry.py | 76 ++++++++++++++++++++++++++---- 1 file changed, 66 insertions(+), 10 deletions(-) diff --git a/tests/pytest/concurrent_inquiry.py b/tests/pytest/concurrent_inquiry.py index 333c2a0a57..7af38c3b56 100644 --- a/tests/pytest/concurrent_inquiry.py +++ b/tests/pytest/concurrent_inquiry.py @@ -175,12 +175,62 @@ class ConcurrentInquiry: def con_group(self,tlist,col_list,tag_list): rand_tag = random.randint(0,5) rand_col = random.randint(0,1) - return 'group by '+','.join(random.sample(col_list,rand_col) + random.sample(tag_list,rand_tag)) - + if len(tag_list): + return 'group by '+','.join(random.sample(col_list,rand_col) + random.sample(tag_list,rand_tag)) + else: + return 'group by 
'+','.join(random.sample(col_list,rand_col)) + def con_order(self,tlist,col_list,tag_list): return 'order by '+random.choice(tlist) - def gen_query_sql(self): #生成查询语句 + def gen_subquery_sql(self): + subsql ,col_num = self.gen_query_sql(1) + if col_num == 0: + return 0 + col_list=[] + tag_list=[] + for i in range(col_num): + col_list.append("taosd%d"%i) + + tlist=col_list+['abc'] #增加不存在的域'abc',是否会引起新bug + con_rand=random.randint(0,len(condition_list)) + func_rand=random.randint(0,len(func_list)) + col_rand=random.randint(0,len(col_list)) + t_rand=random.randint(0,len(tlist)) + sql='select ' #select + random.shuffle(col_list) + random.shuffle(func_list) + sel_col_list=[] + col_rand=random.randint(0,len(col_list)) + loop = 0 + for i,j in zip(col_list[0:col_rand],func_list): #决定每个被查询col的函数 + alias = ' as '+ 'sub%d ' % loop + loop += 1 + pick_func = '' + if j == 'leastsquares': + pick_func=j+'('+i+',1,1)' + elif j == 'top' or j == 'bottom' or j == 'percentile' or j == 'apercentile': + pick_func=j+'('+i+',1)' + else: + pick_func=j+'('+i+')' + if bool(random.getrandbits(1)) : + pick_func+=alias + sel_col_list.append(pick_func) + if col_rand == 0: + sql = sql + '*' + else: + sql=sql+','.join(sel_col_list) #select col & func + sql = sql + ' from ('+ subsql +') ' + con_func=[self.con_where,self.con_interval,self.con_limit,self.con_group,self.con_order,self.con_fill] + sel_con=random.sample(con_func,random.randint(0,len(con_func))) + sel_con_list=[] + for i in sel_con: + sel_con_list.append(i(tlist,col_list,tag_list)) #获取对应的条件函数 + sql+=' '.join(sel_con_list) # condition + #print(sql) + return sql + + def gen_query_sql(self,subquery=0): #生成查询语句 tbi=random.randint(0,len(self.subtb_list)+len(self.stb_list)) #随机决定查询哪张表 tbname='' col_list=[] @@ -218,10 +268,10 @@ class ConcurrentInquiry: pick_func=j+'('+i+',1)' else: pick_func=j+'('+i+')' - if bool(random.getrandbits(1)): + if bool(random.getrandbits(1)) | subquery : pick_func+=alias sel_col_list.append(pick_func) - if col_rand == 0: + if col_rand == 0 & subquery : sql = sql + '*' else: sql=sql+','.join(sel_col_list) #select col & func @@ -238,7 +288,7 @@ class ConcurrentInquiry: sel_con_list.append(i(tlist,col_list,tag_list)) #获取对应的条件函数 sql+=' '.join(sel_con_list) # condition #print(sql) - return sql + return (sql,loop) def gen_query_join(self): #生成join查询语句 tbname = [] @@ -429,9 +479,12 @@ class ConcurrentInquiry: try: if self.random_pick(): - sql=self.gen_query_sql() + if self.random_pick(): + sql,temp=self.gen_query_sql() + else: + sql = self.gen_subquery_sql() else: - sql=self.gen_query_join() + sql = self.gen_query_join() print("sql is ",sql) fo.write(sql+'\n') start = time.time() @@ -496,9 +549,12 @@ class ConcurrentInquiry: while loop: try: if self.random_pick(): - sql=self.gen_query_sql() + if self.random_pick(): + sql,temp=self.gen_query_sql() + else: + sql = self.gen_subquery_sql() else: - sql=self.gen_query_join() + sql = self.gen_query_join() print("sql is ",sql) fo.write(sql+'\n') start = time.time() From 8d7cb8f0c29963df332629f785c7966968144b49 Mon Sep 17 00:00:00 2001 From: afwerar <1296468573@qq.com> Date: Sat, 21 Aug 2021 21:25:56 +0800 Subject: [PATCH 120/165] [TD-6169]: windows dll client can not quit. 
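In a Windows DLL unload the cache refresh thread may already have been killed before the
atexit handlers run, so taosCacheCleanup could spin forever on the deleting flag and the
client process never exited. The change below registers atexit(taos_cleanup) on every
platform and bounds the wait in taosCacheCleanup to roughly one second (20 polls, 50 ms
apart). A minimal, self-contained sketch of that bounded-wait pattern follows; the names
deleting, wait_refresh_thread_bounded and the use of usleep are illustrative stand-ins,
not TDengine symbols:

    #include <stdio.h>
    #include <stdatomic.h>
    #include <unistd.h>

    /* Stand-in for pCacheObj->deleting: set by cleanup, normally cleared by the
     * refresh thread when it exits. */
    static atomic_int deleting = 1;

    /* Poll the flag at most 20 times, 50 ms apart (~1 s total), so a refresh
     * thread that was already killed cannot hang process exit. */
    static void wait_refresh_thread_bounded(void) {
        for (int i = 0; i < 20 && atomic_load(&deleting) != 0; i++) {
            usleep(50 * 1000);
        }
    }

    int main(void) {
        /* The flag is never cleared here, mimicking a killed refresh thread:
         * the old unbounded while-loop would hang, this returns after ~1 s. */
        wait_refresh_thread_bounded();
        printf("cleanup finished without hanging\n");
        return 0;
    }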
--- src/client/src/tscSystem.c | 3 --- src/util/src/tcache.c | 3 ++- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/src/client/src/tscSystem.c b/src/client/src/tscSystem.c index 8c8afc8d88..c04765b065 100644 --- a/src/client/src/tscSystem.c +++ b/src/client/src/tscSystem.c @@ -199,10 +199,7 @@ void taos_init_imp(void) { // In the APIs of other program language, taos_cleanup is not available yet. // So, to make sure taos_cleanup will be invoked to clean up the allocated resource to suppress the valgrind warning. - // But in the dll, the child thread will be killed before atexit takes effect.So taos_cleanup is not necessary. -#if !defined(TD_WINDOWS) atexit(taos_cleanup); -#endif tscDebug("client is initialized successfully"); } diff --git a/src/util/src/tcache.c b/src/util/src/tcache.c index 69b3741e13..aaa1d5ba9e 100644 --- a/src/util/src/tcache.c +++ b/src/util/src/tcache.c @@ -537,7 +537,8 @@ void taosCacheCleanup(SCacheObj *pCacheObj) { pCacheObj->deleting = 1; // wait for the refresh thread quit before destroying the cache object. - while(atomic_load_8(&pCacheObj->deleting) != 0) { + // But in the dll, the child thread will be killed before atexit takes effect.So here we only wait for one second. + for (int i = 0; i < 20&&atomic_load_8(&pCacheObj->deleting) != 0; i++) { taosMsleep(50); } From 96a4a48e43cee783208e504a3ae9b9d3b844ffc7 Mon Sep 17 00:00:00 2001 From: xywang Date: Sun, 22 Aug 2021 17:01:46 +0800 Subject: [PATCH 121/165] [TD-6001]: use db_name in url if exists --- src/client/CMakeLists.txt | 1 + src/client/inc/tsclient.h | 6 ++++++ src/client/src/tscSQLParser.c | 24 ++++++++++++++++++++++-- src/plugins/http/inc/httpInt.h | 1 + src/plugins/http/inc/httpRestHandle.h | 10 +++++----- src/plugins/http/src/httpRestHandle.c | 14 ++++++++++++-- src/plugins/http/src/httpSql.c | 5 +++++ 7 files changed, 52 insertions(+), 9 deletions(-) diff --git a/src/client/CMakeLists.txt b/src/client/CMakeLists.txt index 2f83557d63..77417a24a4 100644 --- a/src/client/CMakeLists.txt +++ b/src/client/CMakeLists.txt @@ -4,6 +4,7 @@ PROJECT(TDengine) INCLUDE_DIRECTORIES(inc) INCLUDE_DIRECTORIES(jni) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/query/inc) +INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/plugins/http/inc) AUX_SOURCE_DIRECTORY(src SRC) IF (TD_LINUX) diff --git a/src/client/inc/tsclient.h b/src/client/inc/tsclient.h index 4ed7894931..6cec0bd798 100644 --- a/src/client/inc/tsclient.h +++ b/src/client/inc/tsclient.h @@ -38,6 +38,11 @@ extern "C" { #include "qUtil.h" #include "tcmdtype.h" +typedef enum { + TAOS_REQ_FROM_SHELL, + TAOS_REQ_FROM_HTTP +} SReqOrigin; + // forward declaration struct SSqlInfo; @@ -340,6 +345,7 @@ typedef struct STscObj { SRpcCorEpSet *tscCorMgmtEpSet; pthread_mutex_t mutex; int32_t numOfObj; // number of sqlObj from this tscObj + SReqOrigin from; } STscObj; typedef struct SSubqueryState { diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index 612a3d4798..d1b2e6fd27 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -40,6 +40,7 @@ #include "qScript.h" #include "ttype.h" #include "qFilter.h" +#include "httpInt.h" #define DEFAULT_PRIMARY_TIMESTAMP_COL_NAME "_c0" @@ -1687,8 +1688,28 @@ static bool has(SArray* pFieldList, int32_t startIdx, const char* name) { static char* getAccountId(SSqlObj* pSql) { return pSql->pTscObj->acctId; } static char* cloneCurrentDBName(SSqlObj* pSql) { + char *p = NULL; + HttpContext *pCtx = NULL; + pthread_mutex_lock(&pSql->pTscObj->mutex); - char *p = 
strdup(pSql->pTscObj->db); + STscObj *pTscObj = pSql->pTscObj; + switch (pTscObj->from) { + case TAOS_REQ_FROM_HTTP: + pCtx = pSql->param; + if (pCtx && pCtx->db[0] != '\0') { + char db[TSDB_ACCT_ID_LEN + TSDB_DB_NAME_LEN] = {0}; + int32_t len = sprintf(db, "%s%s%s", pTscObj->acctId, TS_PATH_DELIMITER, pCtx->db); + assert(len <= sizeof(db)); + + p = strdup(db); + } + break; + default: + break; + } + if (p == NULL) { + p = strdup(pSql->pTscObj->db); + } pthread_mutex_unlock(&pSql->pTscObj->mutex); return p; @@ -8684,7 +8705,6 @@ static STableMeta* extractTempTableMetaFromSubquery(SQueryInfo* pUpstream) { n += 1; } info->numOfColumns = n; - return meta; } diff --git a/src/plugins/http/inc/httpInt.h b/src/plugins/http/inc/httpInt.h index 0a5822b908..99a5b770aa 100644 --- a/src/plugins/http/inc/httpInt.h +++ b/src/plugins/http/inc/httpInt.h @@ -150,6 +150,7 @@ typedef struct HttpContext { char ipstr[22]; char user[TSDB_USER_LEN]; // parsed from auth token or login message char pass[HTTP_PASSWORD_LEN]; + char db[/*TSDB_ACCT_ID_LEN + */TSDB_DB_NAME_LEN]; TAOS * taos; void * ppContext; HttpSession *session; diff --git a/src/plugins/http/inc/httpRestHandle.h b/src/plugins/http/inc/httpRestHandle.h index 632a1dc647..df405685e9 100644 --- a/src/plugins/http/inc/httpRestHandle.h +++ b/src/plugins/http/inc/httpRestHandle.h @@ -22,12 +22,12 @@ #include "httpResp.h" #include "httpSql.h" -#define REST_ROOT_URL_POS 0 -#define REST_ACTION_URL_POS 1 -#define REST_USER_URL_POS 2 -#define REST_PASS_URL_POS 3 +#define REST_ROOT_URL_POS 0 +#define REST_ACTION_URL_POS 1 +#define REST_USER_USEDB_URL_POS 2 +#define REST_PASS_URL_POS 3 void restInitHandle(HttpServer* pServer); bool restProcessRequest(struct HttpContext* pContext); -#endif \ No newline at end of file +#endif diff --git a/src/plugins/http/src/httpRestHandle.c b/src/plugins/http/src/httpRestHandle.c index a285670d20..a029adec0c 100644 --- a/src/plugins/http/src/httpRestHandle.c +++ b/src/plugins/http/src/httpRestHandle.c @@ -62,11 +62,11 @@ void restInitHandle(HttpServer* pServer) { bool restGetUserFromUrl(HttpContext* pContext) { HttpParser* pParser = pContext->parser; - if (pParser->path[REST_USER_URL_POS].pos >= TSDB_USER_LEN || pParser->path[REST_USER_URL_POS].pos <= 0) { + if (pParser->path[REST_USER_USEDB_URL_POS].pos >= TSDB_USER_LEN || pParser->path[REST_USER_USEDB_URL_POS].pos <= 0) { return false; } - tstrncpy(pContext->user, pParser->path[REST_USER_URL_POS].str, TSDB_USER_LEN); + tstrncpy(pContext->user, pParser->path[REST_USER_USEDB_URL_POS].str, TSDB_USER_LEN); return true; } @@ -107,6 +107,16 @@ bool restProcessSqlRequest(HttpContext* pContext, int32_t timestampFmt) { HttpSqlCmd* cmd = &(pContext->singleCmd); cmd->nativSql = sql; + /* find if there is db_name in url */ + pContext->db[0] = '\0'; + + HttpString *path = &pContext->parser->path[REST_USER_USEDB_URL_POS]; + if (path->pos > 0 && !(strlen(sql) > 4 && (sql[0] == 'u' || sql[0] == 'U') && + (sql[1] == 's' || sql[1] == 'S') && (sql[2] == 'e' || sql[2] == 'E') && sql[3] == ' ')) + { + snprintf(pContext->db, /*TSDB_ACCT_ID_LEN + */TSDB_DB_NAME_LEN, "%s", path->str); + } + pContext->reqType = HTTP_REQTYPE_SINGLE_SQL; if (timestampFmt == REST_TIMESTAMP_FMT_LOCAL_STRING) { pContext->encodeMethod = &restEncodeSqlLocalTimeStringMethod; diff --git a/src/plugins/http/src/httpSql.c b/src/plugins/http/src/httpSql.c index c2e723732a..0dd451f72d 100644 --- a/src/plugins/http/src/httpSql.c +++ b/src/plugins/http/src/httpSql.c @@ -419,6 +419,11 @@ void httpProcessRequest(HttpContext *pContext) { 
&(pContext->taos)); httpDebug("context:%p, fd:%d, user:%s, try connect tdengine, taos:%p", pContext, pContext->fd, pContext->user, pContext->taos); + + if (pContext->taos != NULL) { + STscObj *pObj = pContext->taos; + pObj->from = TAOS_REQ_FROM_HTTP; + } } else { httpExecCmd(pContext); } From 913bb6d2cf07969fcb7538be30d8638c9b87867e Mon Sep 17 00:00:00 2001 From: xywang Date: Mon, 23 Aug 2021 11:08:40 +0800 Subject: [PATCH 122/165] [TD-6005]: test cases --- tests/http/restful/http_create_db.c | 429 ++++++++++++++++++++++++++ tests/http/restful/http_create_tb.c | 433 ++++++++++++++++++++++++++ tests/http/restful/http_drop_db.c | 433 ++++++++++++++++++++++++++ tests/http/restful/http_insert_tb.c | 455 ++++++++++++++++++++++++++++ tests/http/restful/http_query_tb.c | 432 ++++++++++++++++++++++++++ tests/http/restful/http_use_db.c | 430 ++++++++++++++++++++++++++ 6 files changed, 2612 insertions(+) create mode 100644 tests/http/restful/http_create_db.c create mode 100644 tests/http/restful/http_create_tb.c create mode 100644 tests/http/restful/http_drop_db.c create mode 100644 tests/http/restful/http_insert_tb.c create mode 100644 tests/http/restful/http_query_tb.c create mode 100644 tests/http/restful/http_use_db.c diff --git a/tests/http/restful/http_create_db.c b/tests/http/restful/http_create_db.c new file mode 100644 index 0000000000..0bc52fa6cc --- /dev/null +++ b/tests/http/restful/http_create_db.c @@ -0,0 +1,429 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +#define RECV_MAX_LINE 2048 +#define ITEM_MAX_LINE 128 +#define REQ_MAX_LINE 2048 +#define REQ_CLI_COUNT 100 + + +typedef enum +{ + uninited, + connecting, + connected, + datasent +} conn_stat; + + +typedef enum +{ + false, + true +} bool; + + +typedef unsigned short u16_t; +typedef unsigned int u32_t; + + +typedef struct +{ + int sockfd; + int index; + conn_stat state; + size_t nsent; + size_t nrecv; + size_t nlen; + bool error; + bool success; + struct sockaddr_in serv_addr; +} socket_ctx; + + +int set_nonblocking(int sockfd) +{ + int ret; + + ret = fcntl(sockfd, F_SETFL, fcntl(sockfd, F_GETFL) | O_NONBLOCK); + if (ret == -1) { + printf("failed to fcntl for %d\r\n", sockfd); + return ret; + } + + return ret; +} + + +int create_socket(const char *ip, const u16_t port, socket_ctx *pctx) +{ + int ret; + + if (ip == NULL || port == 0 || pctx == NULL) { + printf("invalid parameter\r\n"); + return -1; + } + + pctx->sockfd = socket(AF_INET, SOCK_STREAM, 0); + if (pctx->sockfd == -1) { + printf("failed to create socket\r\n"); + return -1; + } + + bzero(&pctx->serv_addr, sizeof(struct sockaddr_in)); + + pctx->serv_addr.sin_family = AF_INET; + pctx->serv_addr.sin_port = htons(port); + + ret = inet_pton(AF_INET, ip, &pctx->serv_addr.sin_addr); + if (ret <= 0) { + printf("inet_pton error, ip: %s\r\n", ip); + return -1; + } + + ret = set_nonblocking(pctx->sockfd); + if (ret == -1) { + printf("failed to set %d as nonblocking\r\n", pctx->sockfd); + return -1; + } + + return pctx->sockfd; +} + + +void close_sockets(socket_ctx *pctx, int cnt) +{ + int i; + + if (pctx == NULL) { + return; + } + + for (i = 0; i < cnt; i++) { + if (pctx[i].sockfd > 0) { + close(pctx[i].sockfd); + pctx[i].sockfd = -1; + } + } +} + + +int proc_pending_error(socket_ctx *ctx) +{ + int ret; + int err; + socklen_t len; + + if (ctx == NULL) { + return 0; + } + + err = 0; + len = sizeof(int); + + ret = getsockopt(ctx->sockfd, SOL_SOCKET, SO_ERROR, (void *)&err, &len); + if (ret == -1) { + err = errno; + } 
+ + if (err) { + printf("failed to connect at index: %d\r\n", ctx->index); + + close(ctx->sockfd); + ctx->sockfd = -1; + + return -1; + } + + return 0; +} + + +void build_http_request(char *ip, u16_t port, char *url, char *sql, char *req_buf, int len) +{ + char req_line[ITEM_MAX_LINE]; + char req_host[ITEM_MAX_LINE]; + char req_cont_type[ITEM_MAX_LINE]; + char req_cont_len[ITEM_MAX_LINE]; + const char* req_auth = "Authorization: Basic cm9vdDp0YW9zZGF0YQ==\r\n"; + + if (ip == NULL || port == 0 || + url == NULL || url[0] == '\0' || + sql == NULL || sql[0] == '\0' || + req_buf == NULL || len <= 0) + { + return; + } + + snprintf(req_line, ITEM_MAX_LINE, "POST %s HTTP/1.1\r\n", url); + snprintf(req_host, ITEM_MAX_LINE, "HOST: %s:%d\r\n", ip, port); + snprintf(req_cont_type, ITEM_MAX_LINE, "%s\r\n", "Content-Type: text/plain"); + snprintf(req_cont_len, ITEM_MAX_LINE, "Content-Length: %ld\r\n\r\n", strlen(sql)); + + snprintf(req_buf, len, "%s%s%s%s%s%s", req_line, req_host, req_auth, req_cont_type, req_cont_len, sql); +} + + +int add_event(int epfd, int sockfd, u32_t events, void *data) +{ + struct epoll_event evs_op; + + evs_op.data.ptr = data; + evs_op.events = events; + + return epoll_ctl(epfd, EPOLL_CTL_ADD, sockfd, &evs_op); +} + + +int mod_event(int epfd, int sockfd, u32_t events, void *data) +{ + struct epoll_event evs_op; + + evs_op.data.ptr = data; + evs_op.events = events; + + return epoll_ctl(epfd, EPOLL_CTL_MOD, sockfd, &evs_op); +} + + +int del_event(int epfd, int sockfd) +{ + struct epoll_event evs_op; + + evs_op.events = 0; + evs_op.data.ptr = NULL; + + return epoll_ctl(epfd, EPOLL_CTL_DEL, sockfd, &evs_op); +} + + +int main() +{ + int i; + int ret, n, nsent, nrecv; + int epfd; + u32_t events; + char *str; + socket_ctx *pctx, ctx[REQ_CLI_COUNT]; + char *ip = "127.0.0.1"; + char *url = "/rest/sql"; + u16_t port = 6041; + struct epoll_event evs[REQ_CLI_COUNT]; + char sql[REQ_MAX_LINE]; + char send_buf[REQ_CLI_COUNT][REQ_MAX_LINE + 5 * ITEM_MAX_LINE]; + char recv_buf[REQ_CLI_COUNT][RECV_MAX_LINE]; + int count; + + signal(SIGPIPE, SIG_IGN); + + for (i = 0; i < REQ_CLI_COUNT; i++) { + ctx[i].sockfd = -1; + ctx[i].index = i; + ctx[i].state = uninited; + ctx[i].nsent = 0; + ctx[i].nrecv = 0; + ctx[i].error = false; + ctx[i].success = false; + + memset(sql, 0, REQ_MAX_LINE); + memset(send_buf[i], 0, REQ_MAX_LINE + 5 * ITEM_MAX_LINE); + memset(recv_buf[i], 0, RECV_MAX_LINE); + + snprintf(sql, REQ_MAX_LINE, "create database if not exists db%d precision 'us'", i); + build_http_request(ip, port, url, sql, send_buf[i], REQ_MAX_LINE + 5 * ITEM_MAX_LINE); + + ctx[i].nlen = strlen(send_buf[i]); + } + + epfd = epoll_create(REQ_CLI_COUNT); + if (epfd <= 0) { + printf("failed to create epoll\r\n"); + goto failed; + } + + for (i = 0; i < REQ_CLI_COUNT; i++) { + ret = create_socket(ip, port, &ctx[i]); + if (ret == -1) { + printf("failed to create socket ar %d\r\n", i); + goto failed; + } + } + + for (i = 0; i < REQ_CLI_COUNT; i++) { + events = EPOLLET | EPOLLIN | EPOLLOUT; + ret = add_event(epfd, ctx[i].sockfd, events, (void *) &ctx[i]); + if (ret == -1) { + printf("failed to add sockfd at %d to epoll\r\n", i); + goto failed; + } + } + + count = 0; + + for (i = 0; i < REQ_CLI_COUNT; i++) { + ret = connect(ctx[i].sockfd, (struct sockaddr *) &ctx[i].serv_addr, sizeof(ctx[i].serv_addr)); + if (ret == -1) { + if (errno != EINPROGRESS) { + printf("connect error, index: %d\r\n", ctx[i].index); + (void) del_event(epfd, ctx[i].sockfd); + close(ctx[i].sockfd); + ctx[i].sockfd = -1; + } else { + ctx[i].state = 
connecting; + count++; + } + + continue; + } + + ctx[i].state = connected; + count++; + } + + printf("clients: %d\r\n", count); + + while (count > 0) { + n = epoll_wait(epfd, evs, REQ_CLI_COUNT, 0); + if (n == -1) { + if (errno != EINTR) { + printf("epoll_wait error, reason: %s\r\n", strerror(errno)); + break; + } + } else { + for (i = 0; i < n; i++) { + if (evs[i].events & EPOLLERR) { + pctx = (socket_ctx *) evs[i].data.ptr; + printf("event error, index: %d\r\n", pctx->index); + close(pctx->sockfd); + pctx->sockfd = -1; + count--; + } else if (evs[i].events & EPOLLIN) { + pctx = (socket_ctx *) evs[i].data.ptr; + if (pctx->state == connecting) { + ret = proc_pending_error(pctx); + if (ret == 0) { + printf("client connected, index: %d\r\n", pctx->index); + pctx->state = connected; + } else { + printf("client connect failed, index: %d\r\n", pctx->index); + (void) del_event(epfd, pctx->sockfd); + close(pctx->sockfd); + pctx->sockfd = -1; + count--; + + continue; + } + } + + for ( ;; ) { + nrecv = recv(pctx->sockfd, recv_buf[pctx->index] + pctx->nrecv, RECV_MAX_LINE, 0); + if (nrecv == -1) { + if (errno != EAGAIN && errno != EINTR) { + printf("failed to recv, index: %d, reason: %s\r\n", pctx->index, strerror(errno)); + (void) del_event(epfd, pctx->sockfd); + close(pctx->sockfd); + pctx->sockfd = -1; + count--; + } + + break; + } else if (nrecv == 0) { + printf("peer closed connection, index: %d\r\n", pctx->index); + (void) del_event(epfd, pctx->sockfd); + close(pctx->sockfd); + pctx->sockfd = -1; + count--; + break; + } + + pctx->nrecv += nrecv; + if (pctx->nrecv > 12) { + if (pctx->error == false && pctx->success == false) { + str = recv_buf[pctx->index] + 9; + if (str[0] != '2' || str[1] != '0' || str[2] != '0') { + printf("response error, index: %d, recv: %s\r\n", pctx->index, recv_buf[pctx->index]); + pctx->error = true; + } else { + printf("response ok, index: %d\r\n", pctx->index); + pctx->success = true; + } + } + } + } + } else if (evs[i].events & EPOLLOUT) { + pctx = (socket_ctx *) evs[i].data.ptr; + if (pctx->state == connecting) { + ret = proc_pending_error(pctx); + if (ret == 0) { + printf("client connected, index: %d\r\n", pctx->index); + pctx->state = connected; + } else { + printf("client connect failed, index: %d\r\n", pctx->index); + (void) del_event(epfd, pctx->sockfd); + close(pctx->sockfd); + pctx->sockfd = -1; + count--; + + continue; + } + } + + for ( ;; ) { + nsent = send(pctx->sockfd, send_buf[pctx->index] + pctx->nsent, pctx->nlen - pctx->nsent, 0); + if (nsent == -1) { + if (errno != EAGAIN && errno != EINTR) { + printf("failed to send, index: %d\r\n", pctx->index); + (void) del_event(epfd, pctx->sockfd); + close(pctx->sockfd); + pctx->sockfd = -1; + count--; + } + + break; + } + + if (nsent == (int) (pctx->nlen - pctx->nsent)) { + printf("request done, request: %s, index: %d\r\n", send_buf[pctx->index], pctx->index); + + pctx->state = datasent; + + events = EPOLLET | EPOLLIN; + (void) mod_event(epfd, pctx->sockfd, events, (void *)pctx); + + break; + } else { + pctx->nsent += nsent; + } + } + } else { + pctx = (socket_ctx *) evs[i].data.ptr; + printf("unknown event(%u), index: %d\r\n", evs[i].events, pctx->index); + (void) del_event(epfd, pctx->sockfd); + close(pctx->sockfd); + pctx->sockfd = -1; + count--; + } + } + } + } + +failed: + + if (epfd > 0) { + close(epfd); + } + + close_sockets(ctx, REQ_CLI_COUNT); + + return 0; +} diff --git a/tests/http/restful/http_create_tb.c b/tests/http/restful/http_create_tb.c new file mode 100644 index 0000000000..91ffc54627 --- 
/dev/null +++ b/tests/http/restful/http_create_tb.c @@ -0,0 +1,433 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +#define RECV_MAX_LINE 2048 +#define ITEM_MAX_LINE 128 +#define REQ_MAX_LINE 2048 +#define REQ_CLI_COUNT 100 + + +typedef enum +{ + uninited, + connecting, + connected, + datasent +} conn_stat; + + +typedef enum +{ + false, + true +} bool; + + +typedef unsigned short u16_t; +typedef unsigned int u32_t; + + +typedef struct +{ + int sockfd; + int index; + conn_stat state; + size_t nsent; + size_t nrecv; + size_t nlen; + bool error; + bool success; + struct sockaddr_in serv_addr; +} socket_ctx; + + +int set_nonblocking(int sockfd) +{ + int ret; + + ret = fcntl(sockfd, F_SETFL, fcntl(sockfd, F_GETFL) | O_NONBLOCK); + if (ret == -1) { + printf("failed to fcntl for %d\r\n", sockfd); + return ret; + } + + return ret; +} + + +int create_socket(const char *ip, const u16_t port, socket_ctx *pctx) +{ + int ret; + + if (ip == NULL || port == 0 || pctx == NULL) { + printf("invalid parameter\r\n"); + return -1; + } + + pctx->sockfd = socket(AF_INET, SOCK_STREAM, 0); + if (pctx->sockfd == -1) { + printf("failed to create socket\r\n"); + return -1; + } + + bzero(&pctx->serv_addr, sizeof(struct sockaddr_in)); + + pctx->serv_addr.sin_family = AF_INET; + pctx->serv_addr.sin_port = htons(port); + + ret = inet_pton(AF_INET, ip, &pctx->serv_addr.sin_addr); + if (ret <= 0) { + printf("inet_pton error, ip: %s\r\n", ip); + return -1; + } + + ret = set_nonblocking(pctx->sockfd); + if (ret == -1) { + printf("failed to set %d as nonblocking\r\n", pctx->sockfd); + return -1; + } + + return pctx->sockfd; +} + + +void close_sockets(socket_ctx *pctx, int cnt) +{ + int i; + + if (pctx == NULL) { + return; + } + + for (i = 0; i < cnt; i++) { + if (pctx[i].sockfd > 0) { + close(pctx[i].sockfd); + pctx[i].sockfd = -1; + } + } +} + + +int proc_pending_error(socket_ctx *ctx) +{ + int ret; + int err; + socklen_t len; + + if (ctx == NULL) { + return 0; + } + + err = 0; + len = sizeof(int); + + ret = getsockopt(ctx->sockfd, SOL_SOCKET, SO_ERROR, (void *)&err, &len); + if (ret == -1) { + err = errno; + } + + if (err) { + printf("failed to connect at index: %d\r\n", ctx->index); + + close(ctx->sockfd); + ctx->sockfd = -1; + + return -1; + } + + return 0; +} + + +void build_http_request(char *ip, u16_t port, char *url, char *sql, char *req_buf, int len) +{ + char req_line[ITEM_MAX_LINE]; + char req_host[ITEM_MAX_LINE]; + char req_cont_type[ITEM_MAX_LINE]; + char req_cont_len[ITEM_MAX_LINE]; + const char* req_auth = "Authorization: Basic cm9vdDp0YW9zZGF0YQ==\r\n"; + + if (ip == NULL || port == 0 || + url == NULL || url[0] == '\0' || + sql == NULL || sql[0] == '\0' || + req_buf == NULL || len <= 0) + { + return; + } + + snprintf(req_line, ITEM_MAX_LINE, "POST %s HTTP/1.1\r\n", url); + snprintf(req_host, ITEM_MAX_LINE, "HOST: %s:%d\r\n", ip, port); + snprintf(req_cont_type, ITEM_MAX_LINE, "%s\r\n", "Content-Type: text/plain"); + snprintf(req_cont_len, ITEM_MAX_LINE, "Content-Length: %ld\r\n\r\n", strlen(sql)); + + snprintf(req_buf, len, "%s%s%s%s%s%s", req_line, req_host, req_auth, req_cont_type, req_cont_len, sql); +} + + +int add_event(int epfd, int sockfd, u32_t events, void *data) +{ + struct epoll_event evs_op; + + evs_op.data.ptr = data; + evs_op.events = events; + + return epoll_ctl(epfd, EPOLL_CTL_ADD, sockfd, &evs_op); +} + + +int mod_event(int epfd, int sockfd, u32_t events, void *data) +{ + struct epoll_event evs_op; + + evs_op.data.ptr = data; 
+ evs_op.events = events; + + return epoll_ctl(epfd, EPOLL_CTL_MOD, sockfd, &evs_op); +} + + +int del_event(int epfd, int sockfd) +{ + struct epoll_event evs_op; + + evs_op.events = 0; + evs_op.data.ptr = NULL; + + return epoll_ctl(epfd, EPOLL_CTL_DEL, sockfd, &evs_op); +} + + +int main() +{ + int i; + int ret, n, nsent, nrecv; + int epfd; + u32_t events; + char *str; + socket_ctx *pctx, ctx[REQ_CLI_COUNT]; + char *ip = "127.0.0.1"; + char *url_prefix = "/rest/sql"; + char url[ITEM_MAX_LINE]; + u16_t port = 6041; + struct epoll_event evs[REQ_CLI_COUNT]; + char sql[REQ_MAX_LINE]; + char send_buf[REQ_CLI_COUNT][REQ_MAX_LINE + 5 * ITEM_MAX_LINE]; + char recv_buf[REQ_CLI_COUNT][RECV_MAX_LINE]; + int count; + + signal(SIGPIPE, SIG_IGN); + + for (i = 0; i < REQ_CLI_COUNT; i++) { + ctx[i].sockfd = -1; + ctx[i].index = i; + ctx[i].state = uninited; + ctx[i].nsent = 0; + ctx[i].nrecv = 0; + ctx[i].error = false; + ctx[i].success = false; + + memset(url, 0, ITEM_MAX_LINE); + memset(sql, 0, REQ_MAX_LINE); + memset(send_buf[i], 0, REQ_MAX_LINE + 5 * ITEM_MAX_LINE); + memset(recv_buf[i], 0, RECV_MAX_LINE); + + snprintf(url, ITEM_MAX_LINE, "%s/db%d", url_prefix, i); + snprintf(sql, REQ_MAX_LINE, "create table if not exists tb%d (ts timestamp, index int, val binary(40))", i); + + build_http_request(ip, port, url, sql, send_buf[i], REQ_MAX_LINE + 5 * ITEM_MAX_LINE); + + ctx[i].nlen = strlen(send_buf[i]); + } + + epfd = epoll_create(REQ_CLI_COUNT); + if (epfd <= 0) { + printf("failed to create epoll\r\n"); + goto failed; + } + + for (i = 0; i < REQ_CLI_COUNT; i++) { + ret = create_socket(ip, port, &ctx[i]); + if (ret == -1) { + printf("failed to create socket, index: %d\r\n", i); + goto failed; + } + } + + for (i = 0; i < REQ_CLI_COUNT; i++) { + events = EPOLLET | EPOLLIN | EPOLLOUT; + ret = add_event(epfd, ctx[i].sockfd, events, (void *) &ctx[i]); + if (ret == -1) { + printf("failed to add sockfd to epoll, index: %d\r\n", i); + goto failed; + } + } + + count = 0; + + for (i = 0; i < REQ_CLI_COUNT; i++) { + ret = connect(ctx[i].sockfd, (struct sockaddr *) &ctx[i].serv_addr, sizeof(ctx[i].serv_addr)); + if (ret == -1) { + if (errno != EINPROGRESS) { + printf("connect error, index: %d\r\n", ctx[i].index); + (void) del_event(epfd, ctx[i].sockfd); + close(ctx[i].sockfd); + ctx[i].sockfd = -1; + } else { + ctx[i].state = connecting; + count++; + } + + continue; + } + + ctx[i].state = connected; + count++; + } + + printf("clients: %d\r\n", count); + + while (count > 0) { + n = epoll_wait(epfd, evs, REQ_CLI_COUNT, 0); + if (n == -1) { + if (errno != EINTR) { + printf("epoll_wait error, reason: %s\r\n", strerror(errno)); + break; + } + } else { + for (i = 0; i < n; i++) { + if (evs[i].events & EPOLLERR) { + pctx = (socket_ctx *) evs[i].data.ptr; + printf("event error, index: %d\r\n", pctx->index); + close(pctx->sockfd); + pctx->sockfd = -1; + count--; + } else if (evs[i].events & EPOLLIN) { + pctx = (socket_ctx *) evs[i].data.ptr; + if (pctx->state == connecting) { + ret = proc_pending_error(pctx); + if (ret == 0) { + printf("client connected, index: %d\r\n", pctx->index); + pctx->state = connected; + } else { + printf("client connect failed, index: %d\r\n", pctx->index); + (void) del_event(epfd, pctx->sockfd); + close(pctx->sockfd); + pctx->sockfd = -1; + count--; + + continue; + } + } + + for ( ;; ) { + nrecv = recv(pctx->sockfd, recv_buf[pctx->index] + pctx->nrecv, RECV_MAX_LINE, 0); + if (nrecv == -1) { + if (errno != EAGAIN && errno != EINTR) { + printf("failed to recv, index: %d, reason: %s\r\n", 
pctx->index, strerror(errno)); + (void) del_event(epfd, pctx->sockfd); + close(pctx->sockfd); + pctx->sockfd = -1; + count--; + } + + break; + } else if (nrecv == 0) { + printf("peer closed connection, index: %d\r\n", pctx->index); + (void) del_event(epfd, pctx->sockfd); + close(pctx->sockfd); + pctx->sockfd = -1; + count--; + break; + } + + pctx->nrecv += nrecv; + if (pctx->nrecv > 12) { + if (pctx->error == false && pctx->success == false) { + str = recv_buf[pctx->index] + 9; + if (str[0] != '2' || str[1] != '0' || str[2] != '0') { + printf("response error, index: %d, recv: %s\r\n", pctx->index, recv_buf[pctx->index]); + pctx->error = true; + } else { + printf("response ok, index: %d\r\n", pctx->index); + pctx->success = true; + } + } + } + } + } else if (evs[i].events & EPOLLOUT) { + pctx = (socket_ctx *) evs[i].data.ptr; + if (pctx->state == connecting) { + ret = proc_pending_error(pctx); + if (ret == 0) { + printf("client connected, index: %d\r\n", pctx->index); + pctx->state = connected; + } else { + printf("client connect failed, index: %d\r\n", pctx->index); + (void) del_event(epfd, pctx->sockfd); + close(pctx->sockfd); + pctx->sockfd = -1; + count--; + + continue; + } + } + + for ( ;; ) { + nsent = send(pctx->sockfd, send_buf[pctx->index] + pctx->nsent, pctx->nlen - pctx->nsent, 0); + if (nsent == -1) { + if (errno != EAGAIN && errno != EINTR) { + printf("failed to send, index: %d\r\n", pctx->index); + (void) del_event(epfd, pctx->sockfd); + close(pctx->sockfd); + pctx->sockfd = -1; + count--; + } + + break; + } + + if (nsent == (int) (pctx->nlen - pctx->nsent)) { + printf("request done, request: %s, index: %d\r\n", send_buf[pctx->index], pctx->index); + + pctx->state = datasent; + + events = EPOLLET | EPOLLIN; + (void) mod_event(epfd, pctx->sockfd, events, (void *)pctx); + + break; + } else { + pctx->nsent += nsent; + } + } + } else { + pctx = (socket_ctx *) evs[i].data.ptr; + printf("unknown event(%u), index: %d\r\n", evs[i].events, pctx->index); + (void) del_event(epfd, pctx->sockfd); + close(pctx->sockfd); + pctx->sockfd = -1; + count--; + } + } + } + } + +failed: + + if (epfd > 0) { + close(epfd); + } + + close_sockets(ctx, REQ_CLI_COUNT); + + return 0; +} diff --git a/tests/http/restful/http_drop_db.c b/tests/http/restful/http_drop_db.c new file mode 100644 index 0000000000..f82db901dd --- /dev/null +++ b/tests/http/restful/http_drop_db.c @@ -0,0 +1,433 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +#define RECV_MAX_LINE 2048 +#define ITEM_MAX_LINE 128 +#define REQ_MAX_LINE 2048 +#define REQ_CLI_COUNT 100 + + +typedef enum +{ + uninited, + connecting, + connected, + datasent +} conn_stat; + + +typedef enum +{ + false, + true +} bool; + + +typedef unsigned short u16_t; +typedef unsigned int u32_t; + + +typedef struct +{ + int sockfd; + int index; + conn_stat state; + size_t nsent; + size_t nrecv; + size_t nlen; + bool error; + bool success; + struct sockaddr_in serv_addr; +} socket_ctx; + + +int set_nonblocking(int sockfd) +{ + int ret; + + ret = fcntl(sockfd, F_SETFL, fcntl(sockfd, F_GETFL) | O_NONBLOCK); + if (ret == -1) { + printf("failed to fcntl for %d\r\n", sockfd); + return ret; + } + + return ret; +} + + +int create_socket(const char *ip, const u16_t port, socket_ctx *pctx) +{ + int ret; + + if (ip == NULL || port == 0 || pctx == NULL) { + printf("invalid parameter\r\n"); + return -1; + } + + pctx->sockfd = socket(AF_INET, SOCK_STREAM, 0); + if (pctx->sockfd == -1) { + printf("failed to create 
socket\r\n"); + return -1; + } + + bzero(&pctx->serv_addr, sizeof(struct sockaddr_in)); + + pctx->serv_addr.sin_family = AF_INET; + pctx->serv_addr.sin_port = htons(port); + + ret = inet_pton(AF_INET, ip, &pctx->serv_addr.sin_addr); + if (ret <= 0) { + printf("inet_pton error, ip: %s\r\n", ip); + return -1; + } + + ret = set_nonblocking(pctx->sockfd); + if (ret == -1) { + printf("failed to set %d as nonblocking\r\n", pctx->sockfd); + return -1; + } + + return pctx->sockfd; +} + + +void close_sockets(socket_ctx *pctx, int cnt) +{ + int i; + + if (pctx == NULL) { + return; + } + + for (i = 0; i < cnt; i++) { + if (pctx[i].sockfd > 0) { + close(pctx[i].sockfd); + pctx[i].sockfd = -1; + } + } +} + + +int proc_pending_error(socket_ctx *ctx) +{ + int ret; + int err; + socklen_t len; + + if (ctx == NULL) { + return 0; + } + + err = 0; + len = sizeof(int); + + ret = getsockopt(ctx->sockfd, SOL_SOCKET, SO_ERROR, (void *)&err, &len); + if (ret == -1) { + err = errno; + } + + if (err) { + printf("failed to connect at index: %d\r\n", ctx->index); + + close(ctx->sockfd); + ctx->sockfd = -1; + + return -1; + } + + return 0; +} + + +void build_http_request(char *ip, u16_t port, char *url, char *sql, char *req_buf, int len) +{ + char req_line[ITEM_MAX_LINE]; + char req_host[ITEM_MAX_LINE]; + char req_cont_type[ITEM_MAX_LINE]; + char req_cont_len[ITEM_MAX_LINE]; + const char* req_auth = "Authorization: Basic cm9vdDp0YW9zZGF0YQ==\r\n"; + + if (ip == NULL || port == 0 || + url == NULL || url[0] == '\0' || + sql == NULL || sql[0] == '\0' || + req_buf == NULL || len <= 0) + { + return; + } + + snprintf(req_line, ITEM_MAX_LINE, "POST %s HTTP/1.1\r\n", url); + snprintf(req_host, ITEM_MAX_LINE, "HOST: %s:%d\r\n", ip, port); + snprintf(req_cont_type, ITEM_MAX_LINE, "%s\r\n", "Content-Type: text/plain"); + snprintf(req_cont_len, ITEM_MAX_LINE, "Content-Length: %ld\r\n\r\n", strlen(sql)); + + snprintf(req_buf, len, "%s%s%s%s%s%s", req_line, req_host, req_auth, req_cont_type, req_cont_len, sql); +} + + +int add_event(int epfd, int sockfd, u32_t events, void *data) +{ + struct epoll_event evs_op; + + evs_op.data.ptr = data; + evs_op.events = events; + + return epoll_ctl(epfd, EPOLL_CTL_ADD, sockfd, &evs_op); +} + + +int mod_event(int epfd, int sockfd, u32_t events, void *data) +{ + struct epoll_event evs_op; + + evs_op.data.ptr = data; + evs_op.events = events; + + return epoll_ctl(epfd, EPOLL_CTL_MOD, sockfd, &evs_op); +} + + +int del_event(int epfd, int sockfd) +{ + struct epoll_event evs_op; + + evs_op.events = 0; + evs_op.data.ptr = NULL; + + return epoll_ctl(epfd, EPOLL_CTL_DEL, sockfd, &evs_op); +} + + +int main() +{ + int i; + int ret, n, nsent, nrecv; + int epfd; + u32_t events; + char *str; + socket_ctx *pctx, ctx[REQ_CLI_COUNT]; + char *ip = "127.0.0.1"; + char *url_prefix = "/rest/sql"; + char url[ITEM_MAX_LINE]; + u16_t port = 6041; + struct epoll_event evs[REQ_CLI_COUNT]; + char sql[REQ_MAX_LINE]; + char send_buf[REQ_CLI_COUNT][REQ_MAX_LINE + 5 * ITEM_MAX_LINE]; + char recv_buf[REQ_CLI_COUNT][RECV_MAX_LINE]; + int count; + + signal(SIGPIPE, SIG_IGN); + + for (i = 0; i < REQ_CLI_COUNT; i++) { + ctx[i].sockfd = -1; + ctx[i].index = i; + ctx[i].state = uninited; + ctx[i].nsent = 0; + ctx[i].nrecv = 0; + ctx[i].error = false; + ctx[i].success = false; + + memset(url, 0, ITEM_MAX_LINE); + memset(sql, 0, REQ_MAX_LINE); + memset(send_buf[i], 0, REQ_MAX_LINE + 5 * ITEM_MAX_LINE); + memset(recv_buf[i], 0, RECV_MAX_LINE); + + snprintf(url, ITEM_MAX_LINE, "%s/db%d", url_prefix, i); + snprintf(sql, REQ_MAX_LINE, 
"drop database if exists db%d", i); + + build_http_request(ip, port, url, sql, send_buf[i], REQ_MAX_LINE + 5 * ITEM_MAX_LINE); + + ctx[i].nlen = strlen(send_buf[i]); + } + + epfd = epoll_create(REQ_CLI_COUNT); + if (epfd <= 0) { + printf("failed to create epoll\r\n"); + goto failed; + } + + for (i = 0; i < REQ_CLI_COUNT; i++) { + ret = create_socket(ip, port, &ctx[i]); + if (ret == -1) { + printf("failed to create socket, index: %d\r\n", i); + goto failed; + } + } + + for (i = 0; i < REQ_CLI_COUNT; i++) { + events = EPOLLET | EPOLLIN | EPOLLOUT; + ret = add_event(epfd, ctx[i].sockfd, events, (void *) &ctx[i]); + if (ret == -1) { + printf("failed to add sockfd to epoll, index: %d\r\n", i); + goto failed; + } + } + + count = 0; + + for (i = 0; i < REQ_CLI_COUNT; i++) { + ret = connect(ctx[i].sockfd, (struct sockaddr *) &ctx[i].serv_addr, sizeof(ctx[i].serv_addr)); + if (ret == -1) { + if (errno != EINPROGRESS) { + printf("connect error, index: %d\r\n", ctx[i].index); + (void) del_event(epfd, ctx[i].sockfd); + close(ctx[i].sockfd); + ctx[i].sockfd = -1; + } else { + ctx[i].state = connecting; + count++; + } + + continue; + } + + ctx[i].state = connected; + count++; + } + + printf("clients: %d\r\n", count); + + while (count > 0) { + n = epoll_wait(epfd, evs, REQ_CLI_COUNT, 0); + if (n == -1) { + if (errno != EINTR) { + printf("epoll_wait error, reason: %s\r\n", strerror(errno)); + break; + } + } else { + for (i = 0; i < n; i++) { + if (evs[i].events & EPOLLERR) { + pctx = (socket_ctx *) evs[i].data.ptr; + printf("event error, index: %d\r\n", pctx->index); + close(pctx->sockfd); + pctx->sockfd = -1; + count--; + } else if (evs[i].events & EPOLLIN) { + pctx = (socket_ctx *) evs[i].data.ptr; + if (pctx->state == connecting) { + ret = proc_pending_error(pctx); + if (ret == 0) { + printf("client connected, index: %d\r\n", pctx->index); + pctx->state = connected; + } else { + printf("client connect failed, index: %d\r\n", pctx->index); + (void) del_event(epfd, pctx->sockfd); + close(pctx->sockfd); + pctx->sockfd = -1; + count--; + + continue; + } + } + + for ( ;; ) { + nrecv = recv(pctx->sockfd, recv_buf[pctx->index] + pctx->nrecv, RECV_MAX_LINE, 0); + if (nrecv == -1) { + if (errno != EAGAIN && errno != EINTR) { + printf("failed to recv, index: %d, reason: %s\r\n", pctx->index, strerror(errno)); + (void) del_event(epfd, pctx->sockfd); + close(pctx->sockfd); + pctx->sockfd = -1; + count--; + } + + break; + } else if (nrecv == 0) { + printf("peer closed connection, index: %d\r\n", pctx->index); + (void) del_event(epfd, pctx->sockfd); + close(pctx->sockfd); + pctx->sockfd = -1; + count--; + break; + } + + pctx->nrecv += nrecv; + if (pctx->nrecv > 12) { + if (pctx->error == false && pctx->success == false) { + str = recv_buf[pctx->index] + 9; + if (str[0] != '2' || str[1] != '0' || str[2] != '0') { + printf("response error, index: %d, recv: %s\r\n", pctx->index, recv_buf[pctx->index]); + pctx->error = true; + } else { + printf("response ok, index: %d\r\n", pctx->index); + pctx->success = true; + } + } + } + } + } else if (evs[i].events & EPOLLOUT) { + pctx = (socket_ctx *) evs[i].data.ptr; + if (pctx->state == connecting) { + ret = proc_pending_error(pctx); + if (ret == 0) { + printf("client connected, index: %d\r\n", pctx->index); + pctx->state = connected; + } else { + printf("client connect failed, index: %d\r\n", pctx->index); + (void) del_event(epfd, pctx->sockfd); + close(pctx->sockfd); + pctx->sockfd = -1; + count--; + + continue; + } + } + + for ( ;; ) { + nsent = send(pctx->sockfd, 
send_buf[pctx->index] + pctx->nsent, pctx->nlen - pctx->nsent, 0); + if (nsent == -1) { + if (errno != EAGAIN && errno != EINTR) { + printf("failed to send, index: %d\r\n", pctx->index); + (void) del_event(epfd, pctx->sockfd); + close(pctx->sockfd); + pctx->sockfd = -1; + count--; + } + + break; + } + + if (nsent == (int) (pctx->nlen - pctx->nsent)) { + printf("request done, request: %s, index: %d\r\n", send_buf[pctx->index], pctx->index); + + pctx->state = datasent; + + events = EPOLLET | EPOLLIN; + (void) mod_event(epfd, pctx->sockfd, events, (void *)pctx); + + break; + } else { + pctx->nsent += nsent; + } + } + } else { + pctx = (socket_ctx *) evs[i].data.ptr; + printf("unknown event(%u), index: %d\r\n", evs[i].events, pctx->index); + (void) del_event(epfd, pctx->sockfd); + close(pctx->sockfd); + pctx->sockfd = -1; + count--; + } + } + } + } + +failed: + + if (epfd > 0) { + close(epfd); + } + + close_sockets(ctx, REQ_CLI_COUNT); + + return 0; +} diff --git a/tests/http/restful/http_insert_tb.c b/tests/http/restful/http_insert_tb.c new file mode 100644 index 0000000000..f9590d856c --- /dev/null +++ b/tests/http/restful/http_insert_tb.c @@ -0,0 +1,455 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +#define RECV_MAX_LINE 2048 +#define ITEM_MAX_LINE 128 +#define REQ_MAX_LINE 4096 +#define REQ_CLI_COUNT 100 + + +typedef enum +{ + uninited, + connecting, + connected, + datasent +} conn_stat; + + +typedef enum +{ + false, + true +} bool; + + +typedef struct +{ + int sockfd; + int index; + conn_stat state; + size_t nsent; + size_t nrecv; + size_t nlen; + bool error; + bool success; + struct sockaddr_in serv_addr; +} socket_ctx; + + +int set_nonblocking(int sockfd) +{ + int ret; + + ret = fcntl(sockfd, F_SETFL, fcntl(sockfd, F_GETFL) | O_NONBLOCK); + if (ret == -1) { + printf("failed to fcntl for %d\r\n", sockfd); + return ret; + } + + return ret; +} + + +int create_socket(const char *ip, const uint16_t port, socket_ctx *pctx) +{ + int ret; + + if (ip == NULL || port == 0 || pctx == NULL) { + printf("invalid parameter\r\n"); + return -1; + } + + pctx->sockfd = socket(AF_INET, SOCK_STREAM, 0); + if (pctx->sockfd == -1) { + printf("failed to create socket\r\n"); + return -1; + } + + bzero(&pctx->serv_addr, sizeof(struct sockaddr_in)); + + pctx->serv_addr.sin_family = AF_INET; + pctx->serv_addr.sin_port = htons(port); + + ret = inet_pton(AF_INET, ip, &pctx->serv_addr.sin_addr); + if (ret <= 0) { + printf("inet_pton error, ip: %s\r\n", ip); + return -1; + } + + ret = set_nonblocking(pctx->sockfd); + if (ret == -1) { + printf("failed to set %d as nonblocking\r\n", pctx->sockfd); + return -1; + } + + return pctx->sockfd; +} + + +void close_sockets(socket_ctx *pctx, int cnt) +{ + int i; + + if (pctx == NULL) { + return; + } + + for (i = 0; i < cnt; i++) { + if (pctx[i].sockfd > 0) { + close(pctx[i].sockfd); + pctx[i].sockfd = -1; + } + } +} + + +int proc_pending_error(socket_ctx *ctx) +{ + int ret; + int err; + socklen_t len; + + if (ctx == NULL) { + return 0; + } + + err = 0; + len = sizeof(int); + + ret = getsockopt(ctx->sockfd, SOL_SOCKET, SO_ERROR, (void *)&err, &len); + if (ret == -1) { + err = errno; + } + + if (err) { + printf("failed to connect at index: %d\r\n", ctx->index); + + close(ctx->sockfd); + ctx->sockfd = -1; + + return -1; + } + + return 0; +} + + +void build_http_request(char *ip, uint16_t port, char *url, char *sql, char *req_buf, int len) +{ + char req_line[ITEM_MAX_LINE]; + char 
req_host[ITEM_MAX_LINE]; + char req_cont_type[ITEM_MAX_LINE]; + char req_cont_len[ITEM_MAX_LINE]; + const char* req_auth = "Authorization: Basic cm9vdDp0YW9zZGF0YQ==\r\n"; + + if (ip == NULL || port == 0 || + url == NULL || url[0] == '\0' || + sql == NULL || sql[0] == '\0' || + req_buf == NULL || len <= 0) + { + return; + } + + snprintf(req_line, ITEM_MAX_LINE, "POST %s HTTP/1.1\r\n", url); + snprintf(req_host, ITEM_MAX_LINE, "HOST: %s:%d\r\n", ip, port); + snprintf(req_cont_type, ITEM_MAX_LINE, "%s\r\n", "Content-Type: text/plain"); + snprintf(req_cont_len, ITEM_MAX_LINE, "Content-Length: %ld\r\n\r\n", strlen(sql)); + + snprintf(req_buf, len, "%s%s%s%s%s%s", req_line, req_host, req_auth, req_cont_type, req_cont_len, sql); +} + + +int add_event(int epfd, int sockfd, uint32_t events, void *data) +{ + struct epoll_event evs_op; + + evs_op.data.ptr = data; + evs_op.events = events; + + return epoll_ctl(epfd, EPOLL_CTL_ADD, sockfd, &evs_op); +} + + +int mod_event(int epfd, int sockfd, uint32_t events, void *data) +{ + struct epoll_event evs_op; + + evs_op.data.ptr = data; + evs_op.events = events; + + return epoll_ctl(epfd, EPOLL_CTL_MOD, sockfd, &evs_op); +} + + +int del_event(int epfd, int sockfd) +{ + struct epoll_event evs_op; + + evs_op.events = 0; + evs_op.data.ptr = NULL; + + return epoll_ctl(epfd, EPOLL_CTL_DEL, sockfd, &evs_op); +} + + +int main() +{ + int i; + int ret, n, nsent, nrecv, offset; + int epfd; + uint32_t events; + char *str; + socket_ctx *pctx, ctx[REQ_CLI_COUNT]; + char *ip = "127.0.0.1"; + char *url_prefix = "/rest/sql"; + char url[ITEM_MAX_LINE]; + uint16_t port = 6041; + struct epoll_event evs[REQ_CLI_COUNT]; + struct timeval now; + int64_t start_time; + char sql[REQ_MAX_LINE]; + char send_buf[REQ_CLI_COUNT][REQ_MAX_LINE + 5 * ITEM_MAX_LINE]; + char recv_buf[REQ_CLI_COUNT][RECV_MAX_LINE]; + int count; + + signal(SIGPIPE, SIG_IGN); + + gettimeofday(&now, NULL); + start_time = now.tv_sec * 1000000 + now.tv_usec; + + for (i = 0; i < REQ_CLI_COUNT; i++) { + ctx[i].sockfd = -1; + ctx[i].index = i; + ctx[i].state = uninited; + ctx[i].nsent = 0; + ctx[i].nrecv = 0; + ctx[i].error = false; + ctx[i].success = false; + + memset(url, 0, ITEM_MAX_LINE); + memset(sql, 0, REQ_MAX_LINE); + memset(send_buf[i], 0, REQ_MAX_LINE + 5 * ITEM_MAX_LINE); + memset(recv_buf[i], 0, RECV_MAX_LINE); + + snprintf(url, ITEM_MAX_LINE, "%s/db%d", url_prefix, i); + + offset = 0; + + ret = snprintf(sql + offset, REQ_MAX_LINE - offset, "insert into tb%d values ", i); + if (ret <= 0) { + printf("failed to snprintf for sql(prefix), index: %d\r\n ", i); + goto failed; + } + + offset += ret; + + while (offset < REQ_MAX_LINE - 128) { + ret = snprintf(sql + offset, REQ_MAX_LINE - offset, "(%"PRId64", %d, 'test_string_%d') ", start_time + i, i, i); + if (ret <= 0) { + printf("failed to snprintf for sql(values), index: %d\r\n ", i); + goto failed; + } + + offset += ret; + } + + build_http_request(ip, port, url, sql, send_buf[i], REQ_MAX_LINE + 5 * ITEM_MAX_LINE); + + ctx[i].nlen = strlen(send_buf[i]); + } + + epfd = epoll_create(REQ_CLI_COUNT); + if (epfd <= 0) { + printf("failed to create epoll\r\n"); + goto failed; + } + + for (i = 0; i < REQ_CLI_COUNT; i++) { + ret = create_socket(ip, port, &ctx[i]); + if (ret == -1) { + printf("failed to create socket, index: %d\r\n", i); + goto failed; + } + } + + for (i = 0; i < REQ_CLI_COUNT; i++) { + events = EPOLLET | EPOLLIN | EPOLLOUT; + ret = add_event(epfd, ctx[i].sockfd, events, (void *) &ctx[i]); + if (ret == -1) { + printf("failed to add sockfd to epoll, index: 
%d\r\n", i); + goto failed; + } + } + + count = 0; + + for (i = 0; i < REQ_CLI_COUNT; i++) { + ret = connect(ctx[i].sockfd, (struct sockaddr *) &ctx[i].serv_addr, sizeof(ctx[i].serv_addr)); + if (ret == -1) { + if (errno != EINPROGRESS) { + printf("connect error, index: %d\r\n", ctx[i].index); + (void) del_event(epfd, ctx[i].sockfd); + close(ctx[i].sockfd); + ctx[i].sockfd = -1; + } else { + ctx[i].state = connecting; + count++; + } + + continue; + } + + ctx[i].state = connected; + count++; + } + + printf("clients: %d\r\n", count); + + while (count > 0) { + n = epoll_wait(epfd, evs, REQ_CLI_COUNT, 0); + if (n == -1) { + if (errno != EINTR) { + printf("epoll_wait error, reason: %s\r\n", strerror(errno)); + break; + } + } else { + for (i = 0; i < n; i++) { + if (evs[i].events & EPOLLERR) { + pctx = (socket_ctx *) evs[i].data.ptr; + printf("event error, index: %d\r\n", pctx->index); + close(pctx->sockfd); + pctx->sockfd = -1; + count--; + } else if (evs[i].events & EPOLLIN) { + pctx = (socket_ctx *) evs[i].data.ptr; + if (pctx->state == connecting) { + ret = proc_pending_error(pctx); + if (ret == 0) { + printf("client connected, index: %d\r\n", pctx->index); + pctx->state = connected; + } else { + printf("client connect failed, index: %d\r\n", pctx->index); + (void) del_event(epfd, pctx->sockfd); + close(pctx->sockfd); + pctx->sockfd = -1; + count--; + + continue; + } + } + + for ( ;; ) { + nrecv = recv(pctx->sockfd, recv_buf[pctx->index] + pctx->nrecv, RECV_MAX_LINE, 0); + if (nrecv == -1) { + if (errno != EAGAIN && errno != EINTR) { + printf("failed to recv, index: %d, reason: %s\r\n", pctx->index, strerror(errno)); + (void) del_event(epfd, pctx->sockfd); + close(pctx->sockfd); + pctx->sockfd = -1; + count--; + } + + break; + } else if (nrecv == 0) { + printf("peer closed connection, index: %d\r\n", pctx->index); + (void) del_event(epfd, pctx->sockfd); + close(pctx->sockfd); + pctx->sockfd = -1; + count--; + break; + } + + pctx->nrecv += nrecv; + if (pctx->nrecv > 12) { + if (pctx->error == false && pctx->success == false) { + str = recv_buf[pctx->index] + 9; + if (str[0] != '2' || str[1] != '0' || str[2] != '0') { + printf("response error, index: %d, recv: %s\r\n", pctx->index, recv_buf[pctx->index]); + pctx->error = true; + } else { + printf("response ok, index: %d\r\n", pctx->index); + pctx->success = true; + } + } + } + } + } else if (evs[i].events & EPOLLOUT) { + pctx = (socket_ctx *) evs[i].data.ptr; + if (pctx->state == connecting) { + ret = proc_pending_error(pctx); + if (ret == 0) { + printf("client connected, index: %d\r\n", pctx->index); + pctx->state = connected; + } else { + printf("client connect failed, index: %d\r\n", pctx->index); + (void) del_event(epfd, pctx->sockfd); + close(pctx->sockfd); + pctx->sockfd = -1; + count--; + + continue; + } + } + + for ( ;; ) { + nsent = send(pctx->sockfd, send_buf[pctx->index] + pctx->nsent, pctx->nlen - pctx->nsent, 0); + if (nsent == -1) { + if (errno != EAGAIN && errno != EINTR) { + printf("failed to send, index: %d\r\n", pctx->index); + (void) del_event(epfd, pctx->sockfd); + close(pctx->sockfd); + pctx->sockfd = -1; + count--; + } + + break; + } + + if (nsent == (int) (pctx->nlen - pctx->nsent)) { + printf("request done, request: %s, index: %d\r\n", send_buf[pctx->index], pctx->index); + + pctx->state = datasent; + + events = EPOLLET | EPOLLIN; + (void) mod_event(epfd, pctx->sockfd, events, (void *)pctx); + + break; + } else { + pctx->nsent += nsent; + } + } + } else { + pctx = (socket_ctx *) evs[i].data.ptr; + printf("unknown 
event(%u), index: %d\r\n", evs[i].events, pctx->index); + (void) del_event(epfd, pctx->sockfd); + close(pctx->sockfd); + pctx->sockfd = -1; + count--; + } + } + } + } + +failed: + + if (epfd > 0) { + close(epfd); + } + + close_sockets(ctx, REQ_CLI_COUNT); + + return 0; +} diff --git a/tests/http/restful/http_query_tb.c b/tests/http/restful/http_query_tb.c new file mode 100644 index 0000000000..e7ac0d4b01 --- /dev/null +++ b/tests/http/restful/http_query_tb.c @@ -0,0 +1,432 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +#define RECV_MAX_LINE 2048 +#define ITEM_MAX_LINE 128 +#define REQ_MAX_LINE 4096 +#define REQ_CLI_COUNT 100 + + +typedef enum +{ + uninited, + connecting, + connected, + datasent +} conn_stat; + + +typedef enum +{ + false, + true +} bool; + + +typedef struct +{ + int sockfd; + int index; + conn_stat state; + size_t nsent; + size_t nrecv; + size_t nlen; + bool error; + bool success; + struct sockaddr_in serv_addr; +} socket_ctx; + + +int set_nonblocking(int sockfd) +{ + int ret; + + ret = fcntl(sockfd, F_SETFL, fcntl(sockfd, F_GETFL) | O_NONBLOCK); + if (ret == -1) { + printf("failed to fcntl for %d\r\n", sockfd); + return ret; + } + + return ret; +} + + +int create_socket(const char *ip, const uint16_t port, socket_ctx *pctx) +{ + int ret; + + if (ip == NULL || port == 0 || pctx == NULL) { + printf("invalid parameter\r\n"); + return -1; + } + + pctx->sockfd = socket(AF_INET, SOCK_STREAM, 0); + if (pctx->sockfd == -1) { + printf("failed to create socket\r\n"); + return -1; + } + + bzero(&pctx->serv_addr, sizeof(struct sockaddr_in)); + + pctx->serv_addr.sin_family = AF_INET; + pctx->serv_addr.sin_port = htons(port); + + ret = inet_pton(AF_INET, ip, &pctx->serv_addr.sin_addr); + if (ret <= 0) { + printf("inet_pton error, ip: %s\r\n", ip); + return -1; + } + + ret = set_nonblocking(pctx->sockfd); + if (ret == -1) { + printf("failed to set %d as nonblocking\r\n", pctx->sockfd); + return -1; + } + + return pctx->sockfd; +} + + +void close_sockets(socket_ctx *pctx, int cnt) +{ + int i; + + if (pctx == NULL) { + return; + } + + for (i = 0; i < cnt; i++) { + if (pctx[i].sockfd > 0) { + close(pctx[i].sockfd); + pctx[i].sockfd = -1; + } + } +} + + +int proc_pending_error(socket_ctx *ctx) +{ + int ret; + int err; + socklen_t len; + + if (ctx == NULL) { + return 0; + } + + err = 0; + len = sizeof(int); + + ret = getsockopt(ctx->sockfd, SOL_SOCKET, SO_ERROR, (void *)&err, &len); + if (ret == -1) { + err = errno; + } + + if (err) { + printf("failed to connect at index: %d\r\n", ctx->index); + + close(ctx->sockfd); + ctx->sockfd = -1; + + return -1; + } + + return 0; +} + + +void build_http_request(char *ip, uint16_t port, char *url, char *sql, char *req_buf, int len) +{ + char req_line[ITEM_MAX_LINE]; + char req_host[ITEM_MAX_LINE]; + char req_cont_type[ITEM_MAX_LINE]; + char req_cont_len[ITEM_MAX_LINE]; + const char* req_auth = "Authorization: Basic cm9vdDp0YW9zZGF0YQ==\r\n"; + + if (ip == NULL || port == 0 || + url == NULL || url[0] == '\0' || + sql == NULL || sql[0] == '\0' || + req_buf == NULL || len <= 0) + { + return; + } + + snprintf(req_line, ITEM_MAX_LINE, "POST %s HTTP/1.1\r\n", url); + snprintf(req_host, ITEM_MAX_LINE, "HOST: %s:%d\r\n", ip, port); + snprintf(req_cont_type, ITEM_MAX_LINE, "%s\r\n", "Content-Type: text/plain"); + snprintf(req_cont_len, ITEM_MAX_LINE, "Content-Length: %ld\r\n\r\n", strlen(sql)); + + snprintf(req_buf, len, "%s%s%s%s%s%s", req_line, req_host, req_auth, 
req_cont_type, req_cont_len, sql); +} + + +int add_event(int epfd, int sockfd, uint32_t events, void *data) +{ + struct epoll_event evs_op; + + evs_op.data.ptr = data; + evs_op.events = events; + + return epoll_ctl(epfd, EPOLL_CTL_ADD, sockfd, &evs_op); +} + + +int mod_event(int epfd, int sockfd, uint32_t events, void *data) +{ + struct epoll_event evs_op; + + evs_op.data.ptr = data; + evs_op.events = events; + + return epoll_ctl(epfd, EPOLL_CTL_MOD, sockfd, &evs_op); +} + + +int del_event(int epfd, int sockfd) +{ + struct epoll_event evs_op; + + evs_op.events = 0; + evs_op.data.ptr = NULL; + + return epoll_ctl(epfd, EPOLL_CTL_DEL, sockfd, &evs_op); +} + + +int main() +{ + int i; + int ret, n, nsent, nrecv; + int epfd; + uint32_t events; + char *str; + socket_ctx *pctx, ctx[REQ_CLI_COUNT]; + char *ip = "127.0.0.1"; + char *url_prefix = "/rest/sql"; + char url[ITEM_MAX_LINE]; + uint16_t port = 6041; + struct epoll_event evs[REQ_CLI_COUNT]; + char sql[REQ_MAX_LINE]; + char send_buf[REQ_CLI_COUNT][REQ_MAX_LINE + 5 * ITEM_MAX_LINE]; + char recv_buf[REQ_CLI_COUNT][RECV_MAX_LINE]; + int count; + + signal(SIGPIPE, SIG_IGN); + + for (i = 0; i < REQ_CLI_COUNT; i++) { + ctx[i].sockfd = -1; + ctx[i].index = i; + ctx[i].state = uninited; + ctx[i].nsent = 0; + ctx[i].nrecv = 0; + ctx[i].error = false; + ctx[i].success = false; + + memset(url, 0, ITEM_MAX_LINE); + memset(sql, 0, REQ_MAX_LINE); + memset(send_buf[i], 0, REQ_MAX_LINE + 5 * ITEM_MAX_LINE); + memset(recv_buf[i], 0, RECV_MAX_LINE); + + snprintf(url, ITEM_MAX_LINE, "%s/db%d", url_prefix, i); + + snprintf(sql, REQ_MAX_LINE, "select count(*) from tb%d", i); + + build_http_request(ip, port, url, sql, send_buf[i], REQ_MAX_LINE + 5 * ITEM_MAX_LINE); + + ctx[i].nlen = strlen(send_buf[i]); + } + + epfd = epoll_create(REQ_CLI_COUNT); + if (epfd <= 0) { + printf("failed to create epoll\r\n"); + goto failed; + } + + for (i = 0; i < REQ_CLI_COUNT; i++) { + ret = create_socket(ip, port, &ctx[i]); + if (ret == -1) { + printf("failed to create socket, index: %d\r\n", i); + goto failed; + } + } + + for (i = 0; i < REQ_CLI_COUNT; i++) { + events = EPOLLET | EPOLLIN | EPOLLOUT; + ret = add_event(epfd, ctx[i].sockfd, events, (void *) &ctx[i]); + if (ret == -1) { + printf("failed to add sockfd to epoll, index: %d\r\n", i); + goto failed; + } + } + + count = 0; + + for (i = 0; i < REQ_CLI_COUNT; i++) { + ret = connect(ctx[i].sockfd, (struct sockaddr *) &ctx[i].serv_addr, sizeof(ctx[i].serv_addr)); + if (ret == -1) { + if (errno != EINPROGRESS) { + printf("connect error, index: %d\r\n", ctx[i].index); + (void) del_event(epfd, ctx[i].sockfd); + close(ctx[i].sockfd); + ctx[i].sockfd = -1; + } else { + ctx[i].state = connecting; + count++; + } + + continue; + } + + ctx[i].state = connected; + count++; + } + + printf("clients: %d\r\n", count); + + while (count > 0) { + n = epoll_wait(epfd, evs, REQ_CLI_COUNT, 2); + if (n == -1) { + if (errno != EINTR) { + printf("epoll_wait error, reason: %s\r\n", strerror(errno)); + break; + } + } else { + for (i = 0; i < n; i++) { + if (evs[i].events & EPOLLERR) { + pctx = (socket_ctx *) evs[i].data.ptr; + printf("event error, index: %d\r\n", pctx->index); + close(pctx->sockfd); + pctx->sockfd = -1; + count--; + } else if (evs[i].events & EPOLLIN) { + pctx = (socket_ctx *) evs[i].data.ptr; + if (pctx->state == connecting) { + ret = proc_pending_error(pctx); + if (ret == 0) { + printf("client connected, index: %d\r\n", pctx->index); + pctx->state = connected; + } else { + printf("client connect failed, index: %d\r\n", pctx->index); + 
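+                            /* A readiness event on a socket still in the connecting state means the non-blocking connect() has finished; SO_ERROR (checked in proc_pending_error) was non-zero, so this client is torn down and removed from the active count. */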
(void) del_event(epfd, pctx->sockfd); + close(pctx->sockfd); + pctx->sockfd = -1; + count--; + + continue; + } + } + + for ( ;; ) { + nrecv = recv(pctx->sockfd, recv_buf[pctx->index] + pctx->nrecv, RECV_MAX_LINE, 0); + if (nrecv == -1) { + if (errno != EAGAIN && errno != EINTR) { + printf("failed to recv, index: %d, reason: %s\r\n", pctx->index, strerror(errno)); + (void) del_event(epfd, pctx->sockfd); + close(pctx->sockfd); + pctx->sockfd = -1; + count--; + } + + break; + } else if (nrecv == 0) { + printf("peer closed connection, index: %d\r\n", pctx->index); + (void) del_event(epfd, pctx->sockfd); + close(pctx->sockfd); + pctx->sockfd = -1; + count--; + break; + } + + pctx->nrecv += nrecv; + if (pctx->nrecv > 12) { + if (pctx->error == false && pctx->success == false) { + str = recv_buf[pctx->index] + 9; + if (str[0] != '2' || str[1] != '0' || str[2] != '0') { + printf("response error, index: %d, recv: %s\r\n", pctx->index, recv_buf[pctx->index]); + pctx->error = true; + } else { + printf("response ok, index: %d\r\n", pctx->index); + pctx->success = true; + } + } + } + } + } else if (evs[i].events & EPOLLOUT) { + pctx = (socket_ctx *) evs[i].data.ptr; + if (pctx->state == connecting) { + ret = proc_pending_error(pctx); + if (ret == 0) { + printf("client connected, index: %d\r\n", pctx->index); + pctx->state = connected; + } else { + printf("client connect failed, index: %d\r\n", pctx->index); + (void) del_event(epfd, pctx->sockfd); + close(pctx->sockfd); + pctx->sockfd = -1; + count--; + + continue; + } + } + + for ( ;; ) { + nsent = send(pctx->sockfd, send_buf[pctx->index] + pctx->nsent, pctx->nlen - pctx->nsent, 0); + if (nsent == -1) { + if (errno != EAGAIN && errno != EINTR) { + printf("failed to send, index: %d\r\n", pctx->index); + (void) del_event(epfd, pctx->sockfd); + close(pctx->sockfd); + pctx->sockfd = -1; + count--; + } + + break; + } + + if (nsent == (int) (pctx->nlen - pctx->nsent)) { + printf("request done, request: %s, index: %d\r\n", send_buf[pctx->index], pctx->index); + + pctx->state = datasent; + + events = EPOLLET | EPOLLIN; + (void) mod_event(epfd, pctx->sockfd, events, (void *)pctx); + + break; + } else { + pctx->nsent += nsent; + } + } + } else { + pctx = (socket_ctx *) evs[i].data.ptr; + printf("unknown event(%u), index: %d\r\n", evs[i].events, pctx->index); + (void) del_event(epfd, pctx->sockfd); + close(pctx->sockfd); + pctx->sockfd = -1; + count--; + } + } + } + } + +failed: + + if (epfd > 0) { + close(epfd); + } + + close_sockets(ctx, REQ_CLI_COUNT); + + return 0; +} diff --git a/tests/http/restful/http_use_db.c b/tests/http/restful/http_use_db.c new file mode 100644 index 0000000000..3b27022470 --- /dev/null +++ b/tests/http/restful/http_use_db.c @@ -0,0 +1,430 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +#define RECV_MAX_LINE 2048 +#define ITEM_MAX_LINE 128 +#define REQ_MAX_LINE 2048 +#define REQ_CLI_COUNT 100 + + +typedef enum +{ + uninited, + connecting, + connected, + datasent +} conn_stat; + + +typedef enum +{ + false, + true +} bool; + + +typedef unsigned short u16_t; +typedef unsigned int u32_t; + + +typedef struct +{ + int sockfd; + int index; + conn_stat state; + size_t nsent; + size_t nrecv; + size_t nlen; + bool error; + bool success; + struct sockaddr_in serv_addr; +} socket_ctx; + + +int set_nonblocking(int sockfd) +{ + int ret; + + ret = fcntl(sockfd, F_SETFL, fcntl(sockfd, F_GETFL) | O_NONBLOCK); + if (ret == -1) { + printf("failed to fcntl for %d\r\n", sockfd); + return 
ret; + } + + return ret; +} + + +int create_socket(const char *ip, const u16_t port, socket_ctx *pctx) +{ + int ret; + + if (ip == NULL || port == 0 || pctx == NULL) { + printf("invalid parameter\r\n"); + return -1; + } + + pctx->sockfd = socket(AF_INET, SOCK_STREAM, 0); + if (pctx->sockfd == -1) { + printf("failed to create socket\r\n"); + return -1; + } + + bzero(&pctx->serv_addr, sizeof(struct sockaddr_in)); + + pctx->serv_addr.sin_family = AF_INET; + pctx->serv_addr.sin_port = htons(port); + + ret = inet_pton(AF_INET, ip, &pctx->serv_addr.sin_addr); + if (ret <= 0) { + printf("inet_pton error, ip: %s\r\n", ip); + return -1; + } + + ret = set_nonblocking(pctx->sockfd); + if (ret == -1) { + printf("failed to set %d as nonblocking\r\n", pctx->sockfd); + return -1; + } + + return pctx->sockfd; +} + + +void close_sockets(socket_ctx *pctx, int cnt) +{ + int i; + + if (pctx == NULL) { + return; + } + + for (i = 0; i < cnt; i++) { + if (pctx[i].sockfd > 0) { + close(pctx[i].sockfd); + pctx[i].sockfd = -1; + } + } +} + + +int proc_pending_error(socket_ctx *ctx) +{ + int ret; + int err; + socklen_t len; + + if (ctx == NULL) { + return 0; + } + + err = 0; + len = sizeof(int); + + ret = getsockopt(ctx->sockfd, SOL_SOCKET, SO_ERROR, (void *)&err, &len); + if (ret == -1) { + err = errno; + } + + if (err) { + printf("failed to connect at index: %d\r\n", ctx->index); + + close(ctx->sockfd); + ctx->sockfd = -1; + + return -1; + } + + return 0; +} + + +void build_http_request(char *ip, u16_t port, char *url, char *sql, char *req_buf, int len) +{ + char req_line[ITEM_MAX_LINE]; + char req_host[ITEM_MAX_LINE]; + char req_cont_type[ITEM_MAX_LINE]; + char req_cont_len[ITEM_MAX_LINE]; + const char* req_auth = "Authorization: Basic cm9vdDp0YW9zZGF0YQ==\r\n"; + + if (ip == NULL || port == 0 || + url == NULL || url[0] == '\0' || + sql == NULL || sql[0] == '\0' || + req_buf == NULL || len <= 0) + { + return; + } + + snprintf(req_line, ITEM_MAX_LINE, "POST %s HTTP/1.1\r\n", url); + snprintf(req_host, ITEM_MAX_LINE, "HOST: %s:%d\r\n", ip, port); + snprintf(req_cont_type, ITEM_MAX_LINE, "%s\r\n", "Content-Type: text/plain"); + snprintf(req_cont_len, ITEM_MAX_LINE, "Content-Length: %ld\r\n\r\n", strlen(sql)); + + snprintf(req_buf, len, "%s%s%s%s%s%s", req_line, req_host, req_auth, req_cont_type, req_cont_len, sql); +} + + +int add_event(int epfd, int sockfd, u32_t events, void *data) +{ + struct epoll_event evs_op; + + evs_op.data.ptr = data; + evs_op.events = events; + + return epoll_ctl(epfd, EPOLL_CTL_ADD, sockfd, &evs_op); +} + + +int mod_event(int epfd, int sockfd, u32_t events, void *data) +{ + struct epoll_event evs_op; + + evs_op.data.ptr = data; + evs_op.events = events; + + return epoll_ctl(epfd, EPOLL_CTL_MOD, sockfd, &evs_op); +} + + +int del_event(int epfd, int sockfd) +{ + struct epoll_event evs_op; + + evs_op.events = 0; + evs_op.data.ptr = NULL; + + return epoll_ctl(epfd, EPOLL_CTL_DEL, sockfd, &evs_op); +} + + +int main() +{ + int i; + int ret, n, nsent, nrecv; + int epfd; + u32_t events; + char *str; + socket_ctx *pctx, ctx[REQ_CLI_COUNT]; + char *ip = "127.0.0.1"; + char *url = "/rest/sql"; + u16_t port = 6041; + struct epoll_event evs[REQ_CLI_COUNT]; + char sql[REQ_MAX_LINE]; + char send_buf[REQ_CLI_COUNT][REQ_MAX_LINE + 5 * ITEM_MAX_LINE]; + char recv_buf[REQ_CLI_COUNT][RECV_MAX_LINE]; + int count; + + signal(SIGPIPE, SIG_IGN); + + for (i = 0; i < REQ_CLI_COUNT; i++) { + ctx[i].sockfd = -1; + ctx[i].index = i; + ctx[i].state = uninited; + ctx[i].nsent = 0; + ctx[i].nrecv = 0; + ctx[i].error = 
false; + ctx[i].success = false; + + memset(sql, 0, REQ_MAX_LINE); + memset(send_buf[i], 0, REQ_MAX_LINE + 5 * ITEM_MAX_LINE); + memset(recv_buf[i], 0, RECV_MAX_LINE); + + snprintf(sql, REQ_MAX_LINE, "use db%d", i); + + build_http_request(ip, port, url, sql, send_buf[i], REQ_MAX_LINE + 5 * ITEM_MAX_LINE); + + ctx[i].nlen = strlen(send_buf[i]); + } + + epfd = epoll_create(REQ_CLI_COUNT); + if (epfd <= 0) { + printf("failed to create epoll\r\n"); + goto failed; + } + + for (i = 0; i < REQ_CLI_COUNT; i++) { + ret = create_socket(ip, port, &ctx[i]); + if (ret == -1) { + printf("failed to create socket, index: %d\r\n", i); + goto failed; + } + } + + for (i = 0; i < REQ_CLI_COUNT; i++) { + events = EPOLLET | EPOLLIN | EPOLLOUT; + ret = add_event(epfd, ctx[i].sockfd, events, (void *) &ctx[i]); + if (ret == -1) { + printf("failed to add sockfd to epoll, index: %d\r\n", i); + goto failed; + } + } + + count = 0; + + for (i = 0; i < REQ_CLI_COUNT; i++) { + ret = connect(ctx[i].sockfd, (struct sockaddr *) &ctx[i].serv_addr, sizeof(ctx[i].serv_addr)); + if (ret == -1) { + if (errno != EINPROGRESS) { + printf("connect error, index: %d\r\n", ctx[i].index); + (void) del_event(epfd, ctx[i].sockfd); + close(ctx[i].sockfd); + ctx[i].sockfd = -1; + } else { + ctx[i].state = connecting; + count++; + } + + continue; + } + + ctx[i].state = connected; + count++; + } + + printf("clients: %d\r\n", count); + + while (count > 0) { + n = epoll_wait(epfd, evs, REQ_CLI_COUNT, 2); + if (n == -1) { + if (errno != EINTR) { + printf("epoll_wait error, reason: %s\r\n", strerror(errno)); + break; + } + } else { + for (i = 0; i < n; i++) { + if (evs[i].events & EPOLLERR) { + pctx = (socket_ctx *) evs[i].data.ptr; + printf("event error, index: %d\r\n", pctx->index); + close(pctx->sockfd); + pctx->sockfd = -1; + count--; + } else if (evs[i].events & EPOLLIN) { + pctx = (socket_ctx *) evs[i].data.ptr; + if (pctx->state == connecting) { + ret = proc_pending_error(pctx); + if (ret == 0) { + printf("client connected, index: %d\r\n", pctx->index); + pctx->state = connected; + } else { + printf("client connect failed, index: %d\r\n", pctx->index); + (void) del_event(epfd, pctx->sockfd); + close(pctx->sockfd); + pctx->sockfd = -1; + count--; + + continue; + } + } + + for ( ;; ) { + nrecv = recv(pctx->sockfd, recv_buf[pctx->index] + pctx->nrecv, RECV_MAX_LINE, 0); + if (nrecv == -1) { + if (errno != EAGAIN && errno != EINTR) { + printf("failed to recv, index: %d, reason: %s\r\n", pctx->index, strerror(errno)); + (void) del_event(epfd, pctx->sockfd); + close(pctx->sockfd); + pctx->sockfd = -1; + count--; + } + + break; + } else if (nrecv == 0) { + printf("peer closed connection, index: %d\r\n", pctx->index); + (void) del_event(epfd, pctx->sockfd); + close(pctx->sockfd); + pctx->sockfd = -1; + count--; + break; + } + + pctx->nrecv += nrecv; + if (pctx->nrecv > 12) { + if (pctx->error == false && pctx->success == false) { + str = recv_buf[pctx->index] + 9; + if (str[0] != '2' || str[1] != '0' || str[2] != '0') { + printf("response error, index: %d, recv: %s\r\n", pctx->index, recv_buf[pctx->index]); + pctx->error = true; + } else { + printf("response ok, index: %d\r\n", pctx->index); + pctx->success = true; + } + } + } + } + } else if (evs[i].events & EPOLLOUT) { + pctx = (socket_ctx *) evs[i].data.ptr; + if (pctx->state == connecting) { + ret = proc_pending_error(pctx); + if (ret == 0) { + printf("client connected, index: %d\r\n", pctx->index); + pctx->state = connected; + } else { + printf("client connect failed, index: %d\r\n", 
pctx->index); + (void) del_event(epfd, pctx->sockfd); + close(pctx->sockfd); + pctx->sockfd = -1; + count--; + + continue; + } + } + + for ( ;; ) { + nsent = send(pctx->sockfd, send_buf[pctx->index] + pctx->nsent, pctx->nlen - pctx->nsent, 0); + if (nsent == -1) { + if (errno != EAGAIN && errno != EINTR) { + printf("failed to send, index: %d\r\n", pctx->index); + (void) del_event(epfd, pctx->sockfd); + close(pctx->sockfd); + pctx->sockfd = -1; + count--; + } + + break; + } + + if (nsent == (int) (pctx->nlen - pctx->nsent)) { + printf("request done, request: %s, index: %d\r\n", send_buf[pctx->index], pctx->index); + + pctx->state = datasent; + + events = EPOLLET | EPOLLIN; + (void) mod_event(epfd, pctx->sockfd, events, (void *)pctx); + + break; + } else { + pctx->nsent += nsent; + } + } + } else { + pctx = (socket_ctx *) evs[i].data.ptr; + printf("unknown event(%u), index: %d\r\n", evs[i].events, pctx->index); + (void) del_event(epfd, pctx->sockfd); + close(pctx->sockfd); + pctx->sockfd = -1; + count--; + } + } + } + } + +failed: + + if (epfd > 0) { + close(epfd); + } + + close_sockets(ctx, REQ_CLI_COUNT); + + return 0; +} From 95b1135eaa55b2a5d4cbad1de6f78fb4be5c27e5 Mon Sep 17 00:00:00 2001 From: xywang Date: Mon, 23 Aug 2021 13:31:01 +0800 Subject: [PATCH 123/165] [TD-6001]: fixed missing zlib.h error --- src/client/CMakeLists.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/src/client/CMakeLists.txt b/src/client/CMakeLists.txt index 77417a24a4..0d06e5d39c 100644 --- a/src/client/CMakeLists.txt +++ b/src/client/CMakeLists.txt @@ -4,6 +4,7 @@ PROJECT(TDengine) INCLUDE_DIRECTORIES(inc) INCLUDE_DIRECTORIES(jni) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/query/inc) +INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/zlib-1.2.11/inc) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/plugins/http/inc) AUX_SOURCE_DIRECTORY(src SRC) From 6bc27b402d7124901adb0553fdfdb9aca5712ff6 Mon Sep 17 00:00:00 2001 From: xywang Date: Mon, 23 Aug 2021 13:35:11 +0800 Subject: [PATCH 124/165] [TD-6005]: fixed missing zlib.h error --- src/client/CMakeLists.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/src/client/CMakeLists.txt b/src/client/CMakeLists.txt index 77417a24a4..0d06e5d39c 100644 --- a/src/client/CMakeLists.txt +++ b/src/client/CMakeLists.txt @@ -4,6 +4,7 @@ PROJECT(TDengine) INCLUDE_DIRECTORIES(inc) INCLUDE_DIRECTORIES(jni) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/query/inc) +INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/zlib-1.2.11/inc) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/plugins/http/inc) AUX_SOURCE_DIRECTORY(src SRC) From 4ad2e2369545035208a3e0707605e6f1ad576644 Mon Sep 17 00:00:00 2001 From: afwerar <1296468573@qq.com> Date: Mon, 23 Aug 2021 14:53:13 +0800 Subject: [PATCH 125/165] [TD-6169]: windows dll client can not quit. 
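
Background for the src/os/src/detail/osFile.c hunk below: the Win32 call FlushFileBuffers() returns non-zero on success and 0 on failure, while taosFsync() is expected to follow the POSIX fsync() convention of returning 0 on success, so the result is negated. A minimal, hypothetical Windows-only sketch of this convention mapping (the flush_fd helper and the demo file name are illustrative assumptions, not TDengine code):

```c
#include <stdio.h>
#include <fcntl.h>
#include <io.h>
#include <sys/stat.h>
#include <windows.h>

/* Hypothetical fsync()-style wrapper: returns 0 on success, non-zero on failure. */
static int flush_fd(int fd) {
    HANDLE h = (HANDLE)_get_osfhandle(fd);
    /* FlushFileBuffers() returns non-zero on success and 0 on failure,
       so negate it to match the POSIX 0-on-success convention. */
    return !FlushFileBuffers(h);
}

int main(void) {
    int fd = _open("fsync_demo.tmp", _O_WRONLY | _O_CREAT, _S_IWRITE);
    if (fd == -1) {
        printf("open failed\r\n");
        return 1;
    }
    _write(fd, "demo\r\n", 6);
    printf("flush result: %d\r\n", flush_fd(fd));
    _close(fd);
    return 0;
}
```
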
--- src/connector/python/taos/cinterface.py | 7 ++- src/os/src/detail/osFile.c | 2 +- ...o-Run-Test-And-How-To-Add-New-Test-Case.md | 6 +++ tests/pytest/cluster/clusterSetup.py | 4 +- tests/pytest/dockerCluster/basic.py | 4 +- tests/pytest/insert/binary.py | 5 +- tests/pytest/query/queryWithTaosdKilled.py | 3 +- tests/pytest/test.py | 2 +- tests/pytest/util/dnodes-default.py | 39 ++++++++------- tests/pytest/util/dnodes-no-random-fail.py | 39 ++++++++------- tests/pytest/util/dnodes-random-fail.py | 39 ++++++++------- tests/pytest/util/dnodes.py | 50 +++++++++++-------- tests/pytest/wal/addOldWalTest.py | 2 +- 13 files changed, 120 insertions(+), 82 deletions(-) diff --git a/src/connector/python/taos/cinterface.py b/src/connector/python/taos/cinterface.py index 51e9a8667d..9bb1494f4e 100644 --- a/src/connector/python/taos/cinterface.py +++ b/src/connector/python/taos/cinterface.py @@ -3,6 +3,7 @@ import ctypes import platform import sys +import os from ctypes import * try: from typing import Any @@ -37,7 +38,11 @@ def _load_taos_darwin(): def _load_taos_windows(): - return ctypes.windll.LoadLibrary("taos") + if os.path.exists("c:\\Windows\\System32\\taos.dll"): + return ctypes.windll.LoadLibrary("taos") + else: + print("Please copy the \"C:\\TDengine\\driver\\taos.dll\" file to the \"C:\\windows\\system32\" directory.") + return ctypes.windll.LoadLibrary("C:\\TDengine\\driver\\taos.dll") def _load_taos(): diff --git a/src/os/src/detail/osFile.c b/src/os/src/detail/osFile.c index cc12968c72..57e0765750 100644 --- a/src/os/src/detail/osFile.c +++ b/src/os/src/detail/osFile.c @@ -368,7 +368,7 @@ int32_t taosFsync(FileFd fd) { HANDLE h = (HANDLE)_get_osfhandle(fd); - return FlushFileBuffers(h); + return !FlushFileBuffers(h); } int32_t taosRename(char *oldName, char *newName) { diff --git a/tests/How-To-Run-Test-And-How-To-Add-New-Test-Case.md b/tests/How-To-Run-Test-And-How-To-Add-New-Test-Case.md index 6845d091b5..c5229aa0e5 100644 --- a/tests/How-To-Run-Test-And-How-To-Add-New-Test-Case.md +++ b/tests/How-To-Run-Test-And-How-To-Add-New-Test-Case.md @@ -49,6 +49,12 @@ > before the script line. Then you can look for the core file in > \/tests/pytest after the program crash. +> Note3: if you are on the windows platform, you can install the git client, +> and then add %GitPath%\usr\bin;%GitPath%\mingw64\bin;%GitPath%\bin to the system +> environment variable Path. Note that %GitPath% is the installation path of git, +> such as C:\Program Files\Git. Then you can run the test script using the +> "sh smoketest.sh" command. 
+ ### How to add a new test case diff --git a/tests/pytest/cluster/clusterSetup.py b/tests/pytest/cluster/clusterSetup.py index 87414303f8..809e0e9d25 100644 --- a/tests/pytest/cluster/clusterSetup.py +++ b/tests/pytest/cluster/clusterSetup.py @@ -92,13 +92,13 @@ class Node: self.conn.run("yes|./install.sh") def configTaosd(self, taosConfigKey, taosConfigValue): - self.conn.run("sudo echo '%s %s' >> %s" % (taosConfigKey, taosConfigValue, "/etc/taos/taos.cfg")) + self.conn.run("sudo echo %s %s >> %s" % (taosConfigKey, taosConfigValue, "/etc/taos/taos.cfg")) def removeTaosConfig(self, taosConfigKey, taosConfigValue): self.conn.run("sudo sed -in-place -e '/%s %s/d' %s" % (taosConfigKey, taosConfigValue, "/etc/taos/taos.cfg")) def configHosts(self, ip, name): - self.conn.run("echo '%s %s' >> %s" % (ip, name, '/etc/hosts')) + self.conn.run("echo %s %s >> %s" % (ip, name, '/etc/hosts')) def removeData(self): try: diff --git a/tests/pytest/dockerCluster/basic.py b/tests/pytest/dockerCluster/basic.py index 871d69790d..5188aa4a80 100644 --- a/tests/pytest/dockerCluster/basic.py +++ b/tests/pytest/dockerCluster/basic.py @@ -113,7 +113,7 @@ class BuildDockerCluser: def cfg(self, option, value, nodeIndex): cfgPath = "%s/node%d/cfg/taos.cfg" % (self.dockerDir, nodeIndex) - cmd = "echo '%s %s' >> %s" % (option, value, cfgPath) + cmd = "echo %s %s >> %s" % (option, value, cfgPath) self.execCmd(cmd) def updateLocalhosts(self): @@ -122,7 +122,7 @@ class BuildDockerCluser: print(result) if result is None or result.isspace(): print("==========") - cmd = "echo '172.27.0.7 tdnode1' >> /etc/hosts" + cmd = "echo 172.27.0.7 tdnode1 >> /etc/hosts" display = "echo %s" % cmd self.execCmd(display) self.execCmd(cmd) diff --git a/tests/pytest/insert/binary.py b/tests/pytest/insert/binary.py index 0cbb7876c6..44e42bec03 100644 --- a/tests/pytest/insert/binary.py +++ b/tests/pytest/insert/binary.py @@ -53,8 +53,9 @@ class TDTestCase: tdLog.info("tdSql.checkData(0, 0, '34567')") tdSql.checkData(0, 0, '34567') tdLog.info("insert into tb values (now+4a, \"'';\")") - config_dir = subprocess.check_output(str("ps -ef |grep dnode1|grep -v grep |awk '{print $NF}'"), stderr=subprocess.STDOUT, shell=True).decode('utf-8').replace('\n', '') - result = ''.join(os.popen(r"""taos -s "insert into db.tb values (now+4a, \"'';\")" -c %s"""%(config_dir)).readlines()) + # config_dir = subprocess.check_output(str("ps -ef |grep dnode1|grep -v grep |awk '{print $NF}'"), stderr=subprocess.STDOUT, shell=True).decode('utf-8').replace('\n', '') + # result = ''.join(os.popen(r"""taos -s "insert into db.tb values (now+4a, \"'';\")" -c %s"""%(config_dir)).readlines()) + result = ''.join(os.popen(r"""taos -s "insert into db.tb values (now+4a, \"'';\")" -c %s"""%(tdSql.cursor._connection._config)).readlines()) if "Query OK" not in result: tdLog.exit("err:insert '';") tdLog.info('drop database db') tdSql.execute('drop database db') diff --git a/tests/pytest/query/queryWithTaosdKilled.py b/tests/pytest/query/queryWithTaosdKilled.py index 28f9b87636..fb1384093c 100644 --- a/tests/pytest/query/queryWithTaosdKilled.py +++ b/tests/pytest/query/queryWithTaosdKilled.py @@ -34,7 +34,8 @@ class TDTestCase: path = tdDnodes.dnodes[1].getDnodeRootDir(1) print(path) tdLog.info("sudo mkdir -p %s/data/vnode/vnode2/wal/old" % path) - os.system("sudo mkdir -p %s/data/vnode/vnode2/wal/old" % path) + # os.system("sudo mkdir -p %s/data/vnode/vnode2/wal/old" % path) + os.makedirs("%s/data/vnode/vnode2/wal/old" % path, exist_ok=True) # like "mkdir -p" def run(self): # 
os.system("rm -rf %s/ " % tdDnodes.getDnodesRootDir()) diff --git a/tests/pytest/test.py b/tests/pytest/test.py index 97dca6be18..e0a9e339a9 100644 --- a/tests/pytest/test.py +++ b/tests/pytest/test.py @@ -55,7 +55,7 @@ if __name__ == "__main__": restart = True if key in ['-f', '--file']: - fileName = value + fileName = value.replace('\r', "") if key in ['-p', '--path']: deployPath = value diff --git a/tests/pytest/util/dnodes-default.py b/tests/pytest/util/dnodes-default.py index 085e083149..43a92431ba 100644 --- a/tests/pytest/util/dnodes-default.py +++ b/tests/pytest/util/dnodes-default.py @@ -60,7 +60,7 @@ class TDSimClient: self.cfgDict.update({option: value}) def cfg(self, option, value): - cmd = "echo '%s %s' >> %s" % (option, value, self.cfgPath) + cmd = "echo %s %s >> %s" % (option, value, self.cfgPath) if os.system(cmd) != 0: tdLog.exit(cmd) @@ -73,17 +73,19 @@ class TDSimClient: if os.system(cmd) != 0: tdLog.exit(cmd) - cmd = "mkdir -p " + self.logDir - if os.system(cmd) != 0: - tdLog.exit(cmd) + os.makedirs(self.logDir, exist_ok=True) # like "mkdir -p" + # cmd = "mkdir -p " + self.logDir + # if os.system(cmd) != 0: + # tdLog.exit(cmd) cmd = "rm -rf " + self.cfgDir if os.system(cmd) != 0: tdLog.exit(cmd) - cmd = "mkdir -p " + self.cfgDir - if os.system(cmd) != 0: - tdLog.exit(cmd) + os.makedirs(self.cfgDir, exist_ok=True) # like "mkdir -p" + # cmd = "mkdir -p " + self.cfgDir + # if os.system(cmd) != 0: + # tdLog.exit(cmd) cmd = "touch " + self.cfgPath if os.system(cmd) != 0: @@ -149,17 +151,20 @@ class TDDnode: if os.system(cmd) != 0: tdLog.exit(cmd) - cmd = "mkdir -p " + self.dataDir - if os.system(cmd) != 0: - tdLog.exit(cmd) + os.makedirs(self.dataDir, exist_ok=True) # like "mkdir -p" + # cmd = "mkdir -p " + self.dataDir + # if os.system(cmd) != 0: + # tdLog.exit(cmd) - cmd = "mkdir -p " + self.logDir - if os.system(cmd) != 0: - tdLog.exit(cmd) + os.makedirs(self.logDir, exist_ok=True) # like "mkdir -p" + # cmd = "mkdir -p " + self.logDir + # if os.system(cmd) != 0: + # tdLog.exit(cmd) - cmd = "mkdir -p " + self.cfgDir - if os.system(cmd) != 0: - tdLog.exit(cmd) + os.makedirs(self.cfgDir, exist_ok=True) # like "mkdir -p" + # cmd = "mkdir -p " + self.cfgDir + # if os.system(cmd) != 0: + # tdLog.exit(cmd) cmd = "touch " + self.cfgPath if os.system(cmd) != 0: @@ -320,7 +325,7 @@ class TDDnode: tdLog.exit(cmd) def cfg(self, option, value): - cmd = "echo '%s %s' >> %s" % (option, value, self.cfgPath) + cmd = "echo %s %s >> %s" % (option, value, self.cfgPath) if os.system(cmd) != 0: tdLog.exit(cmd) diff --git a/tests/pytest/util/dnodes-no-random-fail.py b/tests/pytest/util/dnodes-no-random-fail.py index 2627575e61..3bcea493dc 100644 --- a/tests/pytest/util/dnodes-no-random-fail.py +++ b/tests/pytest/util/dnodes-no-random-fail.py @@ -58,7 +58,7 @@ class TDSimClient: self.cfgDict.update({option: value}) def cfg(self, option, value): - cmd = "echo '%s %s' >> %s" % (option, value, self.cfgPath) + cmd = "echo %s %s >> %s" % (option, value, self.cfgPath) if os.system(cmd) != 0: tdLog.exit(cmd) @@ -71,17 +71,19 @@ class TDSimClient: if os.system(cmd) != 0: tdLog.exit(cmd) - cmd = "mkdir -p " + self.logDir - if os.system(cmd) != 0: - tdLog.exit(cmd) + os.makedirs(self.logDir, exist_ok=True) # like "mkdir -p" + # cmd = "mkdir -p " + self.logDir + # if os.system(cmd) != 0: + # tdLog.exit(cmd) cmd = "rm -rf " + self.cfgDir if os.system(cmd) != 0: tdLog.exit(cmd) - cmd = "mkdir -p " + self.cfgDir - if os.system(cmd) != 0: - tdLog.exit(cmd) + os.makedirs(self.cfgDir, exist_ok=True) # like 
"mkdir -p" + # cmd = "mkdir -p " + self.cfgDir + # if os.system(cmd) != 0: + # tdLog.exit(cmd) cmd = "touch " + self.cfgPath if os.system(cmd) != 0: @@ -147,17 +149,20 @@ class TDDnode: if os.system(cmd) != 0: tdLog.exit(cmd) - cmd = "mkdir -p " + self.dataDir - if os.system(cmd) != 0: - tdLog.exit(cmd) + os.makedirs(self.dataDir, exist_ok=True) # like "mkdir -p" + # cmd = "mkdir -p " + self.dataDir + # if os.system(cmd) != 0: + # tdLog.exit(cmd) - cmd = "mkdir -p " + self.logDir - if os.system(cmd) != 0: - tdLog.exit(cmd) + os.makedirs(self.logDir, exist_ok=True) # like "mkdir -p" + # cmd = "mkdir -p " + self.logDir + # if os.system(cmd) != 0: + # tdLog.exit(cmd) - cmd = "mkdir -p " + self.cfgDir - if os.system(cmd) != 0: - tdLog.exit(cmd) + os.makedirs(self.cfgDir, exist_ok=True) # like "mkdir -p" + # cmd = "mkdir -p " + self.cfgDir + # if os.system(cmd) != 0: + # tdLog.exit(cmd) cmd = "touch " + self.cfgPath if os.system(cmd) != 0: @@ -318,7 +323,7 @@ class TDDnode: tdLog.exit(cmd) def cfg(self, option, value): - cmd = "echo '%s %s' >> %s" % (option, value, self.cfgPath) + cmd = "echo %s %s >> %s" % (option, value, self.cfgPath) if os.system(cmd) != 0: tdLog.exit(cmd) diff --git a/tests/pytest/util/dnodes-random-fail.py b/tests/pytest/util/dnodes-random-fail.py index 4f4cdcc0d0..7d99980e67 100644 --- a/tests/pytest/util/dnodes-random-fail.py +++ b/tests/pytest/util/dnodes-random-fail.py @@ -58,7 +58,7 @@ class TDSimClient: self.cfgDict.update({option: value}) def cfg(self, option, value): - cmd = "echo '%s %s' >> %s" % (option, value, self.cfgPath) + cmd = "echo %s %s >> %s" % (option, value, self.cfgPath) if os.system(cmd) != 0: tdLog.exit(cmd) @@ -71,17 +71,19 @@ class TDSimClient: if os.system(cmd) != 0: tdLog.exit(cmd) - cmd = "mkdir -p " + self.logDir - if os.system(cmd) != 0: - tdLog.exit(cmd) + os.makedirs(self.logDir, exist_ok=True) # like "mkdir -p" + # cmd = "mkdir -p " + self.logDir + # if os.system(cmd) != 0: + # tdLog.exit(cmd) cmd = "rm -rf " + self.cfgDir if os.system(cmd) != 0: tdLog.exit(cmd) - cmd = "mkdir -p " + self.cfgDir - if os.system(cmd) != 0: - tdLog.exit(cmd) + os.makedirs(self.cfgDir, exist_ok=True) # like "mkdir -p" + # cmd = "mkdir -p " + self.cfgDir + # if os.system(cmd) != 0: + # tdLog.exit(cmd) cmd = "touch " + self.cfgPath if os.system(cmd) != 0: @@ -147,17 +149,20 @@ class TDDnode: if os.system(cmd) != 0: tdLog.exit(cmd) - cmd = "mkdir -p " + self.dataDir - if os.system(cmd) != 0: - tdLog.exit(cmd) + os.makedirs(self.dataDir, exist_ok=True) # like "mkdir -p" + # cmd = "mkdir -p " + self.dataDir + # if os.system(cmd) != 0: + # tdLog.exit(cmd) - cmd = "mkdir -p " + self.logDir - if os.system(cmd) != 0: - tdLog.exit(cmd) + os.makedirs(self.logDir, exist_ok=True) # like "mkdir -p" + # cmd = "mkdir -p " + self.logDir + # if os.system(cmd) != 0: + # tdLog.exit(cmd) - cmd = "mkdir -p " + self.cfgDir - if os.system(cmd) != 0: - tdLog.exit(cmd) + os.makedirs(self.cfgDir, exist_ok=True) # like "mkdir -p" + # cmd = "mkdir -p " + self.cfgDir + # if os.system(cmd) != 0: + # tdLog.exit(cmd) cmd = "touch " + self.cfgPath if os.system(cmd) != 0: @@ -318,7 +323,7 @@ class TDDnode: tdLog.exit(cmd) def cfg(self, option, value): - cmd = "echo '%s %s' >> %s" % (option, value, self.cfgPath) + cmd = "echo %s %s >> %s" % (option, value, self.cfgPath) if os.system(cmd) != 0: tdLog.exit(cmd) diff --git a/tests/pytest/util/dnodes.py b/tests/pytest/util/dnodes.py index 0f4919ba96..589bde7474 100644 --- a/tests/pytest/util/dnodes.py +++ b/tests/pytest/util/dnodes.py @@ -14,6 +14,7 
@@ import sys import os import os.path +import platform import subprocess from time import sleep from util.log import * @@ -61,7 +62,7 @@ class TDSimClient: self.cfgDict.update({option: value}) def cfg(self, option, value): - cmd = "echo '%s %s' >> %s" % (option, value, self.cfgPath) + cmd = "echo %s %s >> %s" % (option, value, self.cfgPath) if os.system(cmd) != 0: tdLog.exit(cmd) @@ -74,17 +75,19 @@ class TDSimClient: if os.system(cmd) != 0: tdLog.exit(cmd) - cmd = "mkdir -p " + self.logDir - if os.system(cmd) != 0: - tdLog.exit(cmd) + os.makedirs(self.logDir, exist_ok=True) # like "mkdir -p" + # cmd = "mkdir -p " + self.logDir + # if os.system(cmd) != 0: + # tdLog.exit(cmd) cmd = "rm -rf " + self.cfgDir if os.system(cmd) != 0: tdLog.exit(cmd) - cmd = "mkdir -p " + self.cfgDir - if os.system(cmd) != 0: - tdLog.exit(cmd) + os.makedirs(self.cfgDir, exist_ok=True) # like "mkdir -p" + # cmd = "mkdir -p " + self.cfgDir + # if os.system(cmd) != 0: + # tdLog.exit(cmd) cmd = "touch " + self.cfgPath if os.system(cmd) != 0: @@ -185,17 +188,20 @@ class TDDnode: if os.system(cmd) != 0: tdLog.exit(cmd) - cmd = "mkdir -p " + self.dataDir - if os.system(cmd) != 0: - tdLog.exit(cmd) + os.makedirs(self.dataDir, exist_ok=True) # like "mkdir -p" + # cmd = "mkdir -p " + self.dataDir + # if os.system(cmd) != 0: + # tdLog.exit(cmd) - cmd = "mkdir -p " + self.logDir - if os.system(cmd) != 0: - tdLog.exit(cmd) + os.makedirs(self.logDir, exist_ok=True) # like "mkdir -p" + # cmd = "mkdir -p " + self.logDir + # if os.system(cmd) != 0: + # tdLog.exit(cmd) - cmd = "mkdir -p " + self.cfgDir - if os.system(cmd) != 0: - tdLog.exit(cmd) + os.makedirs(self.cfgDir, exist_ok=True) # like "mkdir -p" + # cmd = "mkdir -p " + self.cfgDir + # if os.system(cmd) != 0: + # tdLog.exit(cmd) cmd = "touch " + self.cfgPath if os.system(cmd) != 0: @@ -246,7 +252,7 @@ class TDDnode: projPath = selfPath[:selfPath.find("tests")] for root, dirs, files in os.walk(projPath): - if ("taosd" in files): + if (("taosd.exe") in files) or (("taosd") in files): rootRealPath = os.path.dirname(os.path.realpath(root)) if ("packaging" not in rootRealPath): buildPath = root[:len(root)-len("/build/bin")] @@ -267,7 +273,11 @@ class TDDnode: tdLog.exit("dnode:%d is not deployed" % (self.index)) if self.valgrind == 0: - cmd = "nohup %s -c %s > /dev/null 2>&1 & " % ( + if platform.system()=="Windows": + cmd = "mintty %s -c %s" % ( + binPath, self.cfgDir) + else: + cmd = "nohup %s -c %s > /dev/null 2>&1 & " % ( binPath, self.cfgDir) else: valgrindCmdline = "valgrind --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all -v --workaround-gcc296-bugs=yes" @@ -292,7 +302,7 @@ class TDDnode: i += 1 if i>50: break - popen = subprocess.Popen('tail -f ' + logFile, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) + popen = subprocess.Popen('tail -f -n +0 ' + logFile, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) pid = popen.pid # print('Popen.pid:' + str(pid)) timeout = time.time() + 60*2 @@ -404,7 +414,7 @@ class TDDnode: tdLog.exit(cmd) def cfg(self, option, value): - cmd = "echo '%s %s' >> %s" % (option, value, self.cfgPath) + cmd = "echo %s %s >> %s" % (option, value, self.cfgPath) if os.system(cmd) != 0: tdLog.exit(cmd) diff --git a/tests/pytest/wal/addOldWalTest.py b/tests/pytest/wal/addOldWalTest.py index 2f4dcd5ce8..36056d1bc2 100644 --- a/tests/pytest/wal/addOldWalTest.py +++ b/tests/pytest/wal/addOldWalTest.py @@ -31,7 +31,7 @@ class TDTestCase: def createOldDirAndAddWal(self): oldDir = 
tdDnodes.getDnodesRootDir() + "dnode1/data/vnode/vnode2/wal/old" - os.system("sudo echo 'test' >> %s/wal" % oldDir) + os.system("sudo echo test >> %s/wal" % oldDir) def run(self): From 607fe1b7d31464d8b7301ca7ccd8b04130812af3 Mon Sep 17 00:00:00 2001 From: Zhiyu Yang <69311263+zyyang-taosdata@users.noreply.github.com> Date: Mon, 23 Aug 2021 16:25:26 +0800 Subject: [PATCH 126/165] Update docs.md --- .../cn/08.connector/01.java/docs.md | 137 +++++++----------- 1 file changed, 49 insertions(+), 88 deletions(-) diff --git a/documentation20/cn/08.connector/01.java/docs.md b/documentation20/cn/08.connector/01.java/docs.md index 641ef05a2e..4f0ee3702c 100644 --- a/documentation20/cn/08.connector/01.java/docs.md +++ b/documentation20/cn/08.connector/01.java/docs.md @@ -2,8 +2,6 @@ ## 总体介绍 -TDengine 提供了遵循 JDBC 标准(3.0)API 规范的 `taos-jdbcdriver` 实现,可在 maven 的中央仓库 [Sonatype Repository](https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver) 搜索下载。 - `taos-jdbcdriver` 的实现包括 2 种形式: JDBC-JNI 和 JDBC-RESTful(taos-jdbcdriver-2.0.18 开始支持 JDBC-RESTful)。 JDBC-JNI 通过调用客户端 libtaos.so(或 taos.dll )的本地方法实现, JDBC-RESTful 则在内部封装了 RESTful 接口实现。 ![tdengine-connector](page://images/tdengine-jdbc-connector.png) @@ -14,12 +12,10 @@ TDengine 提供了遵循 JDBC 标准(3.0)API 规范的 `taos-jdbcdriver` 实 * RESTful:应用将 SQL 发送给位于物理节点2(pnode2)上的 RESTful 连接器,再调用客户端 API(libtaos.so)。 * JDBC-RESTful:Java 应用通过 JDBC-RESTful 的 API ,将 SQL 封装成一个 RESTful 请求,发送给物理节点2的 RESTful 连接器。 -TDengine 的 JDBC 驱动实现尽可能与关系型数据库驱动保持一致,但时序空间数据库与关系对象型数据库服务的对象和技术特征存在差异,导致 `taos-jdbcdriver` 与传统的 JDBC driver 也存在一定差异。在使用时需要注意以下几点: +TDengine 的 JDBC 驱动实现尽可能与关系型数据库驱动保持一致,但TDengine与关系对象型数据库的使用场景和技术特征存在差异,导致 `taos-jdbcdriver` 与传统的 JDBC driver 也存在一定差异。在使用时需要注意以下几点: * TDengine 目前不支持针对单条数据记录的删除操作。 * 目前不支持事务操作。 -* 目前不支持嵌套查询(nested query)。 -* 对每个 Connection 的实例,至多只能有一个打开的 ResultSet 实例;如果在 ResultSet 还没关闭的情况下执行了新的查询,taos-jdbcdriver 会自动关闭上一个 ResultSet。 ### JDBC-JNI和JDBC-RESTful的对比 @@ -50,9 +46,12 @@ TDengine 的 JDBC 驱动实现尽可能与关系型数据库驱动保持一致 -注意:与 JNI 方式不同,RESTful 接口是无状态的,因此 `USE db_name` 指令没有效果,RESTful 下所有对表名、超级表名的引用都需要指定数据库名前缀。 +注意:与 JNI 方式不同,RESTful 接口是无状态的。在使用JDBC-RESTful时,需要在sql中指定表、超级表的数据库名称。例如: +```sql +INSERT INTO test.t1 USING test.weather (ts, temperature) TAGS('beijing') VALUES(now, 24.6); +``` -### TAOS-JDBCDriver 版本以及支持的 TDengine 版本和 JDK 版本 +## TAOS-JDBCDriver 版本以及支持的 TDengine 版本和 JDK 版本 | taos-jdbcdriver 版本 | TDengine 版本 | JDK 版本 | | -------------------- | ----------------- | -------- | @@ -65,7 +64,7 @@ TDengine 的 JDBC 驱动实现尽可能与关系型数据库驱动保持一致 | 1.0.2 | 1.6.1.x 及以上 | 1.8.x | | 1.0.1 | 1.6.1.x 及以上 | 1.8.x | -### TDengine DataType 和 Java DataType +## TDengine DataType 和 Java DataType TDengine 目前支持时间戳、数字、字符、布尔类型,与 Java 对应类型转换如下: @@ -82,36 +81,27 @@ TDengine 目前支持时间戳、数字、字符、布尔类型,与 Java 对 | BINARY | byte array | | NCHAR | java.lang.String | -## 安装 +## 安装Java Connector -Java连接器支持的系统有: Linux 64/Windows x64/Windows x86。 - -**安装前准备:** - -- 已安装TDengine服务器端 -- 已安装好TDengine应用驱动,具体请参照 [安装连接器驱动步骤](https://www.taosdata.com/cn/documentation/connector#driver) 章节 - -TDengine 为了方便 Java 应用使用,遵循 JDBC 标准(3.0)API 规范提供了 `taos-jdbcdriver` 实现。可以通过 [Sonatype Repository](https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver) 搜索并下载。 - -由于 TDengine 的应用驱动是使用C语言开发的,使用 taos-jdbcdriver 驱动包时需要依赖系统对应的本地函数库。 +### 安装前准备 +使用Java Connector连接数据库前,需要具备以下条件: +1. Linux或Windows操作系统 +2. Java 1.8以上运行时环境 +3. 
TDengine-client(使用JDBC-JNI时必须,使用JDBC-RESTful时非必须) +**注意**:由于 TDengine 的应用驱动是使用C语言开发的,使用 taos-jdbcdriver 驱动包时需要依赖系统对应的本地函数库。 - libtaos.so 在 Linux 系统中成功安装 TDengine 后,依赖的本地函数库 libtaos.so 文件会被自动拷贝至 /usr/lib/libtaos.so,该目录包含在 Linux 自动扫描路径上,无需单独指定。 - - taos.dll 在 Windows 系统中安装完客户端之后,驱动包依赖的 taos.dll 文件会自动拷贝到系统默认搜索路径 C:/Windows/System32 下,同样无需要单独指定。 -注意:在 Windows 环境开发时需要安装 TDengine 对应的 [windows 客户端](https://www.taosdata.com/cn/all-downloads/#TDengine-Windows-Client),Linux 服务器安装完 TDengine 之后默认已安装 client,也可以单独安装 [Linux 客户端](https://www.taosdata.com/cn/getting-started/#快速上手) 连接远程 TDengine Server。 - -### 如何获取 TAOS-JDBCDriver - -**maven仓库** +**注意**:在 Windows 环境开发时需要安装 TDengine 对应的 [windows 客户端](https://www.taosdata.com/cn/all-downloads/#TDengine-Windows-Client),Linux 服务器安装完 TDengine 之后默认已安装 client,也可以单独安装 [Linux 客户端](https://www.taosdata.com/cn/getting-started/#快速上手) 连接远程 TDengine Server。 +### 通过maven获取JDBC driver 目前 taos-jdbcdriver 已经发布到 [Sonatype Repository](https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver) 仓库,且各大仓库都已同步。 - - [sonatype](https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver) - [mvnrepository](https://mvnrepository.com/artifact/com.taosdata.jdbc/taos-jdbcdriver) - [maven.aliyun](https://maven.aliyun.com/mvn/search) -maven 项目中使用如下 pom.xml 配置即可: +maven 项目中,在pom.xml 中添加以下依赖: ```xml-dtd com.taosdata.jdbc @@ -119,39 +109,22 @@ maven 项目中使用如下 pom.xml 配置即可: 2.0.18 ``` -**源码编译打包** -下载 TDengine 源码之后,进入 taos-jdbcdriver 源码目录 `src/connector/jdbc` 执行 `mvn clean package -Dmaven.test.skip=true` 即可生成相应 jar 包。 +### 通过源码编译获取JDBC driver -### 示例程序 - -示例程序源码位于install_directory/examples/JDBC,有如下目录: - -JDBCDemo JDBC示例源程序 - -JDBCConnectorChecker JDBC安装校验源程序及jar包 - -Springbootdemo springboot示例源程序 - -SpringJdbcTemplate SpringJDBC模板 - -### 安装验证 - -运行如下指令: - -```Bash -cd {install_directory}/examples/JDBC/JDBCConnectorChecker -java -jar JDBCConnectorChecker.jar -host +可以通过下载TDengine的源码,自己编译最新版本的java connector +```shell +git clone https://github.com/taosdata/TDengine.git +cd TDengine/src/connector/jdbc +mvn clean package -Dmaven.test.skip=true ``` - -验证通过将打印出成功信息。 +编译后,在target目录下会产生taos-jdbcdriver-2.0.XX-dist.jar的jar包。 ## Java连接器的使用 ### 获取连接 #### 指定URL获取连接 - 通过指定URL获取连接,如下所示: ```java @@ -159,23 +132,19 @@ Class.forName("com.taosdata.jdbc.rs.RestfulDriver"); String jdbcUrl = "jdbc:TAOS-RS://taosdemo.com:6041/test?user=root&password=taosdata"; Connection conn = DriverManager.getConnection(jdbcUrl); ``` - 以上示例,使用 **JDBC-RESTful** 的 driver,建立了到 hostname 为 taosdemo.com,端口为 6041,数据库名为 test 的连接。这个 URL 中指定用户名(user)为 root,密码(password)为 taosdata。 使用 JDBC-RESTful 接口,不需要依赖本地函数库。与 JDBC-JNI 相比,仅需要: - 1. driverClass 指定为“com.taosdata.jdbc.rs.RestfulDriver”; 2. jdbcUrl 以“jdbc:TAOS-RS://”开头; 3. 使用 6041 作为连接端口。 如果希望获得更好的写入和查询性能,Java 应用可以使用 **JDBC-JNI** 的driver,如下所示: - ```java Class.forName("com.taosdata.jdbc.TSDBDriver"); String jdbcUrl = "jdbc:TAOS://taosdemo.com:6030/test?user=root&password=taosdata"; Connection conn = DriverManager.getConnection(jdbcUrl); ``` - 以上示例,使用了 JDBC-JNI 的 driver,建立了到 hostname 为 taosdemo.com,端口为 6030(TDengine 的默认端口),数据库名为 test 的连接。这个 URL 中指定用户名(user)为 root,密码(password)为 taosdata。 **注意**:使用 JDBC-JNI 的 driver,taos-jdbcdriver 驱动包时需要依赖系统对应的本地函数库(Linux 下是 libtaos.so;Windows 下是 taos.dll)。 @@ -194,6 +163,9 @@ url中的配置参数如下: * charset:客户端使用的字符集,默认值为系统字符集。 * locale:客户端语言环境,默认值系统当前 locale。 * timezone:客户端使用的时区,默认值为系统当前时区。 +* batchfetch: 仅在使用JDBC-JNI时生效。true:在执行查询时批量拉取结果集;false:逐行拉取结果集。默认值为:false。 +* timestampFormat: 仅在使用JDBC-RESTful时生效. 
'TIMESTAMP':结果集中timestamp类型的字段为一个long值; 'UTC':结果集中timestamp类型的字段为一个UTC时间格式的字符串; 'STRING':结果集中timestamp类型的字段为一个本地时间格式的字符串。默认值为'STRING'。 +* batchErrorIgnore:true:在执行Statement的executeBatch时,如果中间有一条sql执行失败,继续执行下面的sq了。false:不再执行失败sql后的任何语句。默认值为:false。 #### 指定URL和Properties获取连接 @@ -222,11 +194,13 @@ properties 中的配置参数如下: * TSDBDriver.PROPERTY_KEY_CHARSET:客户端使用的字符集,默认值为系统字符集。 * TSDBDriver.PROPERTY_KEY_LOCALE:客户端语言环境,默认值系统当前 locale。 * TSDBDriver.PROPERTY_KEY_TIME_ZONE:客户端使用的时区,默认值为系统当前时区。 +* TSDBDriver.PROPERTY_KEY_BATCH_LOAD: 仅在使用JDBC-JNI时生效。true:在执行查询时批量拉取结果集;false:逐行拉取结果集。默认值为:false。 +* TSDBDriver.PROPERTY_KEY_TIMESTAMP_FORMAT: 仅在使用JDBC-RESTful时生效. 'TIMESTAMP':结果集中timestamp类型的字段为一个long值; 'UTC':结果集中timestamp类型的字段为一个UTC时间格式的字符串; 'STRING':结果集中timestamp类型的字段为一个本地时间格式的字符串。默认值为'STRING'。 +* TSDBDriver.PROPERTY_KEY_BATCH_ERROR_IGNORE:true:在执行Statement的executeBatch时,如果中间有一条sql执行失败,继续执行下面的sq了。false:不再执行失败sql后的任何语句。默认值为:false。 #### 使用客户端配置文件建立连接 当使用 JDBC-JNI 连接 TDengine 集群时,可以使用客户端配置文件,在客户端配置文件中指定集群的 firstEp、secondEp参数。如下所示: - 1. 在 Java 应用中不指定 hostname 和 port ```java @@ -243,7 +217,6 @@ public Connection getConn() throws Exception{ ``` 2. 在配置文件中指定 firstEp 和 secondEp - ``` # first fully qualified domain name (FQDN) for TDengine system firstEp cluster_node1:6030 @@ -424,9 +397,9 @@ public void setNString(int columnIndex, ArrayList list, int size) throws ``` 其中 setString 和 setNString 都要求用户在 size 参数里声明表定义中对应列的列宽。 -### 订阅 +## 订阅 -#### 创建 +### 创建 ```java TSDBSubscribe sub = ((TSDBConnection)conn).subscribe("topic", "select * from meters", false); @@ -440,7 +413,7 @@ TSDBSubscribe sub = ((TSDBConnection)conn).subscribe("topic", "select * from met 如上面的例子将使用 SQL 语句 `select * from meters` 创建一个名为 `topic` 的订阅,如果这个订阅已经存在,将继续之前的查询进度,而不是从头开始消费所有的数据。 -#### 消费数据 +### 消费数据 ```java int total = 0; @@ -458,7 +431,7 @@ while(true) { `consume` 方法返回一个结果集,其中包含从上次 `consume` 到目前为止的所有新数据。请务必按需选择合理的调用 `consume` 的频率(如例子中的 `Thread.sleep(1000)`),否则会给服务端造成不必要的压力。 -#### 关闭订阅 +### 关闭订阅 ```java sub.close(true); @@ -466,7 +439,7 @@ sub.close(true); `close` 方法关闭一个订阅。如果其参数为 `true` 表示保留订阅进度信息,后续可以创建同名订阅继续消费数据;如为 `false` 则不保留订阅进度。 -### 关闭资源 +## 关闭资源 ```java resultSet.close(); @@ -478,19 +451,8 @@ conn.close(); ## 与连接池使用 -**HikariCP** - -* 引入相应 HikariCP maven 依赖: - -```xml - - com.zaxxer - HikariCP - 3.4.1 - -``` - -* 使用示例如下: +### HikariCP +使用示例如下: ```java public static void main(String[] args) throws SQLException { @@ -522,19 +484,8 @@ conn.close(); > 通过 HikariDataSource.getConnection() 获取连接后,使用完成后需要调用 close() 方法,实际上它并不会关闭连接,只是放回连接池中。 > 更多 HikariCP 使用问题请查看[官方说明](https://github.com/brettwooldridge/HikariCP)。 -**Druid** - -* 引入相应 Druid maven 依赖: - -```xml - - com.alibaba - druid - 1.1.20 - -``` - -* 使用示例如下: +### Druid +使用示例如下: ```java public static void main(String[] args) throws Exception { @@ -580,6 +531,16 @@ Query OK, 1 row(s) in set (0.000141s) * Spring JdbcTemplate 中使用 taos-jdbcdriver,可参考 [SpringJdbcTemplate](https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/SpringJdbcTemplate) * Springboot + Mybatis 中使用,可参考 [springbootdemo](https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/springbootdemo) +### 示例程序 + +示例程序源码位于TDengine/test/examples/JDBC下: +* JDBCDemo:JDBC示例源程序 +* JDBCConnectorChecker:JDBC安装校验源程序及jar包 +* Springbootdemo:springboot示例源程序 +* SpringJdbcTemplate:SpringJDBC模板 + +请参考:![JDBC example](https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC) + ## 常见问题 * java.lang.UnsatisfiedLinkError: no taos in java.library.path From 915e1c0e53f80c2d49e1fe147c54085942f72fbd Mon Sep 17 00:00:00 2001 
From: Zhiyu Yang <69311263+zyyang-taosdata@users.noreply.github.com> Date: Mon, 23 Aug 2021 16:27:27 +0800 Subject: [PATCH 127/165] Update docs.md --- documentation20/cn/08.connector/01.java/docs.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/documentation20/cn/08.connector/01.java/docs.md b/documentation20/cn/08.connector/01.java/docs.md index 4f0ee3702c..fdb07ae179 100644 --- a/documentation20/cn/08.connector/01.java/docs.md +++ b/documentation20/cn/08.connector/01.java/docs.md @@ -531,7 +531,7 @@ Query OK, 1 row(s) in set (0.000141s) * Spring JdbcTemplate 中使用 taos-jdbcdriver,可参考 [SpringJdbcTemplate](https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/SpringJdbcTemplate) * Springboot + Mybatis 中使用,可参考 [springbootdemo](https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/springbootdemo) -### 示例程序 +## 示例程序 示例程序源码位于TDengine/test/examples/JDBC下: * JDBCDemo:JDBC示例源程序 From 78928a135e995d198f4970f673f64beb89fc0711 Mon Sep 17 00:00:00 2001 From: Zhiyu Yang <69311263+zyyang-taosdata@users.noreply.github.com> Date: Mon, 23 Aug 2021 16:28:43 +0800 Subject: [PATCH 128/165] Update docs.md fix a spelling error --- documentation20/en/08.connector/01.java/docs.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/documentation20/en/08.connector/01.java/docs.md b/documentation20/en/08.connector/01.java/docs.md index de555b0a9c..fa8b4e1362 100644 --- a/documentation20/en/08.connector/01.java/docs.md +++ b/documentation20/en/08.connector/01.java/docs.md @@ -203,7 +203,7 @@ The configuration parameters in properties are as follows: * TSDBDriver.PROPERTY_KEY_LOCALE: client locale. The default value is the current system locale. * TSDBDriver.PROPERTY_KEY_TIME_ZONE: timezone used by the client. The default value is the current timezone of the system. * TSDBDriver.PROPERTY_KEY_BATCH_LOAD: only valid for JDBC-JNI. True if batch ResultSet fetching is enabled; false if row-by-row ResultSet fetching is enabled. Default value is flase. -* TSDBDriver.PROPERTY_KEY_BATCH_LOAD: only valid for JDBC-RESTful. 'TIMESTAMP' if you want to get a long value in a ResultSet; 'UTC' if you want to get a string in UTC date-time format in a ResultSet; 'STRING' if you want to get a local date-time format string in ResultSet. Default value is 'STRING'. +* TSDBDriver.PROPERTY_KEY_TIMESTAMP_FORMAT: only valid for JDBC-RESTful. 'TIMESTAMP' if you want to get a long value in a ResultSet; 'UTC' if you want to get a string in UTC date-time format in a ResultSet; 'STRING' if you want to get a local date-time format string in ResultSet. Default value is 'STRING'. * TSDBDriver.PROPERTY_KEY_BATCH_ERROR_IGNORE: true if you want to continue executing the rest of the SQL when error happens during execute the executeBatch method in Statement; false, false if the remaining SQL statements are not executed. Default value is false. 
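For illustration only, here is a minimal sketch of opening a connection with the properties listed above. The host, port, database and credential values are placeholders taken from the earlier URL example rather than a recommended setup; the property keys are the TSDBDriver constants described above.

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.util.Properties;

import com.taosdata.jdbc.TSDBDriver;

public class ConnectWithProperties {
    public static void main(String[] args) throws Exception {
        Class.forName("com.taosdata.jdbc.TSDBDriver");
        // placeholder URL: adjust host, port and database to your environment
        String jdbcUrl = "jdbc:TAOS://taosdemo.com:6030/test?user=root&password=taosdata";

        Properties connProps = new Properties();
        // JDBC-JNI only: fetch result sets in batches instead of row by row
        connProps.setProperty(TSDBDriver.PROPERTY_KEY_BATCH_LOAD, "true");
        // JDBC-RESTful only: return timestamp columns as UTC date-time strings
        connProps.setProperty(TSDBDriver.PROPERTY_KEY_TIMESTAMP_FORMAT, "UTC");
        // keep executing the remaining statements of an executeBatch after one fails
        connProps.setProperty(TSDBDriver.PROPERTY_KEY_BATCH_ERROR_IGNORE, "true");

        Connection conn = DriverManager.getConnection(jdbcUrl, connProps);
        // ... use the connection ...
        conn.close();
    }
}
```

The same options can also be passed as URL parameters (batchfetch, timestampFormat, batchErrorIgnore) as listed earlier; this sketch only shows the Properties form.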
#### Establishing a connection with configuration file From d872147e992a7339c85a31dcec7b01978f406c1a Mon Sep 17 00:00:00 2001 From: Zhiyu Yang <69311263+zyyang-taosdata@users.noreply.github.com> Date: Mon, 23 Aug 2021 16:38:11 +0800 Subject: [PATCH 129/165] Update docs.md --- documentation20/en/08.connector/01.java/docs.md | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/documentation20/en/08.connector/01.java/docs.md b/documentation20/en/08.connector/01.java/docs.md index fa8b4e1362..e9eb88242f 100644 --- a/documentation20/en/08.connector/01.java/docs.md +++ b/documentation20/en/08.connector/01.java/docs.md @@ -317,14 +317,17 @@ Since version 2.1.2.0, TDengine's JDBC-JNI implementation has significantly impr Statement stmt = conn.createStatement(); Random r = new Random(); +// In the INSERT statement, the VALUES clause allows you to specify a specific column; If automatic table creation is adopted, the TAGS clause needs to set the parameter values of all TAGS columns TSDBPreparedStatement s = (TSDBPreparedStatement) conn.prepareStatement("insert into ? using weather_test tags (?, ?) (ts, c1, c2) values(?, ?, ?)"); s.setTableName("w1"); +// set tags s.setTagInt(0, r.nextInt(10)); s.setTagString(1, "Beijing"); int numOfRows = 10; +// set values ArrayList ts = new ArrayList<>(); for (int i = 0; i < numOfRows; i++){ ts.add(System.currentTimeMillis() + i); @@ -341,9 +344,10 @@ for (int i = 0; i < numOfRows; i++){ } s.setString(2, s2, 10); +// The cache is not cleared after AddBatch. Do not bind new data again before ExecuteBatch s.columnDataAddBatch(); s.columnDataExecuteBatch(); - +// Clear the cache, after which you can bind new data(including table names, tags, values): s.columnDataClearBatch(); s.columnDataCloseBatch(); ``` @@ -499,6 +503,10 @@ Query OK, 1 row(s) in set (0.000141s) - Please refer to [SpringJdbcTemplate](https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/SpringJdbcTemplate) if using taos-jdbcdriver in Spring JdbcTemplate. - Please refer to [springbootdemo](https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/springbootdemo) if using taos-jdbcdriver in Spring JdbcTemplate. 
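As a rough sketch of the Spring JdbcTemplate wiring referred to above (the Spring classes and the SQL statements below are illustrative assumptions, not taken from the linked example projects; the driver class and URL format are the ones shown earlier in this document):

```java
import java.util.List;
import java.util.Map;

import org.springframework.jdbc.core.JdbcTemplate;
import org.springframework.jdbc.datasource.DriverManagerDataSource;

public class JdbcTemplateSketch {
    public static void main(String[] args) {
        DriverManagerDataSource dataSource = new DriverManagerDataSource();
        dataSource.setDriverClassName("com.taosdata.jdbc.TSDBDriver");
        // placeholder address; no database in the URL so it can be created below
        dataSource.setUrl("jdbc:TAOS://127.0.0.1:6030/");
        dataSource.setUsername("root");
        dataSource.setPassword("taosdata");

        JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
        jdbcTemplate.execute("create database if not exists test");
        jdbcTemplate.execute("create table if not exists test.weather (ts timestamp, temperature float)");
        jdbcTemplate.execute("insert into test.weather values (now, 23.5)");

        // queryForList returns each row as a column-name -> value map
        List<Map<String, Object>> rows = jdbcTemplate.queryForList("select * from test.weather");
        rows.forEach(System.out::println);
    }
}
```

For complete, maintained projects, the SpringJdbcTemplate and springbootdemo examples linked above remain the better reference.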
+## Example Codes +you see sample code here: ![JDBC example](https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC) + + ## FAQ - java.lang.UnsatisfiedLinkError: no taos in java.library.path From 298f97fe0b37a444c8a70f916a66dee06cd3169f Mon Sep 17 00:00:00 2001 From: zhaoyanggh Date: Mon, 23 Aug 2021 16:38:29 +0800 Subject: [PATCH 130/165] remove dulplicate init --- tests/pytest/util/dnodes.py | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/tests/pytest/util/dnodes.py b/tests/pytest/util/dnodes.py index 0f4919ba96..5572e81f37 100644 --- a/tests/pytest/util/dnodes.py +++ b/tests/pytest/util/dnodes.py @@ -20,9 +20,9 @@ from util.log import * class TDSimClient: - def __init__(self): + def __init__(self, path): self.testCluster = False - + self.path = path self.cfgDict = { "numOfLogLines": "100000000", "numOfThreadsPerCore": "2.0", @@ -41,10 +41,7 @@ class TDSimClient: "jnidebugFlag": "135", "qdebugFlag": "135", "telemetryReporting": "0", - } - def init(self, path): - self.__init__() - self.path = path + } def getLogDir(self): self.logDir = "%s/sim/psim/log" % (self.path) @@ -480,8 +477,8 @@ class TDDnodes: for i in range(len(self.dnodes)): self.dnodes[i].init(self.path) - self.sim = TDSimClient() - self.sim.init(self.path) + self.sim = TDSimClient(self.path) + # self.sim.init(self.path) def setTestCluster(self, value): self.testCluster = value From 89b64600640d81ac31d742ce68dbf3e85e910896 Mon Sep 17 00:00:00 2001 From: zhaoyanggh Date: Mon, 23 Aug 2021 16:42:27 +0800 Subject: [PATCH 131/165] remove dulplicate init --- tests/pytest/util/dnodes.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/pytest/util/dnodes.py b/tests/pytest/util/dnodes.py index 5572e81f37..b176035b57 100644 --- a/tests/pytest/util/dnodes.py +++ b/tests/pytest/util/dnodes.py @@ -478,7 +478,6 @@ class TDDnodes: self.dnodes[i].init(self.path) self.sim = TDSimClient(self.path) - # self.sim.init(self.path) def setTestCluster(self, value): self.testCluster = value From 6a0735a62a06e377f607d33d6a260046bd33bb10 Mon Sep 17 00:00:00 2001 From: Elias Soong Date: Mon, 23 Aug 2021 17:20:31 +0800 Subject: [PATCH 132/165] [TD-5331] : move to some other doc space. --- documentation20/cn/08.connector/docs.md | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/documentation20/cn/08.connector/docs.md b/documentation20/cn/08.connector/docs.md index 5b695b845a..364961ca63 100644 --- a/documentation20/cn/08.connector/docs.md +++ b/documentation20/cn/08.connector/docs.md @@ -315,10 +315,6 @@ TDengine的异步API均采用非阻塞调用模式。应用程序可以用多线 1. 调用 `taos_stmt_init` 创建参数绑定对象; 2. 调用 `taos_stmt_prepare` 解析 INSERT 语句; 3. 如果 INSERT 语句中预留了表名但没有预留 TAGS,那么调用 `taos_stmt_set_tbname` 来设置表名; - * 从 2.1.6.0 版本开始,对于向一个超级表下的多个子表同时写入数据(每个子表写入的数据较少,可能只有一行)的情形,提供了一个专用的优化接口 `taos_stmt_set_sub_tbname`,可以通过提前载入 meta 数据以及避免对 SQL 语法的重复解析来节省总体的处理时间(但这个优化方法并不支持自动建表语法)。具体使用方法如下: - 1. 必须先提前调用 `taos_load_table_info` 来加载所有需要的超级表和子表的 table meta; - 2. 然后对一个超级表的第一个子表调用 `taos_stmt_set_tbname` 来设置表名; - 3. 后续子表用 `taos_stmt_set_sub_tbname` 来设置表名。 4. 如果 INSERT 语句中既预留了表名又预留了 TAGS(例如 INSERT 语句采取的是自动建表的方式),那么调用 `taos_stmt_set_tbname_tags` 来设置表名和 TAGS 的值; 5. 调用 `taos_stmt_bind_param_batch` 以多列的方式设置 VALUES 的值,或者调用 `taos_stmt_bind_param` 以单行的方式设置 VALUES 的值; 6. 
调用 `taos_stmt_add_batch` 把当前绑定的参数加入批处理; @@ -362,12 +358,6 @@ typedef struct TAOS_BIND { (2.1.1.0 版本新增,仅支持用于替换 INSERT 语句中的参数值) 当 SQL 语句中的表名使用了 `?` 占位时,可以使用此函数绑定一个具体的表名。 -- `int taos_stmt_set_sub_tbname(TAOS_STMT* stmt, const char* name)` - - (2.1.6.0 版本新增,仅支持用于替换 INSERT 语句中、属于同一个超级表下的多个子表中、作为写入目标的第 2 个到第 n 个子表的表名) - 当 SQL 语句中的表名使用了 `?` 占位时,如果想要一批写入的表是多个属于同一个超级表的子表,那么可以使用此函数绑定除第一个子表之外的其他子表的表名。 - *注意:*在使用时,客户端必须先调用 `taos_load_table_info` 来加载所有需要的超级表和子表的 table meta,然后对一个超级表的第一个子表调用 `taos_stmt_set_tbname`,后续子表用 `taos_stmt_set_sub_tbname`。 - - `int taos_stmt_set_tbname_tags(TAOS_STMT* stmt, const char* name, TAOS_BIND* tags)` (2.1.2.0 版本新增,仅支持用于替换 INSERT 语句中的参数值) From ce587d9981eb54169201c952895c153fafb140f9 Mon Sep 17 00:00:00 2001 From: Elias Soong Date: Mon, 23 Aug 2021 17:39:32 +0800 Subject: [PATCH 133/165] [TD-6277] : remove outdated description for func "last_row()". --- documentation20/cn/12.taos-sql/docs.md | 2 -- 1 file changed, 2 deletions(-) diff --git a/documentation20/cn/12.taos-sql/docs.md b/documentation20/cn/12.taos-sql/docs.md index 16b52f5773..48409537bb 100644 --- a/documentation20/cn/12.taos-sql/docs.md +++ b/documentation20/cn/12.taos-sql/docs.md @@ -1197,8 +1197,6 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数 适用于:**表、超级表**。 - 说明:与LAST函数不同,LAST_ROW不支持时间范围限制,强制返回最后一条记录。 - 限制:LAST_ROW()不能与INTERVAL一起使用。 示例: From 068318ded98e906c77a38b8f283368f35debb12f Mon Sep 17 00:00:00 2001 From: plum-lihui Date: Mon, 23 Aug 2021 17:57:34 +0800 Subject: [PATCH 134/165] [TD-6188] add new benchmark tool --- packaging/tools/make_install.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/packaging/tools/make_install.sh b/packaging/tools/make_install.sh index 7851587c82..55ca1174c9 100755 --- a/packaging/tools/make_install.sh +++ b/packaging/tools/make_install.sh @@ -142,6 +142,7 @@ function install_bin() { if [ "$osType" != "Darwin" ]; then ${csudo} rm -f ${bin_link_dir}/taosd || : ${csudo} rm -f ${bin_link_dir}/taosdemo || : + ${csudo} rm -f ${bin_link_dir}/perfMonitor || : ${csudo} rm -f ${bin_link_dir}/taosdump || : ${csudo} rm -f ${bin_link_dir}/set_core || : fi @@ -167,6 +168,7 @@ function install_bin() { [ -x ${install_main_dir}/bin/taosd ] && ${csudo} ln -s ${install_main_dir}/bin/taosd ${bin_link_dir}/taosd || : [ -x ${install_main_dir}/bin/taosdump ] && ${csudo} ln -s ${install_main_dir}/bin/taosdump ${bin_link_dir}/taosdump || : [ -x ${install_main_dir}/bin/taosdemo ] && ${csudo} ln -s ${install_main_dir}/bin/taosdemo ${bin_link_dir}/taosdemo || : + [ -x ${install_main_dir}/bin/perfMonitor ] && ${csudo} ln -s ${install_main_dir}/bin/perfMonitor ${bin_link_dir}/perfMonitor || : [ -x ${install_main_dir}/set_core.sh ] && ${csudo} ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || : fi From 1251b6e381b048d752ec944576a2b4c28647b070 Mon Sep 17 00:00:00 2001 From: afwerar <1296468573@qq.com> Date: Mon, 23 Aug 2021 18:03:48 +0800 Subject: [PATCH 135/165] [TD-6169]: windows dll client can not quit. --- src/client/src/tscSystem.c | 2 ++ src/util/src/tcache.c | 3 ++- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/src/client/src/tscSystem.c b/src/client/src/tscSystem.c index c04765b065..ad29b58660 100644 --- a/src/client/src/tscSystem.c +++ b/src/client/src/tscSystem.c @@ -199,7 +199,9 @@ void taos_init_imp(void) { // In the APIs of other program language, taos_cleanup is not available yet. // So, to make sure taos_cleanup will be invoked to clean up the allocated resource to suppress the valgrind warning. 
+#if !defined(TD_WINDOWS) atexit(taos_cleanup); +#endif tscDebug("client is initialized successfully"); } diff --git a/src/util/src/tcache.c b/src/util/src/tcache.c index aaa1d5ba9e..5f2acef2a5 100644 --- a/src/util/src/tcache.c +++ b/src/util/src/tcache.c @@ -538,7 +538,8 @@ void taosCacheCleanup(SCacheObj *pCacheObj) { // wait for the refresh thread quit before destroying the cache object. // But in the dll, the child thread will be killed before atexit takes effect.So here we only wait for one second. - for (int i = 0; i < 20&&atomic_load_8(&pCacheObj->deleting) != 0; i++) { + while(atomic_load_8(&pCacheObj->deleting)) { + // for (int i = 0; i < 20&&atomic_load_8(&pCacheObj->deleting) != 0; i++) { taosMsleep(50); } From ce6141b4afdda13e981160b86ff4d095cb76c9e4 Mon Sep 17 00:00:00 2001 From: afwerar <1296468573@qq.com> Date: Mon, 23 Aug 2021 18:17:41 +0800 Subject: [PATCH 136/165] [TD-6169]: windows dll client can not quit. --- src/client/src/tscSystem.c | 2 -- src/util/src/tcache.c | 4 ++-- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/src/client/src/tscSystem.c b/src/client/src/tscSystem.c index ad29b58660..c04765b065 100644 --- a/src/client/src/tscSystem.c +++ b/src/client/src/tscSystem.c @@ -199,9 +199,7 @@ void taos_init_imp(void) { // In the APIs of other program language, taos_cleanup is not available yet. // So, to make sure taos_cleanup will be invoked to clean up the allocated resource to suppress the valgrind warning. -#if !defined(TD_WINDOWS) atexit(taos_cleanup); -#endif tscDebug("client is initialized successfully"); } diff --git a/src/util/src/tcache.c b/src/util/src/tcache.c index 5f2acef2a5..26c2d97fe9 100644 --- a/src/util/src/tcache.c +++ b/src/util/src/tcache.c @@ -538,8 +538,8 @@ void taosCacheCleanup(SCacheObj *pCacheObj) { // wait for the refresh thread quit before destroying the cache object. // But in the dll, the child thread will be killed before atexit takes effect.So here we only wait for one second. - while(atomic_load_8(&pCacheObj->deleting)) { - // for (int i = 0; i < 20&&atomic_load_8(&pCacheObj->deleting) != 0; i++) { + // while(atomic_load_8(&pCacheObj->deleting)) { + for (int i = 0; i < 60&&atomic_load_8(&pCacheObj->deleting) != 0; i++) { taosMsleep(50); } From a44075c2f1a0599c8f0297fa43a683c06e075a66 Mon Sep 17 00:00:00 2001 From: afwerar <1296468573@qq.com> Date: Mon, 23 Aug 2021 18:28:30 +0800 Subject: [PATCH 137/165] [TD-6169]: windows dll client can not quit. --- src/util/src/tcache.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/util/src/tcache.c b/src/util/src/tcache.c index 26c2d97fe9..be5584d182 100644 --- a/src/util/src/tcache.c +++ b/src/util/src/tcache.c @@ -721,8 +721,6 @@ void* taosCacheTimedRefresh(void *handle) { continue; } - pthread_mutex_unlock(&guard); - if ((count % pCacheObj->checkTick) != 0) { continue; } @@ -742,6 +740,8 @@ void* taosCacheTimedRefresh(void *handle) { } taosTrashcanEmpty(pCacheObj, false); + + pthread_mutex_unlock(&guard); } } From 708981bb2b095a8b6cbb4e8caf25af1118db6aff Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Mon, 23 Aug 2021 18:35:31 +0800 Subject: [PATCH 138/165] Hotfix/sangshuduo/td 6194 taosdemo wrong data (#7526) * [TD-6194]: taosdemo wrong data * fix few format mistakes. 
--- src/kit/taosdemo/taosdemo.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index f222266ee8..977b51ee46 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -2141,7 +2141,7 @@ static void xDumpFieldToFile(FILE* fp, const char* val, fprintf(fp, "%d", *((int32_t *)val)); break; case TSDB_DATA_TYPE_BIGINT: - fprintf(fp, "%" PRId64, *((int64_t *)val)); + fprintf(fp, "%"PRId64"", *((int64_t *)val)); break; case TSDB_DATA_TYPE_FLOAT: fprintf(fp, "%.5f", GET_FLOAT_VAL(val)); @@ -5242,7 +5242,7 @@ static int64_t generateData(char *recBuf, char **data_type, int64_t timestamp, int lenOfBinary) { memset(recBuf, 0, MAX_DATA_SIZE); char *pstr = recBuf; - pstr += sprintf(pstr, "(%" PRId64, timestamp); + pstr += sprintf(pstr, "(%"PRId64"", timestamp); int columnCount = g_args.num_of_CPR; @@ -5254,9 +5254,9 @@ static int64_t generateData(char *recBuf, char **data_type, } else if (strcasecmp(data_type[i % columnCount], "INT") == 0) { pstr += sprintf(pstr, ",%d", rand_int()); } else if (strcasecmp(data_type[i % columnCount], "BIGINT") == 0) { - pstr += sprintf(pstr, ",%" PRId64, rand_bigint()); + pstr += sprintf(pstr, ",%"PRId64"", rand_bigint()); } else if (strcasecmp(data_type[i % columnCount], "TIMESTAMP") == 0) { - pstr += sprintf(pstr, ",%" PRId64, rand_bigint()); + pstr += sprintf(pstr, ",%"PRId64"", rand_bigint()); } else if (strcasecmp(data_type[i % columnCount], "FLOAT") == 0) { pstr += sprintf(pstr, ",%10.4f", rand_float()); } else if (strcasecmp(data_type[i % columnCount], "DOUBLE") == 0) { @@ -8069,7 +8069,7 @@ static void *specifiedTableQuery(void *sarg) { uint64_t currentPrintTime = taosGetTimestampMs(); uint64_t endTs = taosGetTimestampMs(); if (currentPrintTime - lastPrintTime > 30*1000) { - debugPrint("%s() LN%d, endTs=%"PRIu64"ms, startTs=%"PRIu64"ms\n", + debugPrint("%s() LN%d, endTs=%"PRIu64" ms, startTs=%"PRIu64" ms\n", __func__, __LINE__, endTs, startTs); printf("thread[%d] has currently completed queries: %"PRIu64", QPS: %10.6f\n", pThreadInfo->threadID, From ac3766acf383a7c7d2f0e8d608051f568db6c4b0 Mon Sep 17 00:00:00 2001 From: Elias Soong Date: Mon, 23 Aug 2021 19:23:57 +0800 Subject: [PATCH 139/165] [TD-5940] : describe FQDN resolving test mode of taos. 
--- documentation20/cn/11.administrator/docs.md | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/documentation20/cn/11.administrator/docs.md b/documentation20/cn/11.administrator/docs.md index f9061200f9..ff44dd1225 100644 --- a/documentation20/cn/11.administrator/docs.md +++ b/documentation20/cn/11.administrator/docs.md @@ -800,7 +800,7 @@ taos -n sync -P 6042 -h `taos -n speed -h -P 6030 -N 10 -l 10000000 -S TCP` -从 2.1.7.0 版本开始,taos 工具新提供了一个网络速度诊断的模式,可以对一个正在运行中的 taosd 实例或者 `taos -n server` 方式模拟的一个服务端实例,以非压缩传输的方式进行网络测速。这个模式下可供调整的参数如下: +从 2.1.8.0 版本开始,taos 工具新提供了一个网络速度诊断的模式,可以对一个正在运行中的 taosd 实例或者 `taos -n server` 方式模拟的一个服务端实例,以非压缩传输的方式进行网络测速。这个模式下可供调整的参数如下: -n:设为“speed”时,表示对网络速度进行诊断。 -h:所要连接的服务端的 FQDN 或 ip 地址。如果不设置这一项,会使用本机 taos.cfg 文件中 FQDN 参数的设置作为默认值。 @@ -809,6 +809,15 @@ taos -n sync -P 6042 -h -l:单个网络包的大小(单位:字节)。最小值是 1024、最大值是 1024*1024*1024,默认值为 1000。 -S:网络封包的类型。可以是 TCP 或 UDP,默认值为 TCP。 +#### FQDN 解析速度诊断 + +`taos -n fqdn -h ` + +从 2.1.8.0 版本开始,taos 工具新提供了一个 FQDN 解析速度的诊断模式,可以对一个目标 FQDN 地址尝试解析,并记录解析过程中所消耗的时间。这个模式下可供调整的参数如下: + +-n:设为“fqdn”时,表示对 FQDN 解析进行诊断。 +-h:所要解析的目标 FQDN 地址。如果不设置这一项,会使用本机 taos.cfg 文件中 FQDN 参数的设置作为默认值。 + #### 服务端日志 taosd 服务端日志文件标志位 debugflag 默认为 131,在 debug 时往往需要将其提升到 135 或 143 。 From 92b7a21490c6be8f4cfc98d5b45bf58897d4e2e1 Mon Sep 17 00:00:00 2001 From: afwerar <1296468573@qq.com> Date: Tue, 24 Aug 2021 00:21:14 +0800 Subject: [PATCH 140/165] [TD-6169]: windows dll client can not quit. --- src/client/src/tscSystem.c | 2 ++ src/util/src/tcache.c | 8 ++++---- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/src/client/src/tscSystem.c b/src/client/src/tscSystem.c index c04765b065..ad29b58660 100644 --- a/src/client/src/tscSystem.c +++ b/src/client/src/tscSystem.c @@ -199,7 +199,9 @@ void taos_init_imp(void) { // In the APIs of other program language, taos_cleanup is not available yet. // So, to make sure taos_cleanup will be invoked to clean up the allocated resource to suppress the valgrind warning. +#if !defined(TD_WINDOWS) atexit(taos_cleanup); +#endif tscDebug("client is initialized successfully"); } diff --git a/src/util/src/tcache.c b/src/util/src/tcache.c index be5584d182..dc1f961725 100644 --- a/src/util/src/tcache.c +++ b/src/util/src/tcache.c @@ -538,8 +538,8 @@ void taosCacheCleanup(SCacheObj *pCacheObj) { // wait for the refresh thread quit before destroying the cache object. // But in the dll, the child thread will be killed before atexit takes effect.So here we only wait for one second. - // while(atomic_load_8(&pCacheObj->deleting)) { - for (int i = 0; i < 60&&atomic_load_8(&pCacheObj->deleting) != 0; i++) { + while(atomic_load_8(&pCacheObj->deleting)) { + // for (int i = 0; i < 60&&atomic_load_8(&pCacheObj->deleting) != 0; i++) { taosMsleep(50); } @@ -721,6 +721,8 @@ void* taosCacheTimedRefresh(void *handle) { continue; } + pthread_mutex_unlock(&guard); + if ((count % pCacheObj->checkTick) != 0) { continue; } @@ -740,8 +742,6 @@ void* taosCacheTimedRefresh(void *handle) { } taosTrashcanEmpty(pCacheObj, false); - - pthread_mutex_unlock(&guard); } } From 8ca78abf3a271ef19492aca3ce87834e66e9c96e Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Tue, 24 Aug 2021 06:51:54 +0800 Subject: [PATCH 141/165] [TD-5852]: taosdemo data generation race. 
(#7532) --- src/kit/taosdemo/taosdemo.c | 30 ++++++++++++++++++------------ 1 file changed, 18 insertions(+), 12 deletions(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 977b51ee46..50f35faa63 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -1336,7 +1336,7 @@ static char *rand_bool_str(){ static int cursor; cursor++; if (cursor > (MAX_PREPARED_RAND - 1)) cursor = 0; - return g_randbool_buff + (cursor * BOOL_BUFF_LEN); + return g_randbool_buff + ((cursor % MAX_PREPARED_RAND) * BOOL_BUFF_LEN); } static int32_t rand_bool(){ @@ -1351,7 +1351,8 @@ static char *rand_tinyint_str() static int cursor; cursor++; if (cursor > (MAX_PREPARED_RAND - 1)) cursor = 0; - return g_randtinyint_buff + (cursor * TINYINT_BUFF_LEN); + return g_randtinyint_buff + + ((cursor % MAX_PREPARED_RAND) * TINYINT_BUFF_LEN); } static int32_t rand_tinyint() @@ -1367,7 +1368,8 @@ static char *rand_smallint_str() static int cursor; cursor++; if (cursor > (MAX_PREPARED_RAND - 1)) cursor = 0; - return g_randsmallint_buff + (cursor * SMALLINT_BUFF_LEN); + return g_randsmallint_buff + + ((cursor % MAX_PREPARED_RAND) * SMALLINT_BUFF_LEN); } static int32_t rand_smallint() @@ -1383,7 +1385,7 @@ static char *rand_int_str() static int cursor; cursor++; if (cursor > (MAX_PREPARED_RAND - 1)) cursor = 0; - return g_randint_buff + (cursor * INT_BUFF_LEN); + return g_randint_buff + ((cursor % MAX_PREPARED_RAND) * INT_BUFF_LEN); } static int32_t rand_int() @@ -1399,7 +1401,8 @@ static char *rand_bigint_str() static int cursor; cursor++; if (cursor > (MAX_PREPARED_RAND - 1)) cursor = 0; - return g_randbigint_buff + (cursor * BIGINT_BUFF_LEN); + return g_randbigint_buff + + ((cursor % MAX_PREPARED_RAND) * BIGINT_BUFF_LEN); } static int64_t rand_bigint() @@ -1415,7 +1418,7 @@ static char *rand_float_str() static int cursor; cursor++; if (cursor > (MAX_PREPARED_RAND - 1)) cursor = 0; - return g_randfloat_buff + (cursor * FLOAT_BUFF_LEN); + return g_randfloat_buff + ((cursor % MAX_PREPARED_RAND) * FLOAT_BUFF_LEN); } @@ -1432,7 +1435,8 @@ static char *demo_current_float_str() static int cursor; cursor++; if (cursor > (MAX_PREPARED_RAND - 1)) cursor = 0; - return g_rand_current_buff + (cursor * FLOAT_BUFF_LEN); + return g_rand_current_buff + + ((cursor % MAX_PREPARED_RAND) * FLOAT_BUFF_LEN); } static float UNUSED_FUNC demo_current_float() @@ -1449,7 +1453,8 @@ static char *demo_voltage_int_str() static int cursor; cursor++; if (cursor > (MAX_PREPARED_RAND - 1)) cursor = 0; - return g_rand_voltage_buff + (cursor * INT_BUFF_LEN); + return g_rand_voltage_buff + + ((cursor % MAX_PREPARED_RAND) * INT_BUFF_LEN); } static int32_t UNUSED_FUNC demo_voltage_int() @@ -1464,7 +1469,7 @@ static char *demo_phase_float_str() { static int cursor; cursor++; if (cursor > (MAX_PREPARED_RAND - 1)) cursor = 0; - return g_rand_phase_buff + (cursor * FLOAT_BUFF_LEN); + return g_rand_phase_buff + ((cursor % MAX_PREPARED_RAND) * FLOAT_BUFF_LEN); } static float UNUSED_FUNC demo_phase_float(){ @@ -5199,7 +5204,8 @@ static int64_t generateStbRowData( "SMALLINT", 8)) { tmp = rand_smallint_str(); tmpLen = strlen(tmp); - tstrncpy(pstr + dataLen, tmp, min(tmpLen + 1, SMALLINT_BUFF_LEN)); + tstrncpy(pstr + dataLen, tmp, + min(tmpLen + 1, SMALLINT_BUFF_LEN)); } else if (0 == strncasecmp(stbInfo->columns[i].dataType, "TINYINT", 7)) { tmp = rand_tinyint_str(); @@ -5212,9 +5218,9 @@ static int64_t generateStbRowData( tstrncpy(pstr + dataLen, tmp, min(tmpLen +1, BOOL_BUFF_LEN)); } else if (0 == 
strncasecmp(stbInfo->columns[i].dataType, "TIMESTAMP", 9)) { - tmp = rand_int_str(); + tmp = rand_bigint_str(); tmpLen = strlen(tmp); - tstrncpy(pstr + dataLen, tmp, min(tmpLen +1, INT_BUFF_LEN)); + tstrncpy(pstr + dataLen, tmp, min(tmpLen +1, BIGINT_BUFF_LEN)); } else { errorPrint( "Not support data type: %s\n", stbInfo->columns[i].dataType); From 5b5da38a10390ca04d1e7693a9c4ce246588adde Mon Sep 17 00:00:00 2001 From: afwerar <1296468573@qq.com> Date: Tue, 24 Aug 2021 09:32:27 +0800 Subject: [PATCH 142/165] [TD-6169]: windows dll client can not quit. --- src/client/src/tscSystem.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/client/src/tscSystem.c b/src/client/src/tscSystem.c index ad29b58660..c04765b065 100644 --- a/src/client/src/tscSystem.c +++ b/src/client/src/tscSystem.c @@ -199,9 +199,7 @@ void taos_init_imp(void) { // In the APIs of other program language, taos_cleanup is not available yet. // So, to make sure taos_cleanup will be invoked to clean up the allocated resource to suppress the valgrind warning. -#if !defined(TD_WINDOWS) atexit(taos_cleanup); -#endif tscDebug("client is initialized successfully"); } From 594a19d5d34fb3af179900c88d56ff3421435c0f Mon Sep 17 00:00:00 2001 From: afwerar <1296468573@qq.com> Date: Tue, 24 Aug 2021 10:28:06 +0800 Subject: [PATCH 143/165] [TD-6169]: windows dll client can not quit. --- src/client/src/tscSystem.c | 3 -- src/connector/python/taos/cinterface.py | 7 ++- src/os/src/detail/osFile.c | 2 +- src/util/src/tcache.c | 4 +- ...o-Run-Test-And-How-To-Add-New-Test-Case.md | 6 +++ tests/pytest/cluster/clusterSetup.py | 4 +- tests/pytest/dockerCluster/basic.py | 4 +- tests/pytest/insert/binary.py | 5 +- tests/pytest/query/queryWithTaosdKilled.py | 3 +- tests/pytest/test.py | 2 +- tests/pytest/util/dnodes-default.py | 39 ++++++++------- tests/pytest/util/dnodes-no-random-fail.py | 39 ++++++++------- tests/pytest/util/dnodes-random-fail.py | 39 ++++++++------- tests/pytest/util/dnodes.py | 50 +++++++++++-------- tests/pytest/wal/addOldWalTest.py | 2 +- 15 files changed, 123 insertions(+), 86 deletions(-) diff --git a/src/client/src/tscSystem.c b/src/client/src/tscSystem.c index 8c8afc8d88..c04765b065 100644 --- a/src/client/src/tscSystem.c +++ b/src/client/src/tscSystem.c @@ -199,10 +199,7 @@ void taos_init_imp(void) { // In the APIs of other program language, taos_cleanup is not available yet. // So, to make sure taos_cleanup will be invoked to clean up the allocated resource to suppress the valgrind warning. - // But in the dll, the child thread will be killed before atexit takes effect.So taos_cleanup is not necessary. 
-#if !defined(TD_WINDOWS) atexit(taos_cleanup); -#endif tscDebug("client is initialized successfully"); } diff --git a/src/connector/python/taos/cinterface.py b/src/connector/python/taos/cinterface.py index 51e9a8667d..9bb1494f4e 100644 --- a/src/connector/python/taos/cinterface.py +++ b/src/connector/python/taos/cinterface.py @@ -3,6 +3,7 @@ import ctypes import platform import sys +import os from ctypes import * try: from typing import Any @@ -37,7 +38,11 @@ def _load_taos_darwin(): def _load_taos_windows(): - return ctypes.windll.LoadLibrary("taos") + if os.path.exists("c:\\Windows\\System32\\taos.dll"): + return ctypes.windll.LoadLibrary("taos") + else: + print("Please copy the \"C:\\TDengine\\driver\\taos.dll\" file to the \"C:\\windows\\system32\" directory.") + return ctypes.windll.LoadLibrary("C:\\TDengine\\driver\\taos.dll") def _load_taos(): diff --git a/src/os/src/detail/osFile.c b/src/os/src/detail/osFile.c index cc12968c72..57e0765750 100644 --- a/src/os/src/detail/osFile.c +++ b/src/os/src/detail/osFile.c @@ -368,7 +368,7 @@ int32_t taosFsync(FileFd fd) { HANDLE h = (HANDLE)_get_osfhandle(fd); - return FlushFileBuffers(h); + return !FlushFileBuffers(h); } int32_t taosRename(char *oldName, char *newName) { diff --git a/src/util/src/tcache.c b/src/util/src/tcache.c index 69b3741e13..dc1f961725 100644 --- a/src/util/src/tcache.c +++ b/src/util/src/tcache.c @@ -537,7 +537,9 @@ void taosCacheCleanup(SCacheObj *pCacheObj) { pCacheObj->deleting = 1; // wait for the refresh thread quit before destroying the cache object. - while(atomic_load_8(&pCacheObj->deleting) != 0) { + // But in the dll, the child thread will be killed before atexit takes effect.So here we only wait for one second. + while(atomic_load_8(&pCacheObj->deleting)) { + // for (int i = 0; i < 60&&atomic_load_8(&pCacheObj->deleting) != 0; i++) { taosMsleep(50); } diff --git a/tests/How-To-Run-Test-And-How-To-Add-New-Test-Case.md b/tests/How-To-Run-Test-And-How-To-Add-New-Test-Case.md index 6845d091b5..c5229aa0e5 100644 --- a/tests/How-To-Run-Test-And-How-To-Add-New-Test-Case.md +++ b/tests/How-To-Run-Test-And-How-To-Add-New-Test-Case.md @@ -49,6 +49,12 @@ > before the script line. Then you can look for the core file in > \/tests/pytest after the program crash. +> Note3: if you are on the windows platform, you can install the git client, +> and then add %GitPath%\usr\bin;%GitPath%\mingw64\bin;%GitPath%\bin to the system +> environment variable Path. Note that %GitPath% is the installation path of git, +> such as C:\Program Files\Git. Then you can run the test script using the +> "sh smoketest.sh" command. 
+ ### How to add a new test case diff --git a/tests/pytest/cluster/clusterSetup.py b/tests/pytest/cluster/clusterSetup.py index 87414303f8..809e0e9d25 100644 --- a/tests/pytest/cluster/clusterSetup.py +++ b/tests/pytest/cluster/clusterSetup.py @@ -92,13 +92,13 @@ class Node: self.conn.run("yes|./install.sh") def configTaosd(self, taosConfigKey, taosConfigValue): - self.conn.run("sudo echo '%s %s' >> %s" % (taosConfigKey, taosConfigValue, "/etc/taos/taos.cfg")) + self.conn.run("sudo echo %s %s >> %s" % (taosConfigKey, taosConfigValue, "/etc/taos/taos.cfg")) def removeTaosConfig(self, taosConfigKey, taosConfigValue): self.conn.run("sudo sed -in-place -e '/%s %s/d' %s" % (taosConfigKey, taosConfigValue, "/etc/taos/taos.cfg")) def configHosts(self, ip, name): - self.conn.run("echo '%s %s' >> %s" % (ip, name, '/etc/hosts')) + self.conn.run("echo %s %s >> %s" % (ip, name, '/etc/hosts')) def removeData(self): try: diff --git a/tests/pytest/dockerCluster/basic.py b/tests/pytest/dockerCluster/basic.py index 871d69790d..5188aa4a80 100644 --- a/tests/pytest/dockerCluster/basic.py +++ b/tests/pytest/dockerCluster/basic.py @@ -113,7 +113,7 @@ class BuildDockerCluser: def cfg(self, option, value, nodeIndex): cfgPath = "%s/node%d/cfg/taos.cfg" % (self.dockerDir, nodeIndex) - cmd = "echo '%s %s' >> %s" % (option, value, cfgPath) + cmd = "echo %s %s >> %s" % (option, value, cfgPath) self.execCmd(cmd) def updateLocalhosts(self): @@ -122,7 +122,7 @@ class BuildDockerCluser: print(result) if result is None or result.isspace(): print("==========") - cmd = "echo '172.27.0.7 tdnode1' >> /etc/hosts" + cmd = "echo 172.27.0.7 tdnode1 >> /etc/hosts" display = "echo %s" % cmd self.execCmd(display) self.execCmd(cmd) diff --git a/tests/pytest/insert/binary.py b/tests/pytest/insert/binary.py index 0cbb7876c6..44e42bec03 100644 --- a/tests/pytest/insert/binary.py +++ b/tests/pytest/insert/binary.py @@ -53,8 +53,9 @@ class TDTestCase: tdLog.info("tdSql.checkData(0, 0, '34567')") tdSql.checkData(0, 0, '34567') tdLog.info("insert into tb values (now+4a, \"'';\")") - config_dir = subprocess.check_output(str("ps -ef |grep dnode1|grep -v grep |awk '{print $NF}'"), stderr=subprocess.STDOUT, shell=True).decode('utf-8').replace('\n', '') - result = ''.join(os.popen(r"""taos -s "insert into db.tb values (now+4a, \"'';\")" -c %s"""%(config_dir)).readlines()) + # config_dir = subprocess.check_output(str("ps -ef |grep dnode1|grep -v grep |awk '{print $NF}'"), stderr=subprocess.STDOUT, shell=True).decode('utf-8').replace('\n', '') + # result = ''.join(os.popen(r"""taos -s "insert into db.tb values (now+4a, \"'';\")" -c %s"""%(config_dir)).readlines()) + result = ''.join(os.popen(r"""taos -s "insert into db.tb values (now+4a, \"'';\")" -c %s"""%(tdSql.cursor._connection._config)).readlines()) if "Query OK" not in result: tdLog.exit("err:insert '';") tdLog.info('drop database db') tdSql.execute('drop database db') diff --git a/tests/pytest/query/queryWithTaosdKilled.py b/tests/pytest/query/queryWithTaosdKilled.py index 28f9b87636..fb1384093c 100644 --- a/tests/pytest/query/queryWithTaosdKilled.py +++ b/tests/pytest/query/queryWithTaosdKilled.py @@ -34,7 +34,8 @@ class TDTestCase: path = tdDnodes.dnodes[1].getDnodeRootDir(1) print(path) tdLog.info("sudo mkdir -p %s/data/vnode/vnode2/wal/old" % path) - os.system("sudo mkdir -p %s/data/vnode/vnode2/wal/old" % path) + # os.system("sudo mkdir -p %s/data/vnode/vnode2/wal/old" % path) + os.makedirs("%s/data/vnode/vnode2/wal/old" % path, exist_ok=True) # like "mkdir -p" def run(self): # 
os.system("rm -rf %s/ " % tdDnodes.getDnodesRootDir()) diff --git a/tests/pytest/test.py b/tests/pytest/test.py index 97dca6be18..e0a9e339a9 100644 --- a/tests/pytest/test.py +++ b/tests/pytest/test.py @@ -55,7 +55,7 @@ if __name__ == "__main__": restart = True if key in ['-f', '--file']: - fileName = value + fileName = value.replace('\r', "") if key in ['-p', '--path']: deployPath = value diff --git a/tests/pytest/util/dnodes-default.py b/tests/pytest/util/dnodes-default.py index 085e083149..43a92431ba 100644 --- a/tests/pytest/util/dnodes-default.py +++ b/tests/pytest/util/dnodes-default.py @@ -60,7 +60,7 @@ class TDSimClient: self.cfgDict.update({option: value}) def cfg(self, option, value): - cmd = "echo '%s %s' >> %s" % (option, value, self.cfgPath) + cmd = "echo %s %s >> %s" % (option, value, self.cfgPath) if os.system(cmd) != 0: tdLog.exit(cmd) @@ -73,17 +73,19 @@ class TDSimClient: if os.system(cmd) != 0: tdLog.exit(cmd) - cmd = "mkdir -p " + self.logDir - if os.system(cmd) != 0: - tdLog.exit(cmd) + os.makedirs(self.logDir, exist_ok=True) # like "mkdir -p" + # cmd = "mkdir -p " + self.logDir + # if os.system(cmd) != 0: + # tdLog.exit(cmd) cmd = "rm -rf " + self.cfgDir if os.system(cmd) != 0: tdLog.exit(cmd) - cmd = "mkdir -p " + self.cfgDir - if os.system(cmd) != 0: - tdLog.exit(cmd) + os.makedirs(self.cfgDir, exist_ok=True) # like "mkdir -p" + # cmd = "mkdir -p " + self.cfgDir + # if os.system(cmd) != 0: + # tdLog.exit(cmd) cmd = "touch " + self.cfgPath if os.system(cmd) != 0: @@ -149,17 +151,20 @@ class TDDnode: if os.system(cmd) != 0: tdLog.exit(cmd) - cmd = "mkdir -p " + self.dataDir - if os.system(cmd) != 0: - tdLog.exit(cmd) + os.makedirs(self.dataDir, exist_ok=True) # like "mkdir -p" + # cmd = "mkdir -p " + self.dataDir + # if os.system(cmd) != 0: + # tdLog.exit(cmd) - cmd = "mkdir -p " + self.logDir - if os.system(cmd) != 0: - tdLog.exit(cmd) + os.makedirs(self.logDir, exist_ok=True) # like "mkdir -p" + # cmd = "mkdir -p " + self.logDir + # if os.system(cmd) != 0: + # tdLog.exit(cmd) - cmd = "mkdir -p " + self.cfgDir - if os.system(cmd) != 0: - tdLog.exit(cmd) + os.makedirs(self.cfgDir, exist_ok=True) # like "mkdir -p" + # cmd = "mkdir -p " + self.cfgDir + # if os.system(cmd) != 0: + # tdLog.exit(cmd) cmd = "touch " + self.cfgPath if os.system(cmd) != 0: @@ -320,7 +325,7 @@ class TDDnode: tdLog.exit(cmd) def cfg(self, option, value): - cmd = "echo '%s %s' >> %s" % (option, value, self.cfgPath) + cmd = "echo %s %s >> %s" % (option, value, self.cfgPath) if os.system(cmd) != 0: tdLog.exit(cmd) diff --git a/tests/pytest/util/dnodes-no-random-fail.py b/tests/pytest/util/dnodes-no-random-fail.py index 2627575e61..3bcea493dc 100644 --- a/tests/pytest/util/dnodes-no-random-fail.py +++ b/tests/pytest/util/dnodes-no-random-fail.py @@ -58,7 +58,7 @@ class TDSimClient: self.cfgDict.update({option: value}) def cfg(self, option, value): - cmd = "echo '%s %s' >> %s" % (option, value, self.cfgPath) + cmd = "echo %s %s >> %s" % (option, value, self.cfgPath) if os.system(cmd) != 0: tdLog.exit(cmd) @@ -71,17 +71,19 @@ class TDSimClient: if os.system(cmd) != 0: tdLog.exit(cmd) - cmd = "mkdir -p " + self.logDir - if os.system(cmd) != 0: - tdLog.exit(cmd) + os.makedirs(self.logDir, exist_ok=True) # like "mkdir -p" + # cmd = "mkdir -p " + self.logDir + # if os.system(cmd) != 0: + # tdLog.exit(cmd) cmd = "rm -rf " + self.cfgDir if os.system(cmd) != 0: tdLog.exit(cmd) - cmd = "mkdir -p " + self.cfgDir - if os.system(cmd) != 0: - tdLog.exit(cmd) + os.makedirs(self.cfgDir, exist_ok=True) # like 
"mkdir -p" + # cmd = "mkdir -p " + self.cfgDir + # if os.system(cmd) != 0: + # tdLog.exit(cmd) cmd = "touch " + self.cfgPath if os.system(cmd) != 0: @@ -147,17 +149,20 @@ class TDDnode: if os.system(cmd) != 0: tdLog.exit(cmd) - cmd = "mkdir -p " + self.dataDir - if os.system(cmd) != 0: - tdLog.exit(cmd) + os.makedirs(self.dataDir, exist_ok=True) # like "mkdir -p" + # cmd = "mkdir -p " + self.dataDir + # if os.system(cmd) != 0: + # tdLog.exit(cmd) - cmd = "mkdir -p " + self.logDir - if os.system(cmd) != 0: - tdLog.exit(cmd) + os.makedirs(self.logDir, exist_ok=True) # like "mkdir -p" + # cmd = "mkdir -p " + self.logDir + # if os.system(cmd) != 0: + # tdLog.exit(cmd) - cmd = "mkdir -p " + self.cfgDir - if os.system(cmd) != 0: - tdLog.exit(cmd) + os.makedirs(self.cfgDir, exist_ok=True) # like "mkdir -p" + # cmd = "mkdir -p " + self.cfgDir + # if os.system(cmd) != 0: + # tdLog.exit(cmd) cmd = "touch " + self.cfgPath if os.system(cmd) != 0: @@ -318,7 +323,7 @@ class TDDnode: tdLog.exit(cmd) def cfg(self, option, value): - cmd = "echo '%s %s' >> %s" % (option, value, self.cfgPath) + cmd = "echo %s %s >> %s" % (option, value, self.cfgPath) if os.system(cmd) != 0: tdLog.exit(cmd) diff --git a/tests/pytest/util/dnodes-random-fail.py b/tests/pytest/util/dnodes-random-fail.py index 4f4cdcc0d0..7d99980e67 100644 --- a/tests/pytest/util/dnodes-random-fail.py +++ b/tests/pytest/util/dnodes-random-fail.py @@ -58,7 +58,7 @@ class TDSimClient: self.cfgDict.update({option: value}) def cfg(self, option, value): - cmd = "echo '%s %s' >> %s" % (option, value, self.cfgPath) + cmd = "echo %s %s >> %s" % (option, value, self.cfgPath) if os.system(cmd) != 0: tdLog.exit(cmd) @@ -71,17 +71,19 @@ class TDSimClient: if os.system(cmd) != 0: tdLog.exit(cmd) - cmd = "mkdir -p " + self.logDir - if os.system(cmd) != 0: - tdLog.exit(cmd) + os.makedirs(self.logDir, exist_ok=True) # like "mkdir -p" + # cmd = "mkdir -p " + self.logDir + # if os.system(cmd) != 0: + # tdLog.exit(cmd) cmd = "rm -rf " + self.cfgDir if os.system(cmd) != 0: tdLog.exit(cmd) - cmd = "mkdir -p " + self.cfgDir - if os.system(cmd) != 0: - tdLog.exit(cmd) + os.makedirs(self.cfgDir, exist_ok=True) # like "mkdir -p" + # cmd = "mkdir -p " + self.cfgDir + # if os.system(cmd) != 0: + # tdLog.exit(cmd) cmd = "touch " + self.cfgPath if os.system(cmd) != 0: @@ -147,17 +149,20 @@ class TDDnode: if os.system(cmd) != 0: tdLog.exit(cmd) - cmd = "mkdir -p " + self.dataDir - if os.system(cmd) != 0: - tdLog.exit(cmd) + os.makedirs(self.dataDir, exist_ok=True) # like "mkdir -p" + # cmd = "mkdir -p " + self.dataDir + # if os.system(cmd) != 0: + # tdLog.exit(cmd) - cmd = "mkdir -p " + self.logDir - if os.system(cmd) != 0: - tdLog.exit(cmd) + os.makedirs(self.logDir, exist_ok=True) # like "mkdir -p" + # cmd = "mkdir -p " + self.logDir + # if os.system(cmd) != 0: + # tdLog.exit(cmd) - cmd = "mkdir -p " + self.cfgDir - if os.system(cmd) != 0: - tdLog.exit(cmd) + os.makedirs(self.cfgDir, exist_ok=True) # like "mkdir -p" + # cmd = "mkdir -p " + self.cfgDir + # if os.system(cmd) != 0: + # tdLog.exit(cmd) cmd = "touch " + self.cfgPath if os.system(cmd) != 0: @@ -318,7 +323,7 @@ class TDDnode: tdLog.exit(cmd) def cfg(self, option, value): - cmd = "echo '%s %s' >> %s" % (option, value, self.cfgPath) + cmd = "echo %s %s >> %s" % (option, value, self.cfgPath) if os.system(cmd) != 0: tdLog.exit(cmd) diff --git a/tests/pytest/util/dnodes.py b/tests/pytest/util/dnodes.py index 0f4919ba96..589bde7474 100644 --- a/tests/pytest/util/dnodes.py +++ b/tests/pytest/util/dnodes.py @@ -14,6 +14,7 
@@ import sys import os import os.path +import platform import subprocess from time import sleep from util.log import * @@ -61,7 +62,7 @@ class TDSimClient: self.cfgDict.update({option: value}) def cfg(self, option, value): - cmd = "echo '%s %s' >> %s" % (option, value, self.cfgPath) + cmd = "echo %s %s >> %s" % (option, value, self.cfgPath) if os.system(cmd) != 0: tdLog.exit(cmd) @@ -74,17 +75,19 @@ class TDSimClient: if os.system(cmd) != 0: tdLog.exit(cmd) - cmd = "mkdir -p " + self.logDir - if os.system(cmd) != 0: - tdLog.exit(cmd) + os.makedirs(self.logDir, exist_ok=True) # like "mkdir -p" + # cmd = "mkdir -p " + self.logDir + # if os.system(cmd) != 0: + # tdLog.exit(cmd) cmd = "rm -rf " + self.cfgDir if os.system(cmd) != 0: tdLog.exit(cmd) - cmd = "mkdir -p " + self.cfgDir - if os.system(cmd) != 0: - tdLog.exit(cmd) + os.makedirs(self.cfgDir, exist_ok=True) # like "mkdir -p" + # cmd = "mkdir -p " + self.cfgDir + # if os.system(cmd) != 0: + # tdLog.exit(cmd) cmd = "touch " + self.cfgPath if os.system(cmd) != 0: @@ -185,17 +188,20 @@ class TDDnode: if os.system(cmd) != 0: tdLog.exit(cmd) - cmd = "mkdir -p " + self.dataDir - if os.system(cmd) != 0: - tdLog.exit(cmd) + os.makedirs(self.dataDir, exist_ok=True) # like "mkdir -p" + # cmd = "mkdir -p " + self.dataDir + # if os.system(cmd) != 0: + # tdLog.exit(cmd) - cmd = "mkdir -p " + self.logDir - if os.system(cmd) != 0: - tdLog.exit(cmd) + os.makedirs(self.logDir, exist_ok=True) # like "mkdir -p" + # cmd = "mkdir -p " + self.logDir + # if os.system(cmd) != 0: + # tdLog.exit(cmd) - cmd = "mkdir -p " + self.cfgDir - if os.system(cmd) != 0: - tdLog.exit(cmd) + os.makedirs(self.cfgDir, exist_ok=True) # like "mkdir -p" + # cmd = "mkdir -p " + self.cfgDir + # if os.system(cmd) != 0: + # tdLog.exit(cmd) cmd = "touch " + self.cfgPath if os.system(cmd) != 0: @@ -246,7 +252,7 @@ class TDDnode: projPath = selfPath[:selfPath.find("tests")] for root, dirs, files in os.walk(projPath): - if ("taosd" in files): + if (("taosd.exe") in files) or (("taosd") in files): rootRealPath = os.path.dirname(os.path.realpath(root)) if ("packaging" not in rootRealPath): buildPath = root[:len(root)-len("/build/bin")] @@ -267,7 +273,11 @@ class TDDnode: tdLog.exit("dnode:%d is not deployed" % (self.index)) if self.valgrind == 0: - cmd = "nohup %s -c %s > /dev/null 2>&1 & " % ( + if platform.system()=="Windows": + cmd = "mintty %s -c %s" % ( + binPath, self.cfgDir) + else: + cmd = "nohup %s -c %s > /dev/null 2>&1 & " % ( binPath, self.cfgDir) else: valgrindCmdline = "valgrind --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all -v --workaround-gcc296-bugs=yes" @@ -292,7 +302,7 @@ class TDDnode: i += 1 if i>50: break - popen = subprocess.Popen('tail -f ' + logFile, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) + popen = subprocess.Popen('tail -f -n +0 ' + logFile, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) pid = popen.pid # print('Popen.pid:' + str(pid)) timeout = time.time() + 60*2 @@ -404,7 +414,7 @@ class TDDnode: tdLog.exit(cmd) def cfg(self, option, value): - cmd = "echo '%s %s' >> %s" % (option, value, self.cfgPath) + cmd = "echo %s %s >> %s" % (option, value, self.cfgPath) if os.system(cmd) != 0: tdLog.exit(cmd) diff --git a/tests/pytest/wal/addOldWalTest.py b/tests/pytest/wal/addOldWalTest.py index 2f4dcd5ce8..36056d1bc2 100644 --- a/tests/pytest/wal/addOldWalTest.py +++ b/tests/pytest/wal/addOldWalTest.py @@ -31,7 +31,7 @@ class TDTestCase: def createOldDirAndAddWal(self): oldDir = 
tdDnodes.getDnodesRootDir() + "dnode1/data/vnode/vnode2/wal/old" - os.system("sudo echo 'test' >> %s/wal" % oldDir) + os.system("sudo echo test >> %s/wal" % oldDir) def run(self): From 6456cdba3f68cc478f80c0e4bf851b1355cf5a7a Mon Sep 17 00:00:00 2001 From: zhaoyanggh Date: Tue, 24 Aug 2021 10:43:07 +0800 Subject: [PATCH 144/165] [TD-6009]restful support url set database test[ci skip] --- tests/script/http/httpTest | Bin 0 -> 23008 bytes tests/script/http/httpTest.c | 128 +++++++++++++++++++++++++++++++++++ tests/script/http/makefile | 2 + 3 files changed, 130 insertions(+) create mode 100755 tests/script/http/httpTest create mode 100644 tests/script/http/httpTest.c create mode 100644 tests/script/http/makefile diff --git a/tests/script/http/httpTest b/tests/script/http/httpTest new file mode 100755 index 0000000000000000000000000000000000000000..68aab9aa63bbf5c64593e591aaa6eb4c6621a7fb GIT binary patch literal 23008 zcmeHvdvsLSdGDSX9qH&jLc&OZVQ|0|!Fpl9JdCZ^z{nOLgJgqWhtbSPnjpq?dxrviY?uxHfjBTiGMAvF^ac-AGnwuMvh88)ti{(d1(pdd{ z`!PCmG;(t9>K}LA#Rko{zvuq;xA)m+pMCZ|gWmQnPKSf3zF&kkFqyn<8)rCpcI;Qvi|9a8Vm4Z@Bc1_hZZ zsLHzwdK8!6HK}IaDa)Iy9WKjwDoE{3sjffVy>3lif3T)M97!IiIk0|R&AK(UiD>O= z&L_X94C+%`w|7xi>sNeX-N5*Sm zDBiiPr9T=8b^5#eLyYsXavq9@%@B)+{6Q8o`v@8V{DP*?PesEK7Klb7p@7K}(ZIft z$wCLhChIe!kpxS`;^Bzd!we%~`UCrnK;J&2#~P)NcIMw2G%i6zYh$_O0r8$IEOzd!siYGq(d2vE6k(-`o> z&7OED#6Xn3)96r7cz|;2*w*&8<`!dh?dsaK>HHd)=^cnhq#1*P>XcW#3%MNp(Lc)5 zKjJD(j17Y7z=%+QzDQ$`2Y<Tt(FU3w$MvCMf_Z`(66)5uUhC; z7P{)UG#=AMYIz?Hf4hx;eXO(ta=zJa_8G9{s>;0+ELZ`A-idpC? zro=m7p`#(w>5zpkr#77TgoR$1mS*gTg^mVKr=u2nNt%jzo`tUNnZ!PBp_f_YPgv;I zab(;=mtBNwowm^FeyGwp3w^#K0-m?f7g*@mvezPTEdtjfa4iD=xe<7;bjh!Lhu_kD z!`ct(81oICHl5SweTQGwU*QE!uPp$1YI;Q>KUEQ zrzY=8<8-5(nruqrbc39lY)IoY0hyXymd5EuIW<|9#_0w*HR(v>G%=Z)eD@<&U%FAE z{%M?Ukf?teryC>cuW&?fq?V%B%6R;MzgClpzi7jsv*F*c;eTbr|J;T@Wy42o_>c|% zj15oP@Btehvf&Td@Gcv^&4znz_+}fv)`~a0-IjX&L0{^pzQaGe($U#IqTP)a_l*?) z7#+bkviWNO)AQ#+<^3Z-Hz4C1(N+`KaMhfTuKmtU$fIlLPhTuudI+t@FMxRK$f$f9p#C?@2}ta3ic`NmtK5i=AqYkVg=55__pJ!Z(48oDSvxJ+RD zfLRyo$8!+qS3u~Q7`h5jTyS>iSAfhn-jp=^qVe#XJl;$uK0Ad4y-6tuYFVV2~GL3bYw}74+ItONZ3`W}f(t&xr#)Rnha>Z~YlKct3E4^kmt zphC|6n#4!Q+%R|JBn?r%)OTU=OI(&a^!p_HB`(XWOtJeqp%l9j3WCrUijQUrb_4zS z#^IrVAlHtbeO$YOIH48h|NTl7H8Mn~FZBl5`~pM|U!6{pfAgu#8@Z_w5|RGVVx=hN zVenGi>fsm3HXHo1%y6H7f=m7Kzx)bAuW#6UHRxL<`n#!Jc0lp(UrAp#hu`Y3Uk&38 zt8b~T$3I`nd$wo7``rdM;T``Xo-2Ig@le97OMJQ?tQXIEUxPE=^Ap|+b;0h2v)=zj z=#M76mw>K5>zyR@%?a;R-GKu^*!i#<74CX*I63Yc@t(y%w3WsqRQwj+*j7%vHa=tuE=LyLX4c+zTjJ3~6PA_z3 z#Xa>)Tio~hQlB~gaj}1gVn(rlbyn;zo%O!WUZURcGF?uWpM-G)`<#VzP>{}}{f~on z0r0|bqn<+GH|N1oF}RhJ6mdE*m+_v%mHQFh9n{!oX0XqL{kE0e#o60H@47Ucd=<_5 z4z*`5`rV9C()vfM*3#@+4bYnKo}z2~=j(>MPQe1Hp0TR_>?&DCfx}(rECqfDi-*0Z z9K6s*_Xh8&M_!unp5~^ua$=0CN3|T@hT-0Oz6tlMk+maM+fQV-{UvVuw6tBc!F&3V zZ@|Jis%YpJyrSnRzX$nQjsAHMGcLB6t-&X}Uk$>|3GZ}*}-S{}U(A3$K`u8s1NX`8q z?`mK98=Avku6rMKJM$ZtxwPSja(>gEnrctI)0+C%rs;|w`wqY2@NM`}@=coG-}gY% z{Y?)vJ=kQt($iCGEstjYG_3OHg7lLBcjwyAwFq2`!2d@Pp!WonI(F>p^wh0Lczm6m z9d!-04K%|BZKG#JV&jS+F!emp9*XpueW3S)3i^t~%s=_U#uJLiqw$$nG7^_>btJ~$ zvB*5V2$I=gJR0)^{ieU$p9p!vJs!OLNtg)_=4Irkcw3Yq5Q&;rp}7sL{vGs5hLS7? zZuEydWA z0p`CuJ$(hR3AZ`7gR#H;V0xO~XMGbl;SRtl%zI*h9!zwO03HFn07yx_dvH9wlQ|B! 
zDi#d0zOb1gYJKi9w%}pMYk0S}8V2+-WP{9T-Lt)ETU$#ehwo|y3D2O0*C#FbT6Z#I zYx|Dorgme;mMy!yokk};7m-!^5Q`B=B)Ql8TP=-^)=#pi8LQa(n#udPuiJ=k#AM3z zC!S1_5fU?d{;3%o%l7e_8SWbKnVT5`eLIHll+GlnM|Ya;XL4qJr6!ZDD$PHRLvhb! zXFThPkL6?vpkLs5MSQ&mG0ymu4S0#D(dUl@`8R;D(ts7Hh(XVPcp#IX=gBme;ZEka zdNKtq%}HJ}vEE^HClb;Z{`jxV(MN z`kAB5F9&5>5RaglG*+89IDkhqKr=2NK3F7jcmxl{W2_e2y+gHF*Q~{YNep|*4_Zjw z$uPcDhSy+1s;RjRt1P{Yi}zs_h1CWRMo@%+c&g@{{aE_Ix=IFOfEN$-`$<9OV*Mto zfYq9z13+n9fOM4i_}WmP90~fcE|JEB4KXAL4V6bxSfYS2p^i3X zwHS*AFvNfs>Q44T(jV!?z$MXeq$jHK{_gI0Xurbv!j+Ul1hb65|7Rq+?cr93Ihsi7 zc~e3A%%5bf=@UTk-Y2cE?t2P)tis6KAN$sUX4T(~`v)Zj?bBOzpBGY$(fd@ZzIrZD z(4zz;wCmF@0Bj|Xs``(~@)fL>dU!j*lihzmsCfI3uD^P2Rxl#(5 zW+TW@OqITRu2=Aa6jbFaeN}(jUP1cJQcgYpE2#Eokv{3z^`8NaVxsJ0vL%v)a#|;e zqO3H6%rX2~^*vHg!A4uaZRXkE*!0!9gMvz4m8ozAPuTR;`hx9Z z3I>&+g!b}(Xwz5gEDAnhYd?GWm!OXqr~DeEBGh_}`ktm;Uf~M93!PGnzS`gDsUd4K z{Z(KyxYGX(7}R&=u#;ZjQQt>ZfuZq(lG0P{_;t{zifR4ZNebW1ln*BA`V-U{ePt&X z%2s{WC?)F!ROQ(96;A61WJ{H=o*&%5qkfqzUy3WUOzE#mQ$&V*rKBM3PgQxr5x<+H zLAzbM?8-Ux_esUORiLDCyFP7Av(rfQ%zB~l6C2ZpvX(jY|8k?yzflENptH)_A@%L` zJhxdWykWawRQXn@u02@<2V>0wyDaN}1JJejQ{hnKHw`Z~7rEjSa&epXy;+kgSJ7xW S)+WMPpWPuO?zJh{Q1&0v)9gzC literal 0 HcmV?d00001 diff --git a/tests/script/http/httpTest.c b/tests/script/http/httpTest.c new file mode 100644 index 0000000000..36ce6b95ba --- /dev/null +++ b/tests/script/http/httpTest.c @@ -0,0 +1,128 @@ +#include +#include +#include +#include +#include +#include +#include +#include + +#define MAXLINE 1024 + +typedef struct { + pthread_t pid; + int threadId; + int rows; + int tables; +} ThreadObj; + +void post(char *ip,int port,char *page,char *msg) { + int sockfd,n; + char recvline[MAXLINE]; + struct sockaddr_in servaddr; + char content[4096]; + char content_page[50]; + sprintf(content_page,"POST /%s HTTP/1.1\r\n",page); + char content_host[50]; + sprintf(content_host,"HOST: %s:%d\r\n",ip,port); + char content_type[] = "Content-Type: text/plain\r\n"; + char Auth[] = "Authorization: Basic cm9vdDp0YW9zZGF0YQ==\r\n"; + char content_len[50]; + sprintf(content_len,"Content-Length: %ld\r\n\r\n",strlen(msg)); + sprintf(content,"%s%s%s%s%s%s",content_page,content_host,content_type,Auth,content_len,msg); + if((sockfd = socket(AF_INET,SOCK_STREAM,0)) < 0) { + printf("socket error\n"); + } + bzero(&servaddr,sizeof(servaddr)); + servaddr.sin_family = AF_INET; + servaddr.sin_port = htons(port); + if(inet_pton(AF_INET,ip,&servaddr.sin_addr) <= 0) { + printf("inet_pton error\n"); + } + if(connect(sockfd,(struct sockaddr *)&servaddr,sizeof(servaddr)) < 0) { + printf("connect error\n"); + } + write(sockfd,content,strlen(content)); + printf("%s\n", content); + while((n = read(sockfd,recvline,MAXLINE)) > 0) { + recvline[n] = 0; + if(fputs(recvline,stdout) == EOF) { + printf("fputs error\n"); + } + } + if(n < 0) { + printf("read error\n"); + } +} + +void singleThread() { + char ip[] = "127.0.0.1"; + int port = 6041; + char page[] = "rest/sql"; + char page1[] = "rest/sql/db1"; + char page2[] = "rest/sql/db2"; + char nonexit[] = "rest/sql/xxdb"; + + post(ip,port,page,"drop database if exists db1"); + post(ip,port,page,"create database if not exists db1"); + post(ip,port,page,"drop database if exists db2"); + post(ip,port,page,"create database if not exists db2"); + post(ip,port,page1,"create table t11 (ts timestamp, c1 int)"); + post(ip,port,page2,"create table t21 (ts timestamp, c1 int)"); + post(ip,port,page1,"insert into t11 values (now, 1)"); + post(ip,port,page2,"insert into t21 values (now, 2)"); + post(ip,port,nonexit,"create database if not exists db3"); +} + +void 
execute(void *params) { + char ip[] = "127.0.0.1"; + int port = 6041; + char page[] = "rest/sql"; + char *unique = calloc(1, 1024); + char *sql = calloc(1, 1024); + ThreadObj *pThread = (ThreadObj *)params; + printf("Thread %d started\n", pThread->threadId); + sprintf(unique, "rest/sql/db%d",pThread->threadId); + sprintf(sql, "drop database if exists db%d", pThread->threadId); + post(ip,port,page, sql); + sprintf(sql, "create database if not exists db%d", pThread->threadId); + post(ip,port,page, sql); + for (int i = 0; i < pThread->tables; i++) { + sprintf(sql, "create table t%d (ts timestamp, c1 int)", i); + post(ip,port,unique, sql); + } + for (int i = 0; i < pThread->rows; i++) { + sprintf(sql, "insert into t%d values (now + %ds, %d)", pThread->threadId, i, pThread->threadId); + post(ip,port,unique, sql); + } + free(unique); + free(sql); + return; +} + +void multiThread() { + int numOfThreads = 100; + int numOfTables = 100; + int numOfRows = 1; + ThreadObj *threads = calloc((size_t)numOfThreads, sizeof(ThreadObj)); + for (int i = 0; i < numOfThreads; i++) { + ThreadObj *pthread = threads + i; + pthread_attr_t thattr; + pthread->threadId = i + 1; + pthread->rows = numOfRows; + pthread->tables = numOfTables; + pthread_attr_init(&thattr); + pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_JOINABLE); + pthread_create(&pthread->pid, &thattr, (void *(*)(void *))execute, pthread); + } + for (int i = 0; i < numOfThreads; i++) { + pthread_join(threads[i].pid, NULL); + } + free(threads); +} + +int main() { + singleThread(); + multiThread(); + exit(0); +} \ No newline at end of file diff --git a/tests/script/http/makefile b/tests/script/http/makefile new file mode 100644 index 0000000000..d1be683eda --- /dev/null +++ b/tests/script/http/makefile @@ -0,0 +1,2 @@ +all: + gcc -g httpTest.c -o httpTest -lpthread \ No newline at end of file From f0b8a9eb6d1c2d24cfca2abc403f4437e63e7497 Mon Sep 17 00:00:00 2001 From: zhaoyanggh Date: Tue, 24 Aug 2021 10:44:42 +0800 Subject: [PATCH 145/165] remove bin file[ci skip] --- tests/script/http/httpTest | Bin 23008 -> 0 bytes 1 file changed, 0 insertions(+), 0 deletions(-) delete mode 100755 tests/script/http/httpTest diff --git a/tests/script/http/httpTest b/tests/script/http/httpTest deleted file mode 100755 index 68aab9aa63bbf5c64593e591aaa6eb4c6621a7fb..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 23008 zcmeHvdvsLSdGDSX9qH&jLc&OZVQ|0|!Fpl9JdCZ^z{nOLgJgqWhtbSPnjpq?dxrviY?uxHfjBTiGMAvF^ac-AGnwuMvh88)ti{(d1(pdd{ z`!PCmG;(t9>K}LA#Rko{zvuq;xA)m+pMCZ|gWmQnPKSf3zF&kkFqyn<8)rCpcI;Qvi|9a8Vm4Z@Bc1_hZZ zsLHzwdK8!6HK}IaDa)Iy9WKjwDoE{3sjffVy>3lif3T)M97!IiIk0|R&AK(UiD>O= z&L_X94C+%`w|7xi>sNeX-N5*Sm zDBiiPr9T=8b^5#eLyYsXavq9@%@B)+{6Q8o`v@8V{DP*?PesEK7Klb7p@7K}(ZIft z$wCLhChIe!kpxS`;^Bzd!we%~`UCrnK;J&2#~P)NcIMw2G%i6zYh$_O0r8$IEOzd!siYGq(d2vE6k(-`o> z&7OED#6Xn3)96r7cz|;2*w*&8<`!dh?dsaK>HHd)=^cnhq#1*P>XcW#3%MNp(Lc)5 zKjJD(j17Y7z=%+QzDQ$`2Y<Tt(FU3w$MvCMf_Z`(66)5uUhC; z7P{)UG#=AMYIz?Hf4hx;eXO(ta=zJa_8G9{s>;0+ELZ`A-idpC? zro=m7p`#(w>5zpkr#77TgoR$1mS*gTg^mVKr=u2nNt%jzo`tUNnZ!PBp_f_YPgv;I zab(;=mtBNwowm^FeyGwp3w^#K0-m?f7g*@mvezPTEdtjfa4iD=xe<7;bjh!Lhu_kD z!`ct(81oICHl5SweTQGwU*QE!uPp$1YI;Q>KUEQ zrzY=8<8-5(nruqrbc39lY)IoY0hyXymd5EuIW<|9#_0w*HR(v>G%=Z)eD@<&U%FAE z{%M?Ukf?teryC>cuW&?fq?V%B%6R;MzgClpzi7jsv*F*c;eTbr|J;T@Wy42o_>c|% zj15oP@Btehvf&Td@Gcv^&4znz_+}fv)`~a0-IjX&L0{^pzQaGe($U#IqTP)a_l*?) 
z?@#n8Hnxv~5$Lht+%bNj5a<5ljGxFe47w0*UJYL($$=d`;-x4Z(`qvcMcqp9UiZznV8q#xd<8r{jBbU^6@VImyC@xy^||3Mas zM)*0!1h%~L7rn9x5)20;dp-OIJ}@3jCW63dhpZ^6b?eTiZOjNH<3=FbpB&(aAZY`x z5%c$kSVyOCr?;uqXu)r%*SKRx+xDjBb}x5;1_mAs8bMfHJQ_9YI$LYj)z@!e2^?VL z{-Nf{NFv-D2?afYK7U-4y0di~H}P;N9<}(HmL!K!$KL*sh$J(3^(YMS=tClKB8V(u zr2P<$LND-{k&Gl0q2Oxn9}h9L6`~eHl;D$r>IK&VQ0L=9u+4j?*+&OBg`2y)ciCH_ zAG-o%8`C@bgb-|L-{IZuZJ`3l8x%m%Yv8}|!LSU3G5}5Ys0IZa2R(ZumVQOu!4~;e zZ)a1B&)W)VTxsjppa|a6cJgo#4HVG_GNOo+s&r(OIt*1^6cP@G40@%{@bZ6uI(*%@ zERr#U$1fQPe>yo67!lK+et&Ob^-S9`V<3vV99^h9#9V41*>8qbtL^9gO^prc6uea4 z#dme0OQGkfk%21 zG<$O4);X4yp3Brat5wH^)lsz`n@QK1TBppU=QFjwnMp6evXYe|@|Y?!wccvwx@h&; zZKWvrV&=(8FJXJjvdNdSSXR0_{XGz?mJ5rLYJEGCjsq|rD@Ej8temOuJy^Le?3TEE zgO#G>=QFilZsoezb?I}-ne+uL+d8catKMo|J5wIB-$pA%;5-qe59Zi;zB9gA&izw#f$T$$U%0@vU|0o^7pCe0qJr6qBKV-B23YVYV?mq=R zoBwmw>uqkQg7q&)#+I_#`8AiBKdWESUKt0Q-LK~J3eNVkn$u_Z^G)2&?0)!3j4#eC z*Y91BpNIIkW!J(ao+m%2OT+>abI|`7bdN<&Wxo!3w(mWB4NoQ>dRx_~Bn7-4bXynU z4BEAyjs88*-LT`f*}0G8owLV{AIxFr?K$ZAbNDlWcxLlw@f`AXobG1!B`{hN&Sr;R zL}a7WI(atwA4>c7MW@ftA%7fniW{@V?K#jrJmQ%@uYz9YtYYf@jpDvebn-|lP)jqBahCM2v*pVxv;cHA~Q zO;X;z9=nCpGs`4wuavj1()Q0G|L7d_Kb?b4>(>-#yPx!~!!z3*_Fd3#$U=W}4*OyS z;p5gaYHRDn|5*ziy8>~up*BkM*Y2=k`g^gP(@dC2Z0irOnNPGB=715PkF?O^P%vur z_D8$@{YDUvUYNTl4`3cV5bF<_p&-6ul2wAfg%URW@wopWrs!t;AijX&AHWngIWTY# zDi#d0zOb1gYJKi9w%}pMYk0S}8V2+-WP{9T-Lt)ETU$#ehwo|y3D2O0*C#FbT6Z#I zYx|Dorgme;mMy!yokk};7m-!^5Q`B=B)Ql8TP=-^)=#pi8LQa(n#udPuiJ=k#AM3z zC!S1_5fU?d{;3%o%l7e_8SWbKnVT5`eLIHll+GlnM|Ya;XL4qJr6!ZDD$PHRLvhb! zXFThPkL6?vpkLs5MSQ&mG0ymu4S0#D(dUl@`8R;D(ts7Hh(XVPcp#IX=gBme;ZEka zdNKtq%}HJ}vEE^HClb;Z{`jxV(MN z`kAB5F9&5>5RaglG*+89IDkhqKr=2NK3F7jcmxl{W2_e2y+gHF*Q~{YNep|*4_Zjw z$uPcDhSy+1s;RjRt1P{Yi}zs_h1CWRMo@%+c&g@{{aE_Ix=IFOfEN$-`$<9OV*Mto zfYq9z13+n9fOM4i_}WmP90~fcE|JEB4KXAL4V6bxSfYS2p^i3X zwHS*AFvNfs>Q44T(jV!?z$MXeq$jHK{_gI0Xurbv!j+Ul1hb65|7Rq+?cr93Ihsi7 zc~e3A%%5bf=@UTk-Y2cE?t2P)tis6KAN$sUX4T(~`v)Zj?bBOzpBGY$(fd@ZzIrZD z(4zz;wCmF@0Bj|Xs``(~@)fL>dU!j*lihzmsCfI3uD^P2Rxl#(5 zW+TW@OqITRu2=Aa6jbFaeN}(jUP1cJQcgYpE2#Eokv{3z^`8NaVxsJ0vL%v)a#|;e zqO3H6%rX2~^*vHg!A4uaZRXkE*!0!9gMvz4m8ozAPuTR;`hx9Z z3I>&+g!b}(Xwz5gEDAnhYd?GWm!OXqr~DeEBGh_}`ktm;Uf~M93!PGnzS`gDsUd4K z{Z(KyxYGX(7}R&=u#;ZjQQt>ZfuZq(lG0P{_;t{zifR4ZNebW1ln*BA`V-U{ePt&X z%2s{WC?)F!ROQ(96;A61WJ{H=o*&%5qkfqzUy3WUOzE#mQ$&V*rKBM3PgQxr5x<+H zLAzbM?8-Ux_esUORiLDCyFP7Av(rfQ%zB~l6C2ZpvX(jY|8k?yzflENptH)_A@%L` zJhxdWykWawRQXn@u02@<2V>0wyDaN}1JJejQ{hnKHw`Z~7rEjSa&epXy;+kgSJ7xW S)+WMPpWPuO?zJh{Q1&0v)9gzC From 613bd5c5231409cf66f184bd24925ef275f687f6 Mon Sep 17 00:00:00 2001 From: afwerar <1296468573@qq.com> Date: Tue, 24 Aug 2021 10:49:47 +0800 Subject: [PATCH 146/165] [TD-6169]: windows dll client can not quit. 
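Context for this commit: when the client library is unloaded as a Windows DLL, the cache refresh thread may already have been killed before the atexit() handler runs, so an unbounded spin on pCacheObj->deleting in taosCacheCleanup() never returns (see the tcache.c hunk below). As a rough standalone sketch of the bounded-wait pattern the series eventually settles on (illustrative only: the flag name refresh_running, the timings, and the worker thread are invented for the example, not taken from the patch):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>
    #include <unistd.h>

    /* Loose stand-in for the pCacheObj->deleting handshake flag. */
    static atomic_int refresh_running = 1;

    static void *refresh_thread(void *arg) {
        (void)arg;
        usleep(200 * 1000);                   /* pretend to finish some work */
        atomic_store(&refresh_running, 0);    /* acknowledge the shutdown request */
        return NULL;
    }

    int main(void) {
        pthread_t tid;
        pthread_create(&tid, NULL, refresh_thread, NULL);

        /* Poll for at most 20 * 50 ms = 1 second, then move on regardless.
         * An unbounded while-loop here would never return if the worker had
         * already been killed, e.g. during a Windows DLL unload. */
        for (int i = 0; i < 20 && atomic_load(&refresh_running) != 0; i++) {
            usleep(50 * 1000);
        }

        pthread_join(tid, NULL);
        printf("cleanup continues, refresh_running=%d\n", atomic_load(&refresh_running));
        return 0;
    }

Compiled with something like "gcc -pthread sketch.c", it finishes after at most roughly one second of polling even if the worker never clears the flag, which is the behaviour the cleanup path needs on Windows.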
--- src/client/src/tscSystem.c | 3 -- src/connector/python/taos/cinterface.py | 7 ++- src/os/src/detail/osFile.c | 2 +- src/util/src/tcache.c | 4 +- ...o-Run-Test-And-How-To-Add-New-Test-Case.md | 6 +++ tests/pytest/cluster/clusterSetup.py | 4 +- tests/pytest/dockerCluster/basic.py | 4 +- tests/pytest/insert/binary.py | 5 +- tests/pytest/query/queryWithTaosdKilled.py | 3 +- tests/pytest/test.py | 2 +- tests/pytest/util/dnodes-default.py | 39 ++++++++------- tests/pytest/util/dnodes-no-random-fail.py | 39 ++++++++------- tests/pytest/util/dnodes-random-fail.py | 39 ++++++++------- tests/pytest/util/dnodes.py | 50 +++++++++++-------- tests/pytest/wal/addOldWalTest.py | 2 +- 15 files changed, 123 insertions(+), 86 deletions(-) diff --git a/src/client/src/tscSystem.c b/src/client/src/tscSystem.c index 8c8afc8d88..c04765b065 100644 --- a/src/client/src/tscSystem.c +++ b/src/client/src/tscSystem.c @@ -199,10 +199,7 @@ void taos_init_imp(void) { // In the APIs of other program language, taos_cleanup is not available yet. // So, to make sure taos_cleanup will be invoked to clean up the allocated resource to suppress the valgrind warning. - // But in the dll, the child thread will be killed before atexit takes effect.So taos_cleanup is not necessary. -#if !defined(TD_WINDOWS) atexit(taos_cleanup); -#endif tscDebug("client is initialized successfully"); } diff --git a/src/connector/python/taos/cinterface.py b/src/connector/python/taos/cinterface.py index 51e9a8667d..9bb1494f4e 100644 --- a/src/connector/python/taos/cinterface.py +++ b/src/connector/python/taos/cinterface.py @@ -3,6 +3,7 @@ import ctypes import platform import sys +import os from ctypes import * try: from typing import Any @@ -37,7 +38,11 @@ def _load_taos_darwin(): def _load_taos_windows(): - return ctypes.windll.LoadLibrary("taos") + if os.path.exists("c:\\Windows\\System32\\taos.dll"): + return ctypes.windll.LoadLibrary("taos") + else: + print("Please copy the \"C:\\TDengine\\driver\\taos.dll\" file to the \"C:\\windows\\system32\" directory.") + return ctypes.windll.LoadLibrary("C:\\TDengine\\driver\\taos.dll") def _load_taos(): diff --git a/src/os/src/detail/osFile.c b/src/os/src/detail/osFile.c index cc12968c72..57e0765750 100644 --- a/src/os/src/detail/osFile.c +++ b/src/os/src/detail/osFile.c @@ -368,7 +368,7 @@ int32_t taosFsync(FileFd fd) { HANDLE h = (HANDLE)_get_osfhandle(fd); - return FlushFileBuffers(h); + return !FlushFileBuffers(h); } int32_t taosRename(char *oldName, char *newName) { diff --git a/src/util/src/tcache.c b/src/util/src/tcache.c index 69b3741e13..dc1f961725 100644 --- a/src/util/src/tcache.c +++ b/src/util/src/tcache.c @@ -537,7 +537,9 @@ void taosCacheCleanup(SCacheObj *pCacheObj) { pCacheObj->deleting = 1; // wait for the refresh thread quit before destroying the cache object. - while(atomic_load_8(&pCacheObj->deleting) != 0) { + // But in the dll, the child thread will be killed before atexit takes effect.So here we only wait for one second. + while(atomic_load_8(&pCacheObj->deleting)) { + // for (int i = 0; i < 60&&atomic_load_8(&pCacheObj->deleting) != 0; i++) { taosMsleep(50); } diff --git a/tests/How-To-Run-Test-And-How-To-Add-New-Test-Case.md b/tests/How-To-Run-Test-And-How-To-Add-New-Test-Case.md index 6845d091b5..c5229aa0e5 100644 --- a/tests/How-To-Run-Test-And-How-To-Add-New-Test-Case.md +++ b/tests/How-To-Run-Test-And-How-To-Add-New-Test-Case.md @@ -49,6 +49,12 @@ > before the script line. Then you can look for the core file in > \/tests/pytest after the program crash. 
+> Note3: if you are on the windows platform, you can install the git client, +> and then add %GitPath%\usr\bin;%GitPath%\mingw64\bin;%GitPath%\bin to the system +> environment variable Path. Note that %GitPath% is the installation path of git, +> such as C:\Program Files\Git. Then you can run the test script using the +> "sh smoketest.sh" command. + ### How to add a new test case diff --git a/tests/pytest/cluster/clusterSetup.py b/tests/pytest/cluster/clusterSetup.py index 87414303f8..809e0e9d25 100644 --- a/tests/pytest/cluster/clusterSetup.py +++ b/tests/pytest/cluster/clusterSetup.py @@ -92,13 +92,13 @@ class Node: self.conn.run("yes|./install.sh") def configTaosd(self, taosConfigKey, taosConfigValue): - self.conn.run("sudo echo '%s %s' >> %s" % (taosConfigKey, taosConfigValue, "/etc/taos/taos.cfg")) + self.conn.run("sudo echo %s %s >> %s" % (taosConfigKey, taosConfigValue, "/etc/taos/taos.cfg")) def removeTaosConfig(self, taosConfigKey, taosConfigValue): self.conn.run("sudo sed -in-place -e '/%s %s/d' %s" % (taosConfigKey, taosConfigValue, "/etc/taos/taos.cfg")) def configHosts(self, ip, name): - self.conn.run("echo '%s %s' >> %s" % (ip, name, '/etc/hosts')) + self.conn.run("echo %s %s >> %s" % (ip, name, '/etc/hosts')) def removeData(self): try: diff --git a/tests/pytest/dockerCluster/basic.py b/tests/pytest/dockerCluster/basic.py index 871d69790d..5188aa4a80 100644 --- a/tests/pytest/dockerCluster/basic.py +++ b/tests/pytest/dockerCluster/basic.py @@ -113,7 +113,7 @@ class BuildDockerCluser: def cfg(self, option, value, nodeIndex): cfgPath = "%s/node%d/cfg/taos.cfg" % (self.dockerDir, nodeIndex) - cmd = "echo '%s %s' >> %s" % (option, value, cfgPath) + cmd = "echo %s %s >> %s" % (option, value, cfgPath) self.execCmd(cmd) def updateLocalhosts(self): @@ -122,7 +122,7 @@ class BuildDockerCluser: print(result) if result is None or result.isspace(): print("==========") - cmd = "echo '172.27.0.7 tdnode1' >> /etc/hosts" + cmd = "echo 172.27.0.7 tdnode1 >> /etc/hosts" display = "echo %s" % cmd self.execCmd(display) self.execCmd(cmd) diff --git a/tests/pytest/insert/binary.py b/tests/pytest/insert/binary.py index 0cbb7876c6..44e42bec03 100644 --- a/tests/pytest/insert/binary.py +++ b/tests/pytest/insert/binary.py @@ -53,8 +53,9 @@ class TDTestCase: tdLog.info("tdSql.checkData(0, 0, '34567')") tdSql.checkData(0, 0, '34567') tdLog.info("insert into tb values (now+4a, \"'';\")") - config_dir = subprocess.check_output(str("ps -ef |grep dnode1|grep -v grep |awk '{print $NF}'"), stderr=subprocess.STDOUT, shell=True).decode('utf-8').replace('\n', '') - result = ''.join(os.popen(r"""taos -s "insert into db.tb values (now+4a, \"'';\")" -c %s"""%(config_dir)).readlines()) + # config_dir = subprocess.check_output(str("ps -ef |grep dnode1|grep -v grep |awk '{print $NF}'"), stderr=subprocess.STDOUT, shell=True).decode('utf-8').replace('\n', '') + # result = ''.join(os.popen(r"""taos -s "insert into db.tb values (now+4a, \"'';\")" -c %s"""%(config_dir)).readlines()) + result = ''.join(os.popen(r"""taos -s "insert into db.tb values (now+4a, \"'';\")" -c %s"""%(tdSql.cursor._connection._config)).readlines()) if "Query OK" not in result: tdLog.exit("err:insert '';") tdLog.info('drop database db') tdSql.execute('drop database db') diff --git a/tests/pytest/query/queryWithTaosdKilled.py b/tests/pytest/query/queryWithTaosdKilled.py index 28f9b87636..fb1384093c 100644 --- a/tests/pytest/query/queryWithTaosdKilled.py +++ b/tests/pytest/query/queryWithTaosdKilled.py @@ -34,7 +34,8 @@ class TDTestCase: path = 
tdDnodes.dnodes[1].getDnodeRootDir(1) print(path) tdLog.info("sudo mkdir -p %s/data/vnode/vnode2/wal/old" % path) - os.system("sudo mkdir -p %s/data/vnode/vnode2/wal/old" % path) + # os.system("sudo mkdir -p %s/data/vnode/vnode2/wal/old" % path) + os.makedirs("%s/data/vnode/vnode2/wal/old" % path, exist_ok=True) # like "mkdir -p" def run(self): # os.system("rm -rf %s/ " % tdDnodes.getDnodesRootDir()) diff --git a/tests/pytest/test.py b/tests/pytest/test.py index 97dca6be18..e0a9e339a9 100644 --- a/tests/pytest/test.py +++ b/tests/pytest/test.py @@ -55,7 +55,7 @@ if __name__ == "__main__": restart = True if key in ['-f', '--file']: - fileName = value + fileName = value.replace('\r', "") if key in ['-p', '--path']: deployPath = value diff --git a/tests/pytest/util/dnodes-default.py b/tests/pytest/util/dnodes-default.py index 085e083149..43a92431ba 100644 --- a/tests/pytest/util/dnodes-default.py +++ b/tests/pytest/util/dnodes-default.py @@ -60,7 +60,7 @@ class TDSimClient: self.cfgDict.update({option: value}) def cfg(self, option, value): - cmd = "echo '%s %s' >> %s" % (option, value, self.cfgPath) + cmd = "echo %s %s >> %s" % (option, value, self.cfgPath) if os.system(cmd) != 0: tdLog.exit(cmd) @@ -73,17 +73,19 @@ class TDSimClient: if os.system(cmd) != 0: tdLog.exit(cmd) - cmd = "mkdir -p " + self.logDir - if os.system(cmd) != 0: - tdLog.exit(cmd) + os.makedirs(self.logDir, exist_ok=True) # like "mkdir -p" + # cmd = "mkdir -p " + self.logDir + # if os.system(cmd) != 0: + # tdLog.exit(cmd) cmd = "rm -rf " + self.cfgDir if os.system(cmd) != 0: tdLog.exit(cmd) - cmd = "mkdir -p " + self.cfgDir - if os.system(cmd) != 0: - tdLog.exit(cmd) + os.makedirs(self.cfgDir, exist_ok=True) # like "mkdir -p" + # cmd = "mkdir -p " + self.cfgDir + # if os.system(cmd) != 0: + # tdLog.exit(cmd) cmd = "touch " + self.cfgPath if os.system(cmd) != 0: @@ -149,17 +151,20 @@ class TDDnode: if os.system(cmd) != 0: tdLog.exit(cmd) - cmd = "mkdir -p " + self.dataDir - if os.system(cmd) != 0: - tdLog.exit(cmd) + os.makedirs(self.dataDir, exist_ok=True) # like "mkdir -p" + # cmd = "mkdir -p " + self.dataDir + # if os.system(cmd) != 0: + # tdLog.exit(cmd) - cmd = "mkdir -p " + self.logDir - if os.system(cmd) != 0: - tdLog.exit(cmd) + os.makedirs(self.logDir, exist_ok=True) # like "mkdir -p" + # cmd = "mkdir -p " + self.logDir + # if os.system(cmd) != 0: + # tdLog.exit(cmd) - cmd = "mkdir -p " + self.cfgDir - if os.system(cmd) != 0: - tdLog.exit(cmd) + os.makedirs(self.cfgDir, exist_ok=True) # like "mkdir -p" + # cmd = "mkdir -p " + self.cfgDir + # if os.system(cmd) != 0: + # tdLog.exit(cmd) cmd = "touch " + self.cfgPath if os.system(cmd) != 0: @@ -320,7 +325,7 @@ class TDDnode: tdLog.exit(cmd) def cfg(self, option, value): - cmd = "echo '%s %s' >> %s" % (option, value, self.cfgPath) + cmd = "echo %s %s >> %s" % (option, value, self.cfgPath) if os.system(cmd) != 0: tdLog.exit(cmd) diff --git a/tests/pytest/util/dnodes-no-random-fail.py b/tests/pytest/util/dnodes-no-random-fail.py index 2627575e61..3bcea493dc 100644 --- a/tests/pytest/util/dnodes-no-random-fail.py +++ b/tests/pytest/util/dnodes-no-random-fail.py @@ -58,7 +58,7 @@ class TDSimClient: self.cfgDict.update({option: value}) def cfg(self, option, value): - cmd = "echo '%s %s' >> %s" % (option, value, self.cfgPath) + cmd = "echo %s %s >> %s" % (option, value, self.cfgPath) if os.system(cmd) != 0: tdLog.exit(cmd) @@ -71,17 +71,19 @@ class TDSimClient: if os.system(cmd) != 0: tdLog.exit(cmd) - cmd = "mkdir -p " + self.logDir - if os.system(cmd) != 0: - 
tdLog.exit(cmd) + os.makedirs(self.logDir, exist_ok=True) # like "mkdir -p" + # cmd = "mkdir -p " + self.logDir + # if os.system(cmd) != 0: + # tdLog.exit(cmd) cmd = "rm -rf " + self.cfgDir if os.system(cmd) != 0: tdLog.exit(cmd) - cmd = "mkdir -p " + self.cfgDir - if os.system(cmd) != 0: - tdLog.exit(cmd) + os.makedirs(self.cfgDir, exist_ok=True) # like "mkdir -p" + # cmd = "mkdir -p " + self.cfgDir + # if os.system(cmd) != 0: + # tdLog.exit(cmd) cmd = "touch " + self.cfgPath if os.system(cmd) != 0: @@ -147,17 +149,20 @@ class TDDnode: if os.system(cmd) != 0: tdLog.exit(cmd) - cmd = "mkdir -p " + self.dataDir - if os.system(cmd) != 0: - tdLog.exit(cmd) + os.makedirs(self.dataDir, exist_ok=True) # like "mkdir -p" + # cmd = "mkdir -p " + self.dataDir + # if os.system(cmd) != 0: + # tdLog.exit(cmd) - cmd = "mkdir -p " + self.logDir - if os.system(cmd) != 0: - tdLog.exit(cmd) + os.makedirs(self.logDir, exist_ok=True) # like "mkdir -p" + # cmd = "mkdir -p " + self.logDir + # if os.system(cmd) != 0: + # tdLog.exit(cmd) - cmd = "mkdir -p " + self.cfgDir - if os.system(cmd) != 0: - tdLog.exit(cmd) + os.makedirs(self.cfgDir, exist_ok=True) # like "mkdir -p" + # cmd = "mkdir -p " + self.cfgDir + # if os.system(cmd) != 0: + # tdLog.exit(cmd) cmd = "touch " + self.cfgPath if os.system(cmd) != 0: @@ -318,7 +323,7 @@ class TDDnode: tdLog.exit(cmd) def cfg(self, option, value): - cmd = "echo '%s %s' >> %s" % (option, value, self.cfgPath) + cmd = "echo %s %s >> %s" % (option, value, self.cfgPath) if os.system(cmd) != 0: tdLog.exit(cmd) diff --git a/tests/pytest/util/dnodes-random-fail.py b/tests/pytest/util/dnodes-random-fail.py index 4f4cdcc0d0..7d99980e67 100644 --- a/tests/pytest/util/dnodes-random-fail.py +++ b/tests/pytest/util/dnodes-random-fail.py @@ -58,7 +58,7 @@ class TDSimClient: self.cfgDict.update({option: value}) def cfg(self, option, value): - cmd = "echo '%s %s' >> %s" % (option, value, self.cfgPath) + cmd = "echo %s %s >> %s" % (option, value, self.cfgPath) if os.system(cmd) != 0: tdLog.exit(cmd) @@ -71,17 +71,19 @@ class TDSimClient: if os.system(cmd) != 0: tdLog.exit(cmd) - cmd = "mkdir -p " + self.logDir - if os.system(cmd) != 0: - tdLog.exit(cmd) + os.makedirs(self.logDir, exist_ok=True) # like "mkdir -p" + # cmd = "mkdir -p " + self.logDir + # if os.system(cmd) != 0: + # tdLog.exit(cmd) cmd = "rm -rf " + self.cfgDir if os.system(cmd) != 0: tdLog.exit(cmd) - cmd = "mkdir -p " + self.cfgDir - if os.system(cmd) != 0: - tdLog.exit(cmd) + os.makedirs(self.cfgDir, exist_ok=True) # like "mkdir -p" + # cmd = "mkdir -p " + self.cfgDir + # if os.system(cmd) != 0: + # tdLog.exit(cmd) cmd = "touch " + self.cfgPath if os.system(cmd) != 0: @@ -147,17 +149,20 @@ class TDDnode: if os.system(cmd) != 0: tdLog.exit(cmd) - cmd = "mkdir -p " + self.dataDir - if os.system(cmd) != 0: - tdLog.exit(cmd) + os.makedirs(self.dataDir, exist_ok=True) # like "mkdir -p" + # cmd = "mkdir -p " + self.dataDir + # if os.system(cmd) != 0: + # tdLog.exit(cmd) - cmd = "mkdir -p " + self.logDir - if os.system(cmd) != 0: - tdLog.exit(cmd) + os.makedirs(self.logDir, exist_ok=True) # like "mkdir -p" + # cmd = "mkdir -p " + self.logDir + # if os.system(cmd) != 0: + # tdLog.exit(cmd) - cmd = "mkdir -p " + self.cfgDir - if os.system(cmd) != 0: - tdLog.exit(cmd) + os.makedirs(self.cfgDir, exist_ok=True) # like "mkdir -p" + # cmd = "mkdir -p " + self.cfgDir + # if os.system(cmd) != 0: + # tdLog.exit(cmd) cmd = "touch " + self.cfgPath if os.system(cmd) != 0: @@ -318,7 +323,7 @@ class TDDnode: tdLog.exit(cmd) def cfg(self, option, 
value): - cmd = "echo '%s %s' >> %s" % (option, value, self.cfgPath) + cmd = "echo %s %s >> %s" % (option, value, self.cfgPath) if os.system(cmd) != 0: tdLog.exit(cmd) diff --git a/tests/pytest/util/dnodes.py b/tests/pytest/util/dnodes.py index 0f4919ba96..589bde7474 100644 --- a/tests/pytest/util/dnodes.py +++ b/tests/pytest/util/dnodes.py @@ -14,6 +14,7 @@ import sys import os import os.path +import platform import subprocess from time import sleep from util.log import * @@ -61,7 +62,7 @@ class TDSimClient: self.cfgDict.update({option: value}) def cfg(self, option, value): - cmd = "echo '%s %s' >> %s" % (option, value, self.cfgPath) + cmd = "echo %s %s >> %s" % (option, value, self.cfgPath) if os.system(cmd) != 0: tdLog.exit(cmd) @@ -74,17 +75,19 @@ class TDSimClient: if os.system(cmd) != 0: tdLog.exit(cmd) - cmd = "mkdir -p " + self.logDir - if os.system(cmd) != 0: - tdLog.exit(cmd) + os.makedirs(self.logDir, exist_ok=True) # like "mkdir -p" + # cmd = "mkdir -p " + self.logDir + # if os.system(cmd) != 0: + # tdLog.exit(cmd) cmd = "rm -rf " + self.cfgDir if os.system(cmd) != 0: tdLog.exit(cmd) - cmd = "mkdir -p " + self.cfgDir - if os.system(cmd) != 0: - tdLog.exit(cmd) + os.makedirs(self.cfgDir, exist_ok=True) # like "mkdir -p" + # cmd = "mkdir -p " + self.cfgDir + # if os.system(cmd) != 0: + # tdLog.exit(cmd) cmd = "touch " + self.cfgPath if os.system(cmd) != 0: @@ -185,17 +188,20 @@ class TDDnode: if os.system(cmd) != 0: tdLog.exit(cmd) - cmd = "mkdir -p " + self.dataDir - if os.system(cmd) != 0: - tdLog.exit(cmd) + os.makedirs(self.dataDir, exist_ok=True) # like "mkdir -p" + # cmd = "mkdir -p " + self.dataDir + # if os.system(cmd) != 0: + # tdLog.exit(cmd) - cmd = "mkdir -p " + self.logDir - if os.system(cmd) != 0: - tdLog.exit(cmd) + os.makedirs(self.logDir, exist_ok=True) # like "mkdir -p" + # cmd = "mkdir -p " + self.logDir + # if os.system(cmd) != 0: + # tdLog.exit(cmd) - cmd = "mkdir -p " + self.cfgDir - if os.system(cmd) != 0: - tdLog.exit(cmd) + os.makedirs(self.cfgDir, exist_ok=True) # like "mkdir -p" + # cmd = "mkdir -p " + self.cfgDir + # if os.system(cmd) != 0: + # tdLog.exit(cmd) cmd = "touch " + self.cfgPath if os.system(cmd) != 0: @@ -246,7 +252,7 @@ class TDDnode: projPath = selfPath[:selfPath.find("tests")] for root, dirs, files in os.walk(projPath): - if ("taosd" in files): + if (("taosd.exe") in files) or (("taosd") in files): rootRealPath = os.path.dirname(os.path.realpath(root)) if ("packaging" not in rootRealPath): buildPath = root[:len(root)-len("/build/bin")] @@ -267,7 +273,11 @@ class TDDnode: tdLog.exit("dnode:%d is not deployed" % (self.index)) if self.valgrind == 0: - cmd = "nohup %s -c %s > /dev/null 2>&1 & " % ( + if platform.system()=="Windows": + cmd = "mintty %s -c %s" % ( + binPath, self.cfgDir) + else: + cmd = "nohup %s -c %s > /dev/null 2>&1 & " % ( binPath, self.cfgDir) else: valgrindCmdline = "valgrind --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all -v --workaround-gcc296-bugs=yes" @@ -292,7 +302,7 @@ class TDDnode: i += 1 if i>50: break - popen = subprocess.Popen('tail -f ' + logFile, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) + popen = subprocess.Popen('tail -f -n +0 ' + logFile, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) pid = popen.pid # print('Popen.pid:' + str(pid)) timeout = time.time() + 60*2 @@ -404,7 +414,7 @@ class TDDnode: tdLog.exit(cmd) def cfg(self, option, value): - cmd = "echo '%s %s' >> %s" % (option, value, self.cfgPath) + cmd = "echo %s %s >> %s" 
% (option, value, self.cfgPath) if os.system(cmd) != 0: tdLog.exit(cmd) diff --git a/tests/pytest/wal/addOldWalTest.py b/tests/pytest/wal/addOldWalTest.py index 2f4dcd5ce8..36056d1bc2 100644 --- a/tests/pytest/wal/addOldWalTest.py +++ b/tests/pytest/wal/addOldWalTest.py @@ -31,7 +31,7 @@ class TDTestCase: def createOldDirAndAddWal(self): oldDir = tdDnodes.getDnodesRootDir() + "dnode1/data/vnode/vnode2/wal/old" - os.system("sudo echo 'test' >> %s/wal" % oldDir) + os.system("sudo echo test >> %s/wal" % oldDir) def run(self): From d3af8a9d31b1b3458305ebf3c22152eaf3c89155 Mon Sep 17 00:00:00 2001 From: afwerar <1296468573@qq.com> Date: Tue, 24 Aug 2021 11:05:26 +0800 Subject: [PATCH 147/165] [TD-6169]: windows dll client can not quit. --- src/client/src/tscSystem.c | 2 + src/connector/python/taos/cinterface.py | 7 +-- src/os/src/detail/osFile.c | 2 +- src/util/src/tcache.c | 2 - ...o-Run-Test-And-How-To-Add-New-Test-Case.md | 6 --- tests/pytest/cluster/clusterSetup.py | 4 +- tests/pytest/dockerCluster/basic.py | 4 +- tests/pytest/insert/binary.py | 5 +-- tests/pytest/query/queryWithTaosdKilled.py | 3 +- tests/pytest/test.py | 2 +- tests/pytest/util/dnodes-default.py | 35 +++++++-------- tests/pytest/util/dnodes-no-random-fail.py | 35 +++++++-------- tests/pytest/util/dnodes-random-fail.py | 35 +++++++-------- tests/pytest/util/dnodes.py | 45 ++++++++----------- tests/pytest/wal/addOldWalTest.py | 2 +- 15 files changed, 76 insertions(+), 113 deletions(-) diff --git a/src/client/src/tscSystem.c b/src/client/src/tscSystem.c index c04765b065..ad29b58660 100644 --- a/src/client/src/tscSystem.c +++ b/src/client/src/tscSystem.c @@ -199,7 +199,9 @@ void taos_init_imp(void) { // In the APIs of other program language, taos_cleanup is not available yet. // So, to make sure taos_cleanup will be invoked to clean up the allocated resource to suppress the valgrind warning. +#if !defined(TD_WINDOWS) atexit(taos_cleanup); +#endif tscDebug("client is initialized successfully"); } diff --git a/src/connector/python/taos/cinterface.py b/src/connector/python/taos/cinterface.py index 9bb1494f4e..51e9a8667d 100644 --- a/src/connector/python/taos/cinterface.py +++ b/src/connector/python/taos/cinterface.py @@ -3,7 +3,6 @@ import ctypes import platform import sys -import os from ctypes import * try: from typing import Any @@ -38,11 +37,7 @@ def _load_taos_darwin(): def _load_taos_windows(): - if os.path.exists("c:\\Windows\\System32\\taos.dll"): - return ctypes.windll.LoadLibrary("taos") - else: - print("Please copy the \"C:\\TDengine\\driver\\taos.dll\" file to the \"C:\\windows\\system32\" directory.") - return ctypes.windll.LoadLibrary("C:\\TDengine\\driver\\taos.dll") + return ctypes.windll.LoadLibrary("taos") def _load_taos(): diff --git a/src/os/src/detail/osFile.c b/src/os/src/detail/osFile.c index 57e0765750..cc12968c72 100644 --- a/src/os/src/detail/osFile.c +++ b/src/os/src/detail/osFile.c @@ -368,7 +368,7 @@ int32_t taosFsync(FileFd fd) { HANDLE h = (HANDLE)_get_osfhandle(fd); - return !FlushFileBuffers(h); + return FlushFileBuffers(h); } int32_t taosRename(char *oldName, char *newName) { diff --git a/src/util/src/tcache.c b/src/util/src/tcache.c index dc1f961725..46271c6d7a 100644 --- a/src/util/src/tcache.c +++ b/src/util/src/tcache.c @@ -537,9 +537,7 @@ void taosCacheCleanup(SCacheObj *pCacheObj) { pCacheObj->deleting = 1; // wait for the refresh thread quit before destroying the cache object. 
- // But in the dll, the child thread will be killed before atexit takes effect.So here we only wait for one second. while(atomic_load_8(&pCacheObj->deleting)) { - // for (int i = 0; i < 60&&atomic_load_8(&pCacheObj->deleting) != 0; i++) { taosMsleep(50); } diff --git a/tests/How-To-Run-Test-And-How-To-Add-New-Test-Case.md b/tests/How-To-Run-Test-And-How-To-Add-New-Test-Case.md index c5229aa0e5..6845d091b5 100644 --- a/tests/How-To-Run-Test-And-How-To-Add-New-Test-Case.md +++ b/tests/How-To-Run-Test-And-How-To-Add-New-Test-Case.md @@ -49,12 +49,6 @@ > before the script line. Then you can look for the core file in > \/tests/pytest after the program crash. -> Note3: if you are on the windows platform, you can install the git client, -> and then add %GitPath%\usr\bin;%GitPath%\mingw64\bin;%GitPath%\bin to the system -> environment variable Path. Note that %GitPath% is the installation path of git, -> such as C:\Program Files\Git. Then you can run the test script using the -> "sh smoketest.sh" command. - ### How to add a new test case diff --git a/tests/pytest/cluster/clusterSetup.py b/tests/pytest/cluster/clusterSetup.py index 809e0e9d25..87414303f8 100644 --- a/tests/pytest/cluster/clusterSetup.py +++ b/tests/pytest/cluster/clusterSetup.py @@ -92,13 +92,13 @@ class Node: self.conn.run("yes|./install.sh") def configTaosd(self, taosConfigKey, taosConfigValue): - self.conn.run("sudo echo %s %s >> %s" % (taosConfigKey, taosConfigValue, "/etc/taos/taos.cfg")) + self.conn.run("sudo echo '%s %s' >> %s" % (taosConfigKey, taosConfigValue, "/etc/taos/taos.cfg")) def removeTaosConfig(self, taosConfigKey, taosConfigValue): self.conn.run("sudo sed -in-place -e '/%s %s/d' %s" % (taosConfigKey, taosConfigValue, "/etc/taos/taos.cfg")) def configHosts(self, ip, name): - self.conn.run("echo %s %s >> %s" % (ip, name, '/etc/hosts')) + self.conn.run("echo '%s %s' >> %s" % (ip, name, '/etc/hosts')) def removeData(self): try: diff --git a/tests/pytest/dockerCluster/basic.py b/tests/pytest/dockerCluster/basic.py index 5188aa4a80..871d69790d 100644 --- a/tests/pytest/dockerCluster/basic.py +++ b/tests/pytest/dockerCluster/basic.py @@ -113,7 +113,7 @@ class BuildDockerCluser: def cfg(self, option, value, nodeIndex): cfgPath = "%s/node%d/cfg/taos.cfg" % (self.dockerDir, nodeIndex) - cmd = "echo %s %s >> %s" % (option, value, cfgPath) + cmd = "echo '%s %s' >> %s" % (option, value, cfgPath) self.execCmd(cmd) def updateLocalhosts(self): @@ -122,7 +122,7 @@ class BuildDockerCluser: print(result) if result is None or result.isspace(): print("==========") - cmd = "echo 172.27.0.7 tdnode1 >> /etc/hosts" + cmd = "echo '172.27.0.7 tdnode1' >> /etc/hosts" display = "echo %s" % cmd self.execCmd(display) self.execCmd(cmd) diff --git a/tests/pytest/insert/binary.py b/tests/pytest/insert/binary.py index 44e42bec03..0cbb7876c6 100644 --- a/tests/pytest/insert/binary.py +++ b/tests/pytest/insert/binary.py @@ -53,9 +53,8 @@ class TDTestCase: tdLog.info("tdSql.checkData(0, 0, '34567')") tdSql.checkData(0, 0, '34567') tdLog.info("insert into tb values (now+4a, \"'';\")") - # config_dir = subprocess.check_output(str("ps -ef |grep dnode1|grep -v grep |awk '{print $NF}'"), stderr=subprocess.STDOUT, shell=True).decode('utf-8').replace('\n', '') - # result = ''.join(os.popen(r"""taos -s "insert into db.tb values (now+4a, \"'';\")" -c %s"""%(config_dir)).readlines()) - result = ''.join(os.popen(r"""taos -s "insert into db.tb values (now+4a, \"'';\")" -c %s"""%(tdSql.cursor._connection._config)).readlines()) + config_dir = 
subprocess.check_output(str("ps -ef |grep dnode1|grep -v grep |awk '{print $NF}'"), stderr=subprocess.STDOUT, shell=True).decode('utf-8').replace('\n', '') + result = ''.join(os.popen(r"""taos -s "insert into db.tb values (now+4a, \"'';\")" -c %s"""%(config_dir)).readlines()) if "Query OK" not in result: tdLog.exit("err:insert '';") tdLog.info('drop database db') tdSql.execute('drop database db') diff --git a/tests/pytest/query/queryWithTaosdKilled.py b/tests/pytest/query/queryWithTaosdKilled.py index fb1384093c..28f9b87636 100644 --- a/tests/pytest/query/queryWithTaosdKilled.py +++ b/tests/pytest/query/queryWithTaosdKilled.py @@ -34,8 +34,7 @@ class TDTestCase: path = tdDnodes.dnodes[1].getDnodeRootDir(1) print(path) tdLog.info("sudo mkdir -p %s/data/vnode/vnode2/wal/old" % path) - # os.system("sudo mkdir -p %s/data/vnode/vnode2/wal/old" % path) - os.makedirs("%s/data/vnode/vnode2/wal/old" % path, exist_ok=True) # like "mkdir -p" + os.system("sudo mkdir -p %s/data/vnode/vnode2/wal/old" % path) def run(self): # os.system("rm -rf %s/ " % tdDnodes.getDnodesRootDir()) diff --git a/tests/pytest/test.py b/tests/pytest/test.py index e0a9e339a9..97dca6be18 100644 --- a/tests/pytest/test.py +++ b/tests/pytest/test.py @@ -55,7 +55,7 @@ if __name__ == "__main__": restart = True if key in ['-f', '--file']: - fileName = value.replace('\r', "") + fileName = value if key in ['-p', '--path']: deployPath = value diff --git a/tests/pytest/util/dnodes-default.py b/tests/pytest/util/dnodes-default.py index 43a92431ba..8da36f3074 100644 --- a/tests/pytest/util/dnodes-default.py +++ b/tests/pytest/util/dnodes-default.py @@ -73,19 +73,17 @@ class TDSimClient: if os.system(cmd) != 0: tdLog.exit(cmd) - os.makedirs(self.logDir, exist_ok=True) # like "mkdir -p" - # cmd = "mkdir -p " + self.logDir - # if os.system(cmd) != 0: - # tdLog.exit(cmd) + cmd = "mkdir -p " + self.logDir + if os.system(cmd) != 0: + tdLog.exit(cmd) cmd = "rm -rf " + self.cfgDir if os.system(cmd) != 0: tdLog.exit(cmd) - os.makedirs(self.cfgDir, exist_ok=True) # like "mkdir -p" - # cmd = "mkdir -p " + self.cfgDir - # if os.system(cmd) != 0: - # tdLog.exit(cmd) + cmd = "mkdir -p " + self.cfgDir + if os.system(cmd) != 0: + tdLog.exit(cmd) cmd = "touch " + self.cfgPath if os.system(cmd) != 0: @@ -151,20 +149,17 @@ class TDDnode: if os.system(cmd) != 0: tdLog.exit(cmd) - os.makedirs(self.dataDir, exist_ok=True) # like "mkdir -p" - # cmd = "mkdir -p " + self.dataDir - # if os.system(cmd) != 0: - # tdLog.exit(cmd) + cmd = "mkdir -p " + self.dataDir + if os.system(cmd) != 0: + tdLog.exit(cmd) - os.makedirs(self.logDir, exist_ok=True) # like "mkdir -p" - # cmd = "mkdir -p " + self.logDir - # if os.system(cmd) != 0: - # tdLog.exit(cmd) + cmd = "mkdir -p " + self.logDir + if os.system(cmd) != 0: + tdLog.exit(cmd) - os.makedirs(self.cfgDir, exist_ok=True) # like "mkdir -p" - # cmd = "mkdir -p " + self.cfgDir - # if os.system(cmd) != 0: - # tdLog.exit(cmd) + cmd = "mkdir -p " + self.cfgDir + if os.system(cmd) != 0: + tdLog.exit(cmd) cmd = "touch " + self.cfgPath if os.system(cmd) != 0: diff --git a/tests/pytest/util/dnodes-no-random-fail.py b/tests/pytest/util/dnodes-no-random-fail.py index 3bcea493dc..a973f8da52 100644 --- a/tests/pytest/util/dnodes-no-random-fail.py +++ b/tests/pytest/util/dnodes-no-random-fail.py @@ -71,19 +71,17 @@ class TDSimClient: if os.system(cmd) != 0: tdLog.exit(cmd) - os.makedirs(self.logDir, exist_ok=True) # like "mkdir -p" - # cmd = "mkdir -p " + self.logDir - # if os.system(cmd) != 0: - # tdLog.exit(cmd) + cmd = "mkdir -p " + 
self.logDir + if os.system(cmd) != 0: + tdLog.exit(cmd) cmd = "rm -rf " + self.cfgDir if os.system(cmd) != 0: tdLog.exit(cmd) - os.makedirs(self.cfgDir, exist_ok=True) # like "mkdir -p" - # cmd = "mkdir -p " + self.cfgDir - # if os.system(cmd) != 0: - # tdLog.exit(cmd) + cmd = "mkdir -p " + self.cfgDir + if os.system(cmd) != 0: + tdLog.exit(cmd) cmd = "touch " + self.cfgPath if os.system(cmd) != 0: @@ -149,20 +147,17 @@ class TDDnode: if os.system(cmd) != 0: tdLog.exit(cmd) - os.makedirs(self.dataDir, exist_ok=True) # like "mkdir -p" - # cmd = "mkdir -p " + self.dataDir - # if os.system(cmd) != 0: - # tdLog.exit(cmd) + cmd = "mkdir -p " + self.dataDir + if os.system(cmd) != 0: + tdLog.exit(cmd) - os.makedirs(self.logDir, exist_ok=True) # like "mkdir -p" - # cmd = "mkdir -p " + self.logDir - # if os.system(cmd) != 0: - # tdLog.exit(cmd) + cmd = "mkdir -p " + self.logDir + if os.system(cmd) != 0: + tdLog.exit(cmd) - os.makedirs(self.cfgDir, exist_ok=True) # like "mkdir -p" - # cmd = "mkdir -p " + self.cfgDir - # if os.system(cmd) != 0: - # tdLog.exit(cmd) + cmd = "mkdir -p " + self.cfgDir + if os.system(cmd) != 0: + tdLog.exit(cmd) cmd = "touch " + self.cfgPath if os.system(cmd) != 0: diff --git a/tests/pytest/util/dnodes-random-fail.py b/tests/pytest/util/dnodes-random-fail.py index 7d99980e67..7cadca64a3 100644 --- a/tests/pytest/util/dnodes-random-fail.py +++ b/tests/pytest/util/dnodes-random-fail.py @@ -71,19 +71,17 @@ class TDSimClient: if os.system(cmd) != 0: tdLog.exit(cmd) - os.makedirs(self.logDir, exist_ok=True) # like "mkdir -p" - # cmd = "mkdir -p " + self.logDir - # if os.system(cmd) != 0: - # tdLog.exit(cmd) + cmd = "mkdir -p " + self.logDir + if os.system(cmd) != 0: + tdLog.exit(cmd) cmd = "rm -rf " + self.cfgDir if os.system(cmd) != 0: tdLog.exit(cmd) - os.makedirs(self.cfgDir, exist_ok=True) # like "mkdir -p" - # cmd = "mkdir -p " + self.cfgDir - # if os.system(cmd) != 0: - # tdLog.exit(cmd) + cmd = "mkdir -p " + self.cfgDir + if os.system(cmd) != 0: + tdLog.exit(cmd) cmd = "touch " + self.cfgPath if os.system(cmd) != 0: @@ -149,20 +147,17 @@ class TDDnode: if os.system(cmd) != 0: tdLog.exit(cmd) - os.makedirs(self.dataDir, exist_ok=True) # like "mkdir -p" - # cmd = "mkdir -p " + self.dataDir - # if os.system(cmd) != 0: - # tdLog.exit(cmd) + cmd = "mkdir -p " + self.dataDir + if os.system(cmd) != 0: + tdLog.exit(cmd) - os.makedirs(self.logDir, exist_ok=True) # like "mkdir -p" - # cmd = "mkdir -p " + self.logDir - # if os.system(cmd) != 0: - # tdLog.exit(cmd) + cmd = "mkdir -p " + self.logDir + if os.system(cmd) != 0: + tdLog.exit(cmd) - os.makedirs(self.cfgDir, exist_ok=True) # like "mkdir -p" - # cmd = "mkdir -p " + self.cfgDir - # if os.system(cmd) != 0: - # tdLog.exit(cmd) + cmd = "mkdir -p " + self.cfgDir + if os.system(cmd) != 0: + tdLog.exit(cmd) cmd = "touch " + self.cfgPath if os.system(cmd) != 0: diff --git a/tests/pytest/util/dnodes.py b/tests/pytest/util/dnodes.py index 589bde7474..2996639e03 100644 --- a/tests/pytest/util/dnodes.py +++ b/tests/pytest/util/dnodes.py @@ -75,19 +75,17 @@ class TDSimClient: if os.system(cmd) != 0: tdLog.exit(cmd) - os.makedirs(self.logDir, exist_ok=True) # like "mkdir -p" - # cmd = "mkdir -p " + self.logDir - # if os.system(cmd) != 0: - # tdLog.exit(cmd) + cmd = "mkdir -p " + self.logDir + if os.system(cmd) != 0: + tdLog.exit(cmd) cmd = "rm -rf " + self.cfgDir if os.system(cmd) != 0: tdLog.exit(cmd) - os.makedirs(self.cfgDir, exist_ok=True) # like "mkdir -p" - # cmd = "mkdir -p " + self.cfgDir - # if os.system(cmd) != 0: - # 
tdLog.exit(cmd) + cmd = "mkdir -p " + self.cfgDir + if os.system(cmd) != 0: + tdLog.exit(cmd) cmd = "touch " + self.cfgPath if os.system(cmd) != 0: @@ -188,20 +186,17 @@ class TDDnode: if os.system(cmd) != 0: tdLog.exit(cmd) - os.makedirs(self.dataDir, exist_ok=True) # like "mkdir -p" - # cmd = "mkdir -p " + self.dataDir - # if os.system(cmd) != 0: - # tdLog.exit(cmd) + cmd = "mkdir -p " + self.dataDir + if os.system(cmd) != 0: + tdLog.exit(cmd) - os.makedirs(self.logDir, exist_ok=True) # like "mkdir -p" - # cmd = "mkdir -p " + self.logDir - # if os.system(cmd) != 0: - # tdLog.exit(cmd) + cmd = "mkdir -p " + self.logDir + if os.system(cmd) != 0: + tdLog.exit(cmd) - os.makedirs(self.cfgDir, exist_ok=True) # like "mkdir -p" - # cmd = "mkdir -p " + self.cfgDir - # if os.system(cmd) != 0: - # tdLog.exit(cmd) + cmd = "mkdir -p " + self.cfgDir + if os.system(cmd) != 0: + tdLog.exit(cmd) cmd = "touch " + self.cfgPath if os.system(cmd) != 0: @@ -252,7 +247,7 @@ class TDDnode: projPath = selfPath[:selfPath.find("tests")] for root, dirs, files in os.walk(projPath): - if (("taosd.exe") in files) or (("taosd") in files): + if (("taosd") in files): rootRealPath = os.path.dirname(os.path.realpath(root)) if ("packaging" not in rootRealPath): buildPath = root[:len(root)-len("/build/bin")] @@ -273,11 +268,7 @@ class TDDnode: tdLog.exit("dnode:%d is not deployed" % (self.index)) if self.valgrind == 0: - if platform.system()=="Windows": - cmd = "mintty %s -c %s" % ( - binPath, self.cfgDir) - else: - cmd = "nohup %s -c %s > /dev/null 2>&1 & " % ( + cmd = "nohup %s -c %s > /dev/null 2>&1 & " % ( binPath, self.cfgDir) else: valgrindCmdline = "valgrind --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all -v --workaround-gcc296-bugs=yes" @@ -302,7 +293,7 @@ class TDDnode: i += 1 if i>50: break - popen = subprocess.Popen('tail -f -n +0 ' + logFile, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) + popen = subprocess.Popen('tail -f ' + logFile, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) pid = popen.pid # print('Popen.pid:' + str(pid)) timeout = time.time() + 60*2 diff --git a/tests/pytest/wal/addOldWalTest.py b/tests/pytest/wal/addOldWalTest.py index 36056d1bc2..2f4dcd5ce8 100644 --- a/tests/pytest/wal/addOldWalTest.py +++ b/tests/pytest/wal/addOldWalTest.py @@ -31,7 +31,7 @@ class TDTestCase: def createOldDirAndAddWal(self): oldDir = tdDnodes.getDnodesRootDir() + "dnode1/data/vnode/vnode2/wal/old" - os.system("sudo echo test >> %s/wal" % oldDir) + os.system("sudo echo 'test' >> %s/wal" % oldDir) def run(self): From 98a5ad09f0603b8dde1b15ec77ba89c239f0c7aa Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Tue, 24 Aug 2021 12:12:14 +0800 Subject: [PATCH 148/165] [TD-5466] handle invalid Escapes --- src/kit/shell/src/shellEngine.c | 8 ++++++-- src/util/src/tcompare.c | 1 + 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/src/kit/shell/src/shellEngine.c b/src/kit/shell/src/shellEngine.c index bf19394d05..efc37403b4 100644 --- a/src/kit/shell/src/shellEngine.c +++ b/src/kit/shell/src/shellEngine.c @@ -254,8 +254,12 @@ int32_t shellRunCommand(TAOS* con, char* command) { } if (c == '\\') { - esc = true; - continue; + if (quote != 0 && (*command == '_' || *command == '\\')) { + //DO nothing + } else { + esc = true; + continue; + } } if (quote == c) { diff --git a/src/util/src/tcompare.c b/src/util/src/tcompare.c index 36480418c9..cbb6747052 100644 --- a/src/util/src/tcompare.c +++ b/src/util/src/tcompare.c @@ -262,6 +262,7 @@ int 
patternMatch(const char *patterStr, const char *str, size_t size, const SPat c1 = str[j++]; if (j <= size) { + if (c == '\\' && patterStr[i] == '_' && c1 == '_') { i++; continue; } if (c == c1 || tolower(c) == tolower(c1) || (c == pInfo->matchOne && c1 != 0)) { continue; } From 745c5a918eea53b0ad197720616ed92b68923ba9 Mon Sep 17 00:00:00 2001 From: afwerar <1296468573@qq.com> Date: Tue, 24 Aug 2021 13:07:54 +0800 Subject: [PATCH 149/165] [TD-6169]: windows dll client can not quit. --- src/client/src/tscSystem.c | 2 -- src/util/src/tcache.c | 3 ++- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/src/client/src/tscSystem.c b/src/client/src/tscSystem.c index ad29b58660..c04765b065 100644 --- a/src/client/src/tscSystem.c +++ b/src/client/src/tscSystem.c @@ -199,9 +199,7 @@ void taos_init_imp(void) { // In the APIs of other program language, taos_cleanup is not available yet. // So, to make sure taos_cleanup will be invoked to clean up the allocated resource to suppress the valgrind warning. -#if !defined(TD_WINDOWS) atexit(taos_cleanup); -#endif tscDebug("client is initialized successfully"); } diff --git a/src/util/src/tcache.c b/src/util/src/tcache.c index 46271c6d7a..aaa1d5ba9e 100644 --- a/src/util/src/tcache.c +++ b/src/util/src/tcache.c @@ -537,7 +537,8 @@ void taosCacheCleanup(SCacheObj *pCacheObj) { pCacheObj->deleting = 1; // wait for the refresh thread quit before destroying the cache object. - while(atomic_load_8(&pCacheObj->deleting)) { + // But in the dll, the child thread will be killed before atexit takes effect.So here we only wait for one second. + for (int i = 0; i < 20&&atomic_load_8(&pCacheObj->deleting) != 0; i++) { taosMsleep(50); } From ea2607a73828279cbbc4ba07f7fbecc2985292bc Mon Sep 17 00:00:00 2001 From: afwerar <1296468573@qq.com> Date: Tue, 24 Aug 2021 13:38:33 +0800 Subject: [PATCH 150/165] [TD-6169]: windows dll client can not quit. --- src/util/src/tcache.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/util/src/tcache.c b/src/util/src/tcache.c index aaa1d5ba9e..8d538e37bc 100644 --- a/src/util/src/tcache.c +++ b/src/util/src/tcache.c @@ -720,8 +720,6 @@ void* taosCacheTimedRefresh(void *handle) { continue; } - pthread_mutex_unlock(&guard); - if ((count % pCacheObj->checkTick) != 0) { continue; } @@ -741,6 +739,8 @@ void* taosCacheTimedRefresh(void *handle) { } taosTrashcanEmpty(pCacheObj, false); + + pthread_mutex_unlock(&guard); } } From 8931394a8d6e9d19cb45e32f4ebdac0319533677 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Tue, 24 Aug 2021 13:38:44 +0800 Subject: [PATCH 151/165] [TD-6303]: taosdemo -f miss filename. (#7539) --- src/kit/taosdemo/taosdemo.c | 499 +++++++++++++++++------------------- 1 file changed, 239 insertions(+), 260 deletions(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 50f35faa63..5d851eafd0 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -659,6 +659,13 @@ static FILE * g_fpOfInsertResult = NULL; fprintf(stderr, "PERF: "fmt, __VA_ARGS__); } while(0) #define errorPrint(fmt, ...) \ + do {\ + fprintf(stderr, " \033[31m");\ + fprintf(stderr, "ERROR: "fmt, __VA_ARGS__);\ + fprintf(stderr, " \033[0m");\ + } while(0) + +#define errorPrint2(fmt, ...) 
\ do {\ struct tm Tm, *ptm;\ struct timeval timeSecs; \ @@ -671,8 +678,8 @@ static FILE * g_fpOfInsertResult = NULL; ptm->tm_mon + 1, ptm->tm_mday, ptm->tm_hour,\ ptm->tm_min, ptm->tm_sec, (int32_t)timeSecs.tv_usec,\ taosGetSelfPthreadId());\ - fprintf(stderr, "ERROR: "fmt, __VA_ARGS__);\ fprintf(stderr, " \033[0m");\ + errorPrint(fmt, __VA_ARGS__);\ } while(0) // for strncpy buffer overflow @@ -815,6 +822,12 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { for (int i = 1; i < argc; i++) { if (strcmp(argv[i], "-f") == 0) { arguments->demo_mode = false; + + if (NULL == argv[i+1]) { + printHelp(); + errorPrint("%s", "\n\t-f need a valid json file following!\n"); + exit(EXIT_FAILURE); + } arguments->metaFile = argv[++i]; } else if (strcmp(argv[i], "-c") == 0) { if (argc == i+1) { @@ -1227,7 +1240,7 @@ static int queryDbExec(TAOS *taos, char *command, QUERY_TYPE type, bool quiet) { verbosePrint("%s() LN%d - command: %s\n", __func__, __LINE__, command); if (code != 0) { if (!quiet) { - errorPrint("Failed to execute %s, reason: %s\n", + errorPrint2("Failed to execute %s, reason: %s\n", command, taos_errstr(res)); } taos_free_result(res); @@ -1249,7 +1262,7 @@ static void appendResultBufToFile(char *resultBuf, threadInfo *pThreadInfo) { pThreadInfo->fp = fopen(pThreadInfo->filePath, "at"); if (pThreadInfo->fp == NULL) { - errorPrint( + errorPrint2( "%s() LN%d, failed to open result file: %s, result will not save to file\n", __func__, __LINE__, pThreadInfo->filePath); return; @@ -1268,7 +1281,7 @@ static void fetchResult(TAOS_RES *res, threadInfo* pThreadInfo) { char* databuf = (char*) calloc(1, 100*1024*1024); if (databuf == NULL) { - errorPrint("%s() LN%d, failed to malloc, warning: save result to file slowly!\n", + errorPrint2("%s() LN%d, failed to malloc, warning: save result to file slowly!\n", __func__, __LINE__); return ; } @@ -1308,7 +1321,7 @@ static void selectAndGetResult( if (0 == strncasecmp(g_queryInfo.queryMode, "taosc", strlen("taosc"))) { TAOS_RES *res = taos_query(pThreadInfo->taos, command); if (res == NULL || taos_errno(res) != 0) { - errorPrint("%s() LN%d, failed to execute sql:%s, reason:%s\n", + errorPrint2("%s() LN%d, failed to execute sql:%s, reason:%s\n", __func__, __LINE__, command, taos_errstr(res)); taos_free_result(res); return; @@ -1327,7 +1340,7 @@ static void selectAndGetResult( } } else { - errorPrint("%s() LN%d, unknown query mode: %s\n", + errorPrint2("%s() LN%d, unknown query mode: %s\n", __func__, __LINE__, g_queryInfo.queryMode); } } @@ -2177,7 +2190,7 @@ static int xDumpResultToFile(const char* fname, TAOS_RES* tres) { FILE* fp = fopen(fname, "at"); if (fp == NULL) { - errorPrint("%s() LN%d, failed to open file: %s\n", + errorPrint2("%s() LN%d, failed to open file: %s\n", __func__, __LINE__, fname); return -1; } @@ -2224,7 +2237,7 @@ static int getDbFromServer(TAOS * taos, SDbInfo** dbInfos) { int32_t code = taos_errno(res); if (code != 0) { - errorPrint( "failed to run , reason: %s\n", + errorPrint2("failed to run , reason: %s\n", taos_errstr(res)); return -1; } @@ -2240,7 +2253,7 @@ static int getDbFromServer(TAOS * taos, SDbInfo** dbInfos) { dbInfos[count] = (SDbInfo *)calloc(1, sizeof(SDbInfo)); if (dbInfos[count] == NULL) { - errorPrint( "failed to allocate memory for some dbInfo[%d]\n", count); + errorPrint2("failed to allocate memory for some dbInfo[%d]\n", count); return -1; } @@ -2393,7 +2406,7 @@ static int postProceSql(char *host, struct sockaddr_in *pServAddr, uint16_t port request_buf = malloc(req_buf_len); if (NULL == 
request_buf) { - errorPrint("%s", "ERROR, cannot allocate memory.\n"); + errorPrint("%s", "cannot allocate memory.\n"); exit(EXIT_FAILURE); } @@ -2532,7 +2545,7 @@ static int postProceSql(char *host, struct sockaddr_in *pServAddr, uint16_t port static char* getTagValueFromTagSample(SSuperTable* stbInfo, int tagUsePos) { char* dataBuf = (char*)calloc(TSDB_MAX_SQL_LEN+1, 1); if (NULL == dataBuf) { - errorPrint("%s() LN%d, calloc failed! size:%d\n", + errorPrint2("%s() LN%d, calloc failed! size:%d\n", __func__, __LINE__, TSDB_MAX_SQL_LEN+1); return NULL; } @@ -2632,7 +2645,7 @@ static char* generateTagValuesForStb(SSuperTable* stbInfo, int64_t tableSeq) { dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, "%"PRId64",", rand_bigint()); } else { - errorPrint("No support data type: %s\n", stbInfo->tags[i].dataType); + errorPrint2("No support data type: %s\n", stbInfo->tags[i].dataType); tmfree(dataBuf); return NULL; } @@ -2671,7 +2684,7 @@ static int calcRowLen(SSuperTable* superTbls) { } else if (strcasecmp(dataType, "TIMESTAMP") == 0) { lenOfOneRow += TIMESTAMP_BUFF_LEN; } else { - errorPrint("get error data type : %s\n", dataType); + errorPrint2("get error data type : %s\n", dataType); exit(EXIT_FAILURE); } } @@ -2702,7 +2715,7 @@ static int calcRowLen(SSuperTable* superTbls) { } else if (strcasecmp(dataType, "DOUBLE") == 0) { lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + DOUBLE_BUFF_LEN; } else { - errorPrint("get error tag type : %s\n", dataType); + errorPrint2("get error tag type : %s\n", dataType); exit(EXIT_FAILURE); } } @@ -2739,7 +2752,7 @@ static int getChildNameOfSuperTableWithLimitAndOffset(TAOS * taos, if (code != 0) { taos_free_result(res); taos_close(taos); - errorPrint("%s() LN%d, failed to run command %s\n", + errorPrint2("%s() LN%d, failed to run command %s\n", __func__, __LINE__, command); exit(EXIT_FAILURE); } @@ -2751,7 +2764,7 @@ static int getChildNameOfSuperTableWithLimitAndOffset(TAOS * taos, if (NULL == childTblName) { taos_free_result(res); taos_close(taos); - errorPrint("%s() LN%d, failed to allocate memory!\n", __func__, __LINE__); + errorPrint2("%s() LN%d, failed to allocate memory!\n", __func__, __LINE__); exit(EXIT_FAILURE); } } @@ -2761,7 +2774,7 @@ static int getChildNameOfSuperTableWithLimitAndOffset(TAOS * taos, int32_t* len = taos_fetch_lengths(res); if (0 == strlen((char *)row[0])) { - errorPrint("%s() LN%d, No.%"PRId64" table return empty name\n", + errorPrint2("%s() LN%d, No.%"PRId64" table return empty name\n", __func__, __LINE__, count); exit(EXIT_FAILURE); } @@ -2782,7 +2795,7 @@ static int getChildNameOfSuperTableWithLimitAndOffset(TAOS * taos, tmfree(childTblName); taos_free_result(res); taos_close(taos); - errorPrint("%s() LN%d, realloc fail for save child table name of %s.%s\n", + errorPrint2("%s() LN%d, realloc fail for save child table name of %s.%s\n", __func__, __LINE__, dbName, sTblName); exit(EXIT_FAILURE); } @@ -2879,7 +2892,7 @@ static int getSuperTableFromServer(TAOS * taos, char* dbName, int childTblCount = 10000; superTbls->childTblName = (char*)calloc(1, childTblCount * TSDB_TABLE_NAME_LEN); if (superTbls->childTblName == NULL) { - errorPrint("%s() LN%d, alloc memory failed!\n", __func__, __LINE__); + errorPrint2("%s() LN%d, alloc memory failed!\n", __func__, __LINE__); return -1; } getAllChildNameOfSuperTable(taos, dbName, @@ -2905,7 +2918,7 @@ static int createSuperTable( int lenOfOneRow = 0; if (superTbl->columnCount == 0) { - errorPrint("%s() LN%d, super table column count is %d\n", + errorPrint2("%s() 
LN%d, super table column count is %d\n", __func__, __LINE__, superTbl->columnCount); free(command); return -1; @@ -2969,7 +2982,7 @@ static int createSuperTable( } else { taos_close(taos); free(command); - errorPrint("%s() LN%d, config error data type : %s\n", + errorPrint2("%s() LN%d, config error data type : %s\n", __func__, __LINE__, dataType); exit(EXIT_FAILURE); } @@ -2982,7 +2995,7 @@ static int createSuperTable( if (NULL == superTbl->colsOfCreateChildTable) { taos_close(taos); free(command); - errorPrint("%s() LN%d, Failed when calloc, size:%d", + errorPrint2("%s() LN%d, Failed when calloc, size:%d", __func__, __LINE__, len+1); exit(EXIT_FAILURE); } @@ -2992,7 +3005,7 @@ static int createSuperTable( __func__, __LINE__, superTbl->colsOfCreateChildTable); if (superTbl->tagCount == 0) { - errorPrint("%s() LN%d, super table tag count is %d\n", + errorPrint2("%s() LN%d, super table tag count is %d\n", __func__, __LINE__, superTbl->tagCount); free(command); return -1; @@ -3059,7 +3072,7 @@ static int createSuperTable( } else { taos_close(taos); free(command); - errorPrint("%s() LN%d, config error tag type : %s\n", + errorPrint2("%s() LN%d, config error tag type : %s\n", __func__, __LINE__, dataType); exit(EXIT_FAILURE); } @@ -3074,7 +3087,7 @@ static int createSuperTable( "create table if not exists %s.%s (ts timestamp%s) tags %s", dbName, superTbl->sTblName, cols, tags); if (0 != queryDbExec(taos, command, NO_INSERT_TYPE, false)) { - errorPrint( "create supertable %s failed!\n\n", + errorPrint2("create supertable %s failed!\n\n", superTbl->sTblName); free(command); return -1; @@ -3090,7 +3103,7 @@ int createDatabasesAndStables(char *command) { int ret = 0; taos = taos_connect(g_Dbs.host, g_Dbs.user, g_Dbs.password, NULL, g_Dbs.port); if (taos == NULL) { - errorPrint( "Failed to connect to TDengine, reason:%s\n", taos_errstr(NULL)); + errorPrint2("Failed to connect to TDengine, reason:%s\n", taos_errstr(NULL)); return -1; } @@ -3186,7 +3199,7 @@ int createDatabasesAndStables(char *command) { if (0 != queryDbExec(taos, command, NO_INSERT_TYPE, false)) { taos_close(taos); - errorPrint( "\ncreate database %s failed!\n\n", + errorPrint("\ncreate database %s failed!\n\n", g_Dbs.db[i].dbName); return -1; } @@ -3216,7 +3229,7 @@ int createDatabasesAndStables(char *command) { ret = getSuperTableFromServer(taos, g_Dbs.db[i].dbName, &g_Dbs.db[i].superTbls[j]); if (0 != ret) { - errorPrint("\nget super table %s.%s info failed!\n\n", + errorPrint2("\nget super table %s.%s info failed!\n\n", g_Dbs.db[i].dbName, g_Dbs.db[i].superTbls[j].sTblName); continue; } @@ -3244,7 +3257,7 @@ static void* createTable(void *sarg) pThreadInfo->buffer = calloc(buff_len, 1); if (pThreadInfo->buffer == NULL) { - errorPrint("%s() LN%d, Memory allocated failed!\n", __func__, __LINE__); + errorPrint2("%s() LN%d, Memory allocated failed!\n", __func__, __LINE__); exit(EXIT_FAILURE); } @@ -3266,7 +3279,7 @@ static void* createTable(void *sarg) } else { if (stbInfo == NULL) { free(pThreadInfo->buffer); - errorPrint("%s() LN%d, use metric, but super table info is NULL\n", + errorPrint2("%s() LN%d, use metric, but super table info is NULL\n", __func__, __LINE__); exit(EXIT_FAILURE); } else { @@ -3313,7 +3326,7 @@ static void* createTable(void *sarg) len = 0; if (0 != queryDbExec(pThreadInfo->taos, pThreadInfo->buffer, NO_INSERT_TYPE, false)){ - errorPrint( "queryDbExec() failed. buffer:\n%s\n", pThreadInfo->buffer); + errorPrint2("queryDbExec() failed. 
buffer:\n%s\n", pThreadInfo->buffer); free(pThreadInfo->buffer); return NULL; } @@ -3329,7 +3342,7 @@ static void* createTable(void *sarg) if (0 != len) { if (0 != queryDbExec(pThreadInfo->taos, pThreadInfo->buffer, NO_INSERT_TYPE, false)) { - errorPrint( "queryDbExec() failed. buffer:\n%s\n", pThreadInfo->buffer); + errorPrint2("queryDbExec() failed. buffer:\n%s\n", pThreadInfo->buffer); } } @@ -3374,7 +3387,7 @@ static int startMultiThreadCreateChildTable( db_name, g_Dbs.port); if (pThreadInfo->taos == NULL) { - errorPrint( "%s() LN%d, Failed to connect to TDengine, reason:%s\n", + errorPrint2("%s() LN%d, Failed to connect to TDengine, reason:%s\n", __func__, __LINE__, taos_errstr(NULL)); free(pids); free(infos); @@ -3549,7 +3562,7 @@ static int readSampleFromCsvFileToMem( FILE* fp = fopen(stbInfo->sampleFile, "r"); if (fp == NULL) { - errorPrint( "Failed to open sample file: %s, reason:%s\n", + errorPrint("Failed to open sample file: %s, reason:%s\n", stbInfo->sampleFile, strerror(errno)); return -1; } @@ -3561,7 +3574,7 @@ static int readSampleFromCsvFileToMem( readLen = tgetline(&line, &n, fp); if (-1 == readLen) { if(0 != fseek(fp, 0, SEEK_SET)) { - errorPrint( "Failed to fseek file: %s, reason:%s\n", + errorPrint("Failed to fseek file: %s, reason:%s\n", stbInfo->sampleFile, strerror(errno)); fclose(fp); return -1; @@ -3604,7 +3617,7 @@ static bool getColumnAndTagTypeFromInsertJsonFile( // columns cJSON *columns = cJSON_GetObjectItem(stbInfo, "columns"); if (columns && columns->type != cJSON_Array) { - printf("ERROR: failed to read json, columns not found\n"); + errorPrint("%s", "failed to read json, columns not found\n"); goto PARSE_OVER; } else if (NULL == columns) { superTbls->columnCount = 0; @@ -3614,8 +3627,8 @@ static bool getColumnAndTagTypeFromInsertJsonFile( int columnSize = cJSON_GetArraySize(columns); if ((columnSize + 1/* ts */) > TSDB_MAX_COLUMNS) { - errorPrint("%s() LN%d, failed to read json, column size overflow, max column size is %d\n", - __func__, __LINE__, TSDB_MAX_COLUMNS); + errorPrint("failed to read json, column size overflow, max column size is %d\n", + TSDB_MAX_COLUMNS); goto PARSE_OVER; } @@ -3633,8 +3646,7 @@ static bool getColumnAndTagTypeFromInsertJsonFile( if (countObj && countObj->type == cJSON_Number) { count = countObj->valueint; } else if (countObj && countObj->type != cJSON_Number) { - errorPrint("%s() LN%d, failed to read json, column count not found\n", - __func__, __LINE__); + errorPrint("%s", "failed to read json, column count not found\n"); goto PARSE_OVER; } else { count = 1; @@ -3645,8 +3657,7 @@ static bool getColumnAndTagTypeFromInsertJsonFile( cJSON *dataType = cJSON_GetObjectItem(column, "type"); if (!dataType || dataType->type != cJSON_String || dataType->valuestring == NULL) { - errorPrint("%s() LN%d: failed to read json, column type not found\n", - __func__, __LINE__); + errorPrint("%s", "failed to read json, column type not found\n"); goto PARSE_OVER; } //tstrncpy(superTbls->columns[k].dataType, dataType->valuestring, DATATYPE_BUFF_LEN); @@ -3674,8 +3685,8 @@ static bool getColumnAndTagTypeFromInsertJsonFile( } if ((index + 1 /* ts */) > MAX_NUM_COLUMNS) { - errorPrint("%s() LN%d, failed to read json, column size overflow, allowed max column size is %d\n", - __func__, __LINE__, MAX_NUM_COLUMNS); + errorPrint("failed to read json, column size overflow, allowed max column size is %d\n", + MAX_NUM_COLUMNS); goto PARSE_OVER; } @@ -3686,15 +3697,14 @@ static bool getColumnAndTagTypeFromInsertJsonFile( // tags cJSON *tags = 
cJSON_GetObjectItem(stbInfo, "tags"); if (!tags || tags->type != cJSON_Array) { - errorPrint("%s() LN%d, failed to read json, tags not found\n", - __func__, __LINE__); + errorPrint("%s", "failed to read json, tags not found\n"); goto PARSE_OVER; } int tagSize = cJSON_GetArraySize(tags); if (tagSize > TSDB_MAX_TAGS) { - errorPrint("%s() LN%d, failed to read json, tags size overflow, max tag size is %d\n", - __func__, __LINE__, TSDB_MAX_TAGS); + errorPrint("failed to read json, tags size overflow, max tag size is %d\n", + TSDB_MAX_TAGS); goto PARSE_OVER; } @@ -3708,7 +3718,7 @@ static bool getColumnAndTagTypeFromInsertJsonFile( if (countObj && countObj->type == cJSON_Number) { count = countObj->valueint; } else if (countObj && countObj->type != cJSON_Number) { - printf("ERROR: failed to read json, column count not found\n"); + errorPrint("%s", "failed to read json, column count not found\n"); goto PARSE_OVER; } else { count = 1; @@ -3719,8 +3729,7 @@ static bool getColumnAndTagTypeFromInsertJsonFile( cJSON *dataType = cJSON_GetObjectItem(tag, "type"); if (!dataType || dataType->type != cJSON_String || dataType->valuestring == NULL) { - errorPrint("%s() LN%d, failed to read json, tag type not found\n", - __func__, __LINE__); + errorPrint("%s", "failed to read json, tag type not found\n"); goto PARSE_OVER; } tstrncpy(columnCase.dataType, dataType->valuestring, @@ -3730,8 +3739,7 @@ static bool getColumnAndTagTypeFromInsertJsonFile( if (dataLen && dataLen->type == cJSON_Number) { columnCase.dataLen = dataLen->valueint; } else if (dataLen && dataLen->type != cJSON_Number) { - errorPrint("%s() LN%d, failed to read json, column len not found\n", - __func__, __LINE__); + errorPrint("%s", "failed to read json, column len not found\n"); goto PARSE_OVER; } else { columnCase.dataLen = 0; @@ -3746,16 +3754,16 @@ static bool getColumnAndTagTypeFromInsertJsonFile( } if (index > TSDB_MAX_TAGS) { - errorPrint("%s() LN%d, failed to read json, tags size overflow, allowed max tag count is %d\n", - __func__, __LINE__, TSDB_MAX_TAGS); + errorPrint("failed to read json, tags size overflow, allowed max tag count is %d\n", + TSDB_MAX_TAGS); goto PARSE_OVER; } superTbls->tagCount = index; if ((superTbls->columnCount + superTbls->tagCount + 1 /* ts */) > TSDB_MAX_COLUMNS) { - errorPrint("%s() LN%d, columns + tags is more than allowed max columns count: %d\n", - __func__, __LINE__, TSDB_MAX_COLUMNS); + errorPrint("columns + tags is more than allowed max columns count: %d\n", + TSDB_MAX_COLUMNS); goto PARSE_OVER; } ret = true; @@ -3778,7 +3786,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { } else if (!host) { tstrncpy(g_Dbs.host, "127.0.0.1", MAX_HOSTNAME_SIZE); } else { - printf("ERROR: failed to read json, host not found\n"); + errorPrint("%s", "failed to read json, host not found\n"); goto PARSE_OVER; } @@ -3816,7 +3824,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { } else if (!threads) { g_Dbs.threadCount = 1; } else { - printf("ERROR: failed to read json, threads not found\n"); + errorPrint("%s", "failed to read json, threads not found\n"); goto PARSE_OVER; } @@ -3826,32 +3834,28 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { } else if (!threads2) { g_Dbs.threadCountByCreateTbl = 1; } else { - errorPrint("%s() LN%d, failed to read json, threads2 not found\n", - __func__, __LINE__); + errorPrint("%s", "failed to read json, threads2 not found\n"); goto PARSE_OVER; } cJSON* gInsertInterval = cJSON_GetObjectItem(root, "insert_interval"); if (gInsertInterval && gInsertInterval->type == 
cJSON_Number) { if (gInsertInterval->valueint <0) { - errorPrint("%s() LN%d, failed to read json, insert interval input mistake\n", - __func__, __LINE__); + errorPrint("%s", "failed to read json, insert interval input mistake\n"); goto PARSE_OVER; } g_args.insert_interval = gInsertInterval->valueint; } else if (!gInsertInterval) { g_args.insert_interval = 0; } else { - errorPrint("%s() LN%d, failed to read json, insert_interval input mistake\n", - __func__, __LINE__); + errorPrint("%s", "failed to read json, insert_interval input mistake\n"); goto PARSE_OVER; } cJSON* interlaceRows = cJSON_GetObjectItem(root, "interlace_rows"); if (interlaceRows && interlaceRows->type == cJSON_Number) { if (interlaceRows->valueint < 0) { - errorPrint("%s() LN%d, failed to read json, interlace_rows input mistake\n", - __func__, __LINE__); + errorPrint("%s", "failed to read json, interlace_rows input mistake\n"); goto PARSE_OVER; } @@ -3859,8 +3863,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { } else if (!interlaceRows) { g_args.interlace_rows = 0; // 0 means progressive mode, > 0 mean interlace mode. max value is less or equ num_of_records_per_req } else { - errorPrint("%s() LN%d, failed to read json, interlace_rows input mistake\n", - __func__, __LINE__); + errorPrint("%s", "failed to read json, interlace_rows input mistake\n"); goto PARSE_OVER; } @@ -3933,14 +3936,14 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { cJSON* dbs = cJSON_GetObjectItem(root, "databases"); if (!dbs || dbs->type != cJSON_Array) { - printf("ERROR: failed to read json, databases not found\n"); + errorPrint("%s", "failed to read json, databases not found\n"); goto PARSE_OVER; } int dbSize = cJSON_GetArraySize(dbs); if (dbSize > MAX_DB_COUNT) { errorPrint( - "ERROR: failed to read json, databases size overflow, max database is %d\n", + "failed to read json, databases size overflow, max database is %d\n", MAX_DB_COUNT); goto PARSE_OVER; } @@ -3953,13 +3956,13 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { // dbinfo cJSON *dbinfo = cJSON_GetObjectItem(dbinfos, "dbinfo"); if (!dbinfo || dbinfo->type != cJSON_Object) { - printf("ERROR: failed to read json, dbinfo not found\n"); + errorPrint("%s", "failed to read json, dbinfo not found\n"); goto PARSE_OVER; } cJSON *dbName = cJSON_GetObjectItem(dbinfo, "name"); if (!dbName || dbName->type != cJSON_String || dbName->valuestring == NULL) { - printf("ERROR: failed to read json, db name not found\n"); + errorPrint("%s", "failed to read json, db name not found\n"); goto PARSE_OVER; } tstrncpy(g_Dbs.db[i].dbName, dbName->valuestring, TSDB_DB_NAME_LEN); @@ -3974,8 +3977,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { } else if (!drop) { g_Dbs.db[i].drop = g_args.drop_database; } else { - errorPrint("%s() LN%d, failed to read json, drop input mistake\n", - __func__, __LINE__); + errorPrint("%s", "failed to read json, drop input mistake\n"); goto PARSE_OVER; } @@ -3987,7 +3989,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { } else if (!precision) { memset(g_Dbs.db[i].dbCfg.precision, 0, SMALL_BUFF_LEN); } else { - printf("ERROR: failed to read json, precision not found\n"); + errorPrint("%s", "failed to read json, precision not found\n"); goto PARSE_OVER; } @@ -3997,7 +3999,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { } else if (!update) { g_Dbs.db[i].dbCfg.update = -1; } else { - printf("ERROR: failed to read json, update not found\n"); + errorPrint("%s", "failed to read json, update not found\n"); goto PARSE_OVER; } @@ -4007,7 +4009,7 
@@ static bool getMetaFromInsertJsonFile(cJSON* root) { } else if (!replica) { g_Dbs.db[i].dbCfg.replica = -1; } else { - printf("ERROR: failed to read json, replica not found\n"); + errorPrint("%s", "failed to read json, replica not found\n"); goto PARSE_OVER; } @@ -4017,7 +4019,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { } else if (!keep) { g_Dbs.db[i].dbCfg.keep = -1; } else { - printf("ERROR: failed to read json, keep not found\n"); + errorPrint("%s", "failed to read json, keep not found\n"); goto PARSE_OVER; } @@ -4027,7 +4029,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { } else if (!days) { g_Dbs.db[i].dbCfg.days = -1; } else { - printf("ERROR: failed to read json, days not found\n"); + errorPrint("%s", "failed to read json, days not found\n"); goto PARSE_OVER; } @@ -4037,7 +4039,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { } else if (!cache) { g_Dbs.db[i].dbCfg.cache = -1; } else { - printf("ERROR: failed to read json, cache not found\n"); + errorPrint("%s", "failed to read json, cache not found\n"); goto PARSE_OVER; } @@ -4047,7 +4049,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { } else if (!blocks) { g_Dbs.db[i].dbCfg.blocks = -1; } else { - printf("ERROR: failed to read json, block not found\n"); + errorPrint("%s", "failed to read json, block not found\n"); goto PARSE_OVER; } @@ -4067,7 +4069,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { } else if (!minRows) { g_Dbs.db[i].dbCfg.minRows = 0; // 0 means default } else { - printf("ERROR: failed to read json, minRows not found\n"); + errorPrint("%s", "failed to read json, minRows not found\n"); goto PARSE_OVER; } @@ -4077,7 +4079,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { } else if (!maxRows) { g_Dbs.db[i].dbCfg.maxRows = 0; // 0 means default } else { - printf("ERROR: failed to read json, maxRows not found\n"); + errorPrint("%s", "failed to read json, maxRows not found\n"); goto PARSE_OVER; } @@ -4087,7 +4089,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { } else if (!comp) { g_Dbs.db[i].dbCfg.comp = -1; } else { - printf("ERROR: failed to read json, comp not found\n"); + errorPrint("%s", "failed to read json, comp not found\n"); goto PARSE_OVER; } @@ -4097,7 +4099,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { } else if (!walLevel) { g_Dbs.db[i].dbCfg.walLevel = -1; } else { - printf("ERROR: failed to read json, walLevel not found\n"); + errorPrint("%s", "failed to read json, walLevel not found\n"); goto PARSE_OVER; } @@ -4107,7 +4109,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { } else if (!cacheLast) { g_Dbs.db[i].dbCfg.cacheLast = -1; } else { - printf("ERROR: failed to read json, cacheLast not found\n"); + errorPrint("%s", "failed to read json, cacheLast not found\n"); goto PARSE_OVER; } @@ -4127,24 +4129,22 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { } else if (!fsync) { g_Dbs.db[i].dbCfg.fsync = -1; } else { - errorPrint("%s() LN%d, failed to read json, fsync input mistake\n", - __func__, __LINE__); + errorPrint("%s", "failed to read json, fsync input mistake\n"); goto PARSE_OVER; } // super_talbes cJSON *stables = cJSON_GetObjectItem(dbinfos, "super_tables"); if (!stables || stables->type != cJSON_Array) { - errorPrint("%s() LN%d, failed to read json, super_tables not found\n", - __func__, __LINE__); + errorPrint("%s", "failed to read json, super_tables not found\n"); goto PARSE_OVER; } int stbSize = cJSON_GetArraySize(stables); if (stbSize > MAX_SUPER_TABLE_COUNT) { errorPrint( - "%s() LN%d, failed to 
read json, supertable size overflow, max supertable is %d\n", - __func__, __LINE__, MAX_SUPER_TABLE_COUNT); + "failed to read json, supertable size overflow, max supertable is %d\n", + MAX_SUPER_TABLE_COUNT); goto PARSE_OVER; } @@ -4157,8 +4157,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { cJSON *stbName = cJSON_GetObjectItem(stbInfo, "name"); if (!stbName || stbName->type != cJSON_String || stbName->valuestring == NULL) { - errorPrint("%s() LN%d, failed to read json, stb name not found\n", - __func__, __LINE__); + errorPrint("%s", "failed to read json, stb name not found\n"); goto PARSE_OVER; } tstrncpy(g_Dbs.db[i].superTbls[j].sTblName, stbName->valuestring, @@ -4166,7 +4165,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { cJSON *prefix = cJSON_GetObjectItem(stbInfo, "childtable_prefix"); if (!prefix || prefix->type != cJSON_String || prefix->valuestring == NULL) { - printf("ERROR: failed to read json, childtable_prefix not found\n"); + errorPrint("%s", "failed to read json, childtable_prefix not found\n"); goto PARSE_OVER; } tstrncpy(g_Dbs.db[i].superTbls[j].childTblPrefix, prefix->valuestring, @@ -4187,7 +4186,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { } else if (!autoCreateTbl) { g_Dbs.db[i].superTbls[j].autoCreateTable = PRE_CREATE_SUBTBL; } else { - printf("ERROR: failed to read json, auto_create_table not found\n"); + errorPrint("%s", "failed to read json, auto_create_table not found\n"); goto PARSE_OVER; } @@ -4197,7 +4196,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { } else if (!batchCreateTbl) { g_Dbs.db[i].superTbls[j].batchCreateTableNum = 1000; } else { - printf("ERROR: failed to read json, batch_create_tbl_num not found\n"); + errorPrint("%s", "failed to read json, batch_create_tbl_num not found\n"); goto PARSE_OVER; } @@ -4217,8 +4216,8 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { } else if (!childTblExists) { g_Dbs.db[i].superTbls[j].childTblExists = TBL_NO_EXISTS; } else { - errorPrint("%s() LN%d, failed to read json, child_table_exists not found\n", - __func__, __LINE__); + errorPrint("%s", + "failed to read json, child_table_exists not found\n"); goto PARSE_OVER; } @@ -4228,8 +4227,8 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { cJSON* count = cJSON_GetObjectItem(stbInfo, "childtable_count"); if (!count || count->type != cJSON_Number || 0 >= count->valueint) { - errorPrint("%s() LN%d, failed to read json, childtable_count input mistake\n", - __func__, __LINE__); + errorPrint("%s", + "failed to read json, childtable_count input mistake\n"); goto PARSE_OVER; } g_Dbs.db[i].superTbls[j].childTblCount = count->valueint; @@ -4244,8 +4243,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { tstrncpy(g_Dbs.db[i].superTbls[j].dataSource, "rand", min(SMALL_BUFF_LEN, strlen("rand") + 1)); } else { - errorPrint("%s() LN%d, failed to read json, data_source not found\n", - __func__, __LINE__); + errorPrint("%s", "failed to read json, data_source not found\n"); goto PARSE_OVER; } @@ -4259,8 +4257,8 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { } else if (0 == strcasecmp(stbIface->valuestring, "stmt")) { g_Dbs.db[i].superTbls[j].iface= STMT_IFACE; } else { - errorPrint("%s() LN%d, failed to read json, insert_mode %s not recognized\n", - __func__, __LINE__, stbIface->valuestring); + errorPrint("failed to read json, insert_mode %s not recognized\n", + stbIface->valuestring); goto PARSE_OVER; } } else if (!stbIface) { @@ -4274,7 +4272,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { if 
((childTbl_limit) && (g_Dbs.db[i].drop != true) && (g_Dbs.db[i].superTbls[j].childTblExists == TBL_ALREADY_EXISTS)) { if (childTbl_limit->type != cJSON_Number) { - printf("ERROR: failed to read json, childtable_limit\n"); + errorPrint("%s", "failed to read json, childtable_limit\n"); goto PARSE_OVER; } g_Dbs.db[i].superTbls[j].childTblLimit = childTbl_limit->valueint; @@ -4287,7 +4285,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { && (g_Dbs.db[i].superTbls[j].childTblExists == TBL_ALREADY_EXISTS)) { if ((childTbl_offset->type != cJSON_Number) || (0 > childTbl_offset->valueint)) { - printf("ERROR: failed to read json, childtable_offset\n"); + errorPrint("%s", "failed to read json, childtable_offset\n"); goto PARSE_OVER; } g_Dbs.db[i].superTbls[j].childTblOffset = childTbl_offset->valueint; @@ -4303,7 +4301,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { tstrncpy(g_Dbs.db[i].superTbls[j].startTimestamp, "now", TSDB_DB_NAME_LEN); } else { - printf("ERROR: failed to read json, start_timestamp not found\n"); + errorPrint("%s", "failed to read json, start_timestamp not found\n"); goto PARSE_OVER; } @@ -4313,7 +4311,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { } else if (!timestampStep) { g_Dbs.db[i].superTbls[j].timeStampStep = g_args.timestamp_step; } else { - printf("ERROR: failed to read json, timestamp_step not found\n"); + errorPrint("%s", "failed to read json, timestamp_step not found\n"); goto PARSE_OVER; } @@ -4328,7 +4326,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { tstrncpy(g_Dbs.db[i].superTbls[j].sampleFormat, "csv", SMALL_BUFF_LEN); } else { - printf("ERROR: failed to read json, sample_format not found\n"); + errorPrint("%s", "failed to read json, sample_format not found\n"); goto PARSE_OVER; } @@ -4343,7 +4341,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { memset(g_Dbs.db[i].superTbls[j].sampleFile, 0, MAX_FILE_NAME_LEN); } else { - printf("ERROR: failed to read json, sample_file not found\n"); + errorPrint("%s", "failed to read json, sample_file not found\n"); goto PARSE_OVER; } @@ -4361,7 +4359,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { memset(g_Dbs.db[i].superTbls[j].tagsFile, 0, MAX_FILE_NAME_LEN); g_Dbs.db[i].superTbls[j].tagSource = 0; } else { - printf("ERROR: failed to read json, tags_file not found\n"); + errorPrint("%s", "failed to read json, tags_file not found\n"); goto PARSE_OVER; } @@ -4377,8 +4375,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { } else if (!maxSqlLen) { g_Dbs.db[i].superTbls[j].maxSqlLen = g_args.max_sql_len; } else { - errorPrint("%s() LN%d, failed to read json, stbMaxSqlLen input mistake\n", - __func__, __LINE__); + errorPrint("%s", "failed to read json, stbMaxSqlLen input mistake\n"); goto PARSE_OVER; } /* @@ -4395,31 +4392,28 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { } else if (!multiThreadWriteOneTbl) { g_Dbs.db[i].superTbls[j].multiThreadWriteOneTbl = 0; } else { - printf("ERROR: failed to read json, multiThreadWriteOneTbl not found\n"); + errorPrint("%s", "failed to read json, multiThreadWriteOneTbl not found\n"); goto PARSE_OVER; } */ cJSON* insertRows = cJSON_GetObjectItem(stbInfo, "insert_rows"); if (insertRows && insertRows->type == cJSON_Number) { if (insertRows->valueint < 0) { - errorPrint("%s() LN%d, failed to read json, insert_rows input mistake\n", - __func__, __LINE__); + errorPrint("%s", "failed to read json, insert_rows input mistake\n"); goto PARSE_OVER; } g_Dbs.db[i].superTbls[j].insertRows = insertRows->valueint; } else if 
(!insertRows) { g_Dbs.db[i].superTbls[j].insertRows = 0x7FFFFFFFFFFFFFFF; } else { - errorPrint("%s() LN%d, failed to read json, insert_rows input mistake\n", - __func__, __LINE__); + errorPrint("%s", "failed to read json, insert_rows input mistake\n"); goto PARSE_OVER; } cJSON* stbInterlaceRows = cJSON_GetObjectItem(stbInfo, "interlace_rows"); if (stbInterlaceRows && stbInterlaceRows->type == cJSON_Number) { if (stbInterlaceRows->valueint < 0) { - errorPrint("%s() LN%d, failed to read json, interlace rows input mistake\n", - __func__, __LINE__); + errorPrint("%s", "failed to read json, interlace rows input mistake\n"); goto PARSE_OVER; } g_Dbs.db[i].superTbls[j].interlaceRows = stbInterlaceRows->valueint; @@ -4437,8 +4431,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { g_Dbs.db[i].superTbls[j].interlaceRows = 0; // 0 means progressive mode, > 0 mean interlace mode. max value is less or equ num_of_records_per_req } else { errorPrint( - "%s() LN%d, failed to read json, interlace rows input mistake\n", - __func__, __LINE__); + "%s", "failed to read json, interlace rows input mistake\n"); goto PARSE_OVER; } @@ -4454,7 +4447,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { } else if (!disorderRatio) { g_Dbs.db[i].superTbls[j].disorderRatio = 0; } else { - printf("ERROR: failed to read json, disorderRatio not found\n"); + errorPrint("%s", "failed to read json, disorderRatio not found\n"); goto PARSE_OVER; } @@ -4464,7 +4457,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { } else if (!disorderRange) { g_Dbs.db[i].superTbls[j].disorderRange = 1000; } else { - printf("ERROR: failed to read json, disorderRange not found\n"); + errorPrint("%s", "failed to read json, disorderRange not found\n"); goto PARSE_OVER; } @@ -4472,8 +4465,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { if (insertInterval && insertInterval->type == cJSON_Number) { g_Dbs.db[i].superTbls[j].insertInterval = insertInterval->valueint; if (insertInterval->valueint < 0) { - errorPrint("%s() LN%d, failed to read json, insert_interval input mistake\n", - __func__, __LINE__); + errorPrint("%s", "failed to read json, insert_interval input mistake\n"); goto PARSE_OVER; } } else if (!insertInterval) { @@ -4481,8 +4473,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { __func__, __LINE__, g_args.insert_interval); g_Dbs.db[i].superTbls[j].insertInterval = g_args.insert_interval; } else { - errorPrint("%s() LN%d, failed to read json, insert_interval input mistake\n", - __func__, __LINE__); + errorPrint("%s", "failed to read json, insert_interval input mistake\n"); goto PARSE_OVER; } @@ -4514,7 +4505,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { } else if (!host) { tstrncpy(g_queryInfo.host, "127.0.0.1", MAX_HOSTNAME_SIZE); } else { - printf("ERROR: failed to read json, host not found\n"); + errorPrint("%s", "failed to read json, host not found\n"); goto PARSE_OVER; } @@ -4552,23 +4543,21 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { } else if (!answerPrompt) { g_args.answer_yes = false; } else { - printf("ERROR: failed to read json, confirm_parameter_prompt not found\n"); + errorPrint("%s", "failed to read json, confirm_parameter_prompt not found\n"); goto PARSE_OVER; } cJSON* gQueryTimes = cJSON_GetObjectItem(root, "query_times"); if (gQueryTimes && gQueryTimes->type == cJSON_Number) { if (gQueryTimes->valueint <= 0) { - errorPrint("%s() LN%d, failed to read json, query_times input mistake\n", - __func__, __LINE__); + errorPrint("%s()", "failed to read json, query_times input 
mistake\n"); goto PARSE_OVER; } g_args.query_times = gQueryTimes->valueint; } else if (!gQueryTimes) { g_args.query_times = 1; } else { - errorPrint("%s() LN%d, failed to read json, query_times input mistake\n", - __func__, __LINE__); + errorPrint("%s", "failed to read json, query_times input mistake\n"); goto PARSE_OVER; } @@ -4576,7 +4565,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { if (dbs && dbs->type == cJSON_String && dbs->valuestring != NULL) { tstrncpy(g_queryInfo.dbName, dbs->valuestring, TSDB_DB_NAME_LEN); } else if (!dbs) { - printf("ERROR: failed to read json, databases not found\n"); + errorPrint("%s", "failed to read json, databases not found\n"); goto PARSE_OVER; } @@ -4590,7 +4579,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { tstrncpy(g_queryInfo.queryMode, "taosc", min(SMALL_BUFF_LEN, strlen("taosc") + 1)); } else { - printf("ERROR: failed to read json, query_mode not found\n"); + errorPrint("%s", "failed to read json, query_mode not found\n"); goto PARSE_OVER; } @@ -4600,7 +4589,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { g_queryInfo.specifiedQueryInfo.concurrent = 1; g_queryInfo.specifiedQueryInfo.sqlCount = 0; } else if (specifiedQuery->type != cJSON_Object) { - printf("ERROR: failed to read json, super_table_query not found\n"); + errorPrint("%s", "failed to read json, super_table_query not found\n"); goto PARSE_OVER; } else { cJSON* queryInterval = cJSON_GetObjectItem(specifiedQuery, "query_interval"); @@ -4615,8 +4604,8 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { if (specifiedQueryTimes && specifiedQueryTimes->type == cJSON_Number) { if (specifiedQueryTimes->valueint <= 0) { errorPrint( - "%s() LN%d, failed to read json, query_times: %"PRId64", need be a valid (>0) number\n", - __func__, __LINE__, specifiedQueryTimes->valueint); + "failed to read json, query_times: %"PRId64", need be a valid (>0) number\n", + specifiedQueryTimes->valueint); goto PARSE_OVER; } @@ -4633,8 +4622,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { if (concurrent && concurrent->type == cJSON_Number) { if (concurrent->valueint <= 0) { errorPrint( - "%s() LN%d, query sqlCount %d or concurrent %d is not correct.\n", - __func__, __LINE__, + "query sqlCount %d or concurrent %d is not correct.\n", g_queryInfo.specifiedQueryInfo.sqlCount, g_queryInfo.specifiedQueryInfo.concurrent); goto PARSE_OVER; @@ -4652,8 +4640,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { } else if (0 == strcmp("async", specifiedAsyncMode->valuestring)) { g_queryInfo.specifiedQueryInfo.asyncMode = ASYNC_MODE; } else { - errorPrint("%s() LN%d, failed to read json, async mode input error\n", - __func__, __LINE__); + errorPrint("%s", "failed to read json, async mode input error\n"); goto PARSE_OVER; } } else { @@ -4676,7 +4663,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { } else if (0 == strcmp("no", restart->valuestring)) { g_queryInfo.specifiedQueryInfo.subscribeRestart = false; } else { - printf("ERROR: failed to read json, subscribe restart error\n"); + errorPrint("%s", "failed to read json, subscribe restart error\n"); goto PARSE_OVER; } } else { @@ -4692,7 +4679,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { } else if (0 == strcmp("no", keepProgress->valuestring)) { g_queryInfo.specifiedQueryInfo.subscribeKeepProgress = 0; } else { - printf("ERROR: failed to read json, subscribe keepProgress error\n"); + errorPrint("%s", "failed to read json, subscribe keepProgress error\n"); goto PARSE_OVER; } } else { @@ -4704,15 +4691,13 @@ static bool 
getMetaFromQueryJsonFile(cJSON* root) { if (!specifiedSqls) { g_queryInfo.specifiedQueryInfo.sqlCount = 0; } else if (specifiedSqls->type != cJSON_Array) { - errorPrint("%s() LN%d, failed to read json, super sqls not found\n", - __func__, __LINE__); + errorPrint("%s", "failed to read json, super sqls not found\n"); goto PARSE_OVER; } else { int superSqlSize = cJSON_GetArraySize(specifiedSqls); if (superSqlSize * g_queryInfo.specifiedQueryInfo.concurrent > MAX_QUERY_SQL_COUNT) { - errorPrint("%s() LN%d, failed to read json, query sql(%d) * concurrent(%d) overflow, max is %d\n", - __func__, __LINE__, + errorPrint("failed to read json, query sql(%d) * concurrent(%d) overflow, max is %d\n", superSqlSize, g_queryInfo.specifiedQueryInfo.concurrent, MAX_QUERY_SQL_COUNT); @@ -4726,7 +4711,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { cJSON *sqlStr = cJSON_GetObjectItem(sql, "sql"); if (!sqlStr || sqlStr->type != cJSON_String || sqlStr->valuestring == NULL) { - printf("ERROR: failed to read json, sql not found\n"); + errorPrint("%s", "failed to read json, sql not found\n"); goto PARSE_OVER; } tstrncpy(g_queryInfo.specifiedQueryInfo.sql[j], @@ -4766,7 +4751,8 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { memset(g_queryInfo.specifiedQueryInfo.result[j], 0, MAX_FILE_NAME_LEN); } else { - printf("ERROR: failed to read json, super query result file not found\n"); + errorPrint("%s", + "failed to read json, super query result file not found\n"); goto PARSE_OVER; } } @@ -4779,7 +4765,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { g_queryInfo.superQueryInfo.threadCnt = 1; g_queryInfo.superQueryInfo.sqlCount = 0; } else if (superQuery->type != cJSON_Object) { - printf("ERROR: failed to read json, sub_table_query not found\n"); + errorPrint("%s", "failed to read json, sub_table_query not found\n"); ret = true; goto PARSE_OVER; } else { @@ -4793,24 +4779,22 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { cJSON* superQueryTimes = cJSON_GetObjectItem(superQuery, "query_times"); if (superQueryTimes && superQueryTimes->type == cJSON_Number) { if (superQueryTimes->valueint <= 0) { - errorPrint("%s() LN%d, failed to read json, query_times: %"PRId64", need be a valid (>0) number\n", - __func__, __LINE__, superQueryTimes->valueint); + errorPrint("failed to read json, query_times: %"PRId64", need be a valid (>0) number\n", + superQueryTimes->valueint); goto PARSE_OVER; } g_queryInfo.superQueryInfo.queryTimes = superQueryTimes->valueint; } else if (!superQueryTimes) { g_queryInfo.superQueryInfo.queryTimes = g_args.query_times; } else { - errorPrint("%s() LN%d, failed to read json, query_times input mistake\n", - __func__, __LINE__); + errorPrint("%s", "failed to read json, query_times input mistake\n"); goto PARSE_OVER; } cJSON* threads = cJSON_GetObjectItem(superQuery, "threads"); if (threads && threads->type == cJSON_Number) { if (threads->valueint <= 0) { - errorPrint("%s() LN%d, failed to read json, threads input mistake\n", - __func__, __LINE__); + errorPrint("%s", "failed to read json, threads input mistake\n"); goto PARSE_OVER; } @@ -4832,8 +4816,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { tstrncpy(g_queryInfo.superQueryInfo.sTblName, stblname->valuestring, TSDB_TABLE_NAME_LEN); } else { - errorPrint("%s() LN%d, failed to read json, super table name input error\n", - __func__, __LINE__); + errorPrint("%s", "failed to read json, super table name input error\n"); goto PARSE_OVER; } @@ -4845,8 +4828,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { } else if 
(0 == strcmp("async", superAsyncMode->valuestring)) { g_queryInfo.superQueryInfo.asyncMode = ASYNC_MODE; } else { - errorPrint("%s() LN%d, failed to read json, async mode input error\n", - __func__, __LINE__); + errorPrint("%s", "failed to read json, async mode input error\n"); goto PARSE_OVER; } } else { @@ -4856,8 +4838,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { cJSON* superInterval = cJSON_GetObjectItem(superQuery, "interval"); if (superInterval && superInterval->type == cJSON_Number) { if (superInterval->valueint < 0) { - errorPrint("%s() LN%d, failed to read json, interval input mistake\n", - __func__, __LINE__); + errorPrint("%s", "failed to read json, interval input mistake\n"); goto PARSE_OVER; } g_queryInfo.superQueryInfo.subscribeInterval = superInterval->valueint; @@ -4875,7 +4856,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { } else if (0 == strcmp("no", subrestart->valuestring)) { g_queryInfo.superQueryInfo.subscribeRestart = false; } else { - printf("ERROR: failed to read json, subscribe restart error\n"); + errorPrint("%s", "failed to read json, subscribe restart error\n"); goto PARSE_OVER; } } else { @@ -4891,7 +4872,8 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { } else if (0 == strcmp("no", superkeepProgress->valuestring)) { g_queryInfo.superQueryInfo.subscribeKeepProgress = 0; } else { - printf("ERROR: failed to read json, subscribe super table keepProgress error\n"); + errorPrint("%s", + "failed to read json, subscribe super table keepProgress error\n"); goto PARSE_OVER; } } else { @@ -4928,14 +4910,13 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { if (!superSqls) { g_queryInfo.superQueryInfo.sqlCount = 0; } else if (superSqls->type != cJSON_Array) { - errorPrint("%s() LN%d: failed to read json, super sqls not found\n", - __func__, __LINE__); + errorPrint("%s", "failed to read json, super sqls not found\n"); goto PARSE_OVER; } else { int superSqlSize = cJSON_GetArraySize(superSqls); if (superSqlSize > MAX_QUERY_SQL_COUNT) { - errorPrint("%s() LN%d, failed to read json, query sql size overflow, max is %d\n", - __func__, __LINE__, MAX_QUERY_SQL_COUNT); + errorPrint("failed to read json, query sql size overflow, max is %d\n", + MAX_QUERY_SQL_COUNT); goto PARSE_OVER; } @@ -4947,8 +4928,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { cJSON *sqlStr = cJSON_GetObjectItem(sql, "sql"); if (!sqlStr || sqlStr->type != cJSON_String || sqlStr->valuestring == NULL) { - errorPrint("%s() LN%d, failed to read json, sql not found\n", - __func__, __LINE__); + errorPrint("%s", "failed to read json, sql not found\n"); goto PARSE_OVER; } tstrncpy(g_queryInfo.superQueryInfo.sql[j], sqlStr->valuestring, @@ -4962,8 +4942,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { } else if (NULL == result) { memset(g_queryInfo.superQueryInfo.result[j], 0, MAX_FILE_NAME_LEN); } else { - errorPrint("%s() LN%d, failed to read json, sub query result file not found\n", - __func__, __LINE__); + errorPrint("%s", "failed to read json, sub query result file not found\n"); goto PARSE_OVER; } } @@ -4981,7 +4960,7 @@ static bool getInfoFromJsonFile(char* file) { FILE *fp = fopen(file, "r"); if (!fp) { - printf("failed to read %s, reason:%s\n", file, strerror(errno)); + errorPrint("failed to read %s, reason:%s\n", file, strerror(errno)); return false; } @@ -4992,14 +4971,14 @@ static bool getInfoFromJsonFile(char* file) { if (len <= 0) { free(content); fclose(fp); - printf("failed to read %s, content is null", file); + errorPrint("failed to read %s, content is 
null", file); return false; } content[len] = 0; cJSON* root = cJSON_Parse(content); if (root == NULL) { - printf("ERROR: failed to cjson parse %s, invalid json format\n", file); + errorPrint("failed to cjson parse %s, invalid json format\n", file); goto PARSE_OVER; } @@ -5012,13 +4991,13 @@ static bool getInfoFromJsonFile(char* file) { } else if (0 == strcasecmp("subscribe", filetype->valuestring)) { g_args.test_mode = SUBSCRIBE_TEST; } else { - printf("ERROR: failed to read json, filetype not support\n"); + errorPrint("%s", "failed to read json, filetype not support\n"); goto PARSE_OVER; } } else if (!filetype) { g_args.test_mode = INSERT_TEST; } else { - printf("ERROR: failed to read json, filetype not found\n"); + errorPrint("%s", "failed to read json, filetype not found\n"); goto PARSE_OVER; } @@ -5028,8 +5007,8 @@ static bool getInfoFromJsonFile(char* file) { || (SUBSCRIBE_TEST == g_args.test_mode)) { ret = getMetaFromQueryJsonFile(root); } else { - errorPrint("%s() LN%d, input json file type error! please input correct file type: insert or query or subscribe\n", - __func__, __LINE__); + errorPrint("%s", + "input json file type error! please input correct file type: insert or query or subscribe\n"); goto PARSE_OVER; } @@ -5147,7 +5126,7 @@ static int64_t generateStbRowData( || (0 == strncasecmp(stbInfo->columns[i].dataType, "NCHAR", 5))) { if (stbInfo->columns[i].dataLen > TSDB_MAX_BINARY_LEN) { - errorPrint( "binary or nchar length overflow, max size:%u\n", + errorPrint2("binary or nchar length overflow, max size:%u\n", (uint32_t)TSDB_MAX_BINARY_LEN); return -1; } @@ -5159,7 +5138,7 @@ static int64_t generateStbRowData( } char* buf = (char*)calloc(stbInfo->columns[i].dataLen+1, 1); if (NULL == buf) { - errorPrint( "calloc failed! size:%d\n", stbInfo->columns[i].dataLen); + errorPrint2("calloc failed! 
size:%d\n", stbInfo->columns[i].dataLen); return -1; } rand_string(buf, stbInfo->columns[i].dataLen); @@ -5222,7 +5201,7 @@ static int64_t generateStbRowData( tmpLen = strlen(tmp); tstrncpy(pstr + dataLen, tmp, min(tmpLen +1, BIGINT_BUFF_LEN)); } else { - errorPrint( "Not support data type: %s\n", + errorPrint2("Not support data type: %s\n", stbInfo->columns[i].dataType); return -1; } @@ -5274,7 +5253,7 @@ static int64_t generateData(char *recBuf, char **data_type, } else if (strcasecmp(data_type[i % columnCount], "BINARY") == 0) { char *s = malloc(lenOfBinary + 1); if (s == NULL) { - errorPrint("%s() LN%d, memory allocation %d bytes failed\n", + errorPrint2("%s() LN%d, memory allocation %d bytes failed\n", __func__, __LINE__, lenOfBinary + 1); exit(EXIT_FAILURE); } @@ -5284,7 +5263,7 @@ static int64_t generateData(char *recBuf, char **data_type, } else if (strcasecmp(data_type[i % columnCount], "NCHAR") == 0) { char *s = malloc(lenOfBinary + 1); if (s == NULL) { - errorPrint("%s() LN%d, memory allocation %d bytes failed\n", + errorPrint2("%s() LN%d, memory allocation %d bytes failed\n", __func__, __LINE__, lenOfBinary + 1); exit(EXIT_FAILURE); } @@ -5311,7 +5290,7 @@ static int prepareSampleDataForSTable(SSuperTable *stbInfo) { sampleDataBuf = calloc( stbInfo->lenOfOneRow * MAX_SAMPLES_ONCE_FROM_FILE, 1); if (sampleDataBuf == NULL) { - errorPrint("%s() LN%d, Failed to calloc %"PRIu64" Bytes, reason:%s\n", + errorPrint2("%s() LN%d, Failed to calloc %"PRIu64" Bytes, reason:%s\n", __func__, __LINE__, stbInfo->lenOfOneRow * MAX_SAMPLES_ONCE_FROM_FILE, strerror(errno)); @@ -5322,7 +5301,7 @@ static int prepareSampleDataForSTable(SSuperTable *stbInfo) { int ret = readSampleFromCsvFileToMem(stbInfo); if (0 != ret) { - errorPrint("%s() LN%d, read sample from csv file failed.\n", + errorPrint2("%s() LN%d, read sample from csv file failed.\n", __func__, __LINE__); tmfree(sampleDataBuf); stbInfo->sampleDataBuf = NULL; @@ -5377,7 +5356,7 @@ static int32_t execInsert(threadInfo *pThreadInfo, uint32_t k) debugPrint("%s() LN%d, stmt=%p", __func__, __LINE__, pThreadInfo->stmt); if (0 != taos_stmt_execute(pThreadInfo->stmt)) { - errorPrint("%s() LN%d, failied to execute insert statement. reason: %s\n", + errorPrint2("%s() LN%d, failied to execute insert statement. reason: %s\n", __func__, __LINE__, taos_stmt_errstr(pThreadInfo->stmt)); fprintf(stderr, "\n\033[31m === Please reduce batch number if WAL size exceeds limit. 
===\033[0m\n\n"); @@ -5387,7 +5366,7 @@ static int32_t execInsert(threadInfo *pThreadInfo, uint32_t k) break; default: - errorPrint("%s() LN%d: unknown insert mode: %d\n", + errorPrint2("%s() LN%d: unknown insert mode: %d\n", __func__, __LINE__, stbInfo->iface); affectedRows = 0; } @@ -5615,7 +5594,7 @@ static int generateStbSQLHead( tableSeq % stbInfo->tagSampleCount); } if (NULL == tagsValBuf) { - errorPrint("%s() LN%d, tag buf failed to allocate memory\n", + errorPrint2("%s() LN%d, tag buf failed to allocate memory\n", __func__, __LINE__); return -1; } @@ -5766,7 +5745,7 @@ static int32_t prepareStmtBindArrayByType( if (0 == strncasecmp(dataType, "BINARY", strlen("BINARY"))) { if (dataLen > TSDB_MAX_BINARY_LEN) { - errorPrint( "binary length overflow, max size:%u\n", + errorPrint2("binary length overflow, max size:%u\n", (uint32_t)TSDB_MAX_BINARY_LEN); return -1; } @@ -5789,7 +5768,7 @@ static int32_t prepareStmtBindArrayByType( } else if (0 == strncasecmp(dataType, "NCHAR", strlen("NCHAR"))) { if (dataLen > TSDB_MAX_BINARY_LEN) { - errorPrint( "nchar length overflow, max size:%u\n", + errorPrint2("nchar length overflow, max size:%u\n", (uint32_t)TSDB_MAX_BINARY_LEN); return -1; } @@ -5937,7 +5916,7 @@ static int32_t prepareStmtBindArrayByType( value, &tmpEpoch, strlen(value), timePrec, 0)) { free(bind_ts2); - errorPrint("Input %s, time format error!\n", value); + errorPrint2("Input %s, time format error!\n", value); return -1; } *bind_ts2 = tmpEpoch; @@ -5953,7 +5932,7 @@ static int32_t prepareStmtBindArrayByType( bind->length = &bind->buffer_length; bind->is_null = NULL; } else { - errorPrint( "No support data type: %s\n", dataType); + errorPrint2("Not support data type: %s\n", dataType); return -1; } @@ -5970,7 +5949,7 @@ static int32_t prepareStmtBindArrayByTypeForRand( if (0 == strncasecmp(dataType, "BINARY", strlen("BINARY"))) { if (dataLen > TSDB_MAX_BINARY_LEN) { - errorPrint( "binary length overflow, max size:%u\n", + errorPrint2("binary length overflow, max size:%u\n", (uint32_t)TSDB_MAX_BINARY_LEN); return -1; } @@ -5993,7 +5972,7 @@ static int32_t prepareStmtBindArrayByTypeForRand( } else if (0 == strncasecmp(dataType, "NCHAR", strlen("NCHAR"))) { if (dataLen > TSDB_MAX_BINARY_LEN) { - errorPrint( "nchar length overflow, max size:%u\n", + errorPrint2("nchar length overflow, max size: %u\n", (uint32_t)TSDB_MAX_BINARY_LEN); return -1; } @@ -6145,7 +6124,7 @@ static int32_t prepareStmtBindArrayByTypeForRand( if (TSDB_CODE_SUCCESS != taosParseTime( value, &tmpEpoch, strlen(value), timePrec, 0)) { - errorPrint("Input %s, time format error!\n", value); + errorPrint2("Input %s, time format error!\n", value); return -1; } *bind_ts2 = tmpEpoch; @@ -6163,7 +6142,7 @@ static int32_t prepareStmtBindArrayByTypeForRand( *ptr += bind->buffer_length; } else { - errorPrint( "No support data type: %s\n", dataType); + errorPrint2("No support data type: %s\n", dataType); return -1; } @@ -6181,7 +6160,7 @@ static int32_t prepareStmtWithoutStb( TAOS_STMT *stmt = pThreadInfo->stmt; int ret = taos_stmt_set_tbname(stmt, tableName); if (ret != 0) { - errorPrint("failed to execute taos_stmt_set_tbname(%s). return 0x%x. reason: %s\n", + errorPrint2("failed to execute taos_stmt_set_tbname(%s). return 0x%x. 
reason: %s\n", tableName, ret, taos_stmt_errstr(stmt)); return ret; } @@ -6190,7 +6169,7 @@ static int32_t prepareStmtWithoutStb( char *bindArray = malloc(sizeof(TAOS_BIND) * (g_args.num_of_CPR + 1)); if (bindArray == NULL) { - errorPrint("Failed to allocate %d bind params\n", + errorPrint2("Failed to allocate %d bind params\n", (g_args.num_of_CPR + 1)); return -1; } @@ -6231,13 +6210,13 @@ static int32_t prepareStmtWithoutStb( } } if (0 != taos_stmt_bind_param(stmt, (TAOS_BIND *)bindArray)) { - errorPrint("%s() LN%d, stmt_bind_param() failed! reason: %s\n", + errorPrint2("%s() LN%d, stmt_bind_param() failed! reason: %s\n", __func__, __LINE__, taos_stmt_errstr(stmt)); break; } // if msg > 3MB, break if (0 != taos_stmt_add_batch(stmt)) { - errorPrint("%s() LN%d, stmt_add_batch() failed! reason: %s\n", + errorPrint2("%s() LN%d, stmt_add_batch() failed! reason: %s\n", __func__, __LINE__, taos_stmt_errstr(stmt)); break; } @@ -6260,7 +6239,7 @@ static int32_t prepareStbStmtBindTag( { char *bindBuffer = calloc(1, DOUBLE_BUFF_LEN); // g_args.len_of_binary); if (bindBuffer == NULL) { - errorPrint("%s() LN%d, Failed to allocate %d bind buffer\n", + errorPrint2("%s() LN%d, Failed to allocate %d bind buffer\n", __func__, __LINE__, DOUBLE_BUFF_LEN); return -1; } @@ -6292,7 +6271,7 @@ static int32_t prepareStbStmtBindRand( { char *bindBuffer = calloc(1, DOUBLE_BUFF_LEN); // g_args.len_of_binary); if (bindBuffer == NULL) { - errorPrint("%s() LN%d, Failed to allocate %d bind buffer\n", + errorPrint2("%s() LN%d, Failed to allocate %d bind buffer\n", __func__, __LINE__, DOUBLE_BUFF_LEN); return -1; } @@ -6395,7 +6374,7 @@ static int32_t prepareStbStmtRand( } if (NULL == tagsValBuf) { - errorPrint("%s() LN%d, tag buf failed to allocate memory\n", + errorPrint2("%s() LN%d, tag buf failed to allocate memory\n", __func__, __LINE__); return -1; } @@ -6403,7 +6382,7 @@ static int32_t prepareStbStmtRand( char *tagsArray = calloc(1, sizeof(TAOS_BIND) * stbInfo->tagCount); if (NULL == tagsArray) { tmfree(tagsValBuf); - errorPrint("%s() LN%d, tag buf failed to allocate memory\n", + errorPrint2("%s() LN%d, tag buf failed to allocate memory\n", __func__, __LINE__); return -1; } @@ -6422,14 +6401,14 @@ static int32_t prepareStbStmtRand( tmfree(tagsArray); if (0 != ret) { - errorPrint("%s() LN%d, stmt_set_tbname_tags() failed! reason: %s\n", + errorPrint2("%s() LN%d, stmt_set_tbname_tags() failed! reason: %s\n", __func__, __LINE__, taos_stmt_errstr(stmt)); return -1; } } else { ret = taos_stmt_set_tbname(stmt, tableName); if (0 != ret) { - errorPrint("%s() LN%d, stmt_set_tbname() failed! reason: %s\n", + errorPrint2("%s() LN%d, stmt_set_tbname() failed! reason: %s\n", __func__, __LINE__, taos_stmt_errstr(stmt)); return -1; } @@ -6437,7 +6416,7 @@ static int32_t prepareStbStmtRand( char *bindArray = calloc(1, sizeof(TAOS_BIND) * (stbInfo->columnCount + 1)); if (bindArray == NULL) { - errorPrint("%s() LN%d, Failed to allocate %d bind params\n", + errorPrint2("%s() LN%d, Failed to allocate %d bind params\n", __func__, __LINE__, (stbInfo->columnCount + 1)); return -1; } @@ -6456,7 +6435,7 @@ static int32_t prepareStbStmtRand( } ret = taos_stmt_bind_param(stmt, (TAOS_BIND *)bindArray); if (0 != ret) { - errorPrint("%s() LN%d, stmt_bind_param() failed! reason: %s\n", + errorPrint2("%s() LN%d, stmt_bind_param() failed! 
reason: %s\n", __func__, __LINE__, taos_stmt_errstr(stmt)); free(bindArray); return -1; @@ -6464,7 +6443,7 @@ static int32_t prepareStbStmtRand( // if msg > 3MB, break ret = taos_stmt_add_batch(stmt); if (0 != ret) { - errorPrint("%s() LN%d, stmt_add_batch() failed! reason: %s\n", + errorPrint2("%s() LN%d, stmt_add_batch() failed! reason: %s\n", __func__, __LINE__, taos_stmt_errstr(stmt)); free(bindArray); return -1; @@ -6508,7 +6487,7 @@ static int32_t prepareStbStmtWithSample( } if (NULL == tagsValBuf) { - errorPrint("%s() LN%d, tag buf failed to allocate memory\n", + errorPrint2("%s() LN%d, tag buf failed to allocate memory\n", __func__, __LINE__); return -1; } @@ -6516,7 +6495,7 @@ static int32_t prepareStbStmtWithSample( char *tagsArray = calloc(1, sizeof(TAOS_BIND) * stbInfo->tagCount); if (NULL == tagsArray) { tmfree(tagsValBuf); - errorPrint("%s() LN%d, tag buf failed to allocate memory\n", + errorPrint2("%s() LN%d, tag buf failed to allocate memory\n", __func__, __LINE__); return -1; } @@ -6535,14 +6514,14 @@ static int32_t prepareStbStmtWithSample( tmfree(tagsArray); if (0 != ret) { - errorPrint("%s() LN%d, stmt_set_tbname_tags() failed! reason: %s\n", + errorPrint2("%s() LN%d, stmt_set_tbname_tags() failed! reason: %s\n", __func__, __LINE__, taos_stmt_errstr(stmt)); return -1; } } else { ret = taos_stmt_set_tbname(stmt, tableName); if (0 != ret) { - errorPrint("%s() LN%d, stmt_set_tbname() failed! reason: %s\n", + errorPrint2("%s() LN%d, stmt_set_tbname() failed! reason: %s\n", __func__, __LINE__, taos_stmt_errstr(stmt)); return -1; } @@ -6564,14 +6543,14 @@ static int32_t prepareStbStmtWithSample( } ret = taos_stmt_bind_param(stmt, (TAOS_BIND *)bindArray); if (0 != ret) { - errorPrint("%s() LN%d, stmt_bind_param() failed! reason: %s\n", + errorPrint2("%s() LN%d, stmt_bind_param() failed! reason: %s\n", __func__, __LINE__, taos_stmt_errstr(stmt)); return -1; } // if msg > 3MB, break ret = taos_stmt_add_batch(stmt); if (0 != ret) { - errorPrint("%s() LN%d, stmt_add_batch() failed! reason: %s\n", + errorPrint2("%s() LN%d, stmt_add_batch() failed! 
reason: %s\n", __func__, __LINE__, taos_stmt_errstr(stmt)); return -1; } @@ -6732,7 +6711,7 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { pThreadInfo->buffer = calloc(maxSqlLen, 1); if (NULL == pThreadInfo->buffer) { - errorPrint( "%s() LN%d, Failed to alloc %"PRIu64" Bytes, reason:%s\n", + errorPrint2( "%s() LN%d, Failed to alloc %"PRIu64" Bytes, reason:%s\n", __func__, __LINE__, maxSqlLen, strerror(errno)); return NULL; } @@ -6780,7 +6759,7 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { getTableName(tableName, pThreadInfo, tableSeq); if (0 == strlen(tableName)) { - errorPrint("[%d] %s() LN%d, getTableName return null\n", + errorPrint2("[%d] %s() LN%d, getTableName return null\n", pThreadInfo->threadID, __func__, __LINE__); free(pThreadInfo->buffer); return NULL; @@ -6847,7 +6826,7 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { debugPrint("[%d] %s() LN%d, generated records is %d\n", pThreadInfo->threadID, __func__, __LINE__, generated); if (generated < 0) { - errorPrint("[%d] %s() LN%d, generated records is %d\n", + errorPrint2("[%d] %s() LN%d, generated records is %d\n", pThreadInfo->threadID, __func__, __LINE__, generated); goto free_of_interlace; } else if (generated == 0) { @@ -6901,7 +6880,7 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { startTs = taosGetTimestampUs(); if (recOfBatch == 0) { - errorPrint("[%d] %s() LN%d Failed to insert records of batch %d\n", + errorPrint2("[%d] %s() LN%d Failed to insert records of batch %d\n", pThreadInfo->threadID, __func__, __LINE__, batchPerTbl); if (batchPerTbl > 0) { @@ -6928,7 +6907,7 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { pThreadInfo->totalDelay += delay; if (recOfBatch != affectedRows) { - errorPrint("[%d] %s() LN%d execInsert insert %d, affected rows: %"PRId64"\n%s\n", + errorPrint2("[%d] %s() LN%d execInsert insert %d, affected rows: %"PRId64"\n%s\n", pThreadInfo->threadID, __func__, __LINE__, recOfBatch, affectedRows, pThreadInfo->buffer); goto free_of_interlace; @@ -6986,7 +6965,7 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) { pThreadInfo->buffer = calloc(maxSqlLen, 1); if (NULL == pThreadInfo->buffer) { - errorPrint( "Failed to alloc %"PRIu64" Bytes, reason:%s\n", + errorPrint2("Failed to alloc %"PRIu64" bytes, reason:%s\n", maxSqlLen, strerror(errno)); return NULL; @@ -7027,7 +7006,7 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) { __func__, __LINE__, pThreadInfo->threadID, tableSeq, tableName); if (0 == strlen(tableName)) { - errorPrint("[%d] %s() LN%d, getTableName return null\n", + errorPrint2("[%d] %s() LN%d, getTableName return null\n", pThreadInfo->threadID, __func__, __LINE__); free(pThreadInfo->buffer); return NULL; @@ -7116,7 +7095,7 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) { pThreadInfo->totalDelay += delay; if (affectedRows < 0) { - errorPrint("%s() LN%d, affected rows: %d\n", + errorPrint2("%s() LN%d, affected rows: %d\n", __func__, __LINE__, affectedRows); goto free_of_progressive; } @@ -7278,7 +7257,7 @@ static int convertHostToServAddr(char *host, uint16_t port, struct sockaddr_in * uint16_t rest_port = port + TSDB_PORT_HTTP; struct hostent *server = gethostbyname(host); if ((server == NULL) || (server->h_addr == NULL)) { - errorPrint("%s", "ERROR, no such host"); + errorPrint2("%s", "no such host"); return -1; } @@ -7303,7 +7282,7 @@ static int parseSampleFileToStmt(SSuperTable *stbInfo, uint32_t timePrec) { stbInfo->sampleBindArray = calloc(1, sizeof(char *) * 
MAX_SAMPLES_ONCE_FROM_FILE); if (stbInfo->sampleBindArray == NULL) { - errorPrint("%s() LN%d, Failed to allocate %"PRIu64" bind array buffer\n", + errorPrint2("%s() LN%d, Failed to allocate %"PRIu64" bind array buffer\n", __func__, __LINE__, (uint64_t)sizeof(char *) * MAX_SAMPLES_ONCE_FROM_FILE); return -1; } @@ -7312,7 +7291,7 @@ static int parseSampleFileToStmt(SSuperTable *stbInfo, uint32_t timePrec) for (int i=0; i < MAX_SAMPLES_ONCE_FROM_FILE; i++) { char *bindArray = calloc(1, sizeof(TAOS_BIND) * (stbInfo->columnCount + 1)); if (bindArray == NULL) { - errorPrint("%s() LN%d, Failed to allocate %d bind params\n", + errorPrint2("%s() LN%d, Failed to allocate %d bind params\n", __func__, __LINE__, (stbInfo->columnCount + 1)); return -1; } @@ -7344,7 +7323,7 @@ static int parseSampleFileToStmt(SSuperTable *stbInfo, uint32_t timePrec) char *bindBuffer = calloc(1, index + 1); if (bindBuffer == NULL) { - errorPrint("%s() LN%d, Failed to allocate %d bind buffer\n", + errorPrint2("%s() LN%d, Failed to allocate %d bind buffer\n", __func__, __LINE__, DOUBLE_BUFF_LEN); return -1; } @@ -7382,7 +7361,7 @@ static void startMultiThreadInsertData(int threads, char* db_name, } else if (0 == strncasecmp(precision, "ns", 2)) { timePrec = TSDB_TIME_PRECISION_NANO; } else { - errorPrint("Not support precision: %s\n", precision); + errorPrint2("Not support precision: %s\n", precision); exit(EXIT_FAILURE); } } @@ -7412,7 +7391,7 @@ static void startMultiThreadInsertData(int threads, char* db_name, if ((stbInfo) && (0 == strncasecmp(stbInfo->dataSource, "sample", strlen("sample")))) { if (0 != prepareSampleDataForSTable(stbInfo)) { - errorPrint("%s() LN%d, prepare sample data for stable failed!\n", + errorPrint2("%s() LN%d, prepare sample data for stable failed!\n", __func__, __LINE__); exit(EXIT_FAILURE); } @@ -7422,7 +7401,7 @@ static void startMultiThreadInsertData(int threads, char* db_name, g_Dbs.host, g_Dbs.user, g_Dbs.password, db_name, g_Dbs.port); if (NULL == taos0) { - errorPrint("%s() LN%d, connect to server fail , reason: %s\n", + errorPrint2("%s() LN%d, connect to server fail , reason: %s\n", __func__, __LINE__, taos_errstr(NULL)); exit(EXIT_FAILURE); } @@ -7477,7 +7456,7 @@ static void startMultiThreadInsertData(int threads, char* db_name, limit * TSDB_TABLE_NAME_LEN); if (stbInfo->childTblName == NULL) { taos_close(taos0); - errorPrint("%s() LN%d, alloc memory failed!\n", __func__, __LINE__); + errorPrint2("%s() LN%d, alloc memory failed!\n", __func__, __LINE__); exit(EXIT_FAILURE); } @@ -7583,7 +7562,7 @@ static void startMultiThreadInsertData(int threads, char* db_name, g_Dbs.password, db_name, g_Dbs.port); if (NULL == pThreadInfo->taos) { free(infos); - errorPrint( + errorPrint2( "%s() LN%d, connect to server fail from insert sub thread, reason: %s\n", __func__, __LINE__, taos_errstr(NULL)); @@ -7599,7 +7578,7 @@ static void startMultiThreadInsertData(int threads, char* db_name, if (NULL == pThreadInfo->stmt) { free(pids); free(infos); - errorPrint( + errorPrint2( "%s() LN%d, failed init stmt, reason: %s\n", __func__, __LINE__, taos_errstr(NULL)); @@ -7611,7 +7590,7 @@ static void startMultiThreadInsertData(int threads, char* db_name, free(pids); free(infos); free(stmtBuffer); - errorPrint("failed to execute taos_stmt_prepare. return 0x%x. reason: %s\n", + errorPrint2("failed to execute taos_stmt_prepare. return 0x%x. 
reason: %s\n", ret, taos_stmt_errstr(pThreadInfo->stmt)); exit(EXIT_FAILURE); } @@ -7755,7 +7734,7 @@ static void *readTable(void *sarg) { char *tb_prefix = pThreadInfo->tb_prefix; FILE *fp = fopen(pThreadInfo->filePath, "a"); if (NULL == fp) { - errorPrint( "fopen %s fail, reason:%s.\n", pThreadInfo->filePath, strerror(errno)); + errorPrint2("fopen %s fail, reason:%s.\n", pThreadInfo->filePath, strerror(errno)); free(command); return NULL; } @@ -7791,7 +7770,7 @@ static void *readTable(void *sarg) { int32_t code = taos_errno(pSql); if (code != 0) { - errorPrint( "Failed to query:%s\n", taos_errstr(pSql)); + errorPrint2("Failed to query:%s\n", taos_errstr(pSql)); taos_free_result(pSql); taos_close(taos); fclose(fp); @@ -7873,7 +7852,7 @@ static void *readMetric(void *sarg) { int32_t code = taos_errno(pSql); if (code != 0) { - errorPrint( "Failed to query:%s\n", taos_errstr(pSql)); + errorPrint2("Failed to query:%s\n", taos_errstr(pSql)); taos_free_result(pSql); taos_close(taos); fclose(fp); @@ -7920,7 +7899,7 @@ static int insertTestProcess() { debugPrint("%d result file: %s\n", __LINE__, g_Dbs.resultFile); g_fpOfInsertResult = fopen(g_Dbs.resultFile, "a"); if (NULL == g_fpOfInsertResult) { - errorPrint( "Failed to open %s for save result\n", g_Dbs.resultFile); + errorPrint("Failed to open %s for save result\n", g_Dbs.resultFile); return -1; } @@ -8022,7 +8001,7 @@ static void *specifiedTableQuery(void *sarg) { NULL, g_queryInfo.port); if (taos == NULL) { - errorPrint("[%d] Failed to connect to TDengine, reason:%s\n", + errorPrint2("[%d] Failed to connect to TDengine, reason:%s\n", pThreadInfo->threadID, taos_errstr(NULL)); return NULL; } else { @@ -8034,7 +8013,7 @@ static void *specifiedTableQuery(void *sarg) { sprintf(sqlStr, "use %s", g_queryInfo.dbName); if (0 != queryDbExec(pThreadInfo->taos, sqlStr, NO_INSERT_TYPE, false)) { taos_close(pThreadInfo->taos); - errorPrint( "use database %s failed!\n\n", + errorPrint("use database %s failed!\n\n", g_queryInfo.dbName); return NULL; } @@ -8200,7 +8179,7 @@ static int queryTestProcess() { NULL, g_queryInfo.port); if (taos == NULL) { - errorPrint( "Failed to connect to TDengine, reason:%s\n", + errorPrint("Failed to connect to TDengine, reason:%s\n", taos_errstr(NULL)); exit(EXIT_FAILURE); } @@ -8258,7 +8237,7 @@ static int queryTestProcess() { taos_close(taos); free(infos); free(pids); - errorPrint( "use database %s failed!\n\n", + errorPrint2("use database %s failed!\n\n", g_queryInfo.dbName); return -1; } @@ -8356,7 +8335,7 @@ static int queryTestProcess() { static void stable_sub_callback( TAOS_SUB* tsub, TAOS_RES *res, void* param, int code) { if (res == NULL || taos_errno(res) != 0) { - errorPrint("%s() LN%d, failed to subscribe result, code:%d, reason:%s\n", + errorPrint2("%s() LN%d, failed to subscribe result, code:%d, reason:%s\n", __func__, __LINE__, code, taos_errstr(res)); return; } @@ -8369,7 +8348,7 @@ static void stable_sub_callback( static void specified_sub_callback( TAOS_SUB* tsub, TAOS_RES *res, void* param, int code) { if (res == NULL || taos_errno(res) != 0) { - errorPrint("%s() LN%d, failed to subscribe result, code:%d, reason:%s\n", + errorPrint2("%s() LN%d, failed to subscribe result, code:%d, reason:%s\n", __func__, __LINE__, code, taos_errstr(res)); return; } @@ -8408,7 +8387,7 @@ static TAOS_SUB* subscribeImpl( } if (tsub == NULL) { - errorPrint("failed to create subscription. topic:%s, sql:%s\n", topic, sql); + errorPrint2("failed to create subscription. 
topic:%s, sql:%s\n", topic, sql); return NULL; } @@ -8439,7 +8418,7 @@ static void *superSubscribe(void *sarg) { g_queryInfo.dbName, g_queryInfo.port); if (pThreadInfo->taos == NULL) { - errorPrint("[%d] Failed to connect to TDengine, reason:%s\n", + errorPrint2("[%d] Failed to connect to TDengine, reason:%s\n", pThreadInfo->threadID, taos_errstr(NULL)); free(subSqlStr); return NULL; @@ -8450,7 +8429,7 @@ static void *superSubscribe(void *sarg) { sprintf(sqlStr, "USE %s", g_queryInfo.dbName); if (0 != queryDbExec(pThreadInfo->taos, sqlStr, NO_INSERT_TYPE, false)) { taos_close(pThreadInfo->taos); - errorPrint( "use database %s failed!\n\n", + errorPrint2("use database %s failed!\n\n", g_queryInfo.dbName); free(subSqlStr); return NULL; @@ -8586,7 +8565,7 @@ static void *specifiedSubscribe(void *sarg) { g_queryInfo.dbName, g_queryInfo.port); if (pThreadInfo->taos == NULL) { - errorPrint("[%d] Failed to connect to TDengine, reason:%s\n", + errorPrint2("[%d] Failed to connect to TDengine, reason:%s\n", pThreadInfo->threadID, taos_errstr(NULL)); return NULL; } @@ -8693,7 +8672,7 @@ static int subscribeTestProcess() { g_queryInfo.dbName, g_queryInfo.port); if (taos == NULL) { - errorPrint( "Failed to connect to TDengine, reason:%s\n", + errorPrint2("Failed to connect to TDengine, reason:%s\n", taos_errstr(NULL)); exit(EXIT_FAILURE); } @@ -8721,7 +8700,7 @@ static int subscribeTestProcess() { g_queryInfo.specifiedQueryInfo.sqlCount); } else { if (g_queryInfo.specifiedQueryInfo.concurrent <= 0) { - errorPrint("%s() LN%d, sepcified query sqlCount %d.\n", + errorPrint2("%s() LN%d, sepcified query sqlCount %d.\n", __func__, __LINE__, g_queryInfo.specifiedQueryInfo.sqlCount); exit(EXIT_FAILURE); @@ -8738,7 +8717,7 @@ static int subscribeTestProcess() { g_queryInfo.specifiedQueryInfo.concurrent * sizeof(threadInfo)); if ((NULL == pids) || (NULL == infos)) { - errorPrint("%s() LN%d, malloc failed for create threads\n", __func__, __LINE__); + errorPrint2("%s() LN%d, malloc failed for create threads\n", __func__, __LINE__); exit(EXIT_FAILURE); } @@ -8773,7 +8752,7 @@ static int subscribeTestProcess() { g_queryInfo.superQueryInfo.threadCnt * sizeof(threadInfo)); if ((NULL == pidsOfStable) || (NULL == infosOfStable)) { - errorPrint("%s() LN%d, malloc failed for create threads\n", + errorPrint2("%s() LN%d, malloc failed for create threads\n", __func__, __LINE__); // taos_close(taos); exit(EXIT_FAILURE); @@ -9039,7 +9018,7 @@ static void querySqlFile(TAOS* taos, char* sqlFile) memcpy(cmd + cmd_len, line, read_len); if (0 != queryDbExec(taos, cmd, NO_INSERT_TYPE, false)) { - errorPrint("%s() LN%d, queryDbExec %s failed!\n", + errorPrint2("%s() LN%d, queryDbExec %s failed!\n", __func__, __LINE__, cmd); tmfree(cmd); tmfree(line); @@ -9113,7 +9092,7 @@ static void queryResult() { g_Dbs.port); if (pThreadInfo->taos == NULL) { free(pThreadInfo); - errorPrint( "Failed to connect to TDengine, reason:%s\n", + errorPrint2("Failed to connect to TDengine, reason:%s\n", taos_errstr(NULL)); exit(EXIT_FAILURE); } @@ -9135,7 +9114,7 @@ static void testCmdLine() { if (strlen(configDir)) { wordexp_t full_path; if (wordexp(configDir, &full_path, 0) != 0) { - errorPrint( "Invalid path %s\n", configDir); + errorPrint("Invalid path %s\n", configDir); return; } taos_options(TSDB_OPTION_CONFIGDIR, full_path.we_wordv[0]); From 3b384d8203926216378298a31803b33dc2a640b7 Mon Sep 17 00:00:00 2001 From: cpvmrd Date: Tue, 24 Aug 2021 14:21:05 +0800 Subject: [PATCH 152/165] Update docs.md Change "IOT" to "IoT". 
--- documentation20/cn/01.evaluation/docs.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/documentation20/cn/01.evaluation/docs.md b/documentation20/cn/01.evaluation/docs.md index 2cc6033ccc..f5af3a4b8d 100644 --- a/documentation20/cn/01.evaluation/docs.md +++ b/documentation20/cn/01.evaluation/docs.md @@ -21,7 +21,7 @@ TDengine 的模块之一是时序数据库。但除此之外,为减少研发 ## TDengine 总体适用场景 -作为一个 IOT 大数据平台,TDengine 的典型适用场景是在 IOT 范畴,而且用户有一定的数据量。本文后续的介绍主要针对这个范畴里面的系统。范畴之外的系统,比如 CRM,ERP 等,不在本文讨论范围内。 +作为一个 IoT 大数据平台,TDengine 的典型适用场景是在 IoT 范畴,而且用户有一定的数据量。本文后续的介绍主要针对这个范畴里面的系统。范畴之外的系统,比如 CRM,ERP 等,不在本文讨论范围内。 ### 数据源特点和需求 @@ -54,7 +54,7 @@ TDengine 的模块之一是时序数据库。但除此之外,为减少研发 |系统性能需求|不适用|可能适用|非常适用|简单说明| |---|---|---|---|---| |要求较大的总体处理能力| | | √ | TDengine 的集群功能可以轻松地让多服务器配合达成处理能力的提升。| -|要求高速处理数据 | | | √ | TDengine 的专门为 IOT 优化的存储和数据处理的设计,一般可以让系统得到超出同类产品多倍数的处理速度提升。| +|要求高速处理数据 | | | √ | TDengine 的专门为 IoT 优化的存储和数据处理的设计,一般可以让系统得到超出同类产品多倍数的处理速度提升。| |要求快速处理小粒度数据| | | √ |这方面 TDengine 性能可以完全对标关系型和 NoSQL 型数据处理系统。| ### 系统维护需求 From ac4d59a051413b59556e54b02ffbc8d178a69e3d Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Tue, 24 Aug 2021 14:27:56 +0800 Subject: [PATCH 153/165] [TD-6296]:When the timestamp column is filtered using the or field, the error content is abnormal --- src/query/inc/queryLog.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/query/inc/queryLog.h b/src/query/inc/queryLog.h index 5c48c43c45..87a221943a 100644 --- a/src/query/inc/queryLog.h +++ b/src/query/inc/queryLog.h @@ -24,10 +24,10 @@ extern "C" { extern uint32_t qDebugFlag; -#define qFatal(...) do { if (qDebugFlag & DEBUG_FATAL) { taosPrintLog("QRY FATAL ", 255, __VA_ARGS__); }} while(0) -#define qError(...) do { if (qDebugFlag & DEBUG_ERROR) { taosPrintLog("QRY ERROR ", 255, __VA_ARGS__); }} while(0) -#define qWarn(...) do { if (qDebugFlag & DEBUG_WARN) { taosPrintLog("QRY WARN ", 255, __VA_ARGS__); }} while(0) -#define qInfo(...) do { if (qDebugFlag & DEBUG_INFO) { taosPrintLog("QRY ", 255, __VA_ARGS__); }} while(0) +#define qFatal(...) do { if (qDebugFlag & DEBUG_FATAL) { taosPrintLog("QRY FATAL ", qDebugFlag, __VA_ARGS__); }} while(0) +#define qError(...) do { if (qDebugFlag & DEBUG_ERROR) { taosPrintLog("QRY ERROR ", qDebugFlag, __VA_ARGS__); }} while(0) +#define qWarn(...) do { if (qDebugFlag & DEBUG_WARN) { taosPrintLog("QRY WARN ", qDebugFlag, __VA_ARGS__); }} while(0) +#define qInfo(...) do { if (qDebugFlag & DEBUG_INFO) { taosPrintLog("QRY ", qDebugFlag, __VA_ARGS__); }} while(0) #define qDebug(...) do { if (qDebugFlag & DEBUG_DEBUG) { taosPrintLog("QRY ", qDebugFlag, __VA_ARGS__); }} while(0) #define qTrace(...) 
do { if (qDebugFlag & DEBUG_TRACE) { taosPrintLog("QRY ", qDebugFlag, __VA_ARGS__); }} while(0) #define qDump(a, l) do { if (qDebugFlag & DEBUG_DUMP) { taosDumpData((unsigned char *)a, l); }} while(0) From d17c106e0f7a054e1a87ce6968d52bdb3fc7a410 Mon Sep 17 00:00:00 2001 From: Yu Chen <74105241+yu285@users.noreply.github.com> Date: Tue, 24 Aug 2021 14:29:35 +0800 Subject: [PATCH 154/165] Update docs.md MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Change “需要考虑库的设计,超级表和普通表的设计” to “需要考虑库、超级表和普通表的设计” --- documentation20/cn/04.model/docs.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/documentation20/cn/04.model/docs.md b/documentation20/cn/04.model/docs.md index ed1d2f7168..45a4537d9b 100644 --- a/documentation20/cn/04.model/docs.md +++ b/documentation20/cn/04.model/docs.md @@ -2,7 +2,7 @@ # TDengine数据建模 -TDengine采用关系型数据模型,需要建库、建表。因此对于一个具体的应用场景,需要考虑库的设计,超级表和普通表的设计。本节不讨论细致的语法规则,只介绍概念。 +TDengine采用关系型数据模型,需要建库、建表。因此对于一个具体的应用场景,需要考虑库、超级表和普通表的设计。本节不讨论细致的语法规则,只介绍概念。 关于数据建模请参考[视频教程](https://www.taosdata.com/blog/2020/11/11/1945.html)。 From 5aad68b300088e3e41822bdf520f09405991adf6 Mon Sep 17 00:00:00 2001 From: afwerar <1296468573@qq.com> Date: Tue, 24 Aug 2021 14:34:58 +0800 Subject: [PATCH 155/165] [TD-6169]: windows dll client can not quit. --- src/util/src/tcache.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/util/src/tcache.c b/src/util/src/tcache.c index 8d538e37bc..6863558c5c 100644 --- a/src/util/src/tcache.c +++ b/src/util/src/tcache.c @@ -537,8 +537,8 @@ void taosCacheCleanup(SCacheObj *pCacheObj) { pCacheObj->deleting = 1; // wait for the refresh thread quit before destroying the cache object. - // But in the dll, the child thread will be killed before atexit takes effect.So here we only wait for one second. - for (int i = 0; i < 20&&atomic_load_8(&pCacheObj->deleting) != 0; i++) { + // But in the dll, the child thread will be killed before atexit takes effect.So here we only wait for 5 seconds. 
+ for (int i = 0; i < 100&&atomic_load_8(&pCacheObj->deleting) != 0; i++) { taosMsleep(50); } @@ -720,6 +720,8 @@ void* taosCacheTimedRefresh(void *handle) { continue; } + pthread_mutex_unlock(&guard); + if ((count % pCacheObj->checkTick) != 0) { continue; } @@ -739,8 +741,6 @@ void* taosCacheTimedRefresh(void *handle) { } taosTrashcanEmpty(pCacheObj, false); - - pthread_mutex_unlock(&guard); } } From eb4466adaa9797908992cbf53cec3643f5c73d2d Mon Sep 17 00:00:00 2001 From: Liu Jicong Date: Tue, 24 Aug 2021 15:13:40 +0800 Subject: [PATCH 156/165] fix: fix coredump for last_row query when last row is cached --- src/tsdb/src/tsdbRead.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/tsdb/src/tsdbRead.c b/src/tsdb/src/tsdbRead.c index 9cc9b7224c..d72d38bf2b 100644 --- a/src/tsdb/src/tsdbRead.c +++ b/src/tsdb/src/tsdbRead.c @@ -1572,7 +1572,7 @@ static void mergeTwoRowFromMem(STsdbQueryHandle* pQueryHandle, int32_t capacity, int32_t numOfColsOfRow1 = 0; if (pSchema1 == NULL) { - pSchema1 = tsdbGetTableSchemaByVersion(pTable, dataRowVersion(row1)); + pSchema1 = tsdbGetTableSchemaByVersion(pTable, memRowVersion(row1)); } if(isRow1DataRow) { numOfColsOfRow1 = schemaNCols(pSchema1); @@ -1584,7 +1584,7 @@ static void mergeTwoRowFromMem(STsdbQueryHandle* pQueryHandle, int32_t capacity, if(row2) { isRow2DataRow = isDataRow(row2); if (pSchema2 == NULL) { - pSchema2 = tsdbGetTableSchemaByVersion(pTable, dataRowVersion(row2)); + pSchema2 = tsdbGetTableSchemaByVersion(pTable, memRowVersion(row2)); } if(isRow2DataRow) { numOfColsOfRow2 = schemaNCols(pSchema2); From 605844f2ea67943e6b988be81ee0f121f331cfeb Mon Sep 17 00:00:00 2001 From: afwerar <1296468573@qq.com> Date: Tue, 24 Aug 2021 16:55:47 +0800 Subject: [PATCH 157/165] [TD-6169]: windows dll client can not quit. --- src/util/src/tcache.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/util/src/tcache.c b/src/util/src/tcache.c index 6863558c5c..b4cf2b6658 100644 --- a/src/util/src/tcache.c +++ b/src/util/src/tcache.c @@ -537,8 +537,8 @@ void taosCacheCleanup(SCacheObj *pCacheObj) { pCacheObj->deleting = 1; // wait for the refresh thread quit before destroying the cache object. - // But in the dll, the child thread will be killed before atexit takes effect.So here we only wait for 5 seconds. - for (int i = 0; i < 100&&atomic_load_8(&pCacheObj->deleting) != 0; i++) { + // But in the dll, the child thread will be killed before atexit takes effect.So here we only wait for 2 seconds. 
+ for (int i = 0; i < 40&&atomic_load_8(&pCacheObj->deleting) != 0; i++) { taosMsleep(50); } From 6e53e0a6badfc8870f3c469ff7ac3c73a19f68f4 Mon Sep 17 00:00:00 2001 From: Linhe Huo Date: Tue, 24 Aug 2021 18:43:02 +0800 Subject: [PATCH 158/165] [TD-6313]: improve error handling if loading taos failed in python (#7550) --- src/connector/python/taos/cinterface.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/connector/python/taos/cinterface.py b/src/connector/python/taos/cinterface.py index 51e9a8667d..42dac3c2e8 100644 --- a/src/connector/python/taos/cinterface.py +++ b/src/connector/python/taos/cinterface.py @@ -49,7 +49,7 @@ def _load_taos(): try: return load_func[platform.system()]() except: - sys.exit("unsupported platform to TDengine connector") + raise InterfaceError('unsupported platform or failed to load taos client library') _libtaos = _load_taos() From 1351b5703629cc5233edf3e2c520887e31f5471a Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Tue, 24 Aug 2021 20:27:49 +0800 Subject: [PATCH 159/165] [TD-6317] forbidden distinct with order by --- src/client/src/tscSQLParser.c | 17 +++++++---------- 1 file changed, 7 insertions(+), 10 deletions(-) diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index 48c1a39a47..e157f26c9d 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -5787,11 +5787,6 @@ static void setDefaultOrderInfo(SQueryInfo* pQueryInfo) { if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) { pQueryInfo->groupbyExpr.orderType = TSDB_ORDER_ASC; } - - if (pQueryInfo->distinct) { - pQueryInfo->order.order = TSDB_ORDER_ASC; - pQueryInfo->order.orderColId = PRIMARYKEY_TIMESTAMP_COL_INDEX; - } } int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSqlNode, SSchema* pSchema) { @@ -5805,14 +5800,13 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq const char* msg7 = "only primary timestamp/column in groupby clause allowed as order column"; const char* msg8 = "only column in groupby clause allowed as order column"; const char* msg9 = "orderby column must projected in subquery"; + const char* msg10 = "not support distinct mixed with order by"; setDefaultOrderInfo(pQueryInfo); STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); - - if (pQueryInfo->distinct || pSqlNode->pSortOrder == NULL) { - return TSDB_CODE_SUCCESS; - } - + if (pSqlNode->pSortOrder == NULL) { + return TSDB_CODE_SUCCESS; + } char* pMsgBuf = tscGetErrorMsgPayload(pCmd); SArray* pSortOrder = pSqlNode->pSortOrder; @@ -5832,6 +5826,9 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq return invalidOperationMsg(pMsgBuf, msg2); } } + if (size > 0 && pQueryInfo->distinct) { + return invalidOperationMsg(pMsgBuf, msg10); + } // handle the first part of order by tVariant* pVar = taosArrayGet(pSortOrder, 0); From 9dc8e658796d82b9be884c82dd9af582adc12cab Mon Sep 17 00:00:00 2001 From: Huo Linhe Date: Tue, 24 Aug 2021 21:30:51 +0800 Subject: [PATCH 160/165] [TD-6337]: fix taosdemo.go compile and build guidelines [ci skip] --- documentation20/cn/08.connector/docs.md | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/documentation20/cn/08.connector/docs.md b/documentation20/cn/08.connector/docs.md index 364961ca63..0ac5a91b50 100644 --- a/documentation20/cn/08.connector/docs.md +++ b/documentation20/cn/08.connector/docs.md @@ -966,13 +966,17 @@ Go连接器支持的系统有: **提示:建议Go版本是1.13及以上,并开启模块支持:** ```sh - go env -w GO111MODULE=on - go env -w 
GOPROXY=https://goproxy.io,direct +go env -w GO111MODULE=on +go env -w GOPROXY=https://goproxy.io,direct ``` 在taosdemo.go所在目录下进行编译和执行: ```sh - go mod init *demo* - go build ./demo -h fqdn -p serverPort +go mod init taosdemo +go get github.com/taosdata/driver-go/taosSql +# use win branch in Windows platform. +#go get github.com/taosdata/driver-go/taosSql@win +go build +./taosdemo -h fqdn -p serverPort ``` ### Go连接器的使用 From 36c2980092751f13dacf5fccb1b1c094ef62728b Mon Sep 17 00:00:00 2001 From: Elias Soong Date: Tue, 24 Aug 2021 22:42:40 +0800 Subject: [PATCH 161/165] [TS-106] : describe the difference of timezone setting between Win & Linux. --- documentation20/cn/11.administrator/docs.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/documentation20/cn/11.administrator/docs.md b/documentation20/cn/11.administrator/docs.md index ff44dd1225..29e49aa902 100644 --- a/documentation20/cn/11.administrator/docs.md +++ b/documentation20/cn/11.administrator/docs.md @@ -375,7 +375,7 @@ taos -C 或 taos --dump-config timezone GMT-8 timezone Asia/Shanghai ``` - 均是合法的设置东八区时区的格式。 + 均是合法的设置东八区时区的格式。但需注意,Windows 下并不支持 `timezone Asia/Shanghai` 这样的写法,而必须写成 `timezone UTC-8`。 时区的设置对于查询和写入SQL语句中非Unix时间戳的内容(时间戳字符串、关键词now的解析)产生影响。例如: ```sql From 87d54c1765dfa2f054173f5fae1ea2818a05c676 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Wed, 25 Aug 2021 08:05:58 +0800 Subject: [PATCH 162/165] [TD-6317] forbidden distinct with order by --- src/client/src/tscSQLParser.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index e157f26c9d..e068eaae72 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -5787,6 +5787,11 @@ static void setDefaultOrderInfo(SQueryInfo* pQueryInfo) { if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) { pQueryInfo->groupbyExpr.orderType = TSDB_ORDER_ASC; } + + if (pQueryInfo->distinct) { + pQueryInfo->order.order = TSDB_ORDER_ASC; + pQueryInfo->order.orderColId = PRIMARYKEY_TIMESTAMP_COL_INDEX; + } } int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSqlNode, SSchema* pSchema) { From 3d06d17b5343413c1bea7ccb3126b4223b7d3f6b Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Wed, 25 Aug 2021 08:15:05 +0800 Subject: [PATCH 163/165] Feature/sangshuduo/td 5875 taosdemo ue improve (#7560) * [TD-5875]: taosdemo show progress * empty commit for CI * better msg for create child table. 
--- src/kit/taosdemo/taosdemo.c | 58 +++++++++++++++++++++++++------------ 1 file changed, 39 insertions(+), 19 deletions(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 5d851eafd0..e0cc76d5a8 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -443,6 +443,7 @@ typedef struct SThreadInfo_S { uint64_t start_table_from; uint64_t end_table_to; int64_t ntables; + int64_t tables_created; uint64_t data_of_rate; int64_t start_time; char* cols; @@ -639,6 +640,7 @@ SArguments g_args = { static SDbs g_Dbs; static int64_t g_totalChildTables = 0; +static int64_t g_actualChildTables = 0; static SQueryMetaInfo g_queryInfo; static FILE * g_fpOfInsertResult = NULL; @@ -964,6 +966,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { exit(EXIT_FAILURE); } arguments->num_of_tables = atoi(argv[++i]); + g_totalChildTables = arguments->num_of_tables; } else if (strcmp(argv[i], "-n") == 0) { if ((argc == i+1) || (!isStringNumber(argv[i+1]))) { @@ -1134,7 +1137,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { exit(EXIT_FAILURE); } } else if ((strcmp(argv[i], "--version") == 0) || - (strcmp(argv[i], "-V") == 0)){ + (strcmp(argv[i], "-V") == 0)) { printVersion(); exit(0); } else if (strcmp(argv[i], "--help") == 0) { @@ -1345,14 +1348,14 @@ static void selectAndGetResult( } } -static char *rand_bool_str(){ +static char *rand_bool_str() { static int cursor; cursor++; if (cursor > (MAX_PREPARED_RAND - 1)) cursor = 0; return g_randbool_buff + ((cursor % MAX_PREPARED_RAND) * BOOL_BUFF_LEN); } -static int32_t rand_bool(){ +static int32_t rand_bool() { static int cursor; cursor++; if (cursor > (MAX_PREPARED_RAND - 1)) cursor = 0; @@ -1485,7 +1488,7 @@ static char *demo_phase_float_str() { return g_rand_phase_buff + ((cursor % MAX_PREPARED_RAND) * FLOAT_BUFF_LEN); } -static float UNUSED_FUNC demo_phase_float(){ +static float UNUSED_FUNC demo_phase_float() { static int cursor; cursor++; if (cursor > (MAX_PREPARED_RAND - 1)) cursor = 0; @@ -1564,7 +1567,7 @@ static void init_rand_data() { g_randdouble_buff = calloc(1, DOUBLE_BUFF_LEN * MAX_PREPARED_RAND); assert(g_randdouble_buff); - for (int i = 0; i < MAX_PREPARED_RAND; i++){ + for (int i = 0; i < MAX_PREPARED_RAND; i++) { g_randint[i] = (int)(taosRandom() % 65535); sprintf(g_randint_buff + i * INT_BUFF_LEN, "%d", g_randint[i]); @@ -3276,6 +3279,7 @@ static void* createTable(void *sarg) pThreadInfo->db_name, g_args.tb_prefix, i, pThreadInfo->cols); + batchNum ++; } else { if (stbInfo == NULL) { free(pThreadInfo->buffer); @@ -3325,13 +3329,14 @@ static void* createTable(void *sarg) len = 0; if (0 != queryDbExec(pThreadInfo->taos, pThreadInfo->buffer, - NO_INSERT_TYPE, false)){ + NO_INSERT_TYPE, false)) { errorPrint2("queryDbExec() failed. 
buffer:\n%s\n", pThreadInfo->buffer); free(pThreadInfo->buffer); return NULL; } + pThreadInfo->tables_created += batchNum; - uint64_t currentPrintTime = taosGetTimestampMs(); + uint64_t currentPrintTime = taosGetTimestampMs(); if (currentPrintTime - lastPrintTime > 30*1000) { printf("thread[%d] already create %"PRIu64" - %"PRIu64" tables\n", pThreadInfo->threadID, pThreadInfo->start_table_from, i); @@ -3401,6 +3406,7 @@ static int startMultiThreadCreateChildTable( pThreadInfo->use_metric = true; pThreadInfo->cols = cols; pThreadInfo->minDelay = UINT64_MAX; + pThreadInfo->tables_created = 0; pthread_create(pids + i, NULL, createTable, pThreadInfo); } @@ -3411,6 +3417,8 @@ static int startMultiThreadCreateChildTable( for (int i = 0; i < threads; i++) { threadInfo *pThreadInfo = infos + i; taos_close(pThreadInfo->taos); + + g_actualChildTables += pThreadInfo->tables_created; } free(pids); @@ -3437,7 +3445,6 @@ static void createChildTables() { verbosePrint("%s() LN%d: %s\n", __func__, __LINE__, g_Dbs.db[i].superTbls[j].colsOfCreateChildTable); uint64_t startFrom = 0; - g_totalChildTables += g_Dbs.db[i].superTbls[j].childTblCount; verbosePrint("%s() LN%d: create %"PRId64" child tables from %"PRIu64"\n", __func__, __LINE__, g_totalChildTables, startFrom); @@ -4232,6 +4239,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { goto PARSE_OVER; } g_Dbs.db[i].superTbls[j].childTblCount = count->valueint; + g_totalChildTables += g_Dbs.db[i].superTbls[j].childTblCount; cJSON *dataSource = cJSON_GetObjectItem(stbInfo, "data_source"); if (dataSource && dataSource->type == cJSON_String @@ -4936,7 +4944,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { cJSON *result = cJSON_GetObjectItem(sql, "result"); if (result != NULL && result->type == cJSON_String - && result->valuestring != NULL){ + && result->valuestring != NULL) { tstrncpy(g_queryInfo.superQueryInfo.result[j], result->valuestring, MAX_FILE_NAME_LEN); } else if (NULL == result) { @@ -7586,7 +7594,7 @@ static void startMultiThreadInsertData(int threads, char* db_name, } int ret = taos_stmt_prepare(pThreadInfo->stmt, stmtBuffer, 0); - if (ret != 0){ + if (ret != 0) { free(pids); free(infos); free(stmtBuffer); @@ -7932,18 +7940,30 @@ static int insertTestProcess() { double start; double end; - // create child tables - start = taosGetTimestampMs(); - createChildTables(); - end = taosGetTimestampMs(); - if (g_totalChildTables > 0) { - fprintf(stderr, "Spent %.4f seconds to create %"PRId64" tables with %d thread(s)\n\n", - (end - start)/1000.0, g_totalChildTables, g_Dbs.threadCountByCreateTbl); + fprintf(stderr, + "creating %"PRId64" table(s) with %d thread(s)\n\n", + g_totalChildTables, g_Dbs.threadCountByCreateTbl); if (g_fpOfInsertResult) { fprintf(g_fpOfInsertResult, - "Spent %.4f seconds to create %"PRId64" tables with %d thread(s)\n\n", - (end - start)/1000.0, g_totalChildTables, g_Dbs.threadCountByCreateTbl); + "creating %"PRId64" table(s) with %d thread(s)\n\n", + g_totalChildTables, g_Dbs.threadCountByCreateTbl); + } + + // create child tables + start = taosGetTimestampMs(); + createChildTables(); + end = taosGetTimestampMs(); + + fprintf(stderr, + "Spent %.4f seconds to create %"PRId64" table(s) with %d thread(s), actual %"PRId64" table(s) created\n\n", + (end - start)/1000.0, g_totalChildTables, + g_Dbs.threadCountByCreateTbl, g_actualChildTables); + if (g_fpOfInsertResult) { + fprintf(g_fpOfInsertResult, + "Spent %.4f seconds to create %"PRId64" table(s) with %d thread(s), actual %"PRId64" table(s) created\n\n", + (end - 
start)/1000.0, g_totalChildTables, + g_Dbs.threadCountByCreateTbl, g_actualChildTables); } } From 993b4a0185605d31b5fd594acb86579b9f9b17d1 Mon Sep 17 00:00:00 2001 From: Ping Xiao Date: Tue, 24 Aug 2021 17:18:32 +0800 Subject: [PATCH 164/165] [TD-6295][ci skip]: Update performace test script for perfMonitor --- tests/pytest/query/queryPerformance.py | 112 +++++++++++++++++----- tests/pytest/tools/taosdemoPerformance.py | 21 ++-- 2 files changed, 92 insertions(+), 41 deletions(-) diff --git a/tests/pytest/query/queryPerformance.py b/tests/pytest/query/queryPerformance.py index 81103252d8..29e5cb19b7 100644 --- a/tests/pytest/query/queryPerformance.py +++ b/tests/pytest/query/queryPerformance.py @@ -17,6 +17,7 @@ import os import taos import time import argparse +import json class taosdemoQueryPerformace: @@ -48,7 +49,7 @@ class taosdemoQueryPerformace: cursor2 = self.conn2.cursor() cursor2.execute("create database if not exists %s" % self.dbName) cursor2.execute("use %s" % self.dbName) - cursor2.execute("create table if not exists %s(ts timestamp, query_time float, commit_id binary(50), branch binary(50), type binary(20)) tags(query_id int, query_sql binary(300))" % self.stbName) + cursor2.execute("create table if not exists %s(ts timestamp, query_time_avg float, query_time_max float, query_time_min float, commit_id binary(50), branch binary(50), type binary(20)) tags(query_id int, query_sql binary(300))" % self.stbName) sql = "select count(*) from test.meters" tableid = 1 @@ -74,7 +75,7 @@ class taosdemoQueryPerformace: tableid = 6 cursor2.execute("create table if not exists %s%d using %s tags(%d, '%s')" % (self.tbPerfix, tableid, self.stbName, tableid, sql)) - sql = "select * from meters" + sql = "select * from meters limit 10000" tableid = 7 cursor2.execute("create table if not exists %s%d using %s tags(%d, '%s')" % (self.tbPerfix, tableid, self.stbName, tableid, sql)) @@ -87,37 +88,96 @@ class taosdemoQueryPerformace: cursor2.execute("create table if not exists %s%d using %s tags(%d, '%s')" % (self.tbPerfix, tableid, self.stbName, tableid, sql)) cursor2.close() + + def generateQueryJson(self): + + sqls = [] + cursor2 = self.conn2.cursor() + cursor2.execute("select query_id, query_sql from %s.%s" % (self.dbName, self.stbName)) + i = 0 + for data in cursor2: + sql = { + "sql": data[1], + "result_mode": "onlyformat", + "result_file": "./query_sql_res%d.txt" % i + } + sqls.append(sql) + i += 1 + + query_data = { + "filetype": "query", + "cfgdir": "/etc/perf", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "databases": "test", + "specified_table_query": { + "query_times": 100, + "concurrent": 1, + "sqls": sqls + } + } + + query_json_file = f"/tmp/query.json" + + with open(query_json_file, 'w') as f: + json.dump(query_data, f) + return query_json_file + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosdemo" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root) - len("/build/bin")] + break + return buildPath + + def getCMDOutput(self, cmd): + cmd = os.popen(cmd) + output = cmd.read() + cmd.close() + return output def query(self): - cursor = self.conn.cursor() + buildPath = self.getBuildPath() + if (buildPath == ""): + print("taosdemo not 
found!") + sys.exit(1) + + binPath = buildPath + "/build/bin/" + os.system( + "%sperfMonitor -f %s > query_res.txt" % + (binPath, self.generateQueryJson())) + + cursor = self.conn2.cursor() print("==================== query performance ====================") - cursor.execute("use %s" % self.dbName) - cursor.execute("select tbname, query_id, query_sql from %s" % self.stbName) + cursor.execute("select tbname, query_sql from %s" % self.stbName) + i = 0 for data in cursor: table_name = data[0] - query_id = data[1] - sql = data[2] - - totalTime = 0 - cursor2 = self.conn.cursor() - cursor2.execute("use test") - for i in range(100): - if(self.clearCache == True): - # root permission is required - os.system("echo 3 > /proc/sys/vm/drop_caches") - - startTime = time.time() - cursor2.execute(sql) - totalTime += time.time() - startTime - cursor2.close() - print("query time for: %s %f seconds" % (sql, totalTime / 100)) - - cursor3 = self.conn2.cursor() - cursor3.execute("insert into %s.%s values(now, %f, '%s', '%s', '%s')" % (self.dbName, table_name, totalTime / 100, self.commitID, self.branch, self.type)) + sql = data[1] - cursor3.close() + self.avgDelay = self.getCMDOutput("grep 'avgDelay' query_res.txt | awk 'NR==%d{print $2}'" % (i + 1)) + self.maxDelay = self.getCMDOutput("grep 'avgDelay' query_res.txt | awk 'NR==%d{print $5}'" % (i + 1)) + self.minDelay = self.getCMDOutput("grep 'avgDelay' query_res.txt | awk 'NR==%d{print $8}'" % (i + 1)) + i += 1 + + print("query time for: %s %f seconds" % (sql, float(self.avgDelay))) + c = self.conn2.cursor() + c.execute("insert into %s.%s values(now, %f, %f, %f, '%s', '%s', '%s')" % (self.dbName, table_name, float(self.avgDelay), float(self.maxDelay), float(self.minDelay), self.commitID, self.branch, self.type)) + + c.close() cursor.close() if __name__ == '__main__': @@ -174,4 +234,4 @@ if __name__ == '__main__': args = parser.parse_args() perftest = taosdemoQueryPerformace(args.remove_cache, args.commit_id, args.database_name, args.stable_name, args.table_perfix, args.git_branch, args.build_type) perftest.createPerfTables() - perftest.query() + perftest.query() \ No newline at end of file diff --git a/tests/pytest/tools/taosdemoPerformance.py b/tests/pytest/tools/taosdemoPerformance.py index 1d28a2708f..51b064a08e 100644 --- a/tests/pytest/tools/taosdemoPerformance.py +++ b/tests/pytest/tools/taosdemoPerformance.py @@ -49,24 +49,18 @@ class taosdemoPerformace: def generateJson(self): db = { "name": "%s" % self.insertDB, - "drop": "yes", - "replica": 1 + "drop": "yes" } stb = { "name": "meters", - "child_table_exists": "no", "childtable_count": self.numOfTables, "childtable_prefix": "stb_", - "auto_create_table": "no", - "data_source": "rand", "batch_create_tbl_num": 10, - "insert_mode": "taosc", + "insert_mode": "rand", "insert_rows": self.numOfRows, - "interlace_rows": 0, - "max_sql_len": 1024000, - "disorder_ratio": 0, - "disorder_range": 1000, + "batch_rows": 1000000, + "max_sql_len": 1048576, "timestamp_step": 1, "start_timestamp": "2020-10-01 00:00:00.000", "sample_format": "csv", @@ -100,11 +94,8 @@ class taosdemoPerformace: "user": "root", "password": "taosdata", "thread_count": 10, - "thread_count_create_tbl": 10, + "thread_count_create_tbl": 4, "result_file": "./insert_res.txt", - "confirm_parameter_prompt": "no", - "insert_interval": 0, - "num_of_records_per_req": 30000, "databases": [db] } @@ -145,7 +136,7 @@ class taosdemoPerformace: binPath = buildPath + "/build/bin/" os.system( - "%staosdemo -f %s > /dev/null 2>&1" % + "%sperfMonitor -f %s > 
/dev/null 2>&1" % (binPath, self.generateJson())) self.createTableTime = self.getCMDOutput( "grep 'Spent' insert_res.txt | awk 'NR==1{print $2}'") From 5584038709d5ff3c392ca222e681de1ae52e5b32 Mon Sep 17 00:00:00 2001 From: Elias Soong Date: Wed, 25 Aug 2021 11:37:09 +0800 Subject: [PATCH 165/165] [TD-2639] : update description about operator "like". --- documentation20/cn/12.taos-sql/docs.md | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/documentation20/cn/12.taos-sql/docs.md b/documentation20/cn/12.taos-sql/docs.md index 48409537bb..b183b6e419 100644 --- a/documentation20/cn/12.taos-sql/docs.md +++ b/documentation20/cn/12.taos-sql/docs.md @@ -206,10 +206,6 @@ TDengine 缺省的时间戳是毫秒精度,但通过在 CREATE DATABASE 时传 显示当前数据库下的所有数据表信息。 - 说明:可在 like 中使用通配符进行名称的匹配,这一通配符字符串最长不能超过 20 字节。( 从 2.1.6.1 版本开始,通配符字符串的长度放宽到了 100 字节,并可以通过 taos.cfg 中的 maxWildCardsLength 参数来配置这一长度限制。但不建议使用太长的通配符字符串,将有可能严重影响 LIKE 操作的执行性能。) - - 通配符匹配:1)'%'(百分号)匹配0到任意个字符;2)'\_'下划线匹配单个任意字符。 - - **显示一个数据表的创建语句** ```mysql @@ -718,15 +714,19 @@ Query OK, 1 row(s) in set (0.001091s) | = | equal to | all types | | <> | not equal to | all types | | between and | within a certain range | **`timestamp`** and all numeric types | -| in | matches any value in a set | all types except first column `timestamp` | +| in | match any value in a set | all types except first column `timestamp` | +| like | match a wildcard string | **`binary`** **`nchar`** | | % | match with any char sequences | **`binary`** **`nchar`** | | _ | match with a single char | **`binary`** **`nchar`** | 1. <> 算子也可以写为 != ,请注意,这个算子不能用于数据表第一列的 timestamp 字段。 -2. 同时进行多个字段的范围过滤,需要使用关键词 AND 来连接不同的查询条件,暂不支持 OR 连接的不同列之间的查询过滤条件。 -3. 针对单一字段的过滤,如果是时间过滤条件,则一条语句中只支持设定一个;但针对其他的(普通)列或标签列,则可以使用 `OR` 关键字进行组合条件的查询过滤。例如: `((value > 20 AND value < 30) OR (value < 12))`。 -4. 从 2.0.17.0 版本开始,条件过滤开始支持 BETWEEN AND 语法,例如 `WHERE col2 BETWEEN 1.5 AND 3.25` 表示查询条件为“1.5 ≤ col2 ≤ 3.25”。 -5. 从 2.1.4.0 版本开始,条件过滤开始支持 IN 算子,例如 `WHERE city IN ('Beijing', 'Shanghai')`。说明:BOOL 类型写作 `{true, false}` 或 `{0, 1}` 均可,但不能写作 0、1 之外的整数;FLOAT 和 DOUBLE 类型会受到浮点数精度影响,集合内的值在精度范围内认为和数据行的值完全相等才能匹配成功;TIMESTAMP 类型支持非主键的列。 +2. like 算子使用通配符字符串进行匹配检查。 + * 在通配符字符串中:'%'(百分号)匹配 0 到任意个字符;'\_'(下划线)匹配单个任意字符。 + * 通配符字符串最长不能超过 20 字节。(从 2.1.6.1 版本开始,通配符字符串的长度放宽到了 100 字节,并可以通过 taos.cfg 中的 maxWildCardsLength 参数来配置这一长度限制。但不建议使用太长的通配符字符串,将有可能严重影响 LIKE 操作的执行性能。) +3. 同时进行多个字段的范围过滤,需要使用关键词 AND 来连接不同的查询条件,暂不支持 OR 连接的不同列之间的查询过滤条件。 +4. 针对单一字段的过滤,如果是时间过滤条件,则一条语句中只支持设定一个;但针对其他的(普通)列或标签列,则可以使用 `OR` 关键字进行组合条件的查询过滤。例如: `((value > 20 AND value < 30) OR (value < 12))`。 +5. 从 2.0.17.0 版本开始,条件过滤开始支持 BETWEEN AND 语法,例如 `WHERE col2 BETWEEN 1.5 AND 3.25` 表示查询条件为“1.5 ≤ col2 ≤ 3.25”。 +6. 从 2.1.4.0 版本开始,条件过滤开始支持 IN 算子,例如 `WHERE city IN ('Beijing', 'Shanghai')`。说明:BOOL 类型写作 `{true, false}` 或 `{0, 1}` 均可,但不能写作 0、1 之外的整数;FLOAT 和 DOUBLE 类型会受到浮点数精度影响,集合内的值在精度范围内认为和数据行的值完全相等才能匹配成功;TIMESTAMP 类型支持非主键的列。 ### UNION ALL 操作符