From 4b564b2ebd358179e3aef8b1188bbebec0afba2a Mon Sep 17 00:00:00 2001 From: 54liuyao <54liuyao@163.com> Date: Fri, 13 May 2022 15:09:31 +0800 Subject: [PATCH 01/21] add stream ci --- source/libs/executor/inc/executorimpl.h | 2 +- source/libs/executor/src/executorimpl.c | 13 +- source/libs/executor/src/scanoperator.c | 9 +- source/libs/planner/src/planOptimizer.c | 4 +- source/libs/stream/src/tstream.c | 2 +- tests/script/jenkins/basic.txt | 1 + tests/script/tsim/tstream/basic1.sim | 343 ++++++++++++++++++++++++ 7 files changed, 355 insertions(+), 19 deletions(-) create mode 100644 tests/script/tsim/tstream/basic1.sim diff --git a/source/libs/executor/inc/executorimpl.h b/source/libs/executor/inc/executorimpl.h index 34b7fce33c..f79d3c8450 100644 --- a/source/libs/executor/inc/executorimpl.h +++ b/source/libs/executor/inc/executorimpl.h @@ -706,7 +706,7 @@ SOperatorInfo* createGroupOperatorInfo(SOperatorInfo* downstream, SExprInfo* pEx SOperatorInfo* createDataBlockInfoScanOperator(void* dataReader, SExecTaskInfo* pTaskInfo); SOperatorInfo* createStreamScanOperatorInfo(void* streamReadHandle, void* pDataReader, SSDataBlock* pResBlock, SArray* pColList, SArray* pTableIdList, SExecTaskInfo* pTaskInfo, - SNode* pConditions, SOperatorInfo* pOperatorDumy, SInterval* pInterval); + SNode* pConditions, SOperatorInfo* pOperatorDumy); SOperatorInfo* createFillOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExpr, int32_t numOfCols, SInterval* pInterval, STimeWindow* pWindow, SSDataBlock* pResBlock, int32_t fillType, SNodeListNode* fillVal, diff --git a/source/libs/executor/src/executorimpl.c b/source/libs/executor/src/executorimpl.c index 8db5a282d3..df446def5e 100644 --- a/source/libs/executor/src/executorimpl.c +++ b/source/libs/executor/src/executorimpl.c @@ -4808,17 +4808,6 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo } SDataBlockDescNode* pDescNode = pScanPhyNode->node.pOutputDataBlockDesc; - - SArray* pColList = extractColMatchInfo(pScanPhyNode->pScanCols, pDescNode, &numOfCols, COL_MATCH_FROM_COL_ID); - SSDataBlock* pResBlockDumy = createResDataBlock(pDescNode); - - SQueryTableDataCond cond = {0}; - int32_t code = initQueryTableDataCond(&cond, pTableScanNode); - if (code != TSDB_CODE_SUCCESS) { - return NULL; - } - - SInterval interval = extractIntervalInfo(pTableScanNode); SOperatorInfo* pOperatorDumy = createTableScanOperatorInfo(pTableScanNode, pDataReader, pHandle, pTaskInfo); SArray* tableIdList = extractTableIdList(pTableGroupInfo); @@ -4826,7 +4815,7 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo SArray* pCols = extractColMatchInfo(pScanPhyNode->pScanCols, pDescNode, &numOfCols, COL_MATCH_FROM_COL_ID); SOperatorInfo* pOperator = createStreamScanOperatorInfo(pHandle->reader, pDataReader, pResBlock, pCols, tableIdList, pTaskInfo, - pScanPhyNode->node.pConditions, pOperatorDumy, &interval); + pScanPhyNode->node.pConditions, pOperatorDumy); taosArrayDestroy(tableIdList); return pOperator; } else if (QUERY_NODE_PHYSICAL_PLAN_SYSTABLE_SCAN == type) { diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index e0758a7210..c6cb01e8fb 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -782,8 +782,7 @@ static SSDataBlock* doStreamBlockScan(SOperatorInfo* pOperator) { SOperatorInfo* createStreamScanOperatorInfo(void* streamReadHandle, void* pDataReader, SSDataBlock* pResBlock, SArray* pColList, SArray* pTableIdList, - 
SExecTaskInfo* pTaskInfo, SNode* pCondition, SOperatorInfo* pOperatorDumy, - SInterval* pInterval) { + SExecTaskInfo* pTaskInfo, SNode* pCondition, SOperatorInfo* pOperatorDumy ) { SStreamBlockScanInfo* pInfo = taosMemoryCalloc(1, sizeof(SStreamBlockScanInfo)); SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo)); if (pInfo == NULL || pOperator == NULL) { @@ -791,6 +790,8 @@ SOperatorInfo* createStreamScanOperatorInfo(void* streamReadHandle, void* pDataR goto _error; } + STableScanInfo* pSTInfo = (STableScanInfo*)pOperatorDumy->info; + int32_t numOfOutput = taosArrayGetSize(pColList); SArray* pColIds = taosArrayInit(4, sizeof(int16_t)); @@ -823,7 +824,7 @@ SOperatorInfo* createStreamScanOperatorInfo(void* streamReadHandle, void* pDataR } pInfo->primaryTsIndex = 0; // TODO(liuyao) get it from physical plan - pInfo->pUpdateInfo = updateInfoInitP(pInterval, 10000); // TODO(liuyao) get watermark from physical plan + pInfo->pUpdateInfo = updateInfoInitP(&pSTInfo->interval, 10000); // TODO(liuyao) get watermark from physical plan if (pInfo->pUpdateInfo == NULL) { taosMemoryFreeClear(pInfo); taosMemoryFreeClear(pOperator); @@ -836,7 +837,7 @@ SOperatorInfo* createStreamScanOperatorInfo(void* streamReadHandle, void* pDataR pInfo->pDataReader = pDataReader; pInfo->scanMode = STREAM_SCAN_FROM_READERHANDLE; pInfo->pOperatorDumy = pOperatorDumy; - pInfo->interval = *pInterval; + pInfo->interval = pSTInfo->interval; pOperator->name = "StreamBlockScanOperator"; pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN; diff --git a/source/libs/planner/src/planOptimizer.c b/source/libs/planner/src/planOptimizer.c index 5ed7d9c1b5..6ae1b23b97 100644 --- a/source/libs/planner/src/planOptimizer.c +++ b/source/libs/planner/src/planOptimizer.c @@ -228,10 +228,12 @@ static void setScanWindowInfo(SScanLogicNode* pScan) { static int32_t osdOptimize(SOptimizeContext* pCxt, SLogicNode* pLogicNode) { SOsdInfo info = {0}; int32_t code = osdMatch(pCxt, pLogicNode, &info); + if (TSDB_CODE_SUCCESS == code && info.pScan) { + setScanWindowInfo((SScanLogicNode*)info.pScan); + } if (TSDB_CODE_SUCCESS == code && (NULL != info.pDsoFuncs || NULL != info.pSdrFuncs)) { info.pScan->dataRequired = osdGetDataRequired(info.pSdrFuncs); info.pScan->pDynamicScanFuncs = info.pDsoFuncs; - setScanWindowInfo((SScanLogicNode*)info.pScan); OPTIMIZE_FLAG_SET_MASK(info.pScan->node.optimizedFlag, OPTIMIZE_FLAG_OSD); pCxt->optimized = true; } diff --git a/source/libs/stream/src/tstream.c b/source/libs/stream/src/tstream.c index a5811a5ace..08093c8b18 100644 --- a/source/libs/stream/src/tstream.c +++ b/source/libs/stream/src/tstream.c @@ -154,7 +154,7 @@ int32_t streamExecTask(SStreamTask* pTask, SMsgCb* pMsgCb, const void* input, in // sink if (pTask->sinkType == TASK_SINK__TABLE) { - blockDebugShowData(pRes); + // blockDebugShowData(pRes); pTask->tbSink.tbSinkFunc(pTask, pTask->tbSink.vnode, 0, pRes); } else if (pTask->sinkType == TASK_SINK__SMA) { pTask->smaSink.smaSink(pTask->ahandle, pTask->smaSink.smaId, pRes); diff --git a/tests/script/jenkins/basic.txt b/tests/script/jenkins/basic.txt index 8420b91065..f6bc9f8306 100644 --- a/tests/script/jenkins/basic.txt +++ b/tests/script/jenkins/basic.txt @@ -63,6 +63,7 @@ # ---- tstream ./test.sh -f tsim/tstream/basic0.sim +./test.sh -f tsim/tstream/basic1.sim # ---- transaction ./test.sh -f tsim/trans/create_db.sim diff --git a/tests/script/tsim/tstream/basic1.sim b/tests/script/tsim/tstream/basic1.sim new file mode 100644 index 0000000000..12c820d76b --- /dev/null +++ 
b/tests/script/tsim/tstream/basic1.sim @@ -0,0 +1,343 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/exec.sh -n dnode1 -s start +sleep 50 +sql connect + +print =============== create database +sql create database test vgroups 1 +sql show databases +if $rows != 3 then + return -1 +endi + +print $data00 $data01 $data02 + +sql use test + + +sql create table t1(ts timestamp, a int, b int , c int, d double); +sql create stream streams1 trigger at_once into streamt as select _wstartts, count(*) c1, count(d) c2 , sum(a) c3 , max(b) c4, min(c) c5 from t1 interval(10s); +sql insert into t1 values(1648791213000,1,2,3,1.0); +sql insert into t1 values(1648791223001,2,2,3,1.1); +sql insert into t1 values(1648791233002,3,2,3,2.1); +sql insert into t1 values(1648791243003,4,2,3,3.1); +sql insert into t1 values(1648791213004,4,2,3,4.1); +sleep 1000 +sql select _wstartts, c1, c2 ,c3 ,c4, c5 from streamt; + +if $rows != 4 then + print ======$rows + return -1 +endi + +# row 0 +if $data01 != 2 then + print ======$data01 + return -1 +endi + +if $data02 != 2 then + print ======$data02 + return -1 +endi + +if $data03 != 5 then + print ======$data03 + return -1 +endi + +if $data04 != 2 then + print ======$data04 + return -1 +endi + +if $data05 != 3 then + print ======$data05 + return -1 +endi + +# row 1 +if $data11 != 1 then + print ======$data11 + return -1 +endi + +if $data12 != 1 then + print ======$data12 + return -1 +endi + +if $data13 != 2 then + print ======$data13 + return -1 +endi + +if $data14 != 2 then + print ======$data14 + return -1 +endi + +if $data15 != 3 then + print ======$data15 + return -1 +endi + +# row 2 +if $data21 != 1 then + print ======$data21 + return -1 +endi + +if $data22 != 1 then + print ======$data22 + return -1 +endi + +if $data23 != 3 then + print ======$data23 + return -1 +endi + +if $data24 != 2 then + print ======$data24 + return -1 +endi + +if $data25 != 3 then + print ======$data25 + return -1 +endi + +# row 3 +if $data31 != 1 then + print ======$data31 + return -1 +endi + +if $data32 != 1 then + print ======$data32 + return -1 +endi + +if $data33 != 4 then + print ======$data33 + return -1 +endi + +if $data34 != 2 then + print ======$data34 + return -1 +endi + +if $data35 != 3 then + print ======$data35 + return -1 +endi + +sql insert into t1 values(1648791223001,12,14,13,11.1); +sleep 100 +sql select _wstartts, c1, c2 ,c3 ,c4, c5 from streamt; + +if $rows != 4 then + print ======$rows + return -1 +endi + +# row 0 +if $data01 != 2 then + print ======$data01 + return -1 +endi + +if $data02 != 2 then + print ======$data02 + return -1 +endi + +if $data03 != 5 then + print ======$data03 + return -1 +endi + +if $data04 != 2 then + print ======$data04 + return -1 +endi + +if $data05 != 3 then + print ======$data05 + return -1 +endi + +# row 1 +if $data11 != 1 then + print ======$data11 + return -1 +endi + +if $data12 != 1 then + print ======$data12 + return -1 +endi + +if $data13 != 12 then + print ======$data13 + return -1 +endi + +if $data14 != 14 then + print ======$data14 + return -1 +endi + +if $data15 != 13 then + print ======$data15 + return -1 +endi + +# row 2 +if $data21 != 1 then + print ======$data21 + return -1 +endi + +if $data22 != 1 then + print ======$data22 + return -1 +endi + +if $data23 != 3 then + print ======$data23 + return -1 +endi + +if $data24 != 2 then + print ======$data24 + return -1 +endi + +if $data25 != 3 then + print ======$data25 + return -1 +endi + +# row 3 +if $data31 != 1 then + print ======$data31 + return -1 +endi 
+ +if $data32 != 1 then + print ======$data32 + return -1 +endi + +if $data33 != 4 then + print ======$data33 + return -1 +endi + +if $data34 != 2 then + print ======$data34 + return -1 +endi + +if $data35 != 3 then + print ======$data35 + return -1 +endi + +sql insert into t1 values(1648791223002,12,14,13,11.1); +sleep 100 +sql select _wstartts, c1, c2 ,c3 ,c4, c5 from streamt; + +# row 1 +if $data11 != 2 then + print ======$data11 + return -1 +endi + +if $data12 != 2 then + print ======$data12 + return -1 +endi + +if $data13 != 24 then + print ======$data13 + return -1 +endi + +if $data14 != 14 then + print ======$data14 + return -1 +endi + +if $data15 != 13 then + print ======$data15 + return -1 +endi + +sql insert into t1 values(1648791223003,12,14,13,11.1); +sleep 100 +sql select _wstartts, c1, c2 ,c3 ,c4, c5 from streamt; + +# row 1 +if $data11 != 3 then + print ======$data11 + return -1 +endi + +if $data12 != 3 then + print ======$data12 + return -1 +endi + +if $data13 != 36 then + print ======$data13 + return -1 +endi + +if $data14 != 14 then + print ======$data14 + return -1 +endi + +if $data15 != 13 then + print ======$data15 + return -1 +endi + +sql insert into t1 values(1648791223001,1,1,1,1.1); +sql insert into t1 values(1648791223002,2,2,2,2.1); +sql insert into t1 values(1648791223003,3,3,3,3.1); +sleep 100 +sql select _wstartts, c1, c2 ,c3 ,c4, c5 from streamt; + +# row 1 +if $data11 != 3 then + print ======$data11 + return -1 +endi + +if $data12 != 3 then + print ======$data12 + return -1 +endi + +if $data13 != 6 then + print ======$data13 + return -1 +endi + +if $data14 != 3 then + print ======$data14 + return -1 +endi + +if $data15 != 1 then + print ======$data15 + return -1 +endi + +#system sh/exec.sh -n dnode1 -s stop -x SIGINT From 68ad7008bf3cc30a428f7c7ad3bae7ec03751d2c Mon Sep 17 00:00:00 2001 From: plum-lihui Date: Fri, 13 May 2022 15:54:15 +0800 Subject: [PATCH 02/21] test: add error test case for rerun fail --- tests/system-test/99-TDcase/TD-15554.py | 490 ++++++++++++++++++++++++ 1 file changed, 490 insertions(+) create mode 100644 tests/system-test/99-TDcase/TD-15554.py diff --git a/tests/system-test/99-TDcase/TD-15554.py b/tests/system-test/99-TDcase/TD-15554.py new file mode 100644 index 0000000000..fb3817ed89 --- /dev/null +++ b/tests/system-test/99-TDcase/TD-15554.py @@ -0,0 +1,490 @@ + +import taos +import sys +import time +import socket +import os +import threading + +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * + +class TDTestCase: + hostname = socket.gethostname() + #rpcDebugFlagVal = '143' + #clientCfgDict = {'serverPort': '', 'firstEp': '', 'secondEp':'', 'rpcDebugFlag':'135', 'fqdn':''} + #clientCfgDict["rpcDebugFlag"] = rpcDebugFlagVal + #updatecfgDict = {'clientCfg': {}, 'serverPort': '', 'firstEp': '', 'secondEp':'', 'rpcDebugFlag':'135', 'fqdn':''} + #updatecfgDict["rpcDebugFlag"] = rpcDebugFlagVal + #print ("===================: ", updatecfgDict) + + def init(self, conn, logSql): + tdLog.debug(f"start to excute {__file__}") + #tdSql.init(conn.cursor()) + tdSql.init(conn.cursor(), logSql) # output sql.txt file + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in 
rootRealPath): + buildPath = root[:len(root) - len("/build/bin")] + break + return buildPath + + def newcur(self,cfg,host,port): + user = "root" + password = "taosdata" + con=taos.connect(host=host, user=user, password=password, config=cfg ,port=port) + cur=con.cursor() + print(cur) + return cur + + def create_tables(self,tsql, dbName,vgroups,stbName,ctbNum,rowsPerTbl): + tsql.execute("create database if not exists %s vgroups %d"%(dbName, vgroups)) + tsql.execute("use %s" %dbName) + tsql.execute("create table if not exists %s (ts timestamp, c1 bigint, c2 binary(16)) tags(t1 int)"%stbName) + pre_create = "create table" + sql = pre_create + #tdLog.debug("doing create one stable %s and %d child table in %s ..." %(stbname, count ,dbname)) + for i in range(ctbNum): + sql += " %s_%d using %s tags(%d)"%(stbName,i,stbName,i+1) + if (i > 0) and (i%100 == 0): + tsql.execute(sql) + sql = pre_create + if sql != pre_create: + tsql.execute(sql) + + tdLog.debug("complete to create database[%s], stable[%s] and %d child tables" %(dbName, stbName, ctbNum)) + return + + def insert_data(self,tsql,dbName,stbName,ctbNum,rowsPerTbl,batchNum,startTs): + tdLog.debug("start to insert data ............") + tsql.execute("use %s" %dbName) + pre_insert = "insert into " + sql = pre_insert + + #tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows)) + for i in range(ctbNum): + sql += " %s_%d values "%(stbName,i) + for j in range(rowsPerTbl): + sql += "(%d, %d, 'tmqrow_%d') "%(startTs + j, j, j) + if (j > 0) and ((j%batchNum == 0) or (j == rowsPerTbl - 1)): + tsql.execute(sql) + if j < rowsPerTbl - 1: + sql = "insert into %s_%d values " %(stbName,i) + else: + sql = "insert into " + #end sql + if sql != pre_insert: + #print("insert sql:%s"%sql) + tsql.execute(sql) + tdLog.debug("insert data ............ 
[OK]") + return + + def prepareEnv(self, **parameterDict): + print ("input parameters:") + print (parameterDict) + # create new connector for my thread + tsql=self.newcur(parameterDict['cfg'], 'localhost', 6030) + self.create_tables(tsql,\ + parameterDict["dbName"],\ + parameterDict["vgroups"],\ + parameterDict["stbName"],\ + parameterDict["ctbNum"],\ + parameterDict["rowsPerTbl"]) + + self.insert_data(tsql,\ + parameterDict["dbName"],\ + parameterDict["stbName"],\ + parameterDict["ctbNum"],\ + parameterDict["rowsPerTbl"],\ + parameterDict["batchNum"],\ + parameterDict["startTs"]) + return + + + def tmqCase1(self, cfgPath, buildPath): + tdLog.printNoPrefix("======== test case 1: Produce while consume") + tdLog.info("step 1: create database, stb, ctb and insert data") + # create and start thread + parameterDict = {'cfg': '', \ + 'dbName': 'db', \ + 'vgroups': 1, \ + 'stbName': 'stb', \ + 'ctbNum': 10, \ + 'rowsPerTbl': 1000, \ + 'batchNum': 100, \ + 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 + parameterDict['cfg'] = cfgPath + prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) + prepareEnvThread.start() + time.sleep(2) + + # wait stb ready + while 1: + tdSql.query("show %s.stables"%parameterDict['dbName']) + if tdSql.getRows() == 1: + break + else: + time.sleep(1) + + tdLog.info("create topics from super table") + topicFromStb = 'topic_stb_column' + topicFromCtb = 'topic_ctb_column' + + tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb, parameterDict['dbName'], parameterDict['stbName'])) + tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s_0" %(topicFromCtb, parameterDict['dbName'], parameterDict['stbName'])) + + time.sleep(1) + tdSql.query("show topics") + #tdSql.checkRows(2) + topic1 = tdSql.getData(0 , 0) + topic2 = tdSql.getData(1 , 0) + + tdLog.info("show topics: %s, %s"%(topic1, topic2)) + if topic1 != topicFromStb and topic1 != topicFromCtb: + tdLog.exit("topic error1") + if topic2 != topicFromStb and topic2 != topicFromCtb: + tdLog.exit("topic error2") + + tdLog.info("create consume info table and consume result table") + cdbName = parameterDict["dbName"] + tdSql.query("create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int)") + tdSql.query("create table consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)") + + consumerId = 0 + expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + topicList = topicFromStb + ifcheckdata = 0 + keyList = 'group.id:cgrp1,\ + enable.auto.commit:false,\ + auto.commit.interval.ms:6000,\ + auto.offset.reset:earliest' + sql = "insert into consumeinfo values " + sql += "(now, %d, '%s', '%s', %d, %d)"%(consumerId, topicList, keyList, expectrowcnt, ifcheckdata) + tdSql.query(sql) + + tdLog.info("check stb if there are data") + while 1: + tdSql.query("select count(*) from %s"%parameterDict["stbName"]) + #tdLog.info("row: %d, %l64d, %l64d"%(tdSql.getData(0, 1),tdSql.getData(0, 2),tdSql.getData(0, 3)) + countOfStb = tdSql.getData(0, 0) + if countOfStb != 0: + tdLog.info("count from stb: %d"%countOfStb) + break + else: + time.sleep(1) + + tdLog.info("start consume processor") + pollDelay = 5 + showMsg = 1 + showRow = 1 + + shellCmd = 'nohup ' + buildPath + '/build/bin/tmq_sim -c ' + cfgPath + shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, parameterDict["dbName"], showMsg, showRow, cdbName) + shellCmd += "> /dev/null 2>&1 &" + 
tdLog.info(shellCmd) + os.system(shellCmd) + + # wait for data ready + prepareEnvThread.join() + + tdLog.info("insert process end, and start to check consume result") + while 1: + tdSql.query("select * from consumeresult") + #tdLog.info("row: %d, %l64d, %l64d"%(tdSql.getData(0, 1),tdSql.getData(0, 2),tdSql.getData(0, 3)) + if tdSql.getRows() == 1: + break + else: + time.sleep(5) + + tdLog.info("consumer result: %d, %d"%(tdSql.getData(0 , 2), tdSql.getData(0 , 3))) + tdSql.checkData(0 , 1, consumerId) + # mulit rows and mulit tables in one sql, this num of msg is not sure + #tdSql.checkData(0 , 2, expectmsgcnt) + tdSql.checkData(0 , 3, expectrowcnt) + + tdSql.query("drop topic %s"%topicFromStb) + tdSql.query("drop topic %s"%topicFromCtb) + + tdLog.printNoPrefix("======== test case 1 end ...... ") + + def tmqCase2(self, cfgPath, buildPath): + tdLog.printNoPrefix("======== test case 2: add child table with consuming ") + # create and start thread + parameterDict = {'cfg': '', \ + 'dbName': 'db2', \ + 'vgroups': 1, \ + 'stbName': 'stb', \ + 'ctbNum': 10, \ + 'rowsPerTbl': 10000, \ + 'batchNum': 100, \ + 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 + parameterDict['cfg'] = cfgPath + + prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) + prepareEnvThread.start() + + # wait db ready + while 1: + tdSql.query("show databases") + if tdSql.getRows() == 4: + print (tdSql.getData(0,0), tdSql.getData(1,0),tdSql.getData(2,0),) + break + else: + time.sleep(1) + + tdSql.query("use %s"%parameterDict['dbName']) + # wait stb ready + while 1: + tdSql.query("show %s.stables"%parameterDict['dbName']) + if tdSql.getRows() == 1: + break + else: + time.sleep(1) + + tdLog.info("create topics from super table") + topicFromStb = 'topic_stb_column2' + topicFromCtb = 'topic_ctb_column2' + + tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb, parameterDict['dbName'], parameterDict['stbName'])) + tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s_0" %(topicFromCtb, parameterDict['dbName'], parameterDict['stbName'])) + + time.sleep(1) + tdSql.query("show topics") + topic1 = tdSql.getData(0 , 0) + topic2 = tdSql.getData(1 , 0) + tdLog.info("show topics: %s, %s"%(topic1, topic2)) + if topic1 != topicFromStb and topic1 != topicFromCtb: + tdLog.exit("topic error1") + if topic2 != topicFromStb and topic2 != topicFromCtb: + tdLog.exit("topic error2") + + tdLog.info("create consume info table and consume result table") + cdbName = parameterDict["dbName"] + tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int)"%cdbName) + tdSql.query("create table %s.consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)"%cdbName) + + rowsOfNewCtb = 1000 + consumerId = 0 + expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + rowsOfNewCtb + topicList = topicFromStb + ifcheckdata = 0 + keyList = 'group.id:cgrp1,\ + enable.auto.commit:false,\ + auto.commit.interval.ms:6000,\ + auto.offset.reset:earliest' + sql = "insert into consumeinfo values " + sql += "(now, %d, '%s', '%s', %d, %d)"%(consumerId, topicList, keyList, expectrowcnt, ifcheckdata) + tdSql.query(sql) + + tdLog.info("check stb if there are data") + while 1: + tdSql.query("select count(*) from %s"%parameterDict["stbName"]) + #tdLog.info("row: %d, %l64d, %l64d"%(tdSql.getData(0, 1),tdSql.getData(0, 2),tdSql.getData(0, 3)) + countOfStb = 
tdSql.getData(0, 0) + if countOfStb != 0: + tdLog.info("count from stb: %d"%countOfStb) + break + else: + time.sleep(1) + + tdLog.info("start consume processor") + pollDelay = 5 + showMsg = 1 + showRow = 1 + + shellCmd = 'nohup ' + buildPath + '/build/bin/tmq_sim -c ' + cfgPath + shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, parameterDict["dbName"], showMsg, showRow, cdbName) + shellCmd += "> /dev/null 2>&1 &" + tdLog.info(shellCmd) + os.system(shellCmd) + + # create new child table and insert data + newCtbName = 'newctb' + tdSql.query("create table %s.%s using %s.%s tags(9999)"%(parameterDict["dbName"], newCtbName, parameterDict["dbName"], parameterDict["stbName"])) + startTs = parameterDict["startTs"] + for j in range(rowsOfNewCtb): + sql = "insert into %s.%s values (%d, %d, 'tmqrow_%d') "%(parameterDict["dbName"], newCtbName, startTs + j, j, j) + tdSql.execute(sql) + tdLog.debug("insert data into new child table ............ [OK]") + + # wait for data ready + prepareEnvThread.join() + + tdLog.info("insert process end, and start to check consume result") + while 1: + tdSql.query("select * from consumeresult") + #tdLog.info("row: %d, %l64d, %l64d"%(tdSql.getData(0, 1),tdSql.getData(0, 2),tdSql.getData(0, 3)) + if tdSql.getRows() == 1: + break + else: + time.sleep(5) + + tdSql.checkData(0 , 1, consumerId) + tdSql.checkData(0 , 3, expectrowcnt) + + tdSql.query("drop topic %s"%topicFromStb) + tdSql.query("drop topic %s"%topicFromCtb) + + tdLog.printNoPrefix("======== test case 2 end ...... ") + + def tmqCase3(self, cfgPath, buildPath): + tdLog.printNoPrefix("======== test case 3: tow topics, each contains a stable, \ + but at the beginning, no ctables in the stable of one topic,\ + after starting consumer, create ctables ") + # create and start thread + parameterDict = {'cfg': '', \ + 'dbName': 'db3', \ + 'vgroups': 1, \ + 'stbName': 'stb', \ + 'ctbNum': 10, \ + 'rowsPerTbl': 30000, \ + 'batchNum': 100, \ + 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 + parameterDict['cfg'] = cfgPath + + prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) + prepareEnvThread.start() + + # wait db ready + while 1: + tdSql.query("show databases") + if tdSql.getRows() == 4: + print (tdSql.getData(0,0), tdSql.getData(1,0),tdSql.getData(2,0),) + break + else: + time.sleep(1) + + tdSql.query("use %s"%parameterDict['dbName']) + # wait stb ready + while 1: + tdSql.query("show %s.stables"%parameterDict['dbName']) + if tdSql.getRows() == 1: + break + else: + time.sleep(1) + + tdLog.info("create stable2 for the seconde topic") + parameterDict2 = {'cfg': '', \ + 'dbName': 'db3', \ + 'vgroups': 1, \ + 'stbName': 'stb2', \ + 'ctbNum': 10, \ + 'rowsPerTbl': 30000, \ + 'batchNum': 100, \ + 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 + parameterDict2['cfg'] = cfgPath + tdSql.execute("create stable if not exists %s.%s (ts timestamp, c1 bigint, c2 binary(16)) tags(t1 int)"%(parameterDict2['dbName'], parameterDict2['stbName'])) + + tdLog.info("create topics from super table") + topicFromStb = 'topic_stb_column3' + topicFromStb2 = 'topic_stb_column32' + + tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb, parameterDict['dbName'], parameterDict['stbName'])) + tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb2, parameterDict2['dbName'], parameterDict2['stbName'])) + + tdSql.query("show topics") + topic1 = tdSql.getData(0 , 0) + topic2 = tdSql.getData(1 , 0) + tdLog.info("show topics: %s, %s"%(topic1, topic2)) + if 
topic1 != topicFromStb and topic1 != topicFromStb2: + tdLog.exit("topic error1") + if topic2 != topicFromStb and topic2 != topicFromStb2: + tdLog.exit("topic error2") + + tdLog.info("create consume info table and consume result table") + cdbName = parameterDict["dbName"] + tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int)"%cdbName) + tdSql.query("create table %s.consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)"%cdbName) + + consumerId = 0 + expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + parameterDict2["rowsPerTbl"] * parameterDict2["ctbNum"] + topicList = topicFromStb + ',' + topicFromStb2 + ifcheckdata = 0 + keyList = 'group.id:cgrp1,\ + enable.auto.commit:false,\ + auto.commit.interval.ms:6000,\ + auto.offset.reset:earliest' + sql = "insert into consumeinfo values " + sql += "(now, %d, '%s', '%s', %d, %d)"%(consumerId, topicList, keyList, expectrowcnt, ifcheckdata) + tdSql.query(sql) + + tdLog.info("check stb if there are data") + while 1: + tdSql.query("select count(*) from %s"%parameterDict["stbName"]) + #tdLog.info("row: %d, %l64d, %l64d"%(tdSql.getData(0, 1),tdSql.getData(0, 2),tdSql.getData(0, 3)) + countOfStb = tdSql.getData(0, 0) + if countOfStb != 0: + tdLog.info("count from stb: %d"%countOfStb) + break + else: + time.sleep(1) + + tdLog.info("start consume processor") + pollDelay = 5 + showMsg = 1 + showRow = 1 + + shellCmd = 'nohup ' + buildPath + '/build/bin/tmq_sim -c ' + cfgPath + shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, parameterDict["dbName"], showMsg, showRow, cdbName) + shellCmd += "> /dev/null 2>&1 &" + tdLog.info(shellCmd) + os.system(shellCmd) + + # start the second thread to create new child table and insert data + prepareEnvThread2 = threading.Thread(target=self.prepareEnv, kwargs=parameterDict2) + prepareEnvThread2.start() + + # wait for data ready + prepareEnvThread.join() + prepareEnvThread2.join() + + tdLog.info("insert process end, and start to check consume result") + while 1: + tdSql.query("select * from consumeresult") + #tdLog.info("row: %d, %l64d, %l64d"%(tdSql.getData(0, 1),tdSql.getData(0, 2),tdSql.getData(0, 3)) + if tdSql.getRows() == 1: + break + else: + time.sleep(5) + + tdSql.checkData(0 , 1, consumerId) + tdSql.checkData(0 , 3, expectrowcnt) + + tdSql.query("drop topic %s"%topicFromStb) + tdSql.query("drop topic %s"%topicFromStb2) + + tdLog.printNoPrefix("======== test case 3 end ...... 
") + + def run(self): + tdSql.prepare() + + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosd not found!") + else: + tdLog.info("taosd found in %s" % buildPath) + cfgPath = buildPath + "/../sim/psim/cfg" + tdLog.info("cfgPath: %s" % cfgPath) + + #self.tmqCase1(cfgPath, buildPath) + #self.tmqCase2(cfgPath, buildPath) + self.tmqCase3(cfgPath, buildPath) + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) From 26aef324f7f1fc9e196111cd3a5f2b4208c740fd Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Fri, 13 May 2022 15:59:56 +0800 Subject: [PATCH 03/21] enh(rpc): add more log --- source/libs/transport/src/transCli.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/source/libs/transport/src/transCli.c b/source/libs/transport/src/transCli.c index 5570bdcd3e..cd7e3beb13 100644 --- a/source/libs/transport/src/transCli.c +++ b/source/libs/transport/src/transCli.c @@ -13,7 +13,6 @@ */ #ifdef USE_UV - #include "transComm.h" typedef struct SCliConn { @@ -706,7 +705,8 @@ SCliConn* cliGetConn(SCliMsg* pMsg, SCliThrdObj* pThrd) { void cliHandleReq(SCliMsg* pMsg, SCliThrdObj* pThrd) { uint64_t et = taosGetTimestampUs(); uint64_t el = et - pMsg->st; - tTrace("%s cli msg tran time cost: %" PRIu64 "us", ((STrans*)pThrd->pTransInst)->label, el); + tTrace("%s cli msg tran time cost: %" PRIu64 "us, threadID: %" PRId64 "", ((STrans*)pThrd->pTransInst)->label, el, + pThrd->thread); STransConnCtx* pCtx = pMsg->ctx; STrans* pTransInst = pThrd->pTransInst; @@ -1029,8 +1029,8 @@ void transSendRequest(void* shandle, const SEpSet* pEpSet, STransMsg* pReq, STra SCliThrdObj* thrd = ((SCliObj*)pTransInst->tcphandle)->pThreadObj[index]; - tDebug("send request at thread:%d %p, dst: %s:%d, app:%p", index, pReq, EPSET_GET_INUSE_IP(&pCtx->epSet), - EPSET_GET_INUSE_PORT(&pCtx->epSet), pReq->ahandle); + tDebug("send request at thread:%d, threadID: %" PRId64 ", msg: %p, dst: %s:%d, app:%p", index, thrd->thread, pReq, + EPSET_GET_INUSE_IP(&pCtx->epSet), EPSET_GET_INUSE_PORT(&pCtx->epSet), pReq->ahandle); ASSERT(transSendAsync(thrd->asyncPool, &(cliMsg->q)) == 0); } @@ -1057,8 +1057,8 @@ void transSendRecv(void* shandle, const SEpSet* pEpSet, STransMsg* pReq, STransM cliMsg->type = Normal; SCliThrdObj* thrd = ((SCliObj*)pTransInst->tcphandle)->pThreadObj[index]; - tDebug("send request at thread:%d %p, dst: %s:%d, app:%p", index, pReq, EPSET_GET_INUSE_IP(&pCtx->epSet), - EPSET_GET_INUSE_PORT(&pCtx->epSet), pReq->ahandle); + tDebug("send request at thread:%d, threadID:%" PRId64 ", msg: %p, dst: %s:%d, app:%p", index, thrd->thread, pReq, + EPSET_GET_INUSE_IP(&pCtx->epSet), EPSET_GET_INUSE_PORT(&pCtx->epSet), pReq->ahandle); transSendAsync(thrd->asyncPool, &(cliMsg->q)); tsem_t* pSem = pCtx->pSem; From d9590b44737966966b13ca3e5fd9842bd2c8b99d Mon Sep 17 00:00:00 2001 From: 54liuyao <54liuyao@163.com> Date: Fri, 13 May 2022 16:23:06 +0800 Subject: [PATCH 04/21] ci(stream): add stream ci --- tests/script/tsim/tstream/basic1.sim | 34 +++++++++++++++++++++++++++- 1 file changed, 33 insertions(+), 1 deletion(-) diff --git a/tests/script/tsim/tstream/basic1.sim b/tests/script/tsim/tstream/basic1.sim index 12c820d76b..3bb5943b3b 100644 --- a/tests/script/tsim/tstream/basic1.sim +++ b/tests/script/tsim/tstream/basic1.sim @@ -340,4 +340,36 @@ if $data15 != 1 then return -1 endi -#system sh/exec.sh -n dnode1 -s stop -x SIGINT +sql insert into t1 
values(1648791233003,3,2,3,2.1); +sql insert into t1 values(1648791233002,5,6,7,8.1); +sql insert into t1 values(1648791233002,3,2,3,2.1); +sleep 100 +sql select _wstartts, c1, c2 ,c3 ,c4, c5 from streamt; + +# row 2 +if $data21 != 2 then + print ======$data21 + return -1 +endi + +if $data22 != 2 then + print ======$data22 + return -1 +endi + +if $data23 != 6 then + print ======$data23 + return -1 +endi + +if $data24 != 2 then + print ======$data24 + return -1 +endi + +if $data25 != 3 then + print ======$data25 + return -1 +endi + +system sh/exec.sh -n dnode1 -s stop -x SIGINT From d6fca91f9f54531f6c51124c4d2f2d03046f95f4 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Fri, 13 May 2022 17:02:12 +0800 Subject: [PATCH 05/21] fix: crash when executing create database test vgroups 1024 --- source/dnode/mgmt/mgmt_vnode/inc/vmInt.h | 4 --- source/dnode/mgmt/mgmt_vnode/src/vmFile.c | 31 ++++++++------------- source/dnode/mgmt/mgmt_vnode/src/vmHandle.c | 2 -- source/dnode/mgmt/mgmt_vnode/src/vmInt.c | 5 +--- 4 files changed, 13 insertions(+), 29 deletions(-) diff --git a/source/dnode/mgmt/mgmt_vnode/inc/vmInt.h b/source/dnode/mgmt/mgmt_vnode/inc/vmInt.h index 51b3860461..5d8ec50e81 100644 --- a/source/dnode/mgmt/mgmt_vnode/inc/vmInt.h +++ b/source/dnode/mgmt/mgmt_vnode/inc/vmInt.h @@ -46,8 +46,6 @@ typedef struct { int32_t vgId; int32_t vgVersion; int8_t dropped; - uint64_t dbUid; - char db[TSDB_DB_FNAME_LEN]; char path[PATH_MAX + 20]; } SWrapperCfg; @@ -57,8 +55,6 @@ typedef struct { int32_t vgVersion; int8_t dropped; int8_t accessState; - uint64_t dbUid; - char *db; char *path; SVnode *pImpl; STaosQueue *pWriteQ; diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmFile.c b/source/dnode/mgmt/mgmt_vnode/src/vmFile.c index f251dd120e..ba4293eeb2 100644 --- a/source/dnode/mgmt/mgmt_vnode/src/vmFile.c +++ b/source/dnode/mgmt/mgmt_vnode/src/vmFile.c @@ -47,7 +47,7 @@ SVnodeObj **vmGetVnodeListFromHash(SVnodesMgmt *pMgmt, int32_t *numOfVnodes) { int32_t vmGetVnodeListFromFile(SVnodesMgmt *pMgmt, SWrapperCfg **ppCfgs, int32_t *numOfVnodes) { int32_t code = TSDB_CODE_INVALID_JSON_FORMAT; int32_t len = 0; - int32_t maxLen = 30000; + int32_t maxLen = 1024 * 1024; char *content = taosMemoryCalloc(1, maxLen + 1); cJSON *root = NULL; FILE *fp = NULL; @@ -64,6 +64,11 @@ int32_t vmGetVnodeListFromFile(SVnodesMgmt *pMgmt, SWrapperCfg **ppCfgs, int32_t goto _OVER; } + if (content == NULL) { + terrno = TSDB_CODE_OUT_OF_MEMORY; + return -1; + } + len = (int32_t)taosReadFile(pFile, content, maxLen); if (len <= 0) { dError("failed to read %s since content is null", file); @@ -116,20 +121,6 @@ int32_t vmGetVnodeListFromFile(SVnodesMgmt *pMgmt, SWrapperCfg **ppCfgs, int32_t goto _OVER; } pCfg->vgVersion = vgVersion->valueint; - - cJSON *dbUid = cJSON_GetObjectItem(vnode, "dbUid"); - if (!dbUid || dbUid->type != cJSON_String) { - dError("failed to read %s since dbUid not found", file); - goto _OVER; - } - pCfg->dbUid = atoll(dbUid->valuestring); - - cJSON *db = cJSON_GetObjectItem(vnode, "db"); - if (!db || db->type != cJSON_String) { - dError("failed to read %s since db not found", file); - goto _OVER; - } - tstrncpy(pCfg->db, db->valuestring, TSDB_DB_FNAME_LEN); } *ppCfgs = pCfgs; @@ -165,8 +156,12 @@ int32_t vmWriteVnodeListToFile(SVnodesMgmt *pMgmt) { SVnodeObj **pVnodes = vmGetVnodeListFromHash(pMgmt, &numOfVnodes); int32_t len = 0; - int32_t maxLen = 65536; + int32_t maxLen = 1024 * 1024; char *content = taosMemoryCalloc(1, maxLen + 1); + if (content == NULL) { + terrno = TSDB_CODE_OUT_OF_MEMORY; + return -1; + } 
len += snprintf(content + len, maxLen - len, "{\n"); len += snprintf(content + len, maxLen - len, " \"vnodes\": [\n"); @@ -175,9 +170,7 @@ int32_t vmWriteVnodeListToFile(SVnodesMgmt *pMgmt) { len += snprintf(content + len, maxLen - len, " {\n"); len += snprintf(content + len, maxLen - len, " \"vgId\": %d,\n", pVnode->vgId); len += snprintf(content + len, maxLen - len, " \"dropped\": %d,\n", pVnode->dropped); - len += snprintf(content + len, maxLen - len, " \"vgVersion\": %d,\n", pVnode->vgVersion); - len += snprintf(content + len, maxLen - len, " \"dbUid\": \"%" PRIu64 "\",\n", pVnode->dbUid); - len += snprintf(content + len, maxLen - len, " \"db\": \"%s\"\n", pVnode->db); + len += snprintf(content + len, maxLen - len, " \"vgVersion\": %d\n", pVnode->vgVersion); if (i < numOfVnodes - 1) { len += snprintf(content + len, maxLen - len, " },\n"); } else { diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c b/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c index 914acce2ea..be4cfb95e2 100644 --- a/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c +++ b/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c @@ -174,8 +174,6 @@ static void vmGenerateWrapperCfg(SVnodesMgmt *pMgmt, SCreateVnodeReq *pCreate, S pCfg->vgId = pCreate->vgId; pCfg->vgVersion = pCreate->vgVersion; pCfg->dropped = 0; - pCfg->dbUid = pCreate->dbUid; - tstrncpy(pCfg->db, pCreate->db, TSDB_DB_FNAME_LEN); snprintf(pCfg->path, sizeof(pCfg->path), "%s%svnode%d", pMgmt->path, TD_DIRSEP, pCreate->vgId); } diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmInt.c b/source/dnode/mgmt/mgmt_vnode/src/vmInt.c index af439fcc03..9a0a524267 100644 --- a/source/dnode/mgmt/mgmt_vnode/src/vmInt.c +++ b/source/dnode/mgmt/mgmt_vnode/src/vmInt.c @@ -57,13 +57,11 @@ int32_t vmOpenVnode(SVnodesMgmt *pMgmt, SWrapperCfg *pCfg, SVnode *pImpl) { pVnode->vgVersion = pCfg->vgVersion; pVnode->dropped = 0; pVnode->accessState = TSDB_VN_ALL_ACCCESS; - pVnode->dbUid = pCfg->dbUid; - pVnode->db = tstrdup(pCfg->db); pVnode->path = tstrdup(pCfg->path); pVnode->pImpl = pImpl; pVnode->pWrapper = pMgmt->pWrapper; - if (pVnode->path == NULL || pVnode->db == NULL) { + if (pVnode->path == NULL) { terrno = TSDB_CODE_OUT_OF_MEMORY; return -1; } @@ -109,7 +107,6 @@ void vmCloseVnode(SVnodesMgmt *pMgmt, SVnodeObj *pVnode) { } taosMemoryFree(pVnode->path); - taosMemoryFree(pVnode->db); taosMemoryFree(pVnode); } From 5461f3ef120a583efd5ff9d920199f0aa9d7e786 Mon Sep 17 00:00:00 2001 From: plum-lihui Date: Fri, 13 May 2022 17:13:20 +0800 Subject: [PATCH 06/21] test: add error case for rerun fail --- tests/system-test/99-TDcase/TD-15557.py | 215 ++++++++++++++++++++++++ 1 file changed, 215 insertions(+) create mode 100644 tests/system-test/99-TDcase/TD-15557.py diff --git a/tests/system-test/99-TDcase/TD-15557.py b/tests/system-test/99-TDcase/TD-15557.py new file mode 100644 index 0000000000..4798bb7c8d --- /dev/null +++ b/tests/system-test/99-TDcase/TD-15557.py @@ -0,0 +1,215 @@ + +import taos +import sys +import time +import socket +import os +import threading + +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * + +class TDTestCase: + hostname = socket.gethostname() + #rpcDebugFlagVal = '143' + #clientCfgDict = {'serverPort': '', 'firstEp': '', 'secondEp':'', 'rpcDebugFlag':'135', 'fqdn':''} + #clientCfgDict["rpcDebugFlag"] = rpcDebugFlagVal + #updatecfgDict = {'clientCfg': {}, 'serverPort': '', 'firstEp': '', 'secondEp':'', 'rpcDebugFlag':'135', 'fqdn':''} + #updatecfgDict["rpcDebugFlag"] = rpcDebugFlagVal + #print ("===================: ", 
updatecfgDict) + + def init(self, conn, logSql): + tdLog.debug(f"start to excute {__file__}") + #tdSql.init(conn.cursor()) + tdSql.init(conn.cursor(), logSql) # output sql.txt file + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root) - len("/build/bin")] + break + return buildPath + + def newcur(self,cfg,host,port): + user = "root" + password = "taosdata" + con=taos.connect(host=host, user=user, password=password, config=cfg ,port=port) + cur=con.cursor() + print(cur) + return cur + + def create_tables(self,tsql, dbName,vgroups,stbName,ctbNum,rowsPerTbl): + tsql.execute("create database if not exists %s vgroups %d"%(dbName, vgroups)) + tsql.execute("use %s" %dbName) + tsql.execute("create table if not exists %s (ts timestamp, c1 bigint, c2 binary(16)) tags(t1 int)"%stbName) + pre_create = "create table" + sql = pre_create + #tdLog.debug("doing create one stable %s and %d child table in %s ..." %(stbname, count ,dbname)) + for i in range(ctbNum): + sql += " %s_%d using %s tags(%d)"%(stbName,i,stbName,i+1) + if (i > 0) and (i%100 == 0): + tsql.execute(sql) + sql = pre_create + if sql != pre_create: + tsql.execute(sql) + + event.set() + tdLog.debug("complete to create database[%s], stable[%s] and %d child tables" %(dbName, stbName, ctbNum)) + return + + def insert_data(self,tsql,dbName,stbName,ctbNum,rowsPerTbl,batchNum,startTs): + tdLog.debug("start to insert data ............") + tsql.execute("use %s" %dbName) + pre_insert = "insert into " + sql = pre_insert + + #tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows)) + for i in range(ctbNum): + sql += " %s_%d values "%(stbName,i) + for j in range(rowsPerTbl): + sql += "(%d, %d, 'tmqrow_%d') "%(startTs + j, j, j) + if (j > 0) and ((j%batchNum == 0) or (j == rowsPerTbl - 1)): + tsql.execute(sql) + if j < rowsPerTbl - 1: + sql = "insert into %s_%d values " %(stbName,i) + else: + sql = "insert into " + #end sql + if sql != pre_insert: + #print("insert sql:%s"%sql) + tsql.execute(sql) + tdLog.debug("insert data ............ 
[OK]") + return + + def prepareEnv(self, **parameterDict): + print ("input parameters:") + print (parameterDict) + # create new connector for my thread + tsql=self.newcur(parameterDict['cfg'], 'localhost', 6030) + self.create_tables(tsql,\ + parameterDict["dbName"],\ + parameterDict["vgroups"],\ + parameterDict["stbName"],\ + parameterDict["ctbNum"],\ + parameterDict["rowsPerTbl"]) + + self.insert_data(tsql,\ + parameterDict["dbName"],\ + parameterDict["stbName"],\ + parameterDict["ctbNum"],\ + parameterDict["rowsPerTbl"],\ + parameterDict["batchNum"],\ + parameterDict["startTs"]) + return + + + def tmqCase1(self, cfgPath, buildPath): + tdLog.printNoPrefix("======== test case 1: Produce while consume to subscribe one db") + tdLog.info("step 1: create database, stb, ctb and insert data") + # create and start thread + parameterDict = {'cfg': '', \ + 'dbName': 'db1', \ + 'vgroups': 4, \ + 'stbName': 'stb', \ + 'ctbNum': 10, \ + 'rowsPerTbl': 10000, \ + 'batchNum': 100, \ + 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 + parameterDict['cfg'] = cfgPath + + tdSql.execute("create database if not exists %s vgroups %d" %(parameterDict['dbName'], parameterDict['vgroups'])) + + prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) + prepareEnvThread.start() + + tdLog.info("create topics from db") + topicName1 = 'topic_db1' + + tdSql.execute("create topic %s as %s" %(topicName1, parameterDict['dbName'])) + + tdLog.info("create consume info table and consume result table") + cdbName = parameterDict["dbName"] + tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int)"%cdbName) + tdSql.query("create table %s.consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)"%cdbName) + + consumerId = 0 + expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + topicList = topicName1 + ifcheckdata = 0 + keyList = 'group.id:cgrp1,\ + enable.auto.commit:false,\ + auto.commit.interval.ms:6000,\ + auto.offset.reset:earliest' + sql = "insert into %s.consumeinfo values "%cdbName + sql += "(now, %d, '%s', '%s', %d, %d)"%(consumerId, topicList, keyList, expectrowcnt, ifcheckdata) + tdSql.query(sql) + + event.wait() + + tdLog.info("start consume processor") + pollDelay = 5 + showMsg = 1 + showRow = 1 + + shellCmd = 'nohup ' + buildPath + '/build/bin/tmq_sim -c ' + cfgPath + shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, parameterDict["dbName"], showMsg, showRow, cdbName) + shellCmd += "> /dev/null 2>&1 &" + tdLog.info(shellCmd) + os.system(shellCmd) + + # wait for data ready + prepareEnvThread.join() + + tdLog.info("insert process end, and start to check consume result") + while 1: + tdSql.query("select * from %s.consumeresult"%cdbName) + #tdLog.info("row: %d, %l64d, %l64d"%(tdSql.getData(0, 1),tdSql.getData(0, 2),tdSql.getData(0, 3)) + if tdSql.getRows() == 1: + break + else: + time.sleep(5) + + tdLog.info("consumer result: %d, %d"%(tdSql.getData(0 , 2), tdSql.getData(0 , 3))) + tdSql.checkData(0 , 1, consumerId) + # mulit rows and mulit tables in one sql, this num of msg is not sure + #tdSql.checkData(0 , 2, expectmsgcnt) + tdSql.checkData(0 , 3, expectrowcnt) + + tdSql.query("drop topic %s"%topicName1) + + tdLog.printNoPrefix("======== test case 1 end ...... 
") + + def run(self): + tdSql.prepare() + + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosd not found!") + else: + tdLog.info("taosd found in %s" % buildPath) + cfgPath = buildPath + "/../sim/psim/cfg" + tdLog.info("cfgPath: %s" % cfgPath) + + self.tmqCase1(cfgPath, buildPath) + #self.tmqCase2(cfgPath, buildPath) + #self.tmqCase3(cfgPath, buildPath) + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +event = threading.Event() + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) From 56c1b070160925985be2ad42b5af2634257d8ef4 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Fri, 13 May 2022 17:18:35 +0800 Subject: [PATCH 07/21] fix: error msg incorrect when no enough memory to create vnode --- source/dnode/mgmt/mgmt_vnode/src/vmHandle.c | 1 + source/dnode/vnode/src/vnd/vnodeOpen.c | 3 +++ 2 files changed, 4 insertions(+) diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c b/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c index be4cfb95e2..528f6a0ffe 100644 --- a/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c +++ b/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c @@ -225,6 +225,7 @@ int32_t vmProcessCreateVnodeReq(SVnodesMgmt *pMgmt, SNodeMsg *pMsg) { SVnode *pImpl = vnodeOpen(path, pMgmt->pTfs, msgCb); if (pImpl == NULL) { dError("vgId:%d, failed to create vnode since %s", createReq.vgId, terrstr()); + code = terrno; goto _OVER; } diff --git a/source/dnode/vnode/src/vnd/vnodeOpen.c b/source/dnode/vnode/src/vnd/vnodeOpen.c index ae134e6496..7a0561327f 100644 --- a/source/dnode/vnode/src/vnd/vnodeOpen.c +++ b/source/dnode/vnode/src/vnd/vnodeOpen.c @@ -137,18 +137,21 @@ SVnode *vnodeOpen(const char *path, STfs *pTfs, SMsgCb msgCb) { // open query if (vnodeQueryOpen(pVnode)) { vError("vgId:%d failed to open vnode query since %s", TD_VID(pVnode), tstrerror(terrno)); + terrno = TAOS_SYSTEM_ERROR(terrno); goto _err; } // vnode begin if (vnodeBegin(pVnode) < 0) { vError("vgId:%d failed to begin since %s", TD_VID(pVnode), tstrerror(terrno)); + terrno = TAOS_SYSTEM_ERROR(terrno); goto _err; } // open sync if (vnodeSyncOpen(pVnode, dir)) { vError("vgId:%d failed to open sync since %s", TD_VID(pVnode), tstrerror(terrno)); + terrno = TAOS_SYSTEM_ERROR(terrno); goto _err; } From 2178eabcd8bb30b67a673bb7b8e550051ceab268 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Fri, 13 May 2022 17:36:32 +0800 Subject: [PATCH 08/21] fix: error msg incorrect when no enough memory to create vnode --- source/dnode/mnode/impl/src/mndTrans.c | 2 +- source/dnode/vnode/src/vnd/vnodeOpen.c | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/source/dnode/mnode/impl/src/mndTrans.c b/source/dnode/mnode/impl/src/mndTrans.c index 5085de8610..4828b9f523 100644 --- a/source/dnode/mnode/impl/src/mndTrans.c +++ b/source/dnode/mnode/impl/src/mndTrans.c @@ -1051,7 +1051,7 @@ static int32_t mndTransExecuteActions(SMnode *pMnode, STrans *pTrans, SArray *pA static int32_t mndTransExecuteRedoActions(SMnode *pMnode, STrans *pTrans) { int32_t code = mndTransExecuteActions(pMnode, pTrans, pTrans->redoActions); if (code != 0 && code != TSDB_CODE_MND_ACTION_IN_PROGRESS) { - mError("failed to execute redoActions since %s", terrstr()); + mError("failed to execute redoActions since:%s, code:0x%x", terrstr(), terrno); } return code; } diff --git a/source/dnode/vnode/src/vnd/vnodeOpen.c b/source/dnode/vnode/src/vnd/vnodeOpen.c index 7a0561327f..3f29700cb7 100644 --- a/source/dnode/vnode/src/vnd/vnodeOpen.c +++ 
b/source/dnode/vnode/src/vnd/vnodeOpen.c @@ -137,21 +137,21 @@ SVnode *vnodeOpen(const char *path, STfs *pTfs, SMsgCb msgCb) { // open query if (vnodeQueryOpen(pVnode)) { vError("vgId:%d failed to open vnode query since %s", TD_VID(pVnode), tstrerror(terrno)); - terrno = TAOS_SYSTEM_ERROR(terrno); + terrno = TAOS_SYSTEM_ERROR(errno); goto _err; } // vnode begin if (vnodeBegin(pVnode) < 0) { vError("vgId:%d failed to begin since %s", TD_VID(pVnode), tstrerror(terrno)); - terrno = TAOS_SYSTEM_ERROR(terrno); + terrno = TAOS_SYSTEM_ERROR(errno); goto _err; } // open sync if (vnodeSyncOpen(pVnode, dir)) { vError("vgId:%d failed to open sync since %s", TD_VID(pVnode), tstrerror(terrno)); - terrno = TAOS_SYSTEM_ERROR(terrno); + terrno = TAOS_SYSTEM_ERROR(errno); goto _err; } From 558d50fef2326946596491f438947a79c0e1d8b6 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Fri, 13 May 2022 17:40:22 +0800 Subject: [PATCH 09/21] fix: error msg incorrect when no enough memory to create vnode --- source/dnode/vnode/src/vnd/vnodeOpen.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/source/dnode/vnode/src/vnd/vnodeOpen.c b/source/dnode/vnode/src/vnd/vnodeOpen.c index 3f29700cb7..7476da2a0f 100644 --- a/source/dnode/vnode/src/vnd/vnodeOpen.c +++ b/source/dnode/vnode/src/vnd/vnodeOpen.c @@ -137,21 +137,21 @@ SVnode *vnodeOpen(const char *path, STfs *pTfs, SMsgCb msgCb) { // open query if (vnodeQueryOpen(pVnode)) { vError("vgId:%d failed to open vnode query since %s", TD_VID(pVnode), tstrerror(terrno)); - terrno = TAOS_SYSTEM_ERROR(errno); + terrno = TSDB_CODE_OUT_OF_MEMORY; goto _err; } // vnode begin if (vnodeBegin(pVnode) < 0) { vError("vgId:%d failed to begin since %s", TD_VID(pVnode), tstrerror(terrno)); - terrno = TAOS_SYSTEM_ERROR(errno); + terrno = TSDB_CODE_OUT_OF_MEMORY; goto _err; } // open sync if (vnodeSyncOpen(pVnode, dir)) { vError("vgId:%d failed to open sync since %s", TD_VID(pVnode), tstrerror(terrno)); - terrno = TAOS_SYSTEM_ERROR(errno); + terrno = TSDB_CODE_OUT_OF_MEMORY; goto _err; } From 5dadb26070a2bfec9073cc5db3d553f99d4af150 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 13 May 2022 18:18:54 +0800 Subject: [PATCH 10/21] fix(query): set the correct state value key index. 
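The state window operator used to read its state column from a hard-coded index; this change extracts the state key column (SColumn) from the physical plan's pStateKey target and compares var-length key values by payload rather than raw bytes. As an illustrative sketch only (not part of the applied diff), the comparison introduced below as compareVal() behaves roughly as follows, using TDengine's varData macros and the SStateKeys struct from executorimpl.h; the standalone name stateKeyEqual is hypothetical:

    static bool stateKeyEqual(const char* v, const SStateKeys* pKey) {
      if (IS_VAR_DATA_TYPE(pKey->type)) {
        // var-length values: lengths must match, then compare payload bytes
        if (varDataLen(v) != varDataLen(pKey->pData)) {
          return false;
        }
        return strncmp(varDataVal(v), varDataVal(pKey->pData), varDataLen(v)) == 0;
      }
      // fixed-length values: compare the raw key bytes
      return memcmp(pKey->pData, v, pKey->bytes) == 0;
    }
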
--- source/libs/executor/inc/executorimpl.h | 7 +- source/libs/executor/src/executorimpl.c | 48 ++++++------ source/libs/executor/src/groupoperator.c | 4 +- source/libs/executor/src/timewindowoperator.c | 74 ++++++++++++++----- source/libs/parser/src/parTranslater.c | 4 +- source/libs/parser/src/parUtil.c | 2 +- 6 files changed, 91 insertions(+), 48 deletions(-) diff --git a/source/libs/executor/inc/executorimpl.h b/source/libs/executor/inc/executorimpl.h index 34b7fce33c..d0161067dc 100644 --- a/source/libs/executor/inc/executorimpl.h +++ b/source/libs/executor/inc/executorimpl.h @@ -564,7 +564,7 @@ typedef struct SStateWindowOperatorInfo { SAggSupporter aggSup; SGroupResInfo groupResInfo; SWindowRowsSup winSup; - int32_t colIndex; // start row index + SColumn stateCol; // start row index bool hasKey; SStateKeys stateKey; int32_t tsSlotId; // primary timestamp column slot id @@ -636,7 +636,7 @@ int32_t appendDownstream(SOperatorInfo* p, SOperatorInfo** pDownstream, int32_t int32_t initAggInfo(SOptrBasicInfo* pBasicInfo, SAggSupporter* pAggSup, SExprInfo* pExprInfo, int32_t numOfCols, SSDataBlock* pResultBlock, size_t keyBufSize, const char* pkey); void initResultSizeInfo(SOperatorInfo* pOperator, int32_t numOfRows); -void doBuildResultDatablock(SExecTaskInfo *taskInfo, SOptrBasicInfo *pbInfo, SGroupResInfo* pGroupResInfo, SExprInfo* pExprInfo, SDiskbasedBuf* pBuf); +void doBuildResultDatablock(SOperatorInfo* pOperator, SOptrBasicInfo* pbInfo, SGroupResInfo* pGroupResInfo, SDiskbasedBuf* pBuf); void finalizeMultiTupleQueryResult(int32_t numOfOutput, SDiskbasedBuf* pBuf, SResultRowInfo* pResultRowInfo, int32_t* rowCellInfoOffset); void doApplyFunctions(SExecTaskInfo* taskInfo, SqlFunctionCtx* pCtx, STimeWindow* pWin, SColumnInfoData* pTimeWindowData, int32_t offset, @@ -659,6 +659,7 @@ void cleanupAggSup(SAggSupporter* pAggSup); void destroyBasicOperatorInfo(void* param, int32_t numOfOutput); void appendOneRowToDataBlock(SSDataBlock* pBlock, STupleHandle* pTupleHandle); SInterval extractIntervalInfo(const STableScanPhysiNode* pTableScanNode); +SColumn extractColumnFromColumnNode(SColumnNode* pColNode); SSDataBlock* getSortedBlockData(SSortHandle* pHandle, SSDataBlock* pDataBlock, int32_t capacity, SArray* pColMatchInfo); SSDataBlock* loadNextDataBlock(void* param); @@ -712,7 +713,7 @@ SOperatorInfo* createFillOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExp SInterval* pInterval, STimeWindow* pWindow, SSDataBlock* pResBlock, int32_t fillType, SNodeListNode* fillVal, bool multigroupResult, SExecTaskInfo* pTaskInfo); SOperatorInfo* createStatewindowOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExpr, int32_t numOfCols, - SSDataBlock* pResBlock, STimeWindowAggSupp *pTwAggSupp, int32_t tsSlotId, SExecTaskInfo* pTaskInfo); + SSDataBlock* pResBlock, STimeWindowAggSupp *pTwAggSupp, int32_t tsSlotId, SColumn* pStateKeyCol, SExecTaskInfo* pTaskInfo); SOperatorInfo* createPartitionOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols, SSDataBlock* pResultBlock, SArray* pGroupColList, SExecTaskInfo* pTaskInfo, diff --git a/source/libs/executor/src/executorimpl.c b/source/libs/executor/src/executorimpl.c index 8db5a282d3..5cb0b812c0 100644 --- a/source/libs/executor/src/executorimpl.c +++ b/source/libs/executor/src/executorimpl.c @@ -155,9 +155,8 @@ SOperatorFpSet createOperatorFpSet(__optr_open_fn_t openFn, __optr_fn_t nextFn, void operatorDummyCloseFn(void* param, int32_t numOfCols) {} -static int32_t doCopyToSDataBlock(SExecTaskInfo *taskInfo, SSDataBlock* 
pBlock, SExprInfo* pExprInfo, SDiskbasedBuf* pBuf, - SGroupResInfo* pGroupResInfo, int32_t orderType, int32_t* rowCellOffset, - SqlFunctionCtx* pCtx); +static int32_t doCopyToSDataBlock(SExecTaskInfo* taskInfo, SSDataBlock* pBlock, SExprInfo* pExprInfo, SDiskbasedBuf* pBuf, SGroupResInfo* pGroupResInfo, + int32_t orderType, int32_t* rowCellOffset, SqlFunctionCtx* pCtx, int32_t numOfExprs); static void initCtxOutputBuffer(SqlFunctionCtx* pCtx, int32_t size); static void setResultBufSize(STaskAttr* pQueryAttr, SResultInfo* pResultInfo); @@ -2214,7 +2213,7 @@ void setExecutionContext(int32_t numOfOutput, uint64_t groupId, SExecTaskInfo* p * @param result */ int32_t doCopyToSDataBlock(SExecTaskInfo* taskInfo, SSDataBlock* pBlock, SExprInfo* pExprInfo, SDiskbasedBuf* pBuf, SGroupResInfo* pGroupResInfo, - int32_t orderType, int32_t* rowCellOffset, SqlFunctionCtx* pCtx) { + int32_t orderType, int32_t* rowCellOffset, SqlFunctionCtx* pCtx, int32_t numOfExprs) { int32_t numOfRows = getNumOfTotalRes(pGroupResInfo); int32_t numOfResult = pBlock->info.rows; // there are already exists result rows @@ -2248,13 +2247,12 @@ int32_t doCopyToSDataBlock(SExecTaskInfo* taskInfo, SSDataBlock* pBlock, SExprIn pGroupResInfo->index += 1; - for (int32_t j = 0; j < pBlock->info.numOfCols; ++j) { + for (int32_t j = 0; j < numOfExprs; ++j) { int32_t slotId = pExprInfo[j].base.resSchema.slotId; pCtx[j].resultInfo = getResultCell(pRow, j, rowCellOffset); if (pCtx[j].fpSet.finalize) { - int32_t code = TSDB_CODE_SUCCESS; - code = pCtx[j].fpSet.finalize(&pCtx[j], pBlock); + int32_t code = pCtx[j].fpSet.finalize(&pCtx[j], pBlock); if (TAOS_FAILED(code)) { qError("%s build result data block error, code %s", GET_TASKID(taskInfo), tstrerror(code)); taskInfo->code = code; @@ -2286,10 +2284,13 @@ int32_t doCopyToSDataBlock(SExecTaskInfo* taskInfo, SSDataBlock* pBlock, SExprIn return 0; } -void doBuildResultDatablock(SExecTaskInfo *taskInfo, SOptrBasicInfo* pbInfo, SGroupResInfo* pGroupResInfo, SExprInfo* pExprInfo, - SDiskbasedBuf* pBuf) { +void doBuildResultDatablock(SOperatorInfo* pOperator, SOptrBasicInfo* pbInfo, SGroupResInfo* pGroupResInfo, SDiskbasedBuf* pBuf) { assert(pGroupResInfo->currentGroup <= pGroupResInfo->totalGroup); + SExprInfo* pExprInfo = pOperator->pExpr; + int32_t numOfExprs = pOperator->numOfExprs; + SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; + int32_t* rowCellOffset = pbInfo->rowCellInfoOffset; SSDataBlock* pBlock = pbInfo->pRes; SqlFunctionCtx* pCtx = pbInfo->pCtx; @@ -2300,7 +2301,7 @@ void doBuildResultDatablock(SExecTaskInfo *taskInfo, SOptrBasicInfo* pbInfo, SGr } int32_t orderType = TSDB_ORDER_ASC; - doCopyToSDataBlock(taskInfo, pBlock, pExprInfo, pBuf, pGroupResInfo, orderType, rowCellOffset, pCtx); + doCopyToSDataBlock(pTaskInfo, pBlock, pExprInfo, pBuf, pGroupResInfo, orderType, rowCellOffset, pCtx, numOfExprs); // add condition (pBlock->info.rows >= 1) just to runtime happy blockDataUpdateTsWindow(pBlock); @@ -3803,7 +3804,7 @@ static SSDataBlock* getAggregateResult(SOperatorInfo* pOperator) { } blockDataEnsureCapacity(pInfo->pRes, pOperator->resultInfo.capacity); - doBuildResultDatablock(pTaskInfo, pInfo, &pAggInfo->groupResInfo, pOperator->pExpr, pAggInfo->aggSup.pResultBuf); + doBuildResultDatablock(pOperator, pInfo, &pAggInfo->groupResInfo, pAggInfo->aggSup.pResultBuf); if (pInfo->pRes->info.rows == 0 || !hasRemainDataInCurrentGroup(&pAggInfo->groupResInfo)) { doSetOperatorCompleted(pOperator); } @@ -4974,7 +4975,9 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, 
SExecTaskInfo* pTaskInfo SSDataBlock* pResBlock = createResDataBlock(pPhyNode->pOutputDataBlockDesc); int32_t tsSlotId = ((SColumnNode*)pStateNode->window.pTspk)->slotId; - pOptr = createStatewindowOperatorInfo(ops[0], pExprInfo, num, pResBlock, &as, tsSlotId, pTaskInfo); + SColumnNode* pColNode = (SColumnNode*)((STargetNode*)pStateNode->pStateKey)->pExpr; + SColumn col = extractColumnFromColumnNode(pColNode); + pOptr = createStatewindowOperatorInfo(ops[0], pExprInfo, num, pResBlock, &as, tsSlotId, &col, pTaskInfo); } else if (QUERY_NODE_PHYSICAL_PLAN_JOIN == type) { SJoinPhysiNode* pJoinNode = (SJoinPhysiNode*)pPhyNode; SSDataBlock* pResBlock = createResDataBlock(pPhyNode->pOutputDataBlockDesc); @@ -5039,6 +5042,17 @@ int32_t initQueryTableDataCond(SQueryTableDataCond* pCond, const STableScanPhysi return TSDB_CODE_SUCCESS; } +SColumn extractColumnFromColumnNode(SColumnNode* pColNode) { + SColumn c = {0}; + c.slotId = pColNode->slotId; + c.colId = pColNode->colId; + c.type = pColNode->node.resType.type; + c.bytes = pColNode->node.resType.bytes; + c.scale = pColNode->node.resType.scale; + c.precision = pColNode->node.resType.precision; + return c; +} + SArray* extractColumnInfo(SNodeList* pNodeList) { size_t numOfCols = LIST_LENGTH(pNodeList); SArray* pList = taosArrayInit(numOfCols, sizeof(SColumn)); @@ -5053,15 +5067,7 @@ SArray* extractColumnInfo(SNodeList* pNodeList) { if (nodeType(pNode->pExpr) == QUERY_NODE_COLUMN) { SColumnNode* pColNode = (SColumnNode*)pNode->pExpr; - // todo extract method - SColumn c = {0}; - c.slotId = pColNode->slotId; - c.colId = pColNode->colId; - c.type = pColNode->node.resType.type; - c.bytes = pColNode->node.resType.bytes; - c.scale = pColNode->node.resType.scale; - c.precision = pColNode->node.resType.precision; - + SColumn c = extractColumnFromColumnNode(pColNode); taosArrayPush(pList, &c); } else if (nodeType(pNode->pExpr) == QUERY_NODE_VALUE) { SValueNode* pValNode = (SValueNode*)pNode->pExpr; diff --git a/source/libs/executor/src/groupoperator.c b/source/libs/executor/src/groupoperator.c index ac6f0cf881..d8ccac8cea 100644 --- a/source/libs/executor/src/groupoperator.c +++ b/source/libs/executor/src/groupoperator.c @@ -268,7 +268,7 @@ static SSDataBlock* hashGroupbyAggregate(SOperatorInfo* pOperator) { SSDataBlock* pRes = pInfo->binfo.pRes; if (pOperator->status == OP_RES_TO_RETURN) { - doBuildResultDatablock(pTaskInfo, &pInfo->binfo, &pInfo->groupResInfo, pOperator->pExpr, pInfo->aggSup.pResultBuf); + doBuildResultDatablock(pOperator, &pInfo->binfo, &pInfo->groupResInfo, pInfo->aggSup.pResultBuf); if (pRes->info.rows == 0 || !hasRemainDataInCurrentGroup(&pInfo->groupResInfo)) { pOperator->status = OP_EXEC_DONE; } @@ -317,7 +317,7 @@ static SSDataBlock* hashGroupbyAggregate(SOperatorInfo* pOperator) { initGroupedResultInfo(&pInfo->groupResInfo, pInfo->aggSup.pResultRowHashTable, false); while(1) { - doBuildResultDatablock(pTaskInfo, &pInfo->binfo, &pInfo->groupResInfo, pOperator->pExpr, pInfo->aggSup.pResultBuf); + doBuildResultDatablock(pOperator, &pInfo->binfo, &pInfo->groupResInfo, pInfo->aggSup.pResultBuf); doFilter(pInfo->pCondition, pRes); bool hasRemain = hasRemainDataInCurrentGroup(&pInfo->groupResInfo); diff --git a/source/libs/executor/src/timewindowoperator.c b/source/libs/executor/src/timewindowoperator.c index 332a116f76..10dc482462 100644 --- a/source/libs/executor/src/timewindowoperator.c +++ b/source/libs/executor/src/timewindowoperator.c @@ -806,10 +806,22 @@ static int32_t doOpenIntervalAgg(SOperatorInfo* pOperator) { return 
TSDB_CODE_SUCCESS; } +static bool compareVal(const char* v, const SStateKeys* pKey) { + if (IS_VAR_DATA_TYPE(pKey->type)) { + if (varDataLen(v) != varDataLen(pKey->pData)) { + return false; + } else { + return strncmp(varDataVal(v), varDataVal(pKey->pData), varDataLen(v)) == 0; + } + } else { + return memcmp(pKey->pData, v, pKey->bytes) == 0; + } +} + static void doStateWindowAggImpl(SOperatorInfo* pOperator, SStateWindowOperatorInfo* pInfo, SSDataBlock* pBlock) { SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; - SColumnInfoData* pStateColInfoData = taosArrayGet(pBlock->pDataBlock, pInfo->colIndex); + SColumnInfoData* pStateColInfoData = taosArrayGet(pBlock->pDataBlock, pInfo->stateCol.slotId); int64_t gid = pBlock->info.groupId; bool masterScan = true; @@ -822,20 +834,28 @@ static void doStateWindowAggImpl(SOperatorInfo* pOperator, SStateWindowOperatorI SWindowRowsSup* pRowSup = &pInfo->winSup; pRowSup->numOfRows = 0; + struct SColumnDataAgg* pAgg = NULL; for (int32_t j = 0; j < pBlock->info.rows; ++j) { - if (colDataIsNull(pStateColInfoData, pBlock->info.rows, j, pBlock->pBlockAgg[pInfo->colIndex])) { + pAgg = (pBlock->pBlockAgg != NULL)? pBlock->pBlockAgg[pInfo->stateCol.slotId]: NULL; + if (colDataIsNull(pStateColInfoData, pBlock->info.rows, j, pAgg)) { continue; } char* val = colDataGetData(pStateColInfoData, j); if (!pInfo->hasKey) { - memcpy(pInfo->stateKey.pData, val, bytes); + // todo extract method + if (IS_VAR_DATA_TYPE(pInfo->stateKey.type)) { + varDataCopy(pInfo->stateKey.pData, val); + } else { + memcpy(pInfo->stateKey.pData, val, bytes); + } + pInfo->hasKey = true; doKeepNewWindowStartInfo(pRowSup, tsList, j); doKeepTuple(pRowSup, tsList[j]); - } else if (memcmp(pInfo->stateKey.pData, val, bytes) == 0) { + } else if (compareVal(val, &pInfo->stateKey)) { doKeepTuple(pRowSup, tsList[j]); if (j == 0 && pRowSup->startRowIndex != 0) { pRowSup->startRowIndex = 0; @@ -861,6 +881,13 @@ static void doStateWindowAggImpl(SOperatorInfo* pOperator, SStateWindowOperatorI // here we start a new session window doKeepNewWindowStartInfo(pRowSup, tsList, j); doKeepTuple(pRowSup, tsList[j]); + + // todo extract method + if (IS_VAR_DATA_TYPE(pInfo->stateKey.type)) { + varDataCopy(pInfo->stateKey.pData, val); + } else { + memcpy(pInfo->stateKey.pData, val, bytes); + } } } @@ -888,7 +915,7 @@ static SSDataBlock* doStateWindowAgg(SOperatorInfo* pOperator) { SOptrBasicInfo* pBInfo = &pInfo->binfo; if (pOperator->status == OP_RES_TO_RETURN) { - doBuildResultDatablock(pTaskInfo, pBInfo, &pInfo->groupResInfo, pOperator->pExpr, pInfo->aggSup.pResultBuf); + doBuildResultDatablock(pOperator, pBInfo, &pInfo->groupResInfo, pInfo->aggSup.pResultBuf); if (pBInfo->pRes->info.rows == 0 || !hasRemainDataInCurrentGroup(&pInfo->groupResInfo)) { doSetOperatorCompleted(pOperator); return NULL; @@ -921,7 +948,7 @@ static SSDataBlock* doStateWindowAgg(SOperatorInfo* pOperator) { initGroupedResultInfo(&pInfo->groupResInfo, pInfo->aggSup.pResultRowHashTable, true); blockDataEnsureCapacity(pBInfo->pRes, pOperator->resultInfo.capacity); - doBuildResultDatablock(pTaskInfo, pBInfo, &pInfo->groupResInfo, pOperator->pExpr, pInfo->aggSup.pResultBuf); + doBuildResultDatablock(pOperator, pBInfo, &pInfo->groupResInfo, pInfo->aggSup.pResultBuf); if (pBInfo->pRes->info.rows == 0 || !hasRemainDataInCurrentGroup(&pInfo->groupResInfo)) { doSetOperatorCompleted(pOperator); } @@ -948,7 +975,7 @@ static SSDataBlock* doBuildIntervalResult(SOperatorInfo* pOperator) { } blockDataEnsureCapacity(pBlock, pOperator->resultInfo.capacity); - 
doBuildResultDatablock(pTaskInfo, &pInfo->binfo, &pInfo->groupResInfo, pOperator->pExpr, pInfo->aggSup.pResultBuf); + doBuildResultDatablock(pOperator, &pInfo->binfo, &pInfo->groupResInfo, pInfo->aggSup.pResultBuf); if (pBlock->info.rows == 0 || !hasRemainDataInCurrentGroup(&pInfo->groupResInfo)) { doSetOperatorCompleted(pOperator); @@ -1012,7 +1039,7 @@ static SSDataBlock* doStreamIntervalAgg(SOperatorInfo* pOperator) { } if (pOperator->status == OP_RES_TO_RETURN) { - doBuildResultDatablock(pOperator->pTaskInfo, &pInfo->binfo, &pInfo->groupResInfo, pOperator->pExpr, pInfo->aggSup.pResultBuf); + doBuildResultDatablock(pOperator, &pInfo->binfo, &pInfo->groupResInfo, pInfo->aggSup.pResultBuf); if (pInfo->binfo.pRes->info.rows == 0 || !hasRemainDataInCurrentGroup(&pInfo->groupResInfo)) { pOperator->status = OP_EXEC_DONE; } @@ -1053,7 +1080,7 @@ static SSDataBlock* doStreamIntervalAgg(SOperatorInfo* pOperator) { initMultiResInfoFromArrayList(&pInfo->groupResInfo, pUpdated); blockDataEnsureCapacity(pInfo->binfo.pRes, pOperator->resultInfo.capacity); - doBuildResultDatablock(pOperator->pTaskInfo, &pInfo->binfo, &pInfo->groupResInfo, pOperator->pExpr, pInfo->aggSup.pResultBuf); + doBuildResultDatablock(pOperator, &pInfo->binfo, &pInfo->groupResInfo, pInfo->aggSup.pResultBuf); // TODO: remove for stream /*ASSERT(pInfo->binfo.pRes->info.rows > 0);*/ @@ -1283,7 +1310,7 @@ static SSDataBlock* doSessionWindowAgg(SOperatorInfo* pOperator) { SOptrBasicInfo* pBInfo = &pInfo->binfo; if (pOperator->status == OP_RES_TO_RETURN) { - doBuildResultDatablock(pOperator->pTaskInfo, pBInfo, &pInfo->groupResInfo, pOperator->pExpr, pInfo->aggSup.pResultBuf); + doBuildResultDatablock(pOperator, pBInfo, &pInfo->groupResInfo, pInfo->aggSup.pResultBuf); if (pBInfo->pRes->info.rows == 0 || !hasRemainDataInCurrentGroup(&pInfo->groupResInfo)) { doSetOperatorCompleted(pOperator); return NULL; @@ -1316,7 +1343,7 @@ static SSDataBlock* doSessionWindowAgg(SOperatorInfo* pOperator) { initGroupedResultInfo(&pInfo->groupResInfo, pInfo->aggSup.pResultRowHashTable, true); blockDataEnsureCapacity(pBInfo->pRes, pOperator->resultInfo.capacity); - doBuildResultDatablock(pOperator->pTaskInfo, pBInfo, &pInfo->groupResInfo, pOperator->pExpr, pInfo->aggSup.pResultBuf); + doBuildResultDatablock(pOperator, pBInfo, &pInfo->groupResInfo, pInfo->aggSup.pResultBuf); if (pBInfo->pRes->info.rows == 0 || !hasRemainDataInCurrentGroup(&pInfo->groupResInfo)) { doSetOperatorCompleted(pOperator); } @@ -1406,14 +1433,21 @@ _error: SOperatorInfo* createStatewindowOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExpr, int32_t numOfCols, SSDataBlock* pResBlock, STimeWindowAggSupp* pTwAggSup, int32_t tsSlotId, - SExecTaskInfo* pTaskInfo) { + SColumn* pStateKeyCol, SExecTaskInfo* pTaskInfo) { SStateWindowOperatorInfo* pInfo = taosMemoryCalloc(1, sizeof(SStateWindowOperatorInfo)); SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo)); if (pInfo == NULL || pOperator == NULL) { goto _error; } - pInfo->colIndex = -1; + pInfo->stateCol = *pStateKeyCol; + pInfo->stateKey.type = pInfo->stateCol.type; + pInfo->stateKey.bytes = pInfo->stateCol.bytes; + pInfo->stateKey.pData = taosMemoryCalloc(1, pInfo->stateCol.bytes); + if (pInfo->stateKey.pData == NULL) { + goto _error; + } + size_t keyBufSize = sizeof(int64_t) + sizeof(int64_t) + POINTER_BYTES; initResultSizeInfo(pOperator, 4096); @@ -1423,15 +1457,15 @@ SOperatorInfo* createStatewindowOperatorInfo(SOperatorInfo* downstream, SExprInf pInfo->twAggSup = *pTwAggSup; 
initExecTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &pTaskInfo->window); - pInfo->tsSlotId = tsSlotId; - pOperator->name = "StateWindowOperator"; + pInfo->tsSlotId = tsSlotId; + pOperator->name = "StateWindowOperator"; pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_STATE_WINDOW; - pOperator->blocking = true; - pOperator->status = OP_NOT_OPENED; - pOperator->pExpr = pExpr; + pOperator->blocking = true; + pOperator->status = OP_NOT_OPENED; + pOperator->pExpr = pExpr; pOperator->numOfExprs = numOfCols; - pOperator->pTaskInfo = pTaskInfo; - pOperator->info = pInfo; + pOperator->pTaskInfo = pTaskInfo; + pOperator->info = pInfo; pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, doStateWindowAgg, NULL, NULL, destroyStateWindowOperatorInfo, aggEncodeResultRow, aggDecodeResultRow, NULL); diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index c86c1ac2e9..b7f007fce6 100644 --- a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -1538,7 +1538,9 @@ static EDealRes checkStateExpr(SNode* pNode, void* pContext) { if (QUERY_NODE_COLUMN == nodeType(pNode)) { STranslateContext* pCxt = pContext; SColumnNode* pCol = (SColumnNode*)pNode; - if (!IS_INTEGER_TYPE(pCol->node.resType.type)) { + + int32_t type = pCol->node.resType.type; + if (!IS_INTEGER_TYPE(type) && type != TSDB_DATA_TYPE_BOOL && !IS_VAR_DATA_TYPE(type)) { return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_INVALID_STATE_WIN_TYPE); } if (COLUMN_TYPE_TAG == pCol->colType) { diff --git a/source/libs/parser/src/parUtil.c b/source/libs/parser/src/parUtil.c index 43aea8de7c..7183d956a9 100644 --- a/source/libs/parser/src/parUtil.c +++ b/source/libs/parser/src/parUtil.c @@ -91,7 +91,7 @@ static char* getSyntaxErrFormat(int32_t errCode) { case TSDB_CODE_PAR_AGG_FUNC_NESTING: return "Aggregate functions do not support nesting"; case TSDB_CODE_PAR_INVALID_STATE_WIN_TYPE: - return "Only support STATE_WINDOW on integer column"; + return "Only support STATE_WINDOW on integer/bool/varchar column"; case TSDB_CODE_PAR_INVALID_STATE_WIN_COL: return "Not support STATE_WINDOW on tag column"; case TSDB_CODE_PAR_INVALID_STATE_WIN_TABLE: From 4d2ac0af0bf92c7f31e5f4d963df1489ab2c3e46 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 13 May 2022 18:19:49 +0800 Subject: [PATCH 11/21] test: Print the executed sql statements in the script files. 
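
The change below passes True as the second argument of tdSql.init() in join.py; that argument is the logSql flag, so every statement the case executes is also written to the run's sql.txt log, which makes failed CI runs easier to trace. A minimal sketch of the assumed usage follows (helper objects come from tests/pytest/util as in the other cases in this series; the bare taos.connect() and the sample statement are illustrative only):

    import taos
    from util.log import *   # provides tdLog
    from util.sql import *   # provides tdSql

    conn = taos.connect()                   # connection details taken from the local taos.cfg (illustrative)
    tdSql.init(conn.cursor(), True)         # True = logSql: executed statements are echoed into sql.txt
    tdSql.execute("show databases")         # this statement is now recorded in the log as well
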
--- tests/system-test/2-query/join.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/system-test/2-query/join.py b/tests/system-test/2-query/join.py index 289dd3d62d..8fc131e581 100644 --- a/tests/system-test/2-query/join.py +++ b/tests/system-test/2-query/join.py @@ -28,7 +28,7 @@ class TDTestCase: def init(self, conn, logSql): tdLog.debug(f"start to excute {__file__}") - tdSql.init(conn.cursor()) + tdSql.init(conn.cursor(), True) def __query_condition(self,tbname): query_condition = [] From d7e34a66424414cd36f8dbca72a8d0a55c0bc7c1 Mon Sep 17 00:00:00 2001 From: Liu Jicong Date: Fri, 13 May 2022 18:39:37 +0800 Subject: [PATCH 12/21] fix(tmq): set config --- example/src/tmq.c | 6 +++--- source/client/src/tmq.c | 28 ++++++++++++++++++---------- source/dnode/vnode/src/tq/tq.c | 1 + 3 files changed, 22 insertions(+), 13 deletions(-) diff --git a/example/src/tmq.c b/example/src/tmq.c index e867f17e78..b4013f26ee 100644 --- a/example/src/tmq.c +++ b/example/src/tmq.c @@ -61,7 +61,7 @@ int32_t init_env() { taos_free_result(pRes); pRes = - taos_query(pConn, "create stable if not exists st1 (ts timestamp, c1 int, c2 float, c3 binary(10)) tags(t1 int)"); + taos_query(pConn, "create stable if not exists st1 (ts timestamp, c1 int, c2 float, c3 binary(16)) tags(t1 int)"); if (taos_errno(pRes) != 0) { printf("failed to create super table st1, reason:%s\n", taos_errstr(pRes)); return -1; @@ -106,8 +106,8 @@ int32_t create_topic() { } taos_free_result(pRes); - /*pRes = taos_query(pConn, "create topic topic_ctb_column as abc1");*/ - pRes = taos_query(pConn, "create topic topic_ctb_column as select ts, c1, c2, c3 from st1"); + pRes = taos_query(pConn, "create topic topic_ctb_column as abc1"); + /*pRes = taos_query(pConn, "create topic topic_ctb_column as select ts, c1, c2, c3 from st1");*/ if (taos_errno(pRes) != 0) { printf("failed to create topic topic_ctb_column, reason:%s\n", taos_errstr(pRes)); return -1; diff --git a/source/client/src/tmq.c b/source/client/src/tmq.c index a6b8b842f9..639f00ab86 100644 --- a/source/client/src/tmq.c +++ b/source/client/src/tmq.c @@ -185,6 +185,7 @@ typedef struct { int32_t async; tsem_t rspSem; tmq_resp_err_t rspErr; + SArray* offsets; } SMqCommitCbParam; tmq_conf_t* tmq_conf_new() { @@ -246,10 +247,13 @@ tmq_conf_res_t tmq_conf_set(tmq_conf_t* conf, const char* key, const char* value if (strcmp(key, "msg.with.table.name") == 0) { if (strcmp(value, "true") == 0) { conf->withTbName = 1; + return TMQ_CONF_OK; } else if (strcmp(value, "false") == 0) { conf->withTbName = 0; + return TMQ_CONF_OK; } else if (strcmp(value, "none") == 0) { conf->withTbName = -1; + return TMQ_CONF_OK; } else { return TMQ_CONF_INVALID; } @@ -395,6 +399,9 @@ int32_t tmqCommitCb(void* param, const SDataBuf* pMsg, int32_t code) { if (!pParam->async) tsem_post(&pParam->rspSem); else { + if (pParam->offsets) { + taosArrayDestroy(pParam->offsets); + } tsem_destroy(&pParam->rspSem); /*if (pParam->pArray) {*/ /*taosArrayDestroy(pParam->pArray);*/ @@ -540,10 +547,10 @@ tmq_resp_err_t tmq_commit(tmq_t* tmq, const tmq_topic_vgroup_list_t* offsets, in // build msg // send to mnode SMqCMCommitOffsetReq req; - SArray* pArray = NULL; + SArray* pOffsets = NULL; if (offsets == NULL) { - pArray = taosArrayInit(0, sizeof(SMqOffset)); + pOffsets = taosArrayInit(0, sizeof(SMqOffset)); for (int i = 0; i < taosArrayGetSize(tmq->clientTopics); i++) { SMqClientTopic* pTopic = taosArrayGet(tmq->clientTopics, i); for (int j = 0; j < taosArrayGetSize(pTopic->vgs); j++) { @@ -553,11 +560,11 @@ 
tmq_resp_err_t tmq_commit(tmq_t* tmq, const tmq_topic_vgroup_list_t* offsets, in strcpy(offset.cgroup, tmq->groupId); offset.vgId = pVg->vgId; offset.offset = pVg->currentOffset; - taosArrayPush(pArray, &offset); + taosArrayPush(pOffsets, &offset); } } - req.num = pArray->size; - req.offsets = pArray->pData; + req.num = pOffsets->size; + req.offsets = pOffsets->pData; } else { req.num = taosArrayGetSize(&offsets->container); req.offsets = (SMqOffset*)offsets->container.pData; @@ -591,6 +598,7 @@ tmq_resp_err_t tmq_commit(tmq_t* tmq, const tmq_topic_vgroup_list_t* offsets, in pParam->tmq = tmq; tsem_init(&pParam->rspSem, 0, 0); pParam->async = async; + pParam->offsets = pOffsets; pRequest->body.requestMsg = (SDataBuf){ .pData = buf, @@ -613,8 +621,8 @@ tmq_resp_err_t tmq_commit(tmq_t* tmq, const tmq_topic_vgroup_list_t* offsets, in tsem_destroy(&pParam->rspSem); taosMemoryFree(pParam); - if (pArray) { - taosArrayDestroy(pArray); + if (pOffsets) { + taosArrayDestroy(pOffsets); } } @@ -1015,7 +1023,7 @@ int32_t tmqAskEp(tmq_t* tmq, bool async) { atomic_store_32(&tmq->epSkipCnt, 0); #endif int32_t tlen = sizeof(SMqAskEpReq); - SMqAskEpReq* req = taosMemoryMalloc(tlen); + SMqAskEpReq* req = taosMemoryCalloc(1, tlen); if (req == NULL) { tscError("failed to malloc get subscribe ep buf"); /*atomic_store_8(&tmq->epStatus, 0);*/ @@ -1025,7 +1033,7 @@ int32_t tmqAskEp(tmq_t* tmq, bool async) { req->epoch = htonl(tmq->epoch); strcpy(req->cgroup, tmq->groupId); - SMqAskEpCbParam* pParam = taosMemoryMalloc(sizeof(SMqAskEpCbParam)); + SMqAskEpCbParam* pParam = taosMemoryCalloc(1, sizeof(SMqAskEpCbParam)); if (pParam == NULL) { tscError("failed to malloc subscribe param"); taosMemoryFree(req); @@ -1107,7 +1115,7 @@ SMqPollReq* tmqBuildConsumeReqImpl(tmq_t* tmq, int64_t waitTime, SMqClientTopic* reqOffset = tmq->resetOffsetCfg; } - SMqPollReq* pReq = taosMemoryMalloc(sizeof(SMqPollReq)); + SMqPollReq* pReq = taosMemoryCalloc(1, sizeof(SMqPollReq)); if (pReq == NULL) { return NULL; } diff --git a/source/dnode/vnode/src/tq/tq.c b/source/dnode/vnode/src/tq/tq.c index 28cdb39bd5..9526451907 100644 --- a/source/dnode/vnode/src/tq/tq.c +++ b/source/dnode/vnode/src/tq/tq.c @@ -559,6 +559,7 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg, int32_t workerId) { } // db subscribe } else if (pExec->subType == TOPIC_SUB_TYPE__DB) { + rsp.withSchema = 1; STqReadHandle* pReader = pExec->pExecReader[workerId]; tqReadHandleSetMsg(pReader, pCont, 0); while (tqNextDataBlock(pReader)) { From eba47b9ef4d34da14b64360d5449c20cf317cc42 Mon Sep 17 00:00:00 2001 From: plum-lihui Date: Fri, 13 May 2022 18:54:29 +0800 Subject: [PATCH 13/21] [test: add valgrind run tmq] --- tests/system-test/99-TDcase/TD-15557.py | 206 +++++++++++++++++++++++- 1 file changed, 198 insertions(+), 8 deletions(-) diff --git a/tests/system-test/99-TDcase/TD-15557.py b/tests/system-test/99-TDcase/TD-15557.py index 4798bb7c8d..e005985fe0 100644 --- a/tests/system-test/99-TDcase/TD-15557.py +++ b/tests/system-test/99-TDcase/TD-15557.py @@ -49,6 +49,19 @@ class TDTestCase: print(cur) return cur + def startTmqSimProcess(self,buildPath,cfgPath,pollDelay,dbName,showMsg,showRow,cdbName,valgrind): + shellCmd = 'nohup ' + if valgrind == 1: + logFile = cfgPath + '/../log/valgrind-tmq.log' + shellCmd = 'nohup valgrind --log-file=' + logFile + shellCmd += '--tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all --num-callers=20 -v --workaround-gcc296-bugs=yes ' + + shellCmd += buildPath + '/build/bin/tmq_sim -c ' + cfgPath + 
shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName) + shellCmd += "> /dev/null 2>&1 &" + tdLog.info(shellCmd) + os.system(shellCmd) + def create_tables(self,tsql, dbName,vgroups,stbName,ctbNum,rowsPerTbl): tsql.execute("create database if not exists %s vgroups %d"%(dbName, vgroups)) tsql.execute("use %s" %dbName) @@ -113,9 +126,8 @@ class TDTestCase: parameterDict["startTs"]) return - def tmqCase1(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 1: Produce while consume to subscribe one db") + tdLog.printNoPrefix("======== test case 1: Produce while one consume to subscribe one db") tdLog.info("step 1: create database, stb, ctb and insert data") # create and start thread parameterDict = {'cfg': '', \ @@ -153,7 +165,7 @@ class TDTestCase: auto.offset.reset:earliest' sql = "insert into %s.consumeinfo values "%cdbName sql += "(now, %d, '%s', '%s', %d, %d)"%(consumerId, topicList, keyList, expectrowcnt, ifcheckdata) - tdSql.query(sql) + tdSql.query(sql) event.wait() @@ -162,11 +174,8 @@ class TDTestCase: showMsg = 1 showRow = 1 - shellCmd = 'nohup ' + buildPath + '/build/bin/tmq_sim -c ' + cfgPath - shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, parameterDict["dbName"], showMsg, showRow, cdbName) - shellCmd += "> /dev/null 2>&1 &" - tdLog.info(shellCmd) - os.system(shellCmd) + valgrind = 1 + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow, cdbName,valgrind) # wait for data ready prepareEnvThread.join() @@ -190,6 +199,187 @@ class TDTestCase: tdLog.printNoPrefix("======== test case 1 end ...... ") + def tmqCase2(self, cfgPath, buildPath): + tdLog.printNoPrefix("======== test case 2: Produce while two consumers to subscribe one db") + tdLog.info("step 1: create database, stb, ctb and insert data") + # create and start thread + parameterDict = {'cfg': '', \ + 'dbName': 'db2', \ + 'vgroups': 4, \ + 'stbName': 'stb', \ + 'ctbNum': 10, \ + 'rowsPerTbl': 100000, \ + 'batchNum': 100, \ + 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 + parameterDict['cfg'] = cfgPath + + tdSql.execute("create database if not exists %s vgroups %d" %(parameterDict['dbName'], parameterDict['vgroups'])) + + prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) + prepareEnvThread.start() + + tdLog.info("create topics from db") + topicName1 = 'topic_db1' + + tdSql.execute("create topic %s as %s" %(topicName1, parameterDict['dbName'])) + + tdLog.info("create consume info table and consume result table") + cdbName = parameterDict["dbName"] + tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int)"%cdbName) + tdSql.query("create table %s.consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)"%cdbName) + + consumerId = 0 + expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + topicList = topicName1 + ifcheckdata = 0 + keyList = 'group.id:cgrp1,\ + enable.auto.commit:false,\ + auto.commit.interval.ms:6000,\ + auto.offset.reset:earliest' + sql = "insert into %s.consumeinfo values "%cdbName + sql += "(now, %d, '%s', '%s', %d, %d)"%(consumerId, topicList, keyList, expectrowcnt, ifcheckdata) + tdSql.query(sql) + + consumerId = 1 + sql = "insert into %s.consumeinfo values "%cdbName + sql += "(now, %d, '%s', '%s', %d, %d)"%(consumerId, topicList, keyList, expectrowcnt, ifcheckdata) + tdSql.query(sql) + + event.wait() + + 
tdLog.info("start consume processor") + pollDelay = 5 + showMsg = 1 + showRow = 1 + + valgrind = 1 + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow, cdbName,valgrind) + + # wait for data ready + prepareEnvThread.join() + + tdLog.info("insert process end, and start to check consume result") + while 1: + tdSql.query("select * from %s.consumeresult"%cdbName) + #tdLog.info("row: %d, %l64d, %l64d"%(tdSql.getData(0, 1),tdSql.getData(0, 2),tdSql.getData(0, 3)) + if tdSql.getRows() == 2: + break + else: + time.sleep(5) + + consumerId0 = tdSql.getData(0 , 1) + consumerId1 = tdSql.getData(1 , 1) + actConsumeRows0 = tdSql.getData(0 , 3) + actConsumeRows1 = tdSql.getData(1 , 3) + + tdLog.info("consumer %d rows: %d"%(consumerId0, actConsumeRows0)) + tdLog.info("consumer %d rows: %d"%(consumerId1, actConsumeRows1)) + + totalConsumeRows = actConsumeRows0 + actConsumeRows1 + if totalConsumeRows != expectrowcnt: + tdLog.exit("tmq consume rows error!") + + tdSql.query("drop topic %s"%topicName1) + + tdLog.printNoPrefix("======== test case 2 end ...... ") + + def tmqCase3(self, cfgPath, buildPath): + tdLog.printNoPrefix("======== test case 3: Produce while one consumers to subscribe one db, include 2 stb") + tdLog.info("step 1: create database, stb, ctb and insert data") + # create and start thread + parameterDict = {'cfg': '', \ + 'dbName': 'db3', \ + 'vgroups': 4, \ + 'stbName': 'stb', \ + 'ctbNum': 10, \ + 'rowsPerTbl': 100000, \ + 'batchNum': 100, \ + 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 + parameterDict['cfg'] = cfgPath + + tdSql.execute("create database if not exists %s vgroups %d" %(parameterDict['dbName'], parameterDict['vgroups'])) + + prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) + prepareEnvThread.start() + + parameterDict2 = {'cfg': '', \ + 'dbName': 'db3', \ + 'vgroups': 4, \ + 'stbName': 'stb2', \ + 'ctbNum': 10, \ + 'rowsPerTbl': 100000, \ + 'batchNum': 100, \ + 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 + parameterDict['cfg'] = cfgPath + + prepareEnvThread2 = threading.Thread(target=self.prepareEnv, kwargs=parameterDict2) + prepareEnvThread2.start() + + tdLog.info("create topics from db") + topicName1 = 'topic_db1' + + tdSql.execute("create topic %s as %s" %(topicName1, parameterDict['dbName'])) + + tdLog.info("create consume info table and consume result table") + cdbName = parameterDict["dbName"] + tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int)"%cdbName) + tdSql.query("create table %s.consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)"%cdbName) + + consumerId = 0 + expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + parameterDict2["rowsPerTbl"] * parameterDict2["ctbNum"] + topicList = topicName1 + ifcheckdata = 0 + keyList = 'group.id:cgrp1,\ + enable.auto.commit:false,\ + auto.commit.interval.ms:6000,\ + auto.offset.reset:earliest' + sql = "insert into %s.consumeinfo values "%cdbName + sql += "(now, %d, '%s', '%s', %d, %d)"%(consumerId, topicList, keyList, expectrowcnt, ifcheckdata) + tdSql.query(sql) + + # consumerId = 1 + # sql = "insert into %s.consumeinfo values "%cdbName + # sql += "(now, %d, '%s', '%s', %d, %d)"%(consumerId, topicList, keyList, expectrowcnt, ifcheckdata) + # tdSql.query(sql) + + event.wait() + + tdLog.info("start consume processor") + pollDelay = 5 + showMsg = 1 + showRow = 1 + valgrind 
= 1 + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow, cdbName,valgrind) + + # wait for data ready + prepareEnvThread.join() + prepareEnvThread2.join() + + tdLog.info("insert process end, and start to check consume result") + while 1: + tdSql.query("select * from %s.consumeresult"%cdbName) + #tdLog.info("row: %d, %l64d, %l64d"%(tdSql.getData(0, 1),tdSql.getData(0, 2),tdSql.getData(0, 3)) + if tdSql.getRows() == 1: + break + else: + time.sleep(5) + + consumerId0 = tdSql.getData(0 , 1) + #consumerId1 = tdSql.getData(1 , 1) + actConsumeRows0 = tdSql.getData(0 , 3) + #actConsumeRows1 = tdSql.getData(1 , 3) + + tdLog.info("consumer %d rows: %d"%(consumerId0, actConsumeRows0)) + #tdLog.info("consumer %d rows: %d"%(consumerId1, actConsumeRows1)) + + #totalConsumeRows = actConsumeRows0 + actConsumeRows1 + if actConsumeRows0 != expectrowcnt: + tdLog.exit("tmq consume rows error!") + + tdSql.query("drop topic %s"%topicName1) + + tdLog.printNoPrefix("======== test case 3 end ...... ") + def run(self): tdSql.prepare() From 8873e392fb361d5787e785fdb67dbbadb2641258 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Fri, 13 May 2022 18:59:44 +0800 Subject: [PATCH 14/21] enh: enable limit in shell --- tools/shell/src/shellArguments.c | 2 +- tools/shell/src/shellEngine.c | 79 ++++++++++++++++++++++++-------- 2 files changed, 62 insertions(+), 19 deletions(-) diff --git a/tools/shell/src/shellArguments.c b/tools/shell/src/shellArguments.c index 13f8cde3e3..1639fd1ca6 100644 --- a/tools/shell/src/shellArguments.c +++ b/tools/shell/src/shellArguments.c @@ -332,7 +332,7 @@ int32_t shellParseArgs(int32_t argc, char *argv[]) { shellInitArgs(argc, argv); shell.info.clientVersion = "Welcome to the TDengine shell from %s, Client Version:%s\n" - "Copyright (c) 2020 by TAOS Data, Inc. All rights reserved.\n\n"; + "Copyright (c) 2022 by TAOS Data, Inc. 
All rights reserved.\n\n"; shell.info.promptHeader = "taos> "; shell.info.promptContinue = " -> "; shell.info.promptSize = 6; diff --git a/tools/shell/src/shellEngine.c b/tools/shell/src/shellEngine.c index 21fd3d0359..8f0d39377e 100644 --- a/tools/shell/src/shellEngine.c +++ b/tools/shell/src/shellEngine.c @@ -29,11 +29,11 @@ static void shellDumpFieldToFile(TdFilePtr pFile, const char *val, TAOS_FIELD static int32_t shellDumpResultToFile(const char *fname, TAOS_RES *tres); static void shellPrintNChar(const char *str, int32_t length, int32_t width); static void shellPrintField(const char *val, TAOS_FIELD *field, int32_t width, int32_t length, int32_t precision); -static int32_t shellVerticalPrintResult(TAOS_RES *tres); +static int32_t shellVerticalPrintResult(TAOS_RES *tres, const char *sql); static int32_t shellCalcColWidth(TAOS_FIELD *field, int32_t precision); static void shellPrintHeader(TAOS_FIELD *fields, int32_t *width, int32_t num_fields); -static int32_t shellHorizontalPrintResult(TAOS_RES *tres); -static int32_t shellDumpResult(TAOS_RES *tres, char *fname, int32_t *error_no, bool vertical); +static int32_t shellHorizontalPrintResult(TAOS_RES *tres, const char *sql); +static int32_t shellDumpResult(TAOS_RES *tres, char *fname, int32_t *error_no, bool vertical, const char *sql); static void shellReadHistory(); static void shellWriteHistory(); static void shellPrintError(TAOS_RES *tres, int64_t st); @@ -121,7 +121,7 @@ int32_t shellRunCommand(char *command) { char quote = 0, *cmd = command; for (char c = *command++; c != 0; c = *command++) { if (c == '\\' && (*command == '\'' || *command == '"' || *command == '`')) { - command ++; + command++; continue; } @@ -190,7 +190,7 @@ void shellRunSingleCommandImp(char *command) { if (pFields != NULL) { // select and show kinds of commands int32_t error_no = 0; - int32_t numOfRows = shellDumpResult(pSql, fname, &error_no, printMode); + int32_t numOfRows = shellDumpResult(pSql, fname, &error_no, printMode, command); if (numOfRows < 0) return; et = taosGetTimestampUs(); @@ -272,6 +272,7 @@ void shellDumpFieldToFile(TdFilePtr pFile, const char *val, TAOS_FIELD *field, i return; } + int n; char buf[TSDB_MAX_BYTES_PER_ROW]; switch (field->type) { case TSDB_DATA_TYPE_BOOL: @@ -280,20 +281,37 @@ void shellDumpFieldToFile(TdFilePtr pFile, const char *val, TAOS_FIELD *field, i case TSDB_DATA_TYPE_TINYINT: taosFprintfFile(pFile, "%d", *((int8_t *)val)); break; + case TSDB_DATA_TYPE_UTINYINT: + taosFprintfFile(pFile, "%u", *((uint8_t *)val)); + break; case TSDB_DATA_TYPE_SMALLINT: taosFprintfFile(pFile, "%d", *((int16_t *)val)); break; + case TSDB_DATA_TYPE_USMALLINT: + taosFprintfFile(pFile, "%u", *((uint16_t *)val)); + break; case TSDB_DATA_TYPE_INT: taosFprintfFile(pFile, "%d", *((int32_t *)val)); break; + case TSDB_DATA_TYPE_UINT: + taosFprintfFile(pFile, "%u", *((uint32_t *)val)); + break; case TSDB_DATA_TYPE_BIGINT: taosFprintfFile(pFile, "%" PRId64, *((int64_t *)val)); break; + case TSDB_DATA_TYPE_UBIGINT: + taosFprintfFile(pFile, "%" PRIu64, *((uint64_t *)val)); + break; case TSDB_DATA_TYPE_FLOAT: taosFprintfFile(pFile, "%.5f", GET_FLOAT_VAL(val)); break; case TSDB_DATA_TYPE_DOUBLE: - taosFprintfFile(pFile, "%.9f", GET_DOUBLE_VAL(val)); + n = snprintf(buf, TSDB_MAX_BYTES_PER_ROW, "%*.9f", length, GET_DOUBLE_VAL(val)); + if (n > MAX(25, length)) { + taosFprintfFile(pFile, "%*.15e", length, GET_DOUBLE_VAL(val)); + } else { + taosFprintfFile(pFile, "%s", buf); + } break; case TSDB_DATA_TYPE_BINARY: case TSDB_DATA_TYPE_NCHAR: @@ -435,6 +453,7 @@ 
void shellPrintField(const char *val, TAOS_FIELD *field, int32_t width, int32_t return; } + int n; char buf[TSDB_MAX_BYTES_PER_ROW]; switch (field->type) { case TSDB_DATA_TYPE_BOOL: @@ -468,7 +487,12 @@ void shellPrintField(const char *val, TAOS_FIELD *field, int32_t width, int32_t printf("%*.5f", width, GET_FLOAT_VAL(val)); break; case TSDB_DATA_TYPE_DOUBLE: - printf("%*.9f", width, GET_DOUBLE_VAL(val)); + n = snprintf(buf, TSDB_MAX_BYTES_PER_ROW, "%*.9f", width, GET_DOUBLE_VAL(val)); + if (n > MAX(25, width)) { + printf("%*.15e", width, GET_DOUBLE_VAL(val)); + } else { + printf("%s", buf); + } break; case TSDB_DATA_TYPE_BINARY: case TSDB_DATA_TYPE_NCHAR: @@ -483,7 +507,16 @@ void shellPrintField(const char *val, TAOS_FIELD *field, int32_t width, int32_t } } -int32_t shellVerticalPrintResult(TAOS_RES *tres) { +bool shellIsLimitQuery(const char *sql) { + //todo refactor + if (strstr(sql, "limit") != NULL || strstr(sql, "LIMIT") != NULL) { + return true; + } + + return false; +} + +int32_t shellVerticalPrintResult(TAOS_RES *tres, const char *sql) { TAOS_ROW row = taos_fetch_row(tres); if (row == NULL) { return 0; @@ -503,7 +536,7 @@ int32_t shellVerticalPrintResult(TAOS_RES *tres) { uint64_t resShowMaxNum = UINT64_MAX; - if (shell.args.commands == NULL && shell.args.file[0] == 0) { + if (shell.args.commands == NULL && shell.args.file[0] == 0 && !shellIsLimitQuery(sql)) { resShowMaxNum = SHELL_DEFAULT_RES_SHOW_NUM; } @@ -525,8 +558,13 @@ int32_t shellVerticalPrintResult(TAOS_RES *tres) { putchar('\n'); } } else if (showMore) { - printf("[100 Rows showed, and more rows are fetching but will not be showed. You can ctrl+c to stop or wait.]\n"); - printf("[You can add limit statement to get more or redirect results to specific file to get all.]\n"); + printf("\n"); + printf(" Notice: The result shows only the first %d rows.\n", SHELL_DEFAULT_RES_SHOW_NUM); + printf(" You can use the `LIMIT` clause to get fewer result to show.\n"); + printf(" Or use '>>' to redirect the whole set of the result to a specified file.\n"); + printf("\n"); + printf(" You can use Ctrl+C to stop the underway fetching.\n"); + printf("\n"); showMore = 0; } @@ -618,7 +656,7 @@ void shellPrintHeader(TAOS_FIELD *fields, int32_t *width, int32_t num_fields) { putchar('\n'); } -int32_t shellHorizontalPrintResult(TAOS_RES *tres) { +int32_t shellHorizontalPrintResult(TAOS_RES *tres, const char *sql) { TAOS_ROW row = taos_fetch_row(tres); if (row == NULL) { return 0; @@ -637,7 +675,7 @@ int32_t shellHorizontalPrintResult(TAOS_RES *tres) { uint64_t resShowMaxNum = UINT64_MAX; - if (shell.args.commands == NULL && shell.args.file[0] == 0) { + if (shell.args.commands == NULL && shell.args.file[0] == 0 && !shellIsLimitQuery(sql)) { resShowMaxNum = SHELL_DEFAULT_RES_SHOW_NUM; } @@ -655,8 +693,13 @@ int32_t shellHorizontalPrintResult(TAOS_RES *tres) { } putchar('\n'); } else if (showMore) { - printf("[100 Rows showed, and more rows are fetching but will not be showed. 
You can ctrl+c to stop or wait.]\n"); - printf("[You can add limit statement to show more or redirect results to specific file to get all.]\n"); + printf("\n"); + printf(" Notice: The result shows only the first %d rows.\n", SHELL_DEFAULT_RES_SHOW_NUM); + printf(" You can use the `LIMIT` clause to get fewer result to show.\n"); + printf(" Or use '>>' to redirect the whole set of the result to a specified file.\n"); + printf("\n"); + printf(" You can use Ctrl+C to stop the underway fetching.\n"); + printf("\n"); showMore = 0; } @@ -667,14 +710,14 @@ int32_t shellHorizontalPrintResult(TAOS_RES *tres) { return numOfRows; } -int32_t shellDumpResult(TAOS_RES *tres, char *fname, int32_t *error_no, bool vertical) { +int32_t shellDumpResult(TAOS_RES *tres, char *fname, int32_t *error_no, bool vertical, const char *sql) { int32_t numOfRows = 0; if (fname != NULL) { numOfRows = shellDumpResultToFile(fname, tres); } else if (vertical) { - numOfRows = shellVerticalPrintResult(tres); + numOfRows = shellVerticalPrintResult(tres, sql); } else { - numOfRows = shellHorizontalPrintResult(tres); + numOfRows = shellHorizontalPrintResult(tres, sql); } *error_no = taos_errno(tres); From b8f7526361bceda08972371088d995f0a7034964 Mon Sep 17 00:00:00 2001 From: Liu Jicong Date: Fri, 13 May 2022 19:09:36 +0800 Subject: [PATCH 15/21] fix(tmq): msg schema --- source/dnode/vnode/src/inc/tq.h | 1 + source/dnode/vnode/src/tq/tqRead.c | 8 +++++--- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/source/dnode/vnode/src/inc/tq.h b/source/dnode/vnode/src/inc/tq.h index 5eb89e8bb7..38dedee5a2 100644 --- a/source/dnode/vnode/src/inc/tq.h +++ b/source/dnode/vnode/src/inc/tq.h @@ -93,6 +93,7 @@ struct STqReadHandle { SMeta* pVnodeMeta; SArray* pColIdList; // SArray int32_t sver; + int64_t cachedSchemaUid; SSchemaWrapper* pSchemaWrapper; STSchema* pSchema; }; diff --git a/source/dnode/vnode/src/tq/tqRead.c b/source/dnode/vnode/src/tq/tqRead.c index f531d3f5fb..996d789e24 100644 --- a/source/dnode/vnode/src/tq/tqRead.c +++ b/source/dnode/vnode/src/tq/tqRead.c @@ -25,6 +25,7 @@ STqReadHandle* tqInitSubmitMsgScanner(SMeta* pMeta) { pReadHandle->ver = -1; pReadHandle->pColIdList = NULL; pReadHandle->sver = -1; + pReadHandle->cachedSchemaUid = -1; pReadHandle->pSchema = NULL; pReadHandle->pSchemaWrapper = NULL; pReadHandle->tbIdHash = NULL; @@ -84,19 +85,20 @@ bool tqNextDataBlock(STqReadHandle* pHandle) { return false; } -int32_t tqRetrieveDataBlock(SArray** ppCols, STqReadHandle* pHandle, uint64_t* pGroupId, uint64_t* pUid, int32_t* pNumOfRows, - int16_t* pNumOfCols) { +int32_t tqRetrieveDataBlock(SArray** ppCols, STqReadHandle* pHandle, uint64_t* pGroupId, uint64_t* pUid, + int32_t* pNumOfRows, int16_t* pNumOfCols) { /*int32_t sversion = pHandle->pBlock->sversion;*/ // TODO set to real sversion *pUid = 0; int32_t sversion = 0; - if (pHandle->sver != sversion) { + if (pHandle->sver != sversion || pHandle->cachedSchemaUid != pHandle->msgIter.suid) { pHandle->pSchema = metaGetTbTSchema(pHandle->pVnodeMeta, pHandle->msgIter.uid, sversion); // this interface use suid instead of uid pHandle->pSchemaWrapper = metaGetTableSchema(pHandle->pVnodeMeta, pHandle->msgIter.suid, sversion, true); pHandle->sver = sversion; + pHandle->cachedSchemaUid = pHandle->msgIter.suid; } STSchema* pTschema = pHandle->pSchema; From 1e8ac7b06c7ac75425440ea91522670537278174 Mon Sep 17 00:00:00 2001 From: plum-lihui Date: Fri, 13 May 2022 19:55:05 +0800 Subject: [PATCH 16/21] test: modify debug flag --- tests/system-test/99-TDcase/TD-15554.py | 11 
+++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/tests/system-test/99-TDcase/TD-15554.py b/tests/system-test/99-TDcase/TD-15554.py index fb3817ed89..890580ca2c 100644 --- a/tests/system-test/99-TDcase/TD-15554.py +++ b/tests/system-test/99-TDcase/TD-15554.py @@ -13,12 +13,11 @@ from util.dnodes import * class TDTestCase: hostname = socket.gethostname() - #rpcDebugFlagVal = '143' - #clientCfgDict = {'serverPort': '', 'firstEp': '', 'secondEp':'', 'rpcDebugFlag':'135', 'fqdn':''} - #clientCfgDict["rpcDebugFlag"] = rpcDebugFlagVal - #updatecfgDict = {'clientCfg': {}, 'serverPort': '', 'firstEp': '', 'secondEp':'', 'rpcDebugFlag':'135', 'fqdn':''} - #updatecfgDict["rpcDebugFlag"] = rpcDebugFlagVal - #print ("===================: ", updatecfgDict) + + clientCfgDict = {'qdebugflag':'143'} + updatecfgDict = {'clientCfg': {}, 'qdebugflag':'143'} + updatecfgDict["clientCfg"] = clientCfgDict + print ("===================: ", updatecfgDict) def init(self, conn, logSql): tdLog.debug(f"start to excute {__file__}") From dddfe5c144a674b66dce6c06fbf02630d4676080 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Fri, 13 May 2022 20:26:12 +0800 Subject: [PATCH 17/21] enh: enable limit in shell --- tools/shell/src/shellEngine.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/shell/src/shellEngine.c b/tools/shell/src/shellEngine.c index 8f0d39377e..8b126cf90e 100644 --- a/tools/shell/src/shellEngine.c +++ b/tools/shell/src/shellEngine.c @@ -509,7 +509,7 @@ void shellPrintField(const char *val, TAOS_FIELD *field, int32_t width, int32_t bool shellIsLimitQuery(const char *sql) { //todo refactor - if (strstr(sql, "limit") != NULL || strstr(sql, "LIMIT") != NULL) { + if (strcasestr(sql, " limit ") != NULL) { return true; } From a31dc4c2b95cc70ccadf5fd2f94f20374c8d7908 Mon Sep 17 00:00:00 2001 From: tangfangzhi Date: Fri, 13 May 2022 21:10:34 +0800 Subject: [PATCH 18/21] enh: user branch name as log dir name --- Jenkinsfile2 | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/Jenkinsfile2 b/Jenkinsfile2 index a3006c4f7d..f51ddbbac3 100644 --- a/Jenkinsfile2 +++ b/Jenkinsfile2 @@ -33,7 +33,18 @@ def abort_previous(){ milestone(buildNumber) } def pre_test(){ - sh 'hostname' + sh ''' + hostname + date + ''' + sh ''' + cd ${WK} + git reset --hard + git fetch || git fetch + cd ${WKC} + git reset --hard + git fetch || git fetch + ''' script { if (env.CHANGE_TARGET == 'master') { sh ''' @@ -235,7 +246,7 @@ pipeline { sh ''' cd ${WKC}/tests/parallel_test date - time ./run.sh -e -m /home/m.json -t /tmp/cases.task -b ${CHANGE_TARGET} -l ${WKDIR}/log + time ./run.sh -e -m /home/m.json -t /tmp/cases.task -b ${BRANCH_NAME} -l ${WKDIR}/log ''' } } From 24176b9d46d88768ff5f022583791093be539719 Mon Sep 17 00:00:00 2001 From: plum-lihui Date: Fri, 13 May 2022 21:34:57 +0800 Subject: [PATCH 19/21] test: printf info into log file --- tests/test/c/tmqSim.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/tests/test/c/tmqSim.c b/tests/test/c/tmqSim.c index 33ddd23d8c..1228d6174c 100644 --- a/tests/test/c/tmqSim.c +++ b/tests/test/c/tmqSim.c @@ -179,7 +179,7 @@ void parseArgument(int32_t argc, char* argv[]) { } else if (strcmp(argv[i], "-y") == 0) { g_stConfInfo.consumeDelay = atol(argv[++i]); } else { - printf("%s unknow para: %s %s", GREEN, argv[++i], NC); + pError("%s unknow para: %s %s", GREEN, argv[++i], NC); exit(-1); } } @@ -259,7 +259,7 @@ int queryDB(TAOS* taos, char* command) { } static void 
tmq_commit_cb_print(tmq_t* tmq, tmq_resp_err_t resp, tmq_topic_vgroup_list_t* offsets, void* param) { - printf("tmq_commit_cb_print() commit %d\n", resp); + pError("tmq_commit_cb_print() commit %d\n", resp); } void build_consumer(SThreadInfo* pInfo) { @@ -318,7 +318,7 @@ int32_t saveConsumeResult(SThreadInfo* pInfo) { TAOS_RES* pRes = taos_query(pConn, sqlStr); if (taos_errno(pRes) != 0) { - printf("error in save consumeinfo, reason:%s\n", taos_errstr(pRes)); + pError("error in save consumeinfo, reason:%s\n", taos_errstr(pRes)); taos_free_result(pRes); exit(-1); } @@ -375,7 +375,7 @@ void* consumeThreadFunc(void* param) { tmq_resp_err_t err = tmq_subscribe(pInfo->tmq, pInfo->topicList); if (err) { - printf("tmq_subscribe() fail, reason: %s\n", tmq_err2str(err)); + pError("tmq_subscribe() fail, reason: %s\n", tmq_err2str(err)); exit(-1); } @@ -388,14 +388,14 @@ void* consumeThreadFunc(void* param) { err = tmq_unsubscribe(pInfo->tmq); if (err) { - printf("tmq_unsubscribe() fail, reason: %s\n", tmq_err2str(err)); + pError("tmq_unsubscribe() fail, reason: %s\n", tmq_err2str(err)); pInfo->consumeMsgCnt = -1; return NULL; } err = tmq_consumer_close(pInfo->tmq); if (err) { - printf("tmq_consumer_close() fail, reason: %s\n", tmq_err2str(err)); + pError("tmq_consumer_close() fail, reason: %s\n", tmq_err2str(err)); exit(-1); } pInfo->tmq = NULL; @@ -451,7 +451,7 @@ int32_t getConsumeInfo() { sprintf(sqlStr, "select * from %s.consumeinfo", g_stConfInfo.cdbName); TAOS_RES* pRes = taos_query(pConn, sqlStr); if (taos_errno(pRes) != 0) { - printf("error in get consumeinfo, reason:%s\n", taos_errstr(pRes)); + pError("error in get consumeinfo, reason:%s\n", taos_errstr(pRes)); taosFprintfFile(g_fp, "error in get consumeinfo, reason:%s\n", taos_errstr(pRes)); taosCloseFile(&g_fp); taos_free_result(pRes); From 7898334fd81bf1e0b52a9f36a52a85489834b6b0 Mon Sep 17 00:00:00 2001 From: plum-lihui Date: Fri, 13 May 2022 21:36:42 +0800 Subject: [PATCH 20/21] test:add test case for tmq --- tests/system-test/7-tmq/subscribeDb.py | 400 ++++++++++++++++++++++++ tests/system-test/99-TDcase/TD-15563.py | 400 ++++++++++++++++++++++++ 2 files changed, 800 insertions(+) create mode 100644 tests/system-test/7-tmq/subscribeDb.py create mode 100644 tests/system-test/99-TDcase/TD-15563.py diff --git a/tests/system-test/7-tmq/subscribeDb.py b/tests/system-test/7-tmq/subscribeDb.py new file mode 100644 index 0000000000..b8d3abca5c --- /dev/null +++ b/tests/system-test/7-tmq/subscribeDb.py @@ -0,0 +1,400 @@ + +import taos +import sys +import time +import socket +import os +import threading + +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * + +class TDTestCase: + hostname = socket.gethostname() + #rpcDebugFlagVal = '143' + #clientCfgDict = {'serverPort': '', 'firstEp': '', 'secondEp':'', 'rpcDebugFlag':'135', 'fqdn':''} + #clientCfgDict["rpcDebugFlag"] = rpcDebugFlagVal + #updatecfgDict = {'clientCfg': {}, 'serverPort': '', 'firstEp': '', 'secondEp':'', 'rpcDebugFlag':'135', 'fqdn':''} + #updatecfgDict["rpcDebugFlag"] = rpcDebugFlagVal + #print ("===================: ", updatecfgDict) + + def init(self, conn, logSql): + tdLog.debug(f"start to excute {__file__}") + #tdSql.init(conn.cursor()) + tdSql.init(conn.cursor(), logSql) # output sql.txt file + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for 
root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root) - len("/build/bin")] + break + return buildPath + + def newcur(self,cfg,host,port): + user = "root" + password = "taosdata" + con=taos.connect(host=host, user=user, password=password, config=cfg ,port=port) + cur=con.cursor() + print(cur) + return cur + + def startTmqSimProcess(self,buildPath,cfgPath,pollDelay,dbName,showMsg,showRow,cdbName,valgrind=0): + shellCmd = 'nohup ' + if valgrind == 1: + logFile = cfgPath + '/../log/valgrind-tmq.log' + shellCmd = 'nohup valgrind --log-file=' + logFile + shellCmd += '--tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all --num-callers=20 -v --workaround-gcc296-bugs=yes ' + + shellCmd += buildPath + '/build/bin/tmq_sim -c ' + cfgPath + shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName) + shellCmd += "> /dev/null 2>&1 &" + tdLog.info(shellCmd) + os.system(shellCmd) + + def create_tables(self,tsql, dbName,vgroups,stbName,ctbNum,rowsPerTbl): + tsql.execute("create database if not exists %s vgroups %d"%(dbName, vgroups)) + tsql.execute("use %s" %dbName) + tsql.execute("create table if not exists %s (ts timestamp, c1 bigint, c2 binary(16)) tags(t1 int)"%stbName) + pre_create = "create table" + sql = pre_create + #tdLog.debug("doing create one stable %s and %d child table in %s ..." %(stbname, count ,dbname)) + for i in range(ctbNum): + sql += " %s_%d using %s tags(%d)"%(stbName,i,stbName,i+1) + if (i > 0) and (i%100 == 0): + tsql.execute(sql) + sql = pre_create + if sql != pre_create: + tsql.execute(sql) + + event.set() + tdLog.debug("complete to create database[%s], stable[%s] and %d child tables" %(dbName, stbName, ctbNum)) + return + + def insert_data(self,tsql,dbName,stbName,ctbNum,rowsPerTbl,batchNum,startTs): + tdLog.debug("start to insert data ............") + tsql.execute("use %s" %dbName) + pre_insert = "insert into " + sql = pre_insert + + #tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows)) + for i in range(ctbNum): + sql += " %s_%d values "%(stbName,i) + for j in range(rowsPerTbl): + sql += "(%d, %d, 'tmqrow_%d') "%(startTs + j, j, j) + if (j > 0) and ((j%batchNum == 0) or (j == rowsPerTbl - 1)): + tsql.execute(sql) + if j < rowsPerTbl - 1: + sql = "insert into %s_%d values " %(stbName,i) + else: + sql = "insert into " + #end sql + if sql != pre_insert: + #print("insert sql:%s"%sql) + tsql.execute(sql) + tdLog.debug("insert data ............ 
[OK]") + return + + def prepareEnv(self, **parameterDict): + print ("input parameters:") + print (parameterDict) + # create new connector for my thread + tsql=self.newcur(parameterDict['cfg'], 'localhost', 6030) + self.create_tables(tsql,\ + parameterDict["dbName"],\ + parameterDict["vgroups"],\ + parameterDict["stbName"],\ + parameterDict["ctbNum"],\ + parameterDict["rowsPerTbl"]) + + self.insert_data(tsql,\ + parameterDict["dbName"],\ + parameterDict["stbName"],\ + parameterDict["ctbNum"],\ + parameterDict["rowsPerTbl"],\ + parameterDict["batchNum"],\ + parameterDict["startTs"]) + return + + def tmqCase1(self, cfgPath, buildPath): + tdLog.printNoPrefix("======== test case 1: Produce while one consume to subscribe one db") + tdLog.info("step 1: create database, stb, ctb and insert data") + # create and start thread + parameterDict = {'cfg': '', \ + 'dbName': 'db1', \ + 'vgroups': 4, \ + 'stbName': 'stb', \ + 'ctbNum': 10, \ + 'rowsPerTbl': 100000, \ + 'batchNum': 200, \ + 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 + parameterDict['cfg'] = cfgPath + + tdSql.execute("create database if not exists %s vgroups %d" %(parameterDict['dbName'], parameterDict['vgroups'])) + + prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) + prepareEnvThread.start() + + tdLog.info("create topics from db") + topicName1 = 'topic_db1' + + tdSql.execute("create topic %s as %s" %(topicName1, parameterDict['dbName'])) + + tdLog.info("create consume info table and consume result table") + cdbName = parameterDict["dbName"] + tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int)"%cdbName) + tdSql.query("create table %s.consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)"%cdbName) + + consumerId = 0 + expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + topicList = topicName1 + ifcheckdata = 0 + keyList = 'group.id:cgrp1,\ + enable.auto.commit:false,\ + auto.commit.interval.ms:6000,\ + auto.offset.reset:earliest' + sql = "insert into %s.consumeinfo values "%cdbName + sql += "(now, %d, '%s', '%s', %d, %d)"%(consumerId, topicList, keyList, expectrowcnt, ifcheckdata) + tdSql.query(sql) + + event.wait() + + tdLog.info("start consume processor") + pollDelay = 5 + showMsg = 1 + showRow = 1 + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow, cdbName) + + # wait for data ready + prepareEnvThread.join() + + tdLog.info("insert process end, and start to check consume result") + while 1: + tdSql.query("select * from %s.consumeresult"%cdbName) + #tdLog.info("row: %d, %l64d, %l64d"%(tdSql.getData(0, 1),tdSql.getData(0, 2),tdSql.getData(0, 3)) + if tdSql.getRows() == 1: + break + else: + time.sleep(5) + + tdLog.info("consumer result: %d, %d"%(tdSql.getData(0 , 2), tdSql.getData(0 , 3))) + tdSql.checkData(0 , 1, consumerId) + # mulit rows and mulit tables in one sql, this num of msg is not sure + #tdSql.checkData(0 , 2, expectmsgcnt) + tdSql.checkData(0 , 3, expectrowcnt+1) + + tdSql.query("drop topic %s"%topicName1) + + tdLog.printNoPrefix("======== test case 1 end ...... 
") + + def tmqCase2(self, cfgPath, buildPath): + tdLog.printNoPrefix("======== test case 2: Produce while two consumers to subscribe one db") + tdLog.info("step 1: create database, stb, ctb and insert data") + # create and start thread + parameterDict = {'cfg': '', \ + 'dbName': 'db2', \ + 'vgroups': 4, \ + 'stbName': 'stb', \ + 'ctbNum': 10, \ + 'rowsPerTbl': 100000, \ + 'batchNum': 100, \ + 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 + parameterDict['cfg'] = cfgPath + + tdSql.execute("create database if not exists %s vgroups %d" %(parameterDict['dbName'], parameterDict['vgroups'])) + + prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) + prepareEnvThread.start() + + tdLog.info("create topics from db") + topicName1 = 'topic_db1' + + tdSql.execute("create topic %s as %s" %(topicName1, parameterDict['dbName'])) + + tdLog.info("create consume info table and consume result table") + cdbName = parameterDict["dbName"] + tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int)"%cdbName) + tdSql.query("create table %s.consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)"%cdbName) + + consumerId = 0 + expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + topicList = topicName1 + ifcheckdata = 0 + keyList = 'group.id:cgrp1,\ + enable.auto.commit:false,\ + auto.commit.interval.ms:6000,\ + auto.offset.reset:earliest' + sql = "insert into %s.consumeinfo values "%cdbName + sql += "(now, %d, '%s', '%s', %d, %d)"%(consumerId, topicList, keyList, expectrowcnt, ifcheckdata) + tdSql.query(sql) + + consumerId = 1 + sql = "insert into %s.consumeinfo values "%cdbName + sql += "(now, %d, '%s', '%s', %d, %d)"%(consumerId, topicList, keyList, expectrowcnt, ifcheckdata) + tdSql.query(sql) + + event.wait() + + tdLog.info("start consume processor") + pollDelay = 5 + showMsg = 1 + showRow = 1 + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow, cdbName) + + # wait for data ready + prepareEnvThread.join() + + tdLog.info("insert process end, and start to check consume result") + while 1: + tdSql.query("select * from %s.consumeresult"%cdbName) + #tdLog.info("row: %d, %l64d, %l64d"%(tdSql.getData(0, 1),tdSql.getData(0, 2),tdSql.getData(0, 3)) + if tdSql.getRows() == 2: + break + else: + time.sleep(5) + + consumerId0 = tdSql.getData(0 , 1) + consumerId1 = tdSql.getData(1 , 1) + actConsumeRows0 = tdSql.getData(0 , 3) + actConsumeRows1 = tdSql.getData(1 , 3) + + tdLog.info("consumer %d rows: %d"%(consumerId0, actConsumeRows0)) + tdLog.info("consumer %d rows: %d"%(consumerId1, actConsumeRows1)) + + totalConsumeRows = actConsumeRows0 + actConsumeRows1 + if totalConsumeRows != expectrowcnt + 2: + tdLog.exit("tmq consume rows error!") + + tdSql.query("drop topic %s"%topicName1) + + tdLog.printNoPrefix("======== test case 2 end ...... 
") + + def tmqCase3(self, cfgPath, buildPath): + tdLog.printNoPrefix("======== test case 3: Produce while one consumers to subscribe one db, include 2 stb") + tdLog.info("step 1: create database, stb, ctb and insert data") + # create and start thread + parameterDict = {'cfg': '', \ + 'dbName': 'db3', \ + 'vgroups': 4, \ + 'stbName': 'stb', \ + 'ctbNum': 10, \ + 'rowsPerTbl': 100000, \ + 'batchNum': 100, \ + 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 + parameterDict['cfg'] = cfgPath + + tdSql.execute("create database if not exists %s vgroups %d" %(parameterDict['dbName'], parameterDict['vgroups'])) + + prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) + prepareEnvThread.start() + + parameterDict2 = {'cfg': '', \ + 'dbName': 'db3', \ + 'vgroups': 4, \ + 'stbName': 'stb2', \ + 'ctbNum': 10, \ + 'rowsPerTbl': 100000, \ + 'batchNum': 100, \ + 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 + parameterDict['cfg'] = cfgPath + + prepareEnvThread2 = threading.Thread(target=self.prepareEnv, kwargs=parameterDict2) + prepareEnvThread2.start() + + tdLog.info("create topics from db") + topicName1 = 'topic_db1' + + tdSql.execute("create topic %s as %s" %(topicName1, parameterDict['dbName'])) + + tdLog.info("create consume info table and consume result table") + cdbName = parameterDict["dbName"] + tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int)"%cdbName) + tdSql.query("create table %s.consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)"%cdbName) + + consumerId = 0 + expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + parameterDict2["rowsPerTbl"] * parameterDict2["ctbNum"] + topicList = topicName1 + ifcheckdata = 0 + keyList = 'group.id:cgrp1,\ + enable.auto.commit:false,\ + auto.commit.interval.ms:6000,\ + auto.offset.reset:earliest' + sql = "insert into %s.consumeinfo values "%cdbName + sql += "(now, %d, '%s', '%s', %d, %d)"%(consumerId, topicList, keyList, expectrowcnt, ifcheckdata) + tdSql.query(sql) + + # consumerId = 1 + # sql = "insert into %s.consumeinfo values "%cdbName + # sql += "(now, %d, '%s', '%s', %d, %d)"%(consumerId, topicList, keyList, expectrowcnt, ifcheckdata) + # tdSql.query(sql) + + event.wait() + + tdLog.info("start consume processor") + pollDelay = 5 + showMsg = 1 + showRow = 1 + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow, cdbName) + + # wait for data ready + prepareEnvThread.join() + prepareEnvThread2.join() + + tdLog.info("insert process end, and start to check consume result") + while 1: + tdSql.query("select * from %s.consumeresult"%cdbName) + #tdLog.info("row: %d, %l64d, %l64d"%(tdSql.getData(0, 1),tdSql.getData(0, 2),tdSql.getData(0, 3)) + if tdSql.getRows() == 1: + break + else: + time.sleep(5) + + consumerId0 = tdSql.getData(0 , 1) + #consumerId1 = tdSql.getData(1 , 1) + actConsumeRows0 = tdSql.getData(0 , 3) + #actConsumeRows1 = tdSql.getData(1 , 3) + + tdLog.info("consumer %d rows: %d"%(consumerId0, actConsumeRows0)) + #tdLog.info("consumer %d rows: %d"%(consumerId1, actConsumeRows1)) + + #totalConsumeRows = actConsumeRows0 + actConsumeRows1 + if actConsumeRows0 != expectrowcnt + 1: + tdLog.exit("tmq consume rows error!") + + tdSql.query("drop topic %s"%topicName1) + + tdLog.printNoPrefix("======== test case 3 end ...... 
") + + def run(self): + tdSql.prepare() + + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosd not found!") + else: + tdLog.info("taosd found in %s" % buildPath) + cfgPath = buildPath + "/../sim/psim/cfg" + tdLog.info("cfgPath: %s" % cfgPath) + + #self.tmqCase1(cfgPath, buildPath) + self.tmqCase2(cfgPath, buildPath) + #self.tmqCase3(cfgPath, buildPath) + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +event = threading.Event() + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/99-TDcase/TD-15563.py b/tests/system-test/99-TDcase/TD-15563.py new file mode 100644 index 0000000000..b8d3abca5c --- /dev/null +++ b/tests/system-test/99-TDcase/TD-15563.py @@ -0,0 +1,400 @@ + +import taos +import sys +import time +import socket +import os +import threading + +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * + +class TDTestCase: + hostname = socket.gethostname() + #rpcDebugFlagVal = '143' + #clientCfgDict = {'serverPort': '', 'firstEp': '', 'secondEp':'', 'rpcDebugFlag':'135', 'fqdn':''} + #clientCfgDict["rpcDebugFlag"] = rpcDebugFlagVal + #updatecfgDict = {'clientCfg': {}, 'serverPort': '', 'firstEp': '', 'secondEp':'', 'rpcDebugFlag':'135', 'fqdn':''} + #updatecfgDict["rpcDebugFlag"] = rpcDebugFlagVal + #print ("===================: ", updatecfgDict) + + def init(self, conn, logSql): + tdLog.debug(f"start to excute {__file__}") + #tdSql.init(conn.cursor()) + tdSql.init(conn.cursor(), logSql) # output sql.txt file + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root) - len("/build/bin")] + break + return buildPath + + def newcur(self,cfg,host,port): + user = "root" + password = "taosdata" + con=taos.connect(host=host, user=user, password=password, config=cfg ,port=port) + cur=con.cursor() + print(cur) + return cur + + def startTmqSimProcess(self,buildPath,cfgPath,pollDelay,dbName,showMsg,showRow,cdbName,valgrind=0): + shellCmd = 'nohup ' + if valgrind == 1: + logFile = cfgPath + '/../log/valgrind-tmq.log' + shellCmd = 'nohup valgrind --log-file=' + logFile + shellCmd += '--tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all --num-callers=20 -v --workaround-gcc296-bugs=yes ' + + shellCmd += buildPath + '/build/bin/tmq_sim -c ' + cfgPath + shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName) + shellCmd += "> /dev/null 2>&1 &" + tdLog.info(shellCmd) + os.system(shellCmd) + + def create_tables(self,tsql, dbName,vgroups,stbName,ctbNum,rowsPerTbl): + tsql.execute("create database if not exists %s vgroups %d"%(dbName, vgroups)) + tsql.execute("use %s" %dbName) + tsql.execute("create table if not exists %s (ts timestamp, c1 bigint, c2 binary(16)) tags(t1 int)"%stbName) + pre_create = "create table" + sql = pre_create + #tdLog.debug("doing create one stable %s and %d child table in %s ..." 
%(stbname, count ,dbname)) + for i in range(ctbNum): + sql += " %s_%d using %s tags(%d)"%(stbName,i,stbName,i+1) + if (i > 0) and (i%100 == 0): + tsql.execute(sql) + sql = pre_create + if sql != pre_create: + tsql.execute(sql) + + event.set() + tdLog.debug("complete to create database[%s], stable[%s] and %d child tables" %(dbName, stbName, ctbNum)) + return + + def insert_data(self,tsql,dbName,stbName,ctbNum,rowsPerTbl,batchNum,startTs): + tdLog.debug("start to insert data ............") + tsql.execute("use %s" %dbName) + pre_insert = "insert into " + sql = pre_insert + + #tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows)) + for i in range(ctbNum): + sql += " %s_%d values "%(stbName,i) + for j in range(rowsPerTbl): + sql += "(%d, %d, 'tmqrow_%d') "%(startTs + j, j, j) + if (j > 0) and ((j%batchNum == 0) or (j == rowsPerTbl - 1)): + tsql.execute(sql) + if j < rowsPerTbl - 1: + sql = "insert into %s_%d values " %(stbName,i) + else: + sql = "insert into " + #end sql + if sql != pre_insert: + #print("insert sql:%s"%sql) + tsql.execute(sql) + tdLog.debug("insert data ............ [OK]") + return + + def prepareEnv(self, **parameterDict): + print ("input parameters:") + print (parameterDict) + # create new connector for my thread + tsql=self.newcur(parameterDict['cfg'], 'localhost', 6030) + self.create_tables(tsql,\ + parameterDict["dbName"],\ + parameterDict["vgroups"],\ + parameterDict["stbName"],\ + parameterDict["ctbNum"],\ + parameterDict["rowsPerTbl"]) + + self.insert_data(tsql,\ + parameterDict["dbName"],\ + parameterDict["stbName"],\ + parameterDict["ctbNum"],\ + parameterDict["rowsPerTbl"],\ + parameterDict["batchNum"],\ + parameterDict["startTs"]) + return + + def tmqCase1(self, cfgPath, buildPath): + tdLog.printNoPrefix("======== test case 1: Produce while one consume to subscribe one db") + tdLog.info("step 1: create database, stb, ctb and insert data") + # create and start thread + parameterDict = {'cfg': '', \ + 'dbName': 'db1', \ + 'vgroups': 4, \ + 'stbName': 'stb', \ + 'ctbNum': 10, \ + 'rowsPerTbl': 100000, \ + 'batchNum': 200, \ + 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 + parameterDict['cfg'] = cfgPath + + tdSql.execute("create database if not exists %s vgroups %d" %(parameterDict['dbName'], parameterDict['vgroups'])) + + prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) + prepareEnvThread.start() + + tdLog.info("create topics from db") + topicName1 = 'topic_db1' + + tdSql.execute("create topic %s as %s" %(topicName1, parameterDict['dbName'])) + + tdLog.info("create consume info table and consume result table") + cdbName = parameterDict["dbName"] + tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int)"%cdbName) + tdSql.query("create table %s.consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)"%cdbName) + + consumerId = 0 + expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + topicList = topicName1 + ifcheckdata = 0 + keyList = 'group.id:cgrp1,\ + enable.auto.commit:false,\ + auto.commit.interval.ms:6000,\ + auto.offset.reset:earliest' + sql = "insert into %s.consumeinfo values "%cdbName + sql += "(now, %d, '%s', '%s', %d, %d)"%(consumerId, topicList, keyList, expectrowcnt, ifcheckdata) + tdSql.query(sql) + + event.wait() + + tdLog.info("start consume processor") + pollDelay = 5 + showMsg = 1 + showRow = 1 + 
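    # How these tmq cases coordinate with the external consumer, as far as this script shows:
    # the test writes one row per consumer into cdbName.consumeinfo (topic list, key list and
    # the expected row count), then startTmqSimProcess() launches the tmq_sim binary built
    # alongside taosd (-y poll delay, -d database, -g show-message flag, -r show-row flag,
    # -w result database). tmq_sim is expected to append one summary row per consumer into
    # cdbName.consumeresult, which the loop after prepareEnvThread.join() polls every 5
    # seconds until all consumers have reported.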
self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow, cdbName) + + # wait for data ready + prepareEnvThread.join() + + tdLog.info("insert process end, and start to check consume result") + while 1: + tdSql.query("select * from %s.consumeresult"%cdbName) + #tdLog.info("row: %d, %l64d, %l64d"%(tdSql.getData(0, 1),tdSql.getData(0, 2),tdSql.getData(0, 3)) + if tdSql.getRows() == 1: + break + else: + time.sleep(5) + + tdLog.info("consumer result: %d, %d"%(tdSql.getData(0 , 2), tdSql.getData(0 , 3))) + tdSql.checkData(0 , 1, consumerId) + # mulit rows and mulit tables in one sql, this num of msg is not sure + #tdSql.checkData(0 , 2, expectmsgcnt) + tdSql.checkData(0 , 3, expectrowcnt+1) + + tdSql.query("drop topic %s"%topicName1) + + tdLog.printNoPrefix("======== test case 1 end ...... ") + + def tmqCase2(self, cfgPath, buildPath): + tdLog.printNoPrefix("======== test case 2: Produce while two consumers to subscribe one db") + tdLog.info("step 1: create database, stb, ctb and insert data") + # create and start thread + parameterDict = {'cfg': '', \ + 'dbName': 'db2', \ + 'vgroups': 4, \ + 'stbName': 'stb', \ + 'ctbNum': 10, \ + 'rowsPerTbl': 100000, \ + 'batchNum': 100, \ + 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 + parameterDict['cfg'] = cfgPath + + tdSql.execute("create database if not exists %s vgroups %d" %(parameterDict['dbName'], parameterDict['vgroups'])) + + prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) + prepareEnvThread.start() + + tdLog.info("create topics from db") + topicName1 = 'topic_db1' + + tdSql.execute("create topic %s as %s" %(topicName1, parameterDict['dbName'])) + + tdLog.info("create consume info table and consume result table") + cdbName = parameterDict["dbName"] + tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int)"%cdbName) + tdSql.query("create table %s.consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)"%cdbName) + + consumerId = 0 + expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + topicList = topicName1 + ifcheckdata = 0 + keyList = 'group.id:cgrp1,\ + enable.auto.commit:false,\ + auto.commit.interval.ms:6000,\ + auto.offset.reset:earliest' + sql = "insert into %s.consumeinfo values "%cdbName + sql += "(now, %d, '%s', '%s', %d, %d)"%(consumerId, topicList, keyList, expectrowcnt, ifcheckdata) + tdSql.query(sql) + + consumerId = 1 + sql = "insert into %s.consumeinfo values "%cdbName + sql += "(now, %d, '%s', '%s', %d, %d)"%(consumerId, topicList, keyList, expectrowcnt, ifcheckdata) + tdSql.query(sql) + + event.wait() + + tdLog.info("start consume processor") + pollDelay = 5 + showMsg = 1 + showRow = 1 + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow, cdbName) + + # wait for data ready + prepareEnvThread.join() + + tdLog.info("insert process end, and start to check consume result") + while 1: + tdSql.query("select * from %s.consumeresult"%cdbName) + #tdLog.info("row: %d, %l64d, %l64d"%(tdSql.getData(0, 1),tdSql.getData(0, 2),tdSql.getData(0, 3)) + if tdSql.getRows() == 2: + break + else: + time.sleep(5) + + consumerId0 = tdSql.getData(0 , 1) + consumerId1 = tdSql.getData(1 , 1) + actConsumeRows0 = tdSql.getData(0 , 3) + actConsumeRows1 = tdSql.getData(1 , 3) + + tdLog.info("consumer %d rows: %d"%(consumerId0, actConsumeRows0)) + tdLog.info("consumer %d rows: 
%d"%(consumerId1, actConsumeRows1)) + + totalConsumeRows = actConsumeRows0 + actConsumeRows1 + if totalConsumeRows != expectrowcnt + 2: + tdLog.exit("tmq consume rows error!") + + tdSql.query("drop topic %s"%topicName1) + + tdLog.printNoPrefix("======== test case 2 end ...... ") + + def tmqCase3(self, cfgPath, buildPath): + tdLog.printNoPrefix("======== test case 3: Produce while one consumers to subscribe one db, include 2 stb") + tdLog.info("step 1: create database, stb, ctb and insert data") + # create and start thread + parameterDict = {'cfg': '', \ + 'dbName': 'db3', \ + 'vgroups': 4, \ + 'stbName': 'stb', \ + 'ctbNum': 10, \ + 'rowsPerTbl': 100000, \ + 'batchNum': 100, \ + 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 + parameterDict['cfg'] = cfgPath + + tdSql.execute("create database if not exists %s vgroups %d" %(parameterDict['dbName'], parameterDict['vgroups'])) + + prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) + prepareEnvThread.start() + + parameterDict2 = {'cfg': '', \ + 'dbName': 'db3', \ + 'vgroups': 4, \ + 'stbName': 'stb2', \ + 'ctbNum': 10, \ + 'rowsPerTbl': 100000, \ + 'batchNum': 100, \ + 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 + parameterDict['cfg'] = cfgPath + + prepareEnvThread2 = threading.Thread(target=self.prepareEnv, kwargs=parameterDict2) + prepareEnvThread2.start() + + tdLog.info("create topics from db") + topicName1 = 'topic_db1' + + tdSql.execute("create topic %s as %s" %(topicName1, parameterDict['dbName'])) + + tdLog.info("create consume info table and consume result table") + cdbName = parameterDict["dbName"] + tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int)"%cdbName) + tdSql.query("create table %s.consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)"%cdbName) + + consumerId = 0 + expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + parameterDict2["rowsPerTbl"] * parameterDict2["ctbNum"] + topicList = topicName1 + ifcheckdata = 0 + keyList = 'group.id:cgrp1,\ + enable.auto.commit:false,\ + auto.commit.interval.ms:6000,\ + auto.offset.reset:earliest' + sql = "insert into %s.consumeinfo values "%cdbName + sql += "(now, %d, '%s', '%s', %d, %d)"%(consumerId, topicList, keyList, expectrowcnt, ifcheckdata) + tdSql.query(sql) + + # consumerId = 1 + # sql = "insert into %s.consumeinfo values "%cdbName + # sql += "(now, %d, '%s', '%s', %d, %d)"%(consumerId, topicList, keyList, expectrowcnt, ifcheckdata) + # tdSql.query(sql) + + event.wait() + + tdLog.info("start consume processor") + pollDelay = 5 + showMsg = 1 + showRow = 1 + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow, cdbName) + + # wait for data ready + prepareEnvThread.join() + prepareEnvThread2.join() + + tdLog.info("insert process end, and start to check consume result") + while 1: + tdSql.query("select * from %s.consumeresult"%cdbName) + #tdLog.info("row: %d, %l64d, %l64d"%(tdSql.getData(0, 1),tdSql.getData(0, 2),tdSql.getData(0, 3)) + if tdSql.getRows() == 1: + break + else: + time.sleep(5) + + consumerId0 = tdSql.getData(0 , 1) + #consumerId1 = tdSql.getData(1 , 1) + actConsumeRows0 = tdSql.getData(0 , 3) + #actConsumeRows1 = tdSql.getData(1 , 3) + + tdLog.info("consumer %d rows: %d"%(consumerId0, actConsumeRows0)) + #tdLog.info("consumer %d rows: %d"%(consumerId1, actConsumeRows1)) + + #totalConsumeRows = actConsumeRows0 + 
actConsumeRows1 + if actConsumeRows0 != expectrowcnt + 1: + tdLog.exit("tmq consume rows error!") + + tdSql.query("drop topic %s"%topicName1) + + tdLog.printNoPrefix("======== test case 3 end ...... ") + + def run(self): + tdSql.prepare() + + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosd not found!") + else: + tdLog.info("taosd found in %s" % buildPath) + cfgPath = buildPath + "/../sim/psim/cfg" + tdLog.info("cfgPath: %s" % cfgPath) + + #self.tmqCase1(cfgPath, buildPath) + self.tmqCase2(cfgPath, buildPath) + #self.tmqCase3(cfgPath, buildPath) + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +event = threading.Event() + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) From 0815843e94baf90150b749841087db2a173a9507 Mon Sep 17 00:00:00 2001 From: afwerar <1296468573@qq.com> Date: Fri, 13 May 2022 21:42:16 +0800 Subject: [PATCH 21/21] fix(os): make taosd.exe taos.exe run on windows. --- contrib/CMakeLists.txt | 2 +- include/util/tjson.h | 17 +-- source/client/inc/clientStmt.h | 2 +- source/client/src/clientSml.c | 8 +- source/dnode/vnode/src/vnd/vnodeCfg.c | 98 ++++++++++------ source/dnode/vnode/src/vnd/vnodeCommit.c | 7 +- source/libs/function/src/tudf.c | 24 ++++ source/libs/nodes/src/nodesCodeFuncs.c | 75 ++++++------ source/os/src/osAtomic.c | 30 ++--- source/os/src/osFile.c | 72 ++++++------ source/os/src/osMemory.c | 2 +- source/os/src/osProc.c | 4 + source/os/src/osShm.c | 4 + source/os/src/osSocket.c | 11 +- source/os/src/osSysinfo.c | 33 +++++- source/os/src/osSystem.c | 8 +- source/util/src/tconfig.c | 9 +- tools/shell/src/shellCommand.c | 143 +++++++---------------- tools/shell/src/shellEngine.c | 4 +- 19 files changed, 299 insertions(+), 254 deletions(-) diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt index 926fbc8957..14a85ee4f6 100644 --- a/contrib/CMakeLists.txt +++ b/contrib/CMakeLists.txt @@ -365,7 +365,7 @@ if(${BUILD_ADDR2LINE}) if(HAVE_LIBELF_H OR HAVE_LIBELF_LIBELF_H) target_link_libraries(libdwarf PUBLIC libelf) endif() - target_include_directories(libdwarf SYSTEM PUBLIC "libdwarf/src/lib/libdwarf" ${CMAKE_BINARY_DIR}/contrib) + target_include_directories(libdwarf SYSTEM PUBLIC "libdwarf/src/lib/libdwarf" ${CMAKE_CURRENT_BINARY_DIR}) file(READ "addr2line/addr2line.c" ADDR2LINE_CONTENT) string(REPLACE "static int" "int" ADDR2LINE_CONTENT "${ADDR2LINE_CONTENT}") string(REPLACE "static void" "void" ADDR2LINE_CONTENT "${ADDR2LINE_CONTENT}") diff --git a/include/util/tjson.h b/include/util/tjson.h index d23f7b402e..a95efe56e7 100644 --- a/include/util/tjson.h +++ b/include/util/tjson.h @@ -22,17 +22,12 @@ extern "C" { #endif -#ifdef WINDOWS -#define tjsonGetNumberValue(pJson, pName, val) -1 -#else -#define tjsonGetNumberValue(pJson, pName, val) \ - ({ \ - uint64_t _tmp = 0; \ - int32_t _code = tjsonGetUBigIntValue(pJson, pName, &_tmp); \ - val = _tmp; \ - _code; \ - }) -#endif +#define tjsonGetNumberValue(pJson, pName, val, code) \ + do { \ + uint64_t _tmp = 0; \ + code = tjsonGetUBigIntValue(pJson, pName, &_tmp); \ + val = _tmp; \ + } while (0) typedef void SJson; diff --git a/source/client/inc/clientStmt.h b/source/client/inc/clientStmt.h index ae27e611cb..32625b4c51 100644 --- a/source/client/inc/clientStmt.h +++ b/source/client/inc/clientStmt.h @@ -63,7 +63,7 @@ typedef struct SStmtBindInfo { int8_t tbType; bool tagsCached; void* boundTags; - char tbName[TSDB_TABLE_FNAME_LEN];; + char tbName[TSDB_TABLE_FNAME_LEN]; char tbFName[TSDB_TABLE_FNAME_LEN]; 
char stbFName[TSDB_TABLE_FNAME_LEN]; SName sname; diff --git a/source/client/src/clientSml.c b/source/client/src/clientSml.c index 3e71714f21..dfb1942824 100644 --- a/source/client/src/clientSml.c +++ b/source/client/src/clientSml.c @@ -1985,8 +1985,8 @@ static int32_t smlParseInfluxLine(SSmlHandle* info, const char* sql) { (*oneTable)->sTableName = elements.measure; (*oneTable)->sTableNameLen = elements.measureLen; - RandTableName rName = {.tags=(*oneTable)->tags, .sTableName=(*oneTable)->sTableName, .sTableNameLen=(uint8_t)(*oneTable)->sTableNameLen, - .childTableName=(*oneTable)->childTableName}; + RandTableName rName = { (*oneTable)->tags, (*oneTable)->sTableName, (uint8_t)(*oneTable)->sTableNameLen, + (*oneTable)->childTableName, 0 }; buildChildTableName(&rName); (*oneTable)->uid = rName.uid; @@ -2045,8 +2045,8 @@ static int32_t smlParseTelnetLine(SSmlHandle* info, void *data) { } taosHashClear(info->dumplicateKey); - RandTableName rName = {.tags=tinfo->tags, .sTableName=tinfo->sTableName, .sTableNameLen=(uint8_t)tinfo->sTableNameLen, - .childTableName=tinfo->childTableName}; + RandTableName rName = { tinfo->tags, tinfo->sTableName, (uint8_t)tinfo->sTableNameLen, + tinfo->childTableName, 0 }; buildChildTableName(&rName); tinfo->uid = rName.uid; diff --git a/source/dnode/vnode/src/vnd/vnodeCfg.c b/source/dnode/vnode/src/vnd/vnodeCfg.c index 32866d7469..a66ecc493d 100644 --- a/source/dnode/vnode/src/vnd/vnodeCfg.c +++ b/source/dnode/vnode/src/vnd/vnodeCfg.c @@ -114,24 +114,42 @@ int vnodeEncodeConfig(const void *pObj, SJson *pJson) { int vnodeDecodeConfig(const SJson *pJson, void *pObj) { SVnodeCfg *pCfg = (SVnodeCfg *)pObj; - if (tjsonGetNumberValue(pJson, "vgId", pCfg->vgId) < 0) return -1; + int32_t code; + tjsonGetNumberValue(pJson, "vgId", pCfg->vgId, code); + if(code < 0) return -1; if (tjsonGetStringValue(pJson, "dbname", pCfg->dbname) < 0) return -1; - if (tjsonGetNumberValue(pJson, "dbId", pCfg->dbId) < 0) return -1; - if (tjsonGetNumberValue(pJson, "szPage", pCfg->szPage) < 0) return -1; - if (tjsonGetNumberValue(pJson, "szCache", pCfg->szCache) < 0) return -1; - if (tjsonGetNumberValue(pJson, "szBuf", pCfg->szBuf) < 0) return -1; - if (tjsonGetNumberValue(pJson, "isHeap", pCfg->isHeap) < 0) return -1; - if (tjsonGetNumberValue(pJson, "isWeak", pCfg->isWeak) < 0) return -1; - if (tjsonGetNumberValue(pJson, "precision", pCfg->tsdbCfg.precision) < 0) return -1; - if (tjsonGetNumberValue(pJson, "update", pCfg->tsdbCfg.update) < 0) return -1; - if (tjsonGetNumberValue(pJson, "compression", pCfg->tsdbCfg.compression) < 0) return -1; - if (tjsonGetNumberValue(pJson, "slLevel", pCfg->tsdbCfg.slLevel) < 0) return -1; - if (tjsonGetNumberValue(pJson, "daysPerFile", pCfg->tsdbCfg.days) < 0) return -1; - if (tjsonGetNumberValue(pJson, "minRows", pCfg->tsdbCfg.minRows) < 0) return -1; - if (tjsonGetNumberValue(pJson, "maxRows", pCfg->tsdbCfg.maxRows) < 0) return -1; - if (tjsonGetNumberValue(pJson, "keep0", pCfg->tsdbCfg.keep0) < 0) return -1; - if (tjsonGetNumberValue(pJson, "keep1", pCfg->tsdbCfg.keep1) < 0) return -1; - if (tjsonGetNumberValue(pJson, "keep2", pCfg->tsdbCfg.keep2) < 0) return -1; + tjsonGetNumberValue(pJson, "dbId", pCfg->dbId, code); + if(code < 0) return -1; + tjsonGetNumberValue(pJson, "szPage", pCfg->szPage, code); + if(code < 0) return -1; + tjsonGetNumberValue(pJson, "szCache", pCfg->szCache, code); + if(code < 0) return -1; + tjsonGetNumberValue(pJson, "szBuf", pCfg->szBuf, code); + if(code < 0) return -1; + tjsonGetNumberValue(pJson, "isHeap", pCfg->isHeap, 
code); + if(code < 0) return -1; + tjsonGetNumberValue(pJson, "isWeak", pCfg->isWeak, code); + if(code < 0) return -1; + tjsonGetNumberValue(pJson, "precision", pCfg->tsdbCfg.precision, code); + if(code < 0) return -1; + tjsonGetNumberValue(pJson, "update", pCfg->tsdbCfg.update, code); + if(code < 0) return -1; + tjsonGetNumberValue(pJson, "compression", pCfg->tsdbCfg.compression, code); + if(code < 0) return -1; + tjsonGetNumberValue(pJson, "slLevel", pCfg->tsdbCfg.slLevel, code); + if(code < 0) return -1; + tjsonGetNumberValue(pJson, "daysPerFile", pCfg->tsdbCfg.days, code); + if(code < 0) return -1; + tjsonGetNumberValue(pJson, "minRows", pCfg->tsdbCfg.minRows, code); + if(code < 0) return -1; + tjsonGetNumberValue(pJson, "maxRows", pCfg->tsdbCfg.maxRows, code); + if(code < 0) return -1; + tjsonGetNumberValue(pJson, "keep0", pCfg->tsdbCfg.keep0, code); + if(code < 0) return -1; + tjsonGetNumberValue(pJson, "keep1", pCfg->tsdbCfg.keep1, code); + if(code < 0) return -1; + tjsonGetNumberValue(pJson, "keep2", pCfg->tsdbCfg.keep2, code); + if(code < 0) return -1; SJson *pNodeRetentions = tjsonGetObjectItem(pJson, "retentions"); int32_t nRetention = tjsonGetArraySize(pNodeRetentions); if (nRetention > TSDB_RETENTION_MAX) { @@ -140,24 +158,36 @@ int vnodeDecodeConfig(const SJson *pJson, void *pObj) { for (int32_t i = 0; i < nRetention; ++i) { SJson *pNodeRetention = tjsonGetArrayItem(pNodeRetentions, i); ASSERT(pNodeRetention != NULL); - tjsonGetNumberValue(pNodeRetention, "freq", (pCfg->tsdbCfg.retentions)[i].freq); - tjsonGetNumberValue(pNodeRetention, "freqUnit", (pCfg->tsdbCfg.retentions)[i].freqUnit); - tjsonGetNumberValue(pNodeRetention, "keep", (pCfg->tsdbCfg.retentions)[i].keep); - tjsonGetNumberValue(pNodeRetention, "keepUnit", (pCfg->tsdbCfg.retentions)[i].keepUnit); + tjsonGetNumberValue(pNodeRetention, "freq", (pCfg->tsdbCfg.retentions)[i].freq, code); + tjsonGetNumberValue(pNodeRetention, "freqUnit", (pCfg->tsdbCfg.retentions)[i].freqUnit, code); + tjsonGetNumberValue(pNodeRetention, "keep", (pCfg->tsdbCfg.retentions)[i].keep, code); + tjsonGetNumberValue(pNodeRetention, "keepUnit", (pCfg->tsdbCfg.retentions)[i].keepUnit, code); } - if (tjsonGetNumberValue(pJson, "wal.vgId", pCfg->walCfg.vgId) < 0) return -1; - if (tjsonGetNumberValue(pJson, "wal.fsyncPeriod", pCfg->walCfg.fsyncPeriod) < 0) return -1; - if (tjsonGetNumberValue(pJson, "wal.retentionPeriod", pCfg->walCfg.retentionPeriod) < 0) return -1; - if (tjsonGetNumberValue(pJson, "wal.rollPeriod", pCfg->walCfg.rollPeriod) < 0) return -1; - if (tjsonGetNumberValue(pJson, "wal.retentionSize", pCfg->walCfg.retentionSize) < 0) return -1; - if (tjsonGetNumberValue(pJson, "wal.segSize", pCfg->walCfg.segSize) < 0) return -1; - if (tjsonGetNumberValue(pJson, "wal.level", pCfg->walCfg.level) < 0) return -1; - if (tjsonGetNumberValue(pJson, "hashBegin", pCfg->hashBegin) < 0) return -1; - if (tjsonGetNumberValue(pJson, "hashEnd", pCfg->hashEnd) < 0) return -1; - if (tjsonGetNumberValue(pJson, "hashMethod", pCfg->hashMethod) < 0) return -1; + tjsonGetNumberValue(pJson, "wal.vgId", pCfg->walCfg.vgId, code); + if(code < 0) return -1; + tjsonGetNumberValue(pJson, "wal.fsyncPeriod", pCfg->walCfg.fsyncPeriod, code); + if(code < 0) return -1; + tjsonGetNumberValue(pJson, "wal.retentionPeriod", pCfg->walCfg.retentionPeriod, code); + if(code < 0) return -1; + tjsonGetNumberValue(pJson, "wal.rollPeriod", pCfg->walCfg.rollPeriod, code); + if(code < 0) return -1; + tjsonGetNumberValue(pJson, "wal.retentionSize", pCfg->walCfg.retentionSize, code); + 
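/* Background on the tjsonGetNumberValue() calls in this decoder: the previous macro used a
 * GCC statement expression (`({ ... })`) to return its status, which MSVC does not support,
 * so on Windows it degenerated into a constant -1. The reworked macro instead writes its
 * status into a caller-supplied variable, which is why each call here is paired with an
 * explicit `if (code < 0) return -1;` check. A minimal illustration of the calling
 * convention (the field name below is made up for the example):
 *
 *   int32_t code = 0;
 *   int64_t someField = 0;
 *   tjsonGetNumberValue(pJson, "someField", someField, code);  // status lands in `code`
 *   if (code < 0) return -1;                                    // caller handles the error
 */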
if(code < 0) return -1; + tjsonGetNumberValue(pJson, "wal.segSize", pCfg->walCfg.segSize, code); + if(code < 0) return -1; + tjsonGetNumberValue(pJson, "wal.level", pCfg->walCfg.level, code); + if(code < 0) return -1; + tjsonGetNumberValue(pJson, "hashBegin", pCfg->hashBegin, code); + if(code < 0) return -1; + tjsonGetNumberValue(pJson, "hashEnd", pCfg->hashEnd, code); + if(code < 0) return -1; + tjsonGetNumberValue(pJson, "hashMethod", pCfg->hashMethod, code); + if(code < 0) return -1; - if (tjsonGetNumberValue(pJson, "syncCfg.replicaNum", pCfg->syncCfg.replicaNum) < 0) return -1; - if (tjsonGetNumberValue(pJson, "syncCfg.myIndex", pCfg->syncCfg.myIndex) < 0) return -1; + tjsonGetNumberValue(pJson, "syncCfg.replicaNum", pCfg->syncCfg.replicaNum, code); + if(code < 0) return -1; + tjsonGetNumberValue(pJson, "syncCfg.myIndex", pCfg->syncCfg.myIndex, code); + if(code < 0) return -1; SJson *pNodeInfoArr = tjsonGetObjectItem(pJson, "syncCfg.nodeInfo"); int arraySize = tjsonGetArraySize(pNodeInfoArr); @@ -166,7 +196,7 @@ int vnodeDecodeConfig(const SJson *pJson, void *pObj) { for (int i = 0; i < arraySize; ++i) { SJson *pNodeInfo = tjsonGetArrayItem(pNodeInfoArr, i); assert(pNodeInfo != NULL); - tjsonGetNumberValue(pNodeInfo, "nodePort", (pCfg->syncCfg.nodeInfo)[i].nodePort); + tjsonGetNumberValue(pNodeInfo, "nodePort", (pCfg->syncCfg.nodeInfo)[i].nodePort, code); tjsonGetStringValue(pNodeInfo, "nodeFqdn", (pCfg->syncCfg.nodeInfo)[i].nodeFqdn); } diff --git a/source/dnode/vnode/src/vnd/vnodeCommit.c b/source/dnode/vnode/src/vnd/vnodeCommit.c index 6d8bcb35c8..e7bee3342a 100644 --- a/source/dnode/vnode/src/vnd/vnodeCommit.c +++ b/source/dnode/vnode/src/vnd/vnodeCommit.c @@ -310,8 +310,11 @@ static int vnodeEncodeState(const void *pObj, SJson *pJson) { static int vnodeDecodeState(const SJson *pJson, void *pObj) { SVState *pState = (SVState *)pObj; - if (tjsonGetNumberValue(pJson, "commit version", pState->committed) < 0) return -1; - if (tjsonGetNumberValue(pJson, "applied version", pState->applied) < 0) return -1; + int32_t code; + tjsonGetNumberValue(pJson, "commit version", pState->committed, code); + if(code < 0) return -1; + tjsonGetNumberValue(pJson, "applied version", pState->applied, code); + if(code < 0) return -1; return 0; } diff --git a/source/libs/function/src/tudf.c b/source/libs/function/src/tudf.c index 8e96a2a063..51829cfdd8 100644 --- a/source/libs/function/src/tudf.c +++ b/source/libs/function/src/tudf.c @@ -34,6 +34,9 @@ typedef struct SUdfdData { uv_thread_t thread; uv_barrier_t barrier; uv_process_t process; +#ifdef WINDOWS + HANDLE jobHandle; +#endif int spawnErr; uv_pipe_t ctrlPipe; uv_async_t stopAsync; @@ -104,6 +107,24 @@ static int32_t udfSpawnUdfd(SUdfdData* pData) { int err = uv_spawn(&pData->loop, &pData->process, &options); pData->process.data = (void*)pData; +#ifdef WINDOWS + // End udfd.exe by Job. 
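/* Why a Job Object here: Win32 has no automatic parent/child lifetime link, so a spawned
 * udfd.exe would keep running if taosd.exe exited abnormally. Assigning the child to a job
 * whose extended limit carries JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE asks the kernel to kill
 * every process in the job once the last job handle is closed, whether explicitly in
 * udfStopUdfd() or implicitly when this process terminates. The generic pattern, sketched
 * with the same Win32 calls used here (childProcessHandle is a placeholder name):
 *
 *   HANDLE job = CreateJobObject(NULL, NULL);
 *   JOBOBJECT_EXTENDED_LIMIT_INFORMATION li = {0};
 *   li.BasicLimitInformation.LimitFlags = JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE;
 *   SetInformationJobObject(job, JobObjectExtendedLimitInformation, &li, sizeof(li));
 *   AssignProcessToJobObject(job, childProcessHandle);
 */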
+ if (pData->jobHandle != NULL) CloseHandle(pData->jobHandle); + pData->jobHandle = CreateJobObject(NULL, NULL); + bool add_job_ok = AssignProcessToJobObject(pData->jobHandle, pData->process.process_handle); + if (!add_job_ok) { + fnError("Assign udfd to job failed."); + } else { + JOBOBJECT_EXTENDED_LIMIT_INFORMATION limit_info; + memset(&limit_info, 0x0, sizeof(limit_info)); + limit_info.BasicLimitInformation.LimitFlags = JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE; + bool set_auto_kill_ok = SetInformationJobObject(pData->jobHandle, JobObjectExtendedLimitInformation, &limit_info, sizeof(limit_info)); + if (!set_auto_kill_ok) { + fnError("Set job auto kill udfd failed."); + } + } +#endif + if (err != 0) { fnError("can not spawn udfd. path: %s, error: %s", path, uv_strerror(err)); } @@ -182,6 +203,9 @@ int32_t udfStopUdfd() { uv_barrier_destroy(&pData->barrier); uv_async_send(&pData->stopAsync); uv_thread_join(&pData->thread); +#ifdef WINDOWS + if (pData->jobHandle != NULL) CloseHandle(pData->jobHandle); +#endif fnInfo("dnode-mgmt udfd cleaned up"); return 0; } diff --git a/source/libs/nodes/src/nodesCodeFuncs.c b/source/libs/nodes/src/nodesCodeFuncs.c index 71b0774ca6..6e0775ff17 100644 --- a/source/libs/nodes/src/nodesCodeFuncs.c +++ b/source/libs/nodes/src/nodesCodeFuncs.c @@ -317,15 +317,16 @@ static int32_t tableComInfoToJson(const void* pObj, SJson* pJson) { static int32_t jsonToTableComInfo(const SJson* pJson, void* pObj) { STableComInfo* pNode = (STableComInfo*)pObj; - int32_t code = tjsonGetNumberValue(pJson, jkTableComInfoNumOfTags, pNode->numOfTags); + int32_t code; + tjsonGetNumberValue(pJson, jkTableComInfoNumOfTags, pNode->numOfTags, code);; if (TSDB_CODE_SUCCESS == code) { - code = tjsonGetNumberValue(pJson, jkTableComInfoPrecision, pNode->precision); + tjsonGetNumberValue(pJson, jkTableComInfoPrecision, pNode->precision, code);; } if (TSDB_CODE_SUCCESS == code) { - code = tjsonGetNumberValue(pJson, jkTableComInfoNumOfColumns, pNode->numOfColumns); + tjsonGetNumberValue(pJson, jkTableComInfoNumOfColumns, pNode->numOfColumns, code);; } if (TSDB_CODE_SUCCESS == code) { - code = tjsonGetNumberValue(pJson, jkTableComInfoRowSize, pNode->rowSize); + tjsonGetNumberValue(pJson, jkTableComInfoRowSize, pNode->rowSize, code);; } return code; @@ -356,12 +357,13 @@ static int32_t schemaToJson(const void* pObj, SJson* pJson) { static int32_t jsonToSchema(const SJson* pJson, void* pObj) { SSchema* pNode = (SSchema*)pObj; - int32_t code = tjsonGetNumberValue(pJson, jkSchemaType, pNode->type); + int32_t code; + tjsonGetNumberValue(pJson, jkSchemaType, pNode->type, code);; if (TSDB_CODE_SUCCESS == code) { - code = tjsonGetNumberValue(pJson, jkSchemaColId, pNode->colId); + tjsonGetNumberValue(pJson, jkSchemaColId, pNode->colId, code);; } if (TSDB_CODE_SUCCESS == code) { - code = tjsonGetNumberValue(pJson, jkSchemaBytes, pNode->bytes); + tjsonGetNumberValue(pJson, jkSchemaBytes, pNode->bytes, code);; } if (TSDB_CODE_SUCCESS == code) { code = tjsonGetStringValue(pJson, jkSchemaName, pNode->name); @@ -412,21 +414,22 @@ static int32_t tableMetaToJson(const void* pObj, SJson* pJson) { static int32_t jsonToTableMeta(const SJson* pJson, void* pObj) { STableMeta* pNode = (STableMeta*)pObj; - int32_t code = tjsonGetNumberValue(pJson, jkTableMetaVgId, pNode->vgId); + int32_t code; + tjsonGetNumberValue(pJson, jkTableMetaVgId, pNode->vgId, code);; if (TSDB_CODE_SUCCESS == code) { - code = tjsonGetNumberValue(pJson, jkTableMetaTableType, pNode->tableType); + tjsonGetNumberValue(pJson, jkTableMetaTableType, 
pNode->tableType, code);; } if (TSDB_CODE_SUCCESS == code) { - code = tjsonGetNumberValue(pJson, jkTableMetaUid, pNode->uid); + tjsonGetNumberValue(pJson, jkTableMetaUid, pNode->uid, code);; } if (TSDB_CODE_SUCCESS == code) { - code = tjsonGetNumberValue(pJson, jkTableMetaSuid, pNode->suid); + tjsonGetNumberValue(pJson, jkTableMetaSuid, pNode->suid, code);; } if (TSDB_CODE_SUCCESS == code) { - code = tjsonGetNumberValue(pJson, jkTableMetaSversion, pNode->sversion); + tjsonGetNumberValue(pJson, jkTableMetaSversion, pNode->sversion, code);; } if (TSDB_CODE_SUCCESS == code) { - code = tjsonGetNumberValue(pJson, jkTableMetaTversion, pNode->tversion); + tjsonGetNumberValue(pJson, jkTableMetaTversion, pNode->tversion, code);; } if (TSDB_CODE_SUCCESS == code) { code = tjsonToObject(pJson, jkTableMetaComInfo, jsonToTableComInfo, &pNode->tableInfo); @@ -602,7 +605,7 @@ static int32_t jsonToLogicFillNode(const SJson* pJson, void* pObj) { int32_t code = jsonToLogicPlanNode(pJson, pObj); if (TSDB_CODE_SUCCESS == code) { - code = tjsonGetNumberValue(pJson, jkFillLogicPlanMode, pNode->mode); + tjsonGetNumberValue(pJson, jkFillLogicPlanMode, pNode->mode, code);; } if (TSDB_CODE_SUCCESS == code) { code = jsonToNodeObject(pJson, jkFillLogicPlanWStartTs, &pNode->pWStartTs); @@ -878,7 +881,7 @@ static int32_t jsonToLogicSubplan(const SJson* pJson, void* pObj) { code = jsonToNodeObject(pJson, jkLogicSubplanRootNode, (SNode**)&pNode->pNode); } if (TSDB_CODE_SUCCESS == code) { - code = tjsonGetNumberValue(pJson, jkLogicSubplanType, pNode->subplanType); + tjsonGetNumberValue(pJson, jkLogicSubplanType, pNode->subplanType, code);; } int32_t objSize = 0; if (TSDB_CODE_SUCCESS == code) { @@ -1118,25 +1121,25 @@ static int32_t jsonToPhysiTableScanNode(const SJson* pJson, void* pObj) { code = tjsonGetDoubleValue(pJson, jkTableScanPhysiPlanRatio, &pNode->ratio); } if (TSDB_CODE_SUCCESS == code) { - code = tjsonGetNumberValue(pJson, jkTableScanPhysiPlanDataRequired, pNode->dataRequired); + tjsonGetNumberValue(pJson, jkTableScanPhysiPlanDataRequired, pNode->dataRequired, code);; } if (TSDB_CODE_SUCCESS == code) { code = jsonToNodeList(pJson, jkTableScanPhysiPlanDynamicScanFuncs, &pNode->pDynamicScanFuncs); } if (TSDB_CODE_SUCCESS == code) { - code = tjsonGetNumberValue(pJson, jkTableScanPhysiPlanInterval, pNode->interval); + tjsonGetNumberValue(pJson, jkTableScanPhysiPlanInterval, pNode->interval, code);; } if (TSDB_CODE_SUCCESS == code) { - code = tjsonGetNumberValue(pJson, jkTableScanPhysiPlanOffset, pNode->offset); + tjsonGetNumberValue(pJson, jkTableScanPhysiPlanOffset, pNode->offset, code);; } if (TSDB_CODE_SUCCESS == code) { - code = tjsonGetNumberValue(pJson, jkTableScanPhysiPlanSliding, pNode->sliding); + tjsonGetNumberValue(pJson, jkTableScanPhysiPlanSliding, pNode->sliding, code);; } if (TSDB_CODE_SUCCESS == code) { - code = tjsonGetNumberValue(pJson, jkTableScanPhysiPlanIntervalUnit, pNode->intervalUnit); + tjsonGetNumberValue(pJson, jkTableScanPhysiPlanIntervalUnit, pNode->intervalUnit, code);; } if (TSDB_CODE_SUCCESS == code) { - code = tjsonGetNumberValue(pJson, jkTableScanPhysiPlanSlidingUnit, pNode->slidingUnit); + tjsonGetNumberValue(pJson, jkTableScanPhysiPlanSlidingUnit, pNode->slidingUnit, code);; } return code; @@ -1178,7 +1181,7 @@ static int32_t jsonToPhysiSysTableScanNode(const SJson* pJson, void* pObj) { code = tjsonGetBoolValue(pJson, jkSysTableScanPhysiPlanShowRewrite, &pNode->showRewrite); } if (TSDB_CODE_SUCCESS == code) { - code = tjsonGetNumberValue(pJson, jkSysTableScanPhysiPlanAccountId, 
pNode->accountId); + tjsonGetNumberValue(pJson, jkSysTableScanPhysiPlanAccountId, pNode->accountId, code);; } return code; @@ -1262,7 +1265,7 @@ static int32_t jsonToPhysiJoinNode(const SJson* pJson, void* pObj) { int32_t code = jsonToPhysicPlanNode(pJson, pObj); if (TSDB_CODE_SUCCESS == code) { - code = tjsonGetNumberValue(pJson, jkJoinPhysiPlanJoinType, pNode->joinType); + tjsonGetNumberValue(pJson, jkJoinPhysiPlanJoinType, pNode->joinType, code);; } if (TSDB_CODE_SUCCESS == code) { code = jsonToNodeObject(pJson, jkJoinPhysiPlanOnConditions, &pNode->pOnConditions); @@ -1424,10 +1427,10 @@ static int32_t jsonToPhysiWindowNode(const SJson* pJson, void* pObj) { code = jsonToNodeObject(pJson, jkWindowPhysiPlanTsPk, (SNode**)&pNode->pTspk); } if (TSDB_CODE_SUCCESS == code) { - code = tjsonGetNumberValue(pJson, jkWindowPhysiPlanTriggerType, pNode->triggerType); + tjsonGetNumberValue(pJson, jkWindowPhysiPlanTriggerType, pNode->triggerType, code);; } if (TSDB_CODE_SUCCESS == code) { - code = tjsonGetNumberValue(pJson, jkWindowPhysiPlanWatermark, pNode->watermark); + tjsonGetNumberValue(pJson, jkWindowPhysiPlanWatermark, pNode->watermark, code);; } return code; @@ -1523,7 +1526,7 @@ static int32_t jsonToPhysiFillNode(const SJson* pJson, void* pObj) { int32_t code = jsonToPhysicPlanNode(pJson, pObj); if (TSDB_CODE_SUCCESS == code) { - code = tjsonGetNumberValue(pJson, jkFillPhysiPlanMode, pNode->mode); + tjsonGetNumberValue(pJson, jkFillPhysiPlanMode, pNode->mode, code);; } if (TSDB_CODE_SUCCESS == code) { code = jsonToNodeObject(pJson, jkFillPhysiPlanWStartTs, &pNode->pWStartTs); @@ -1562,7 +1565,7 @@ static int32_t jsonToPhysiSessionWindowNode(const SJson* pJson, void* pObj) { int32_t code = jsonToPhysiWindowNode(pJson, pObj); if (TSDB_CODE_SUCCESS == code) { - code = tjsonGetNumberValue(pJson, jkSessionWindowPhysiPlanGap, pNode->gap); + tjsonGetNumberValue(pJson, jkSessionWindowPhysiPlanGap, pNode->gap, code);; } return code; @@ -1724,7 +1727,7 @@ static int32_t jsonToSubplan(const SJson* pJson, void* pObj) { int32_t code = tjsonToObject(pJson, jkSubplanId, jsonToSubplanId, &pNode->id); if (TSDB_CODE_SUCCESS == code) { - code = tjsonGetNumberValue(pJson, jkSubplanType, pNode->subplanType); + tjsonGetNumberValue(pJson, jkSubplanType, pNode->subplanType, code);; } if (TSDB_CODE_SUCCESS == code) { code = tjsonGetIntValue(pJson, jkSubplanMsgType, &pNode->msgType); @@ -1914,7 +1917,7 @@ static int32_t jsonToColumnNode(const SJson* pJson, void* pObj) { code = tjsonGetSmallIntValue(pJson, jkColumnColId, &pNode->colId); } if (TSDB_CODE_SUCCESS == code) { - code = tjsonGetNumberValue(pJson, jkColumnColType, pNode->colType); + tjsonGetNumberValue(pJson, jkColumnColType, pNode->colType, code);; } if (TSDB_CODE_SUCCESS == code) { code = tjsonGetStringValue(pJson, jkColumnDbName, pNode->dbName); @@ -2168,7 +2171,7 @@ static int32_t jsonToOperatorNode(const SJson* pJson, void* pObj) { int32_t code = jsonToExprNode(pJson, pObj); if (TSDB_CODE_SUCCESS == code) { - code = tjsonGetNumberValue(pJson, jkOperatorType, pNode->opType); + tjsonGetNumberValue(pJson, jkOperatorType, pNode->opType, code);; } if (TSDB_CODE_SUCCESS == code) { code = jsonToNodeObject(pJson, jkOperatorLeft, &pNode->pLeft); @@ -2202,7 +2205,7 @@ static int32_t jsonToLogicConditionNode(const SJson* pJson, void* pObj) { int32_t code = jsonToExprNode(pJson, pObj); if (TSDB_CODE_SUCCESS == code) { - code = tjsonGetNumberValue(pJson, jkLogicCondType, pNode->condType); + tjsonGetNumberValue(pJson, jkLogicCondType, pNode->condType, code);; } if 
(TSDB_CODE_SUCCESS == code) { code = jsonToNodeList(pJson, jkLogicCondParameters, &pNode->pParameterList); @@ -2384,10 +2387,10 @@ static int32_t jsonToOrderByExprNode(const SJson* pJson, void* pObj) { int32_t code = jsonToNodeObject(pJson, jkOrderByExprExpr, &pNode->pExpr); if (TSDB_CODE_SUCCESS == code) { - code = tjsonGetNumberValue(pJson, jkOrderByExprOrder, pNode->order); + tjsonGetNumberValue(pJson, jkOrderByExprOrder, pNode->order, code);; } if (TSDB_CODE_SUCCESS == code) { - code = tjsonGetNumberValue(pJson, jkOrderByExprNullOrder, pNode->nullOrder); + tjsonGetNumberValue(pJson, jkOrderByExprNullOrder, pNode->nullOrder, code);; } return code; @@ -2493,7 +2496,8 @@ static int32_t fillNodeToJson(const void* pObj, SJson* pJson) { static int32_t jsonToFillNode(const SJson* pJson, void* pObj) { SFillNode* pNode = (SFillNode*)pObj; - int32_t code = tjsonGetNumberValue(pJson, jkFillMode, pNode->mode); + int32_t code; + tjsonGetNumberValue(pJson, jkFillMode, pNode->mode, code);; if (TSDB_CODE_SUCCESS == code) { code = jsonToNodeObject(pJson, jkFillValues, &pNode->pValues); } @@ -3033,7 +3037,8 @@ static int32_t nodeToJson(const void* pObj, SJson* pJson) { static int32_t jsonToNode(const SJson* pJson, void* pObj) { SNode* pNode = (SNode*)pObj; - int32_t code = tjsonGetNumberValue(pJson, jkNodeType, pNode->type); + int32_t code; + tjsonGetNumberValue(pJson, jkNodeType, pNode->type, code);; if (TSDB_CODE_SUCCESS == code) { code = tjsonToObject(pJson, nodesNodeName(pNode->type), jsonToSpecificNode, pNode); if (TSDB_CODE_SUCCESS != code) { diff --git a/source/os/src/osAtomic.c b/source/os/src/osAtomic.c index 0fe946bf68..e4d880f40a 100644 --- a/source/os/src/osAtomic.c +++ b/source/os/src/osAtomic.c @@ -36,7 +36,7 @@ int64_t interlocked_add_fetch_64(int64_t volatile* ptr, int64_t val) { } void* interlocked_add_fetch_ptr(void* volatile* ptr, void* val) { -#ifdef _TD_WINDOWS_32 +#ifdef WINDOWS return (void*)(_InterlockedExchangeAdd((int32_t volatile*)(ptr), (int32_t)val) + (int32_t)val); #else return (void*)(InterlockedExchangeAdd64((int64_t volatile*)(ptr), (int64_t)val) + (int64_t)val); @@ -56,7 +56,7 @@ int32_t interlocked_and_fetch_32(int32_t volatile* ptr, int32_t val) { } int64_t interlocked_and_fetch_64(int64_t volatile* ptr, int64_t val) { -#ifdef _TD_WINDOWS_32 +#ifdef WINDOWS int64_t old, res; do { old = *ptr; @@ -69,7 +69,7 @@ int64_t interlocked_and_fetch_64(int64_t volatile* ptr, int64_t val) { } void* interlocked_and_fetch_ptr(void* volatile* ptr, void* val) { -#ifdef _TD_WINDOWS_32 +#ifdef WINDOWS return (void*)interlocked_and_fetch_32((int32_t volatile*)ptr, (int32_t)val); #else return (void*)interlocked_and_fetch_64((int64_t volatile*)ptr, (int64_t)val); @@ -77,7 +77,7 @@ void* interlocked_and_fetch_ptr(void* volatile* ptr, void* val) { } int64_t interlocked_fetch_and_64(int64_t volatile* ptr, int64_t val) { -#ifdef _TD_WINDOWS_32 +#ifdef WINDOWS int64_t old; do { old = *ptr; @@ -89,7 +89,7 @@ int64_t interlocked_fetch_and_64(int64_t volatile* ptr, int64_t val) { } void* interlocked_fetch_and_ptr(void* volatile* ptr, void* val) { -#ifdef _TD_WINDOWS_32 +#ifdef WINDOWS return (void*)_InterlockedAnd((int32_t volatile*)(ptr), (int32_t)(val)); #else return (void*)_InterlockedAnd64((int64_t volatile*)(ptr), (int64_t)(val)); @@ -109,7 +109,7 @@ int32_t interlocked_or_fetch_32(int32_t volatile* ptr, int32_t val) { } int64_t interlocked_or_fetch_64(int64_t volatile* ptr, int64_t val) { -#ifdef _TD_WINDOWS_32 +#ifdef WINDOWS int64_t old, res; do { old = *ptr; @@ -122,7 +122,7 @@ int64_t 
interlocked_or_fetch_64(int64_t volatile* ptr, int64_t val) { } void* interlocked_or_fetch_ptr(void* volatile* ptr, void* val) { -#ifdef _TD_WINDOWS_32 +#ifdef WINDOWS return (void*)interlocked_or_fetch_32((int32_t volatile*)ptr, (int32_t)val); #else return (void*)interlocked_or_fetch_64((int64_t volatile*)ptr, (int64_t)val); @@ -130,7 +130,7 @@ void* interlocked_or_fetch_ptr(void* volatile* ptr, void* val) { } int64_t interlocked_fetch_or_64(int64_t volatile* ptr, int64_t val) { -#ifdef _TD_WINDOWS_32 +#ifdef WINDOWS int64_t old; do { old = *ptr; @@ -142,7 +142,7 @@ int64_t interlocked_fetch_or_64(int64_t volatile* ptr, int64_t val) { } void* interlocked_fetch_or_ptr(void* volatile* ptr, void* val) { -#ifdef _TD_WINDOWS_32 +#ifdef WINDOWS return (void*)_InterlockedOr((int32_t volatile*)(ptr), (int32_t)(val)); #else return (void*)interlocked_fetch_or_64((int64_t volatile*)(ptr), (int64_t)(val)); @@ -162,7 +162,7 @@ int32_t interlocked_xor_fetch_32(int32_t volatile* ptr, int32_t val) { } int64_t interlocked_xor_fetch_64(int64_t volatile* ptr, int64_t val) { -#ifdef _TD_WINDOWS_32 +#ifdef WINDOWS int64_t old, res; do { old = *ptr; @@ -175,7 +175,7 @@ int64_t interlocked_xor_fetch_64(int64_t volatile* ptr, int64_t val) { } void* interlocked_xor_fetch_ptr(void* volatile* ptr, void* val) { -#ifdef _TD_WINDOWS_32 +#ifdef WINDOWS return (void*)interlocked_xor_fetch_32((int32_t volatile*)(ptr), (int32_t)(val)); #else return (void*)interlocked_xor_fetch_64((int64_t volatile*)(ptr), (int64_t)(val)); @@ -183,7 +183,7 @@ void* interlocked_xor_fetch_ptr(void* volatile* ptr, void* val) { } int64_t interlocked_fetch_xor_64(int64_t volatile* ptr, int64_t val) { -#ifdef _TD_WINDOWS_32 +#ifdef WINDOWS int64_t old; do { old = *ptr; @@ -195,7 +195,7 @@ int64_t interlocked_fetch_xor_64(int64_t volatile* ptr, int64_t val) { } void* interlocked_fetch_xor_ptr(void* volatile* ptr, void* val) { -#ifdef _TD_WINDOWS_32 +#ifdef WINDOWS return (void*)_InterlockedXor((int32_t volatile*)(ptr), (int32_t)(val)); #else return (void*)interlocked_fetch_xor_64((int64_t volatile*)(ptr), (int64_t)(val)); @@ -211,7 +211,7 @@ int64_t interlocked_sub_fetch_64(int64_t volatile* ptr, int64_t val) { } void* interlocked_sub_fetch_ptr(void* volatile* ptr, void* val) { -#ifdef _TD_WINDOWS_32 +#ifdef WINDOWS return (void*)interlocked_sub_fetch_32((int32_t volatile*)ptr, (int32_t)val); #else return (void*)interlocked_add_fetch_64((int64_t volatile*)ptr, (int64_t)val); @@ -226,7 +226,7 @@ int64_t interlocked_fetch_sub_64(int64_t volatile* ptr, int64_t val) { } void* interlocked_fetch_sub_ptr(void* volatile* ptr, void* val) { -#ifdef _TD_WINDOWS_32 +#ifdef WINDOWS return (void*)interlocked_fetch_sub_32((int32_t volatile*)ptr, (int32_t)val); #else return (void*)interlocked_fetch_sub_64((int64_t volatile*)ptr, (int64_t)val); diff --git a/source/os/src/osFile.c b/source/os/src/osFile.c index 425bf8b7ac..3cd05b65cd 100644 --- a/source/os/src/osFile.c +++ b/source/os/src/osFile.c @@ -109,6 +109,7 @@ void taosGetTmpfilePath(const char *inputTmpDir, const char *fileNamePrefix, cha int64_t taosCopyFile(const char *from, const char *to) { #ifdef WINDOWS + assert(0); return 0; #else char buffer[4096]; @@ -152,16 +153,16 @@ int32_t taosRemoveFile(const char *path) { return remove(path); } int32_t taosRenameFile(const char *oldName, const char *newName) { #ifdef WINDOWS - int32_t code = MoveFileEx(oldName, newName, MOVEFILE_REPLACE_EXISTING | MOVEFILE_COPY_ALLOWED); - if (code < 0) { - // printf("failed to rename file %s to %s, reason:%s", oldName, 
newName, strerror(errno)); + bool code = MoveFileEx(oldName, newName, MOVEFILE_REPLACE_EXISTING | MOVEFILE_COPY_ALLOWED); + if (!code) { + printf("failed to rename file %s to %s, reason:%s", oldName, newName, strerror(errno)); } - return code; + return !code; #else int32_t code = rename(oldName, newName); if (code < 0) { - // printf("failed to rename file %s to %s, reason:%s", oldName, newName, strerror(errno)); + printf("failed to rename file %s to %s, reason:%s", oldName, newName, strerror(errno)); } return code; @@ -169,11 +170,12 @@ int32_t taosRenameFile(const char *oldName, const char *newName) { } int32_t taosStatFile(const char *path, int64_t *size, int32_t *mtime) { -#ifdef WINDOWS - return 0; -#else struct stat fileStat; +#ifdef WINDOWS + int32_t code = _stat(path, &fileStat); +#else int32_t code = stat(path, &fileStat); +#endif if (code < 0) { return code; } @@ -187,14 +189,15 @@ int32_t taosStatFile(const char *path, int64_t *size, int32_t *mtime) { } return 0; -#endif } int32_t taosDevInoFile(const char *path, int64_t *stDev, int64_t *stIno) { -#ifdef WINDOWS - return 0; -#else + struct stat fileStat; +#ifdef WINDOWS + int32_t code = _stat(path, &fileStat); +#else int32_t code = stat(path, &fileStat); +#endif if (code < 0) { return code; } @@ -208,7 +211,6 @@ int32_t taosDevInoFile(const char *path, int64_t *stDev, int64_t *stIno) { } return 0; -#endif } void autoDelFileListAdd(const char *path) { return; } @@ -276,9 +278,6 @@ TdFilePtr taosOpenFile(const char *path, int32_t tdFileOptions) { } int64_t taosCloseFile(TdFilePtr *ppFile) { -#ifdef WINDOWS - return 0; -#else if (ppFile == NULL || *ppFile == NULL) { return 0; } @@ -294,7 +293,12 @@ int64_t taosCloseFile(TdFilePtr *ppFile) { (*ppFile)->fp = NULL; } if ((*ppFile)->fd >= 0) { + #ifdef WINDOWS + HANDLE h = (HANDLE)_get_osfhandle((*ppFile)->fd); + !FlushFileBuffers(h); + #else fsync((*ppFile)->fd); + #endif close((*ppFile)->fd); (*ppFile)->fd = -1; } @@ -306,7 +310,6 @@ int64_t taosCloseFile(TdFilePtr *ppFile) { taosMemoryFree(*ppFile); *ppFile = NULL; return 0; -#endif } int64_t taosReadFile(TdFilePtr pFile, void *buf, int64_t count) { @@ -412,13 +415,17 @@ int64_t taosLSeekFile(TdFilePtr pFile, int64_t offset, int32_t whence) { } int32_t taosFStatFile(TdFilePtr pFile, int64_t *size, int32_t *mtime) { -#ifdef WINDOWS - return 0; -#else + if (pFile == NULL) { + return 0; + } assert(pFile->fd >= 0); // Please check if you have closed the file. 
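/* The Windows branch is no longer stubbed out: both platforms now fill the same struct stat,
 * via _fstat() from the MSVC CRT on Windows and fstat() elsewhere, so the code that copies
 * st_size/st_mtime into the out parameters is shared. taosStatFile() above follows the same
 * pattern with _stat()/stat(); roughly:
 *
 *   struct stat fileStat;
 *   #ifdef WINDOWS
 *     int32_t code = _stat(path, &fileStat);   // MSVC CRT variant
 *   #else
 *     int32_t code = stat(path, &fileStat);    // POSIX
 *   #endif
 *   if (code == 0) { *size = fileStat.st_size; *mtime = fileStat.st_mtime; }
 */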
struct stat fileStat; +#ifdef WINDOWS + int32_t code = _fstat(pFile->fd, &fileStat); +#else int32_t code = fstat(pFile->fd, &fileStat); +#endif if (code < 0) { return code; } @@ -432,7 +439,6 @@ int32_t taosFStatFile(TdFilePtr pFile, int64_t *size, int32_t *mtime) { } return 0; -#endif } int32_t taosLockFile(TdFilePtr pFile) { @@ -459,7 +465,7 @@ int32_t taosFtruncateFile(TdFilePtr pFile, int64_t l_size) { #ifdef WINDOWS if (pFile->fd < 0) { errno = EBADF; - printf("%s\n", "fd arg was negative"); + printf("Ftruncate file error, fd arg was negative\n"); return -1; } @@ -516,26 +522,20 @@ int32_t taosFtruncateFile(TdFilePtr pFile, int64_t l_size) { } int32_t taosFsyncFile(TdFilePtr pFile) { -#ifdef WINDOWS - if (pFile->fd < 0) { - errno = EBADF; - printf("%s\n", "fd arg was negative"); - return -1; - } - - HANDLE h = (HANDLE)_get_osfhandle(pFile->fd); - - return !FlushFileBuffers(h); -#else if (pFile == NULL) { return 0; } if (pFile->fp != NULL) return fflush(pFile->fp); - if (pFile->fd >= 0) return fsync(pFile->fd); - + if (pFile->fd >= 0) { + #ifdef WINDOWS + HANDLE h = (HANDLE)_get_osfhandle(pFile->fd); + return !FlushFileBuffers(h); + #else + return fsync(pFile->fd); + #endif + } return 0; -#endif } int64_t taosFSendFile(TdFilePtr pFileOut, TdFilePtr pFileIn, int64_t *offset, int64_t size) { diff --git a/source/os/src/osMemory.c b/source/os/src/osMemory.c index 7c877b463a..e3791af618 100644 --- a/source/os/src/osMemory.c +++ b/source/os/src/osMemory.c @@ -138,7 +138,7 @@ static void print_line(Dwarf_Debug dbg, Dwarf_Line line, Dwarf_Addr pc) { dwarf_linesrc(line, &linesrc, NULL); dwarf_lineno(line, &lineno, NULL); } - printf("%s:%" DW_PR_DUu "\n", linesrc, lineno); + printf("BackTrace %08" PRId64 " %s:%" DW_PR_DUu "\n", taosGetSelfPthreadId(), linesrc, lineno); if (line) dwarf_dealloc(dbg, linesrc, DW_DLA_STRING); } void taosPrintBackTrace() { diff --git a/source/os/src/osProc.c b/source/os/src/osProc.c index f92a3b3783..74f1356abf 100644 --- a/source/os/src/osProc.c +++ b/source/os/src/osProc.c @@ -19,6 +19,7 @@ int32_t taosNewProc(char **args) { #ifdef WINDOWS + assert(0); return 0; #else int32_t pid = fork(); @@ -36,6 +37,7 @@ int32_t taosNewProc(char **args) { void taosWaitProc(int32_t pid) { #ifdef WINDOWS + assert(0); #else int32_t status = -1; waitpid(pid, &status, 0); @@ -44,6 +46,7 @@ void taosWaitProc(int32_t pid) { void taosKillProc(int32_t pid) { #ifdef WINDOWS + assert(0); #else kill(pid, SIGINT); #endif @@ -51,6 +54,7 @@ void taosKillProc(int32_t pid) { bool taosProcExist(int32_t pid) { #ifdef WINDOWS + assert(0); return false; #else int32_t p = getpgid(pid); diff --git a/source/os/src/osShm.c b/source/os/src/osShm.c index 1cd51f94a0..cb09e2fb38 100644 --- a/source/os/src/osShm.c +++ b/source/os/src/osShm.c @@ -23,6 +23,7 @@ static int32_t shmids[MAX_SHMIDS] = {0}; static void taosDeleteCreatedShms() { #if defined(WINDOWS) + assert(0); #else for (int32_t i = 0; i < MAX_SHMIDS; ++i) { int32_t shmid = shmids[i] - 1; @@ -35,6 +36,7 @@ static void taosDeleteCreatedShms() { int32_t taosCreateShm(SShm* pShm, int32_t key, int32_t shmsize) { #if defined(WINDOWS) + assert(0); #else pShm->id = -1; @@ -75,6 +77,7 @@ int32_t taosCreateShm(SShm* pShm, int32_t key, int32_t shmsize) { void taosDropShm(SShm* pShm) { #if defined(WINDOWS) + assert(0); #else if (pShm->id >= 0) { if (pShm->ptr != NULL) { @@ -90,6 +93,7 @@ void taosDropShm(SShm* pShm) { int32_t taosAttachShm(SShm* pShm) { #if defined(WINDOWS) + assert(0); #else errno = 0; diff --git a/source/os/src/osSocket.c 
b/source/os/src/osSocket.c index 2410586287..105acb188a 100644 --- a/source/os/src/osSocket.c +++ b/source/os/src/osSocket.c @@ -285,6 +285,7 @@ int32_t taosGetSockOpt(TdSocketPtr pSocket, int32_t level, int32_t optname, void return -1; } #ifdef WINDOWS + assert(0); return 0; #else return getsockopt(pSocket->fd, level, optname, optval, (int *)optlen); @@ -642,6 +643,7 @@ int32_t taosKeepTcpAlive(TdSocketPtr pSocket) { int taosGetLocalIp(const char *eth, char *ip) { #if defined(WINDOWS) // DO NOTHAING + assert(0); return 0; #else int fd; @@ -668,6 +670,7 @@ int taosGetLocalIp(const char *eth, char *ip) { int taosValidIp(uint32_t ip) { #if defined(WINDOWS) // DO NOTHAING + assert(0); return 0; #else int ret = -1; @@ -866,6 +869,7 @@ int64_t taosCopyFds(TdSocketPtr pSrcSocket, TdSocketPtr pDestSocket, int64_t len void taosBlockSIGPIPE() { #ifdef WINDOWS + // assert(0); #else sigset_t signal_mask; sigemptyset(&signal_mask); @@ -976,14 +980,12 @@ void tinet_ntoa(char *ipstr, uint32_t ip) { } void taosIgnSIGPIPE() { -#ifdef WINDOWS -#else signal(SIGPIPE, SIG_IGN); -#endif } void taosSetMaskSIGPIPE() { #ifdef WINDOWS + // assert(0); #else sigset_t signal_mask; sigemptyset(&signal_mask); @@ -1005,6 +1007,7 @@ int32_t taosGetSocketName(TdSocketPtr pSocket, struct sockaddr *destAddr, int *a TdEpollPtr taosCreateEpoll(int32_t size) { EpollFd fd = -1; #ifdef WINDOWS + assert(0); #else fd = epoll_create(size); #endif @@ -1027,6 +1030,7 @@ int32_t taosCtlEpoll(TdEpollPtr pEpoll, int32_t epollOperate, TdSocketPtr pSocke return -1; } #ifdef WINDOWS + assert(0); #else code = epoll_ctl(pEpoll->fd, epollOperate, pSocket->fd, event); #endif @@ -1038,6 +1042,7 @@ int32_t taosWaitEpoll(TdEpollPtr pEpoll, struct epoll_event *event, int32_t maxE return -1; } #ifdef WINDOWS + assert(0); #else code = epoll_wait(pEpoll->fd, event, maxEvents, timeout); #endif diff --git a/source/os/src/osSysinfo.c b/source/os/src/osSysinfo.c index 348424b372..fd6172e04f 100644 --- a/source/os/src/osSysinfo.c +++ b/source/os/src/osSysinfo.c @@ -129,7 +129,21 @@ static void taosGetProcIOnfos() { static int32_t taosGetSysCpuInfo(SysCpuInfo *cpuInfo) { #ifdef WINDOWS + FILETIME pre_idleTime = {0}; + FILETIME pre_kernelTime = {0}; + FILETIME pre_userTime = {0}; + FILETIME idleTime; + FILETIME kernelTime; + FILETIME userTime; + bool res = GetSystemTimes(&idleTime, &kernelTime, &userTime); + if (res) { + cpuInfo->idle = CompareFileTime(&pre_idleTime, &idleTime); + cpuInfo->system = CompareFileTime(&pre_kernelTime, &kernelTime); + cpuInfo->user = CompareFileTime(&pre_userTime, &userTime); + cpuInfo->nice = 0; + } #elif defined(_TD_DARWIN_64) + assert(0); #else TdFilePtr pFile = taosOpenFile(tsSysCpuFile, TD_FILE_READ | TD_FILE_STREAM); if (pFile == NULL) { @@ -155,7 +169,18 @@ static int32_t taosGetSysCpuInfo(SysCpuInfo *cpuInfo) { static int32_t taosGetProcCpuInfo(ProcCpuInfo *cpuInfo) { #ifdef WINDOWS + FILETIME pre_krnlTm = {0}; + FILETIME pre_usrTm = {0}; + FILETIME creatTm, exitTm, krnlTm, usrTm; + + if (GetThreadTimes(GetCurrentThread(), &creatTm, &exitTm, &krnlTm, &usrTm)) { + cpuInfo->stime = CompareFileTime(&pre_krnlTm, &krnlTm); + cpuInfo->utime = CompareFileTime(&pre_usrTm, &usrTm); + cpuInfo->cutime = 0; + cpuInfo->cstime = 0; + } #elif defined(_TD_DARWIN_64) + assert(0); #else TdFilePtr pFile = taosOpenFile(tsProcCpuFile, TD_FILE_READ | TD_FILE_STREAM); if (pFile == NULL) { @@ -219,6 +244,7 @@ void taosGetSystemInfo() { int32_t taosGetEmail(char *email, int32_t maxLen) { #ifdef WINDOWS + // assert(0); #elif 
defined(_TD_DARWIN_64) const char *filepath = "/usr/local/taos/email"; @@ -250,6 +276,7 @@ int32_t taosGetEmail(char *email, int32_t maxLen) { int32_t taosGetOsReleaseName(char *releaseName, int32_t maxLen) { #ifdef WINDOWS + assert(0); #elif defined(_TD_DARWIN_64) char *line = NULL; size_t size = 0; @@ -305,6 +332,7 @@ int32_t taosGetOsReleaseName(char *releaseName, int32_t maxLen) { int32_t taosGetCpuInfo(char *cpuModel, int32_t maxLen, float *numOfCores) { #ifdef WINDOWS + assert(0); #elif defined(_TD_DARWIN_64) char *line = NULL; size_t size = 0; @@ -716,9 +744,7 @@ int32_t taosGetSystemUUID(char *uid, int32_t uidlen) { #ifdef WINDOWS GUID guid; CoCreateGuid(&guid); - - sprintf(uid, "%08X-%04X-%04X-%02X%02X-%02X%02X%02X%02X%02X%02X", guid.Data1, guid.Data2, guid.Data3, guid.Data4[0], - guid.Data4[1], guid.Data4[2], guid.Data4[3], guid.Data4[4], guid.Data4[5], guid.Data4[6], guid.Data4[7]); + memcpy(uid, &guid, uidlen); return 0; #elif defined(_TD_DARWIN_64) @@ -750,6 +776,7 @@ int32_t taosGetSystemUUID(char *uid, int32_t uidlen) { char *taosGetCmdlineByPID(int pid) { #ifdef WINDOWS + assert(0); return ""; #elif defined(_TD_DARWIN_64) static char cmdline[1024]; diff --git a/source/os/src/osSystem.c b/source/os/src/osSystem.c index 62c1747619..ba07b6c3dd 100644 --- a/source/os/src/osSystem.c +++ b/source/os/src/osSystem.c @@ -33,6 +33,7 @@ typedef struct FILE TdCmd; void* taosLoadDll(const char* filename) { #if defined(WINDOWS) + assert(0); return NULL; #elif defined(_TD_DARWIN_64) return NULL; @@ -51,6 +52,7 @@ void* taosLoadDll(const char* filename) { void* taosLoadSym(void* handle, char* name) { #if defined(WINDOWS) + assert(0); return NULL; #elif defined(_TD_DARWIN_64) return NULL; @@ -71,6 +73,7 @@ void* taosLoadSym(void* handle, char* name) { void taosCloseDll(void* handle) { #if defined(WINDOWS) + assert(0); return; #elif defined(_TD_DARWIN_64) return; @@ -121,6 +124,7 @@ int taosSetConsoleEcho(bool on) { void taosSetTerminalMode() { #if defined(WINDOWS) + // assert(0); #else struct termios newtio; @@ -154,7 +158,7 @@ void taosSetTerminalMode() { int32_t taosGetOldTerminalMode() { #if defined(WINDOWS) - + // assert(0); #else /* Make sure stdin is a terminal. 
@@ -750,6 +776,7 @@ int32_t taosGetSystemUUID(char *uid, int32_t uidlen) {

 char *taosGetCmdlineByPID(int pid) {
 #ifdef WINDOWS
+  assert(0);
   return "";
 #elif defined(_TD_DARWIN_64)
   static char cmdline[1024];
diff --git a/source/os/src/osSystem.c b/source/os/src/osSystem.c
index 62c1747619..ba07b6c3dd 100644
--- a/source/os/src/osSystem.c
+++ b/source/os/src/osSystem.c
@@ -33,6 +33,7 @@ typedef struct FILE TdCmd;

 void* taosLoadDll(const char* filename) {
 #if defined(WINDOWS)
+  assert(0);
   return NULL;
 #elif defined(_TD_DARWIN_64)
   return NULL;
@@ -51,6 +52,7 @@ void* taosLoadDll(const char* filename) {

 void* taosLoadSym(void* handle, char* name) {
 #if defined(WINDOWS)
+  assert(0);
   return NULL;
 #elif defined(_TD_DARWIN_64)
   return NULL;
@@ -71,6 +73,7 @@ void* taosLoadSym(void* handle, char* name) {

 void taosCloseDll(void* handle) {
 #if defined(WINDOWS)
+  assert(0);
   return;
 #elif defined(_TD_DARWIN_64)
   return;
@@ -121,6 +124,7 @@ int taosSetConsoleEcho(bool on) {

 void taosSetTerminalMode() {
 #if defined(WINDOWS)
+  // assert(0);
 #else
   struct termios newtio;

@@ -154,7 +158,7 @@ void taosSetTerminalMode() {

 int32_t taosGetOldTerminalMode() {
 #if defined(WINDOWS)
-
+  // assert(0);
 #else
   /* Make sure stdin is a terminal. */
   if (!isatty(STDIN_FILENO)) {
@@ -172,7 +176,7 @@ int32_t taosGetOldTerminalMode() {

 void taosResetTerminalMode() {
 #if defined(WINDOWS)
-
+  // assert(0);
 #else
   if (tcsetattr(0, TCSANOW, &oldtio) != 0) {
     fprintf(stderr, "Fail to reset the terminal properties!\n");
diff --git a/source/util/src/tconfig.c b/source/util/src/tconfig.c
index 8f48e0585e..11d7d9831a 100644
--- a/source/util/src/tconfig.c
+++ b/source/util/src/tconfig.c
@@ -640,13 +640,14 @@ int32_t cfgLoadFromEnvVar(SConfig *pConfig) {
 }

 int32_t cfgLoadFromEnvCmd(SConfig *pConfig, const char **envCmd) {
-  char *buf, *name, *value, *value2, *value3;
+  char buf[1024], *name, *value, *value2, *value3;
   int32_t olen, vlen, vlen2, vlen3;
   int32_t index = 0;
   if (envCmd == NULL) return 0;
   while (envCmd[index]!=NULL) {
-    buf = taosMemoryMalloc(strlen(envCmd[index]));
-    taosEnvToCfg(envCmd[index], buf);
+    strncpy(buf, envCmd[index], sizeof(buf)-1);
+    buf[sizeof(buf)-1] = 0;
+    taosEnvToCfg(buf, buf);
     index++;

     name = value = value2 = value3 = NULL;
@@ -671,8 +672,6 @@ int32_t cfgLoadFromEnvCmd(SConfig *pConfig, const char **envCmd) {
     if (value2 != NULL && value3 != NULL && value2[0] != 0 && value3[0] != 0 && strcasecmp(name, "dataDir") == 0) {
       cfgSetTfsItem(pConfig, name, value, value2, value3, CFG_STYPE_ENV_CMD);
     }
-
-    taosMemoryFree(buf);
   }

   uInfo("load from env cmd cfg success");
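The cfgLoadFromEnvCmd change above replaces the per-entry heap allocation with a fixed 1024-byte stack buffer that taosEnvToCfg then rewrites in place. The bounded-copy idiom it relies on is sketched below (copyEnvBounded is a hypothetical name; entries longer than the buffer are silently truncated):

#include <stdio.h>
#include <string.h>

// Copy an environment-style "NAME=value" entry into a fixed buffer,
// guaranteeing NUL termination even when the source is longer than the buffer.
static void copyEnvBounded(char *dst, size_t dstSize, const char *src) {
  strncpy(dst, src, dstSize - 1);
  dst[dstSize - 1] = 0;
}

int main() {
  char buf[1024];
  copyEnvBounded(buf, sizeof(buf), "TAOS_FQDN=localhost");
  printf("%s\n", buf);  // buf can now be rewritten in place by later parsing steps
  return 0;
}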
diff --git a/tools/shell/src/shellCommand.c b/tools/shell/src/shellCommand.c
index f4f7c893c4..ef71f3fce6 100644
--- a/tools/shell/src/shellCommand.c
+++ b/tools/shell/src/shellCommand.c
@@ -53,79 +53,6 @@ static void shellResetCommand(SShellCmd *cmd, const char s[]);
 static void shellClearScreen(int32_t ecmd_pos, int32_t cursor_pos);
 static void shellShowOnScreen(SShellCmd *cmd);

-#if defined(_TD_WINDOWS_64) || defined(_TD_WINDOWS_32)
-// static void shellPrintContinuePrompt() { printf("%s", shell.args.promptContinue); }
-// static void shellPrintPrompt() { printf("%s", shell.args.promptHeader); }
-
-void shellUpdateBuffer(SShellCmd *cmd) {
-  if (shellRegexMatch(cmd->buffer, "(\\s+$)|(^$)", REG_EXTENDED)) strcat(cmd->command, " ");
-  strcat(cmd->buffer, cmd->command);
-
-  memset(cmd->command, 0, SHELL_MAX_COMMAND_SIZE);
-  cmd->cursorOffset = 0;
-}
-
-int shellIsReadyGo(SShellCmd *cmd) {
-  char *total = taosMemoryMalloc(SHELL_MAX_COMMAND_SIZE);
-  memset(total, 0, SHELL_MAX_COMMAND_SIZE);
-  sprintf(total, "%s%s", cmd->buffer, cmd->command);
-
-  char *reg_str =
-      "(^.*;\\s*$)|(^\\s*$)|(^\\s*exit\\s*$)|(^\\s*q\\s*$)|(^\\s*quit\\s*$)|(^"
-      "\\s*clear\\s*$)";
-  if (shellRegexMatch(total, reg_str, REG_EXTENDED | REG_ICASE)) {
-    taosMemoryFree(total);
-    return 1;
-  }
-
-  taosMemoryFree(total);
-  return 0;
-}
-
-void shellInsertChar(SShellCmd *cmd, char c) {
-  if (cmd->cursorOffset >= SHELL_MAX_COMMAND_SIZE) {
-    fprintf(stdout, "sql is larger than %d bytes", SHELL_MAX_COMMAND_SIZE);
-    return;
-  }
-  cmd->command[cmd->cursorOffset++] = c;
-}
-
-int32_t shellReadCommand(char command[]) {
-  SShellCmd cmd;
-  memset(&cmd, 0, sizeof(cmd));
-  cmd.buffer = (char *)taosMemoryCalloc(1, SHELL_MAX_COMMAND_SIZE);
-  cmd.command = (char *)taosMemoryCalloc(1, SHELL_MAX_COMMAND_SIZE);
-
-  // Read input.
-  char c;
-  while (1) {
-    c = getchar();
-
-    switch (c) {
-      case '\n':
-      case '\r':
-        if (shellIsReadyGo(&cmd)) {
-          sprintf(command, "%s%s", cmd.buffer, cmd.command);
-          taosMemoryFree(cmd.buffer);
-          cmd.buffer = NULL;
-          taosMemoryFree(cmd.command);
-          cmd.command = NULL;
-          return 0;
-        } else {
-          // shellPrintContinuePrompt();
-          shellUpdateBuffer(&cmd);
-        }
-        break;
-      default:
-        shellInsertChar(&cmd, c);
-    }
-  }
-
-  return 0;
-}
-
-#else
-
 int32_t shellCountPrefixOnes(uint8_t c) {
   uint8_t mask = 127;
   mask = ~mask;
@@ -181,7 +108,10 @@ void shellInsertChar(SShellCmd *cmd, char *c, int32_t size) {
   cmd->cursorOffset += size;
   cmd->screenOffset += taosWcharWidth(wc);
   cmd->endOffset += taosWcharWidth(wc);
+#ifdef WINDOWS
+#else
   shellShowOnScreen(cmd);
+#endif
 }

 void shellBackspaceChar(SShellCmd *cmd) {
@@ -371,17 +301,33 @@ void shellResetCommand(SShellCmd *cmd, const char s[]) {
   shellShowOnScreen(cmd);
 }

-void shellClearScreen(int32_t ecmd_pos, int32_t cursor_pos) {
+
+void shellGetScreenSize(int32_t *ws_col, int32_t *ws_row) {
+#ifdef WINDOWS
+  CONSOLE_SCREEN_BUFFER_INFO csbi;
+  GetConsoleScreenBufferInfo(GetStdHandle(STD_OUTPUT_HANDLE), &csbi);
+  if (ws_col != NULL) *ws_col = csbi.srWindow.Right - csbi.srWindow.Left + 1;
+  if (ws_row != NULL) *ws_row = csbi.srWindow.Bottom - csbi.srWindow.Top + 1;
+#else
   struct winsize w;
   if (ioctl(0, TIOCGWINSZ, &w) < 0 || w.ws_col == 0 || w.ws_row == 0) {
     // fprintf(stderr, "No stream device, and use default value(col 120, row 30)\n");
-    w.ws_col = 120;
-    w.ws_row = 30;
+    if (ws_col != NULL) *ws_col = 120;
+    if (ws_row != NULL) *ws_row = 30;
+  } else {
+    if (ws_col != NULL) *ws_col = w.ws_col;
+    if (ws_row != NULL) *ws_row = w.ws_row;
   }
+#endif
+}

-  int32_t cursor_x = cursor_pos / w.ws_col;
-  int32_t cursor_y = cursor_pos % w.ws_col;
-  int32_t command_x = ecmd_pos / w.ws_col;
+void shellClearScreen(int32_t ecmd_pos, int32_t cursor_pos) {
+  int32_t ws_col;
+  shellGetScreenSize(&ws_col, NULL);
+
+  int32_t cursor_x = cursor_pos / ws_col;
+  int32_t cursor_y = cursor_pos % ws_col;
+  int32_t command_x = ecmd_pos / ws_col;
   shellPositionCursor(cursor_y, LEFT);
   shellPositionCursor(command_x - cursor_x, DOWN);
   fprintf(stdout, "\033[2K");
@@ -393,12 +339,8 @@ void shellClearScreen(int32_t ecmd_pos, int32_t cursor_pos) {
 }

 void shellShowOnScreen(SShellCmd *cmd) {
-  struct winsize w;
-  if (ioctl(0, TIOCGWINSZ, &w) < 0 || w.ws_col == 0 || w.ws_row == 0) {
-    // fprintf(stderr, "No stream device\n");
-    w.ws_col = 120;
-    w.ws_row = 30;
-  }
+  int32_t ws_col;
+  shellGetScreenSize(&ws_col, NULL);

   TdWchar wc;
   int32_t size = 0;
@@ -411,8 +353,7 @@ void shellShowOnScreen(SShellCmd *cmd) {
   } else {
     sprintf(total_string, "%s%s", shell.info.promptContinue, cmd->command);
   }
-
-  int32_t remain_column = w.ws_col;
+  int32_t remain_column = ws_col;
   for (char *str = total_string; size < cmd->commandSize + PSIZE;) {
     int32_t ret = taosMbToWchar(&wc, str, MB_CUR_MAX);
     if (ret < 0) break;
@@ -425,10 +366,10 @@
     } else {
       if (remain_column == width) {
         printf("%lc\n\r", wc);
-        remain_column = w.ws_col;
+        remain_column = ws_col;
       } else {
         printf("\n\r%lc", wc);
-        remain_column = w.ws_col - width;
+        remain_column = ws_col - width;
       }
     }
@@ -436,17 +377,16 @@
   }
   taosMemoryFree(total_string);
-
   // Position the cursor
   int32_t cursor_pos = cmd->screenOffset + PSIZE;
   int32_t ecmd_pos = cmd->endOffset + PSIZE;
-  int32_t cursor_x = cursor_pos / w.ws_col;
-  int32_t cursor_y = cursor_pos % w.ws_col;
-  // int32_t cursor_y = cursor % w.ws_col;
-  int32_t command_x = ecmd_pos / w.ws_col;
-  int32_t command_y = ecmd_pos % w.ws_col;
-  // int32_t command_y = (command.size() + PSIZE) % w.ws_col;
+  int32_t cursor_x = cursor_pos / ws_col;
+  int32_t cursor_y = cursor_pos % ws_col;
+  // int32_t cursor_y = cursor % ws_col;
+  int32_t command_x = ecmd_pos / ws_col;
+  int32_t command_y = ecmd_pos % ws_col;
+  // int32_t command_y = (command.size() + PSIZE) % ws_col;
   shellPositionCursor(command_y, LEFT);
   shellPositionCursor(command_x, UP);
   shellPositionCursor(cursor_x, DOWN);
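shellGetScreenSize above folds the Windows console query and the POSIX TIOCGWINSZ ioctl behind one call so shellClearScreen and shellShowOnScreen can share the column count. A standalone sketch of the same pattern, including the row/column arithmetic the shell uses for cursor positioning (getTerminalColumns is a hypothetical helper and tests _WIN32 rather than this tree's WINDOWS macro):

#include <stdio.h>
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/ioctl.h>
#include <unistd.h>
#endif

// Return the terminal width in columns, falling back to 120 when no console is attached.
static int getTerminalColumns(void) {
#ifdef _WIN32
  CONSOLE_SCREEN_BUFFER_INFO csbi;
  if (!GetConsoleScreenBufferInfo(GetStdHandle(STD_OUTPUT_HANDLE), &csbi)) return 120;
  return csbi.srWindow.Right - csbi.srWindow.Left + 1;
#else
  struct winsize w;
  if (ioctl(STDOUT_FILENO, TIOCGWINSZ, &w) < 0 || w.ws_col == 0) return 120;
  return w.ws_col;
#endif
}

int main() {
  int cols = getTerminalColumns();
  int cursor_pos = 200;  // linear offset of the cursor within prompt + command text
  printf("width=%d, cursor row=%d, cursor col=%d\n", cols, cursor_pos / cols, cursor_pos % cols);
  return 0;
}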
@@ -490,7 +430,11 @@ int32_t shellReadCommand(char *command) {
       case 3:
         printf("\n");
         shellResetCommand(&cmd, "");
-        kill(0, SIGINT);
+        #ifdef WINDOWS
+          raise(SIGINT);
+        #else
+          kill(0, SIGINT);
+        #endif
         break;
       case 4:  // EOF or Ctrl+D
         printf("\n");
@@ -503,7 +447,10 @@ int32_t shellReadCommand(char *command) {
         break;
       case '\n':
       case '\r':
+        #ifdef WINDOWS
+        #else
         printf("\n");
+        #endif
         if (shellIsReadyGo(&cmd)) {
           sprintf(command, "%s%s", cmd.buffer, cmd.command);
           taosMemoryFreeClear(cmd.buffer);
@@ -608,5 +555,3 @@ int32_t shellReadCommand(char *command) {

   return 0;
 }
-
-#endif
\ No newline at end of file
diff --git a/tools/shell/src/shellEngine.c b/tools/shell/src/shellEngine.c
index 8b126cf90e..1e832c0c46 100644
--- a/tools/shell/src/shellEngine.c
+++ b/tools/shell/src/shellEngine.c
@@ -307,7 +307,7 @@ void shellDumpFieldToFile(TdFilePtr pFile, const char *val, TAOS_FIELD *field, i
       break;
     case TSDB_DATA_TYPE_DOUBLE:
       n = snprintf(buf, TSDB_MAX_BYTES_PER_ROW, "%*.9f", length, GET_DOUBLE_VAL(val));
-      if (n > MAX(25, length)) {
+      if (n > TMAX(25, length)) {
        taosFprintfFile(pFile, "%*.15e", length, GET_DOUBLE_VAL(val));
      } else {
        taosFprintfFile(pFile, "%s", buf);
@@ -488,7 +488,7 @@ void shellPrintField(const char *val, TAOS_FIELD *field, int32_t width, int32_t
       break;
     case TSDB_DATA_TYPE_DOUBLE:
       n = snprintf(buf, TSDB_MAX_BYTES_PER_ROW, "%*.9f", width, GET_DOUBLE_VAL(val));
-      if (n > MAX(25, width)) {
+      if (n > TMAX(25, width)) {
        printf("%*.15e", width, GET_DOUBLE_VAL(val));
      } else {
        printf("%s", buf);