diff --git a/source/dnode/vnode/src/tsdb/tsdbCacheRead.c b/source/dnode/vnode/src/tsdb/tsdbCacheRead.c index e9e848f1b0..e4a91b73d5 100644 --- a/source/dnode/vnode/src/tsdb/tsdbCacheRead.c +++ b/source/dnode/vnode/src/tsdb/tsdbCacheRead.c @@ -28,13 +28,16 @@ static int32_t saveOneRow(SArray* pRow, SSDataBlock* pBlock, SCacheRowsReader* p // bool allNullRow = true; if (HASTYPE(pReader->type, CACHESCAN_RETRIEVE_LAST)) { + uint64_t ts = 0; + SFirstLastRes* p; for (int32_t i = 0; i < pReader->numOfCols; ++i) { SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, dstSlotIds[i]); - SFirstLastRes* p = (SFirstLastRes*)varDataVal(pRes[i]); int32_t slotId = slotIds[i]; SLastCol* pColVal = (SLastCol*)taosArrayGet(pRow, i); + p = (SFirstLastRes*)varDataVal(pRes[i]); p->ts = pColVal->ts; + ts = p->ts; p->isNull = !COL_VAL_IS_VALUE(&pColVal->colVal); // allNullRow = p->isNull & allNullRow; @@ -55,6 +58,20 @@ static int32_t saveOneRow(SArray* pRow, SSDataBlock* pBlock, SCacheRowsReader* p varDataSetLen(pRes[i], pColInfoData->info.bytes - VARSTR_HEADER_SIZE); colDataSetVal(pColInfoData, numOfRows, (const char*)pRes[i], false); } + for (int32_t idx = 0; idx < taosArrayGetSize(pBlock->pDataBlock); ++idx) { + SColumnInfoData* pCol = taosArrayGet(pBlock->pDataBlock, idx); + if (pCol->info.type == TSDB_DATA_TYPE_TIMESTAMP) { + colDataSetVal(pCol, numOfRows, (const char*)&ts, false); + continue; + } + if (pReader->numOfCols == 1 && dstSlotIds[0] != idx) { + if (!p->isNull) { + colDataSetVal(pCol, numOfRows, p->buf, false); + } else { + colDataSetNULL(pCol, numOfRows); + } + } + } // pBlock->info.rows += allNullRow ? 
0 : 1; ++pBlock->info.rows; diff --git a/source/libs/executor/src/cachescanoperator.c b/source/libs/executor/src/cachescanoperator.c index 29d098494b..a7b4fe02f6 100644 --- a/source/libs/executor/src/cachescanoperator.c +++ b/source/libs/executor/src/cachescanoperator.c @@ -191,9 +191,9 @@ SSDataBlock* doScanCache(SOperatorInfo* pOperator) { SSDataBlock* pRes = pInfo->pRes; if (pInfo->indexOfBufferedRes < pInfo->pBufferredRes->info.rows) { - for (int32_t i = 0; i < taosArrayGetSize(pInfo->matchInfo.pList); ++i) { - SColMatchItem* pMatchInfo = taosArrayGet(pInfo->matchInfo.pList, i); - int32_t slotId = pMatchInfo->dstSlotId; + for (int32_t i = 0; i < taosArrayGetSize(pInfo->pBufferredRes->pDataBlock); ++i) { + SColumnInfoData* pCol = taosArrayGet(pInfo->pBufferredRes->pDataBlock, i); + int32_t slotId = pCol->info.slotId; SColumnInfoData* pSrc = taosArrayGet(pInfo->pBufferredRes->pDataBlock, slotId); SColumnInfoData* pDst = taosArrayGet(pRes->pDataBlock, slotId); @@ -201,8 +201,10 @@ SSDataBlock* doScanCache(SOperatorInfo* pOperator) { if (colDataIsNull_s(pSrc, pInfo->indexOfBufferedRes)) { colDataSetNULL(pDst, 0); } else { - char* p = colDataGetData(pSrc, pInfo->indexOfBufferedRes); - colDataSetVal(pDst, 0, p, false); + if (pSrc->pData) { + char* p = colDataGetData(pSrc, pInfo->indexOfBufferedRes); + colDataSetVal(pDst, 0, p, false); + } } } diff --git a/source/libs/function/src/builtins.c b/source/libs/function/src/builtins.c index 84aff9fa88..74b7218591 100644 --- a/source/libs/function/src/builtins.c +++ b/source/libs/function/src/builtins.c @@ -2772,7 +2772,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { { .name = "_cache_last", .type = FUNCTION_TYPE_CACHE_LAST, - .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_MULTI_RES_FUNC | FUNC_MGT_FORBID_STREAM_FUNC | FUNC_MGT_FORBID_SYSTABLE_FUNC, + .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_MULTI_RES_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_FORBID_STREAM_FUNC | FUNC_MGT_FORBID_SYSTABLE_FUNC, .translateFunc = 
translateFirstLast, .getEnvFunc = getFirstLastFuncEnv, .initFunc = functionSetup, diff --git a/source/libs/planner/src/planOptimizer.c b/source/libs/planner/src/planOptimizer.c index 8d4c042960..0b3a432bec 100644 --- a/source/libs/planner/src/planOptimizer.c +++ b/source/libs/planner/src/planOptimizer.c @@ -2478,6 +2478,27 @@ static bool hasSuitableCache(int8_t cacheLastMode, bool hasLastRow, bool hasLast return false; } +/// @brief check if we can apply last row scan optimization +/// @param lastColNum how many distinct last col specified +/// @param lastColId only used when lastColNum equals 1, the col id of the only one last col +/// @param selectNonPKColNum num of normal cols +/// @param selectNonPKColId only used when selectNonPKColNum equals 1, the col id of the only one select col +static bool lastRowScanOptCheckColNum(int32_t lastColNum, col_id_t lastColId, + int32_t selectNonPKColNum, col_id_t selectNonPKColId) { + // multi select non pk col + last func: select c1, c2, last(c1) + if (selectNonPKColNum > 1 && lastColNum > 0) return false; + + if (selectNonPKColNum == 1) { + // select last(c1), last(c2), c1 ... + // which is not possible currently + if (lastColNum > 1) return false; + + // select last(c1), c2 ... 
+ if (lastColNum == 1 && lastColId != selectNonPKColId) return false; + } + return true; +} + static bool lastRowScanOptMayBeOptimized(SLogicNode* pNode) { if (QUERY_NODE_LOGIC_PLAN_AGG != nodeType(pNode) || 1 != LIST_LENGTH(pNode->pChildren) || QUERY_NODE_LOGIC_PLAN_SCAN != nodeType(nodesListGetNode(pNode->pChildren, 0))) { @@ -2493,9 +2514,10 @@ static bool lastRowScanOptMayBeOptimized(SLogicNode* pNode) { return false; } - bool hasLastFunc = false; - bool hasSelectFunc = false; - SNode* pFunc = NULL; + bool hasNonPKSelectFunc = false; + SNode* pFunc = NULL; + int32_t lastColNum = 0, selectNonPKColNum = 0; + col_id_t lastColId = -1, selectNonPKColId = -1; FOREACH(pFunc, ((SAggLogicNode*)pNode)->pAggFuncs) { SFunctionNode* pAggFunc = (SFunctionNode*)pFunc; if (FUNCTION_TYPE_LAST == pAggFunc->funcType) { @@ -2505,16 +2527,33 @@ static bool lastRowScanOptMayBeOptimized(SLogicNode* pNode) { if (pCol->colType != COLUMN_TYPE_COLUMN) { return false; } + if (lastColId != pCol->colId) { + lastColId = pCol->colId; + lastColNum++; + } } - if (hasSelectFunc || QUERY_NODE_VALUE == nodeType(nodesListGetNode(pAggFunc->pParameterList, 0))) { + if (QUERY_NODE_VALUE == nodeType(nodesListGetNode(pAggFunc->pParameterList, 0))) { return false; } - hasLastFunc = true; + if (!lastRowScanOptCheckColNum(lastColNum, lastColId, selectNonPKColNum, selectNonPKColId)) + return false; } else if (FUNCTION_TYPE_SELECT_VALUE == pAggFunc->funcType) { - if (hasLastFunc) { + SNode* pParam = nodesListGetNode(pAggFunc->pParameterList, 0); + if (QUERY_NODE_COLUMN == nodeType(pParam)) { + SColumnNode* pCol = (SColumnNode*)pParam; + if (PRIMARYKEY_TIMESTAMP_COL_ID != pCol->colId) { + if (selectNonPKColId != pCol->colId) { + selectNonPKColId = pCol->colId; + selectNonPKColNum++; + } + } else { + continue; + } + } else if (lastColNum > 0) { return false; } - hasSelectFunc = true; + if (!lastRowScanOptCheckColNum(lastColNum, lastColId, selectNonPKColNum, selectNonPKColId)) + return false; } else if 
(FUNCTION_TYPE_GROUP_KEY == pAggFunc->funcType) { if (!lastRowScanOptLastParaIsTag(nodesListGetNode(pAggFunc->pParameterList, 0))) { return false; @@ -2581,6 +2620,9 @@ static int32_t lastRowScanOptimize(SOptimizeContext* pCxt, SLogicSubplan* pLogic SLastRowScanOptSetColDataTypeCxt cxt = {.doAgg = true, .pLastCols = NULL}; SNode* pNode = NULL; + SColumnNode* pPKTsCol = NULL; + SColumnNode* pNonPKCol = NULL; + FOREACH(pNode, pAgg->pAggFuncs) { SFunctionNode* pFunc = (SFunctionNode*)pNode; int32_t funcType = pFunc->funcType; @@ -2597,6 +2639,16 @@ nodesWalkExpr(nodesListGetNode(pFunc->pParameterList, 0), lastRowScanOptSetColDataType, &cxt); nodesListErase(pFunc->pParameterList, nodesListGetCell(pFunc->pParameterList, 1)); } + } else if (FUNCTION_TYPE_SELECT_VALUE == funcType) { + pNode = nodesListGetNode(pFunc->pParameterList, 0); + if (nodeType(pNode) == QUERY_NODE_COLUMN) { + SColumnNode* pCol = (SColumnNode*)pNode; + if (pCol->colId == PRIMARYKEY_TIMESTAMP_COL_ID) { + pPKTsCol = pCol; + } else { + pNonPKCol = pCol; + } + } } } @@ -2608,6 +2660,16 @@ lastRowScanOptSetLastTargets(pScan->pScanCols, cxt.pLastCols, true); nodesWalkExprs(pScan->pScanPseudoCols, lastRowScanOptSetColDataType, &cxt); lastRowScanOptSetLastTargets(pScan->node.pTargets, cxt.pLastCols, false); + if (pPKTsCol && pScan->node.pTargets->length == 1) { + // when select last(ts),ts from ..., we add another ts to targets + sprintf(pPKTsCol->colName, "#sel_val.%p", pPKTsCol); + nodesListAppend(pScan->node.pTargets, nodesCloneNode((SNode*)pPKTsCol)); + } + if (pNonPKCol && cxt.pLastCols->length == 1 && nodesEqualNode((SNode*)pNonPKCol, nodesListGetNode(cxt.pLastCols, 0))) { + // when select last(c1), c1 from ..., we add c1 to targets + sprintf(pNonPKCol->colName, "#sel_val.%p", pNonPKCol); + nodesListAppend(pScan->node.pTargets, 
nodesCloneNode((SNode*)pNonPKCol)); + } nodesClearList(cxt.pLastCols); } pAgg->hasLastRow = false; diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task index 15cb1f034f..2795c22a07 100644 --- a/tests/parallel_test/cases.task +++ b/tests/parallel_test/cases.task @@ -69,6 +69,10 @@ ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/func_to_char_timestamp.py -Q 2 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/func_to_char_timestamp.py -Q 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/func_to_char_timestamp.py -Q 4 +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/last_cache_scan.py +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/last_cache_scan.py -Q 2 +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/last_cache_scan.py -Q 3 +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/last_cache_scan.py -Q 4 ,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmqShow.py ,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmqDropStb.py ,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/subscribeStb0.py diff --git a/tests/system-test/2-query/last_cache_scan.py b/tests/system-test/2-query/last_cache_scan.py new file mode 100644 index 0000000000..fb5c8bcee2 --- /dev/null +++ b/tests/system-test/2-query/last_cache_scan.py @@ -0,0 +1,279 @@ +import taos +import sys +import time +import socket +import os +import threading +import math +from datetime import datetime + +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * +from util.common import * +# from tmqCommon import * + +COMPARE_DATA = 0 +COMPARE_LEN = 1 + +class TDTestCase: + def __init__(self): + self.vgroups = 4 + self.ctbNum = 10 + self.rowsPerTbl = 10000 + self.duraion = '1h' + + def init(self, conn, logSql, replicaVar=1): + self.replicaVar = int(replicaVar) + tdLog.debug(f"start to excute {__file__}") + tdSql.init(conn.cursor(), False) + + def create_database(self,tsql, 
dbName,dropFlag=1,vgroups=2,replica=1, duration:str='1d'): + if dropFlag == 1: + tsql.execute("drop database if exists %s"%(dbName)) + + tsql.execute("create database if not exists %s vgroups %d replica %d duration %s CACHEMODEL 'both'"%(dbName, vgroups, replica, duration)) + tdLog.debug("complete to create database %s"%(dbName)) + return + + def create_stable(self,tsql, paraDict): + colString = tdCom.gen_column_type_str(colname_prefix=paraDict["colPrefix"], column_elm_list=paraDict["colSchema"]) + tagString = tdCom.gen_tag_type_str(tagname_prefix=paraDict["tagPrefix"], tag_elm_list=paraDict["tagSchema"]) + sqlString = f"create table if not exists %s.%s (%s) tags (%s)"%(paraDict["dbName"], paraDict["stbName"], colString, tagString) + tdLog.debug("%s"%(sqlString)) + tsql.execute(sqlString) + return + + def create_ctable(self,tsql=None, dbName='dbx',stbName='stb',ctbPrefix='ctb',ctbNum=1,ctbStartIdx=0): + for i in range(ctbNum): + sqlString = "create table %s.%s%d using %s.%s tags(%d, 'tb%d', 'tb%d', %d, %d, %d)" % \ + (dbName,ctbPrefix,i+ctbStartIdx,dbName,stbName,(i+ctbStartIdx) % 5,i+ctbStartIdx,i+ctbStartIdx,i+ctbStartIdx,i+ctbStartIdx,i+ctbStartIdx) + tsql.execute(sqlString) + + tdLog.debug("complete to create %d child tables by %s.%s" %(ctbNum, dbName, stbName)) + return + + def insert_data(self,tsql,dbName,ctbPrefix,ctbNum,rowsPerTbl,batchNum,startTs,tsStep): + tdLog.debug("start to insert data ............") + tsql.execute("use %s" %dbName) + pre_insert = "insert into " + sql = pre_insert + + for i in range(ctbNum): + rowsBatched = 0 + sql += " %s%d values "%(ctbPrefix,i) + for j in range(rowsPerTbl): + if (i < ctbNum/2): + sql += "(%d, %d, %d, %d,%d,%d,%d,true,'binary%d', 'nchar%d') "%(startTs + j*tsStep, j%1000, j%500, j%1000, j%5000, j%5400, j%128, j%10000, j%1000) + else: + sql += "(%d, %d, NULL, %d,NULL,%d,%d,true,'binary%d', 'nchar%d') "%(startTs + j*tsStep, j%1000, j%500, j%1000, j%128, j%10000, j%1000) + rowsBatched += 1 + if ((rowsBatched == 
batchNum) or (j == rowsPerTbl - 1)): + tsql.execute(sql) + rowsBatched = 0 + if j < rowsPerTbl - 1: + sql = "insert into %s%d values " %(ctbPrefix,i) + else: + sql = "insert into " + if sql != pre_insert: + tsql.execute(sql) + tdLog.debug("insert data ............ [OK]") + return + + def prepareTestEnv(self): + tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ") + paraDict = {'dbName': 'test', + 'dropFlag': 1, + 'vgroups': 2, + 'stbName': 'meters', + 'colPrefix': 'c', + 'tagPrefix': 't', + 'colSchema': [{'type': 'INT', 'count':1}, + {'type': 'BIGINT', 'count':1}, + {'type': 'FLOAT', 'count':1}, + {'type': 'DOUBLE', 'count':1}, + {'type': 'smallint', 'count':1}, + {'type': 'tinyint', 'count':1}, + {'type': 'bool', 'count':1}, + {'type': 'binary', 'len':10, 'count':1}, + {'type': 'nchar', 'len':10, 'count':1}], + 'tagSchema': [{'type': 'INT', 'count':1},{'type': 'nchar', 'len':20, 'count':1},{'type': 'binary', 'len':20, 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'smallint', 'count':1},{'type': 'DOUBLE', 'count':1}], + 'ctbPrefix': 't', + 'ctbStartIdx': 0, + 'ctbNum': 100, + 'rowsPerTbl': 10000, + 'batchNum': 3000, + 'startTs': 1537146000000, + 'tsStep': 600000} + + paraDict['vgroups'] = self.vgroups + paraDict['ctbNum'] = self.ctbNum + paraDict['rowsPerTbl'] = self.rowsPerTbl + + tdLog.info("create database") + self.create_database(tsql=tdSql, dbName=paraDict["dbName"], dropFlag=paraDict["dropFlag"], vgroups=paraDict["vgroups"], replica=self.replicaVar, duration=self.duraion) + + tdLog.info("create stb") + self.create_stable(tsql=tdSql, paraDict=paraDict) + + tdLog.info("create child tables") + self.create_ctable(tsql=tdSql, dbName=paraDict["dbName"], \ + stbName=paraDict["stbName"],ctbPrefix=paraDict["ctbPrefix"],\ + ctbNum=paraDict["ctbNum"],ctbStartIdx=paraDict["ctbStartIdx"]) + self.insert_data(tsql=tdSql, dbName=paraDict["dbName"],\ + ctbPrefix=paraDict["ctbPrefix"],ctbNum=paraDict["ctbNum"],\ + 
rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],\ + startTs=paraDict["startTs"],tsStep=paraDict["tsStep"]) + return + + def check_explain_res_has_row(self, plan_str_expect: str, rows, sql): + plan_found = False + for row in rows: + if str(row).find(plan_str_expect) >= 0: + tdLog.debug("plan: [%s] found in: [%s]" % (plan_str_expect, str(row))) + plan_found = True + break + if not plan_found: + tdLog.exit("plan: %s not found in res: [%s] in sql: %s" % (plan_str_expect, str(rows), sql)) + + def check_explain_res_no_row(self, plan_str_not_expect: str, res, sql): + for row in res: + if str(row).find(plan_str_not_expect) >= 0: + tdLog.exit('plan: [%s] found in: [%s] for sql: %s' % (plan_str_not_expect, str(row), sql)) + + def explain_sql(self, sql: str): + sql = "explain verbose true " + sql + tdSql.query(sql, queryTimes=1) + return tdSql.queryResult + + def explain_and_check_res(self, sqls, hasLastRowScanRes): + for sql, has_last in zip(sqls, hasLastRowScanRes): + res = self.explain_sql(sql) + if has_last == 1: + self.check_explain_res_has_row("Last Row Scan", res, sql) + else: + self.check_explain_res_no_row("Last Row Scan", res, sql) + + def format_sqls(self, sql_template, select_items): + sqls = [] + for item in select_items: + sqls.append(sql_template % item) + return sqls + + def query_check_one(self, sql, res_expect): + if res_expect is not None: + tdSql.query(sql, queryTimes=1) + tdSql.checkRows(1) + for i in range(0, tdSql.queryCols): + tdSql.checkData(0, i, res_expect[i]) + tdLog.info('%s check res col: %d succeed value: %s' % (sql, i, str(res_expect[i]))) + + def query_check_sqls(self, sqls, has_last_row_scan_res, res_expect): + for sql, has_last, res in zip(sqls, has_last_row_scan_res, res_expect): + if has_last == 1: + self.query_check_one(sql, res) + + def test_last_cache_scan(self): + sql_template = 'select %s from meters' + select_items = [ + "last(ts), ts", "last(ts), c1", "last(ts), c2", "last(ts), c3",\ + "last(ts), c4", "last(ts), 
tbname", "last(ts), t1", "last(ts), ts, ts"] + has_last_row_scan_res = [1, 0, 0, 0, 0, 0, 0, 1] + res_expect = [ + ["2018-11-25 19:30:00.000", "2018-11-25 19:30:00.000"], + None, None, None, None, None, None, + ["2018-11-25 19:30:00.000", "2018-11-25 19:30:00.000", "2018-11-25 19:30:00.000"] + ] + sqls = self.format_sqls(sql_template, select_items) + self.explain_and_check_res(sqls, has_last_row_scan_res) + self.query_check_sqls(sqls, has_last_row_scan_res, res_expect) + + select_items = ["last(c1),ts", "last(c1), c1", "last(c1), c2", "last(c1), c3",\ + "last(c1), c4", "last(c1), tbname", "last(c1), t1", "last(c1), ts, ts", "last(c1), c1, c1"] + has_last_row_scan_res = [1, 1, 0, 0, 0, 0, 0, 1, 1] + res_expect = [ + [999, "2018-11-25 19:30:00.000"], + [999, 999], None, None, None, None, None, + [999, "2018-11-25 19:30:00.000", "2018-11-25 19:30:00.000"], + [999,999,999] + ] + sqls = self.format_sqls(sql_template, select_items) + self.explain_and_check_res(sqls, has_last_row_scan_res) + self.query_check_sqls(sqls, has_last_row_scan_res, res_expect) + + select_items = ["last(c4),ts", "last(c4), c1", "last(c4), c2", "last(c4), c3",\ + "last(c4), c4", "last(c4), tbname", "last(c4), t1"] + has_last_row_scan_res = [1, 0, 0, 0, 1, 0, 0] + res_expect = [ + [4999.000000000000000, "2018-11-25 19:30:00.000"], + None,None,None, + [4999.000000000000000, 4999.000000000000000] + ] + sqls = self.format_sqls(sql_template, select_items) + self.explain_and_check_res(sqls, has_last_row_scan_res) + self.query_check_sqls(sqls, has_last_row_scan_res, res_expect) + + select_items = ["last(c8), ts", "last(c8), c1", "last(c8), c8", "last(c8), tbname", \ + "last(c8), t1", "last(c8), c8, c8", "last(c8), ts, ts"] + has_last_row_scan_res = [1, 0, 1, 0, 0, 1, 1] + res_expect = [ + ["binary9999", "2018-11-25 19:30:00.000"], + None, + ["binary9999", "binary9999"], + None, None, + ["binary9999", "binary9999", "binary9999"], + ["binary9999", "2018-11-25 19:30:00.000", "2018-11-25 19:30:00.000"] + ] + 
sqls = self.format_sqls(sql_template, select_items) + self.explain_and_check_res(sqls, has_last_row_scan_res) + self.query_check_sqls(sqls, has_last_row_scan_res, res_expect) + + # c2, c4 in last row of t5,t6,t7,t8,t9 will always be NULL + sql_template = 'select %s from t5' + select_items = ["last(c4), ts", "last(c4), c4", "last(c4), c4, c4", "last(c4), ts, ts"] + has_last_row_scan_res = [1,1,1,1] + + sqls = self.format_sqls(sql_template, select_items) + self.explain_and_check_res(sqls, has_last_row_scan_res) + for sql in sqls: + tdSql.query(sql, queryTimes=1) + tdSql.checkRows(0) + + sql_template = 'select %s from meters' + select_items = [ + "last_row(ts), last(ts)", + "last_row(c1), last(c1)", + "last_row(c1), c1,c3, ts" + ] + has_last_row_scan_res = [0,0,1] + sqls = self.format_sqls(sql_template, select_items) + self.explain_and_check_res(sqls, has_last_row_scan_res) + #res_expect = [None, None, [999, 999, 499, "2018-11-25 19:30:00.000"]] + #self.query_check_sqls(sqls, has_last_row_scan_res, res_expect) + + sql = "select last(c1), c1, c1+1, c1+2, ts from meters" + res = self.explain_sql(sql) + self.check_explain_res_has_row("Last Row Scan", res, sql) + + tdSql.query(sql) + tdSql.checkRows(1) + tdSql.checkData(0, 0, 999) + tdSql.checkData(0, 1, 999) + tdSql.checkData(0, 2, 1000) + tdSql.checkData(0, 3, 1001) + tdSql.checkData(0, 4, "2018-11-25 19:30:00.000") + + def run(self): + self.prepareTestEnv() + #time.sleep(99999999) + self.test_last_cache_scan() + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +event = threading.Event() + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase())