From eb46b5fa08b2332153ba0345675c12f4b95fd6f6 Mon Sep 17 00:00:00 2001 From: wangjiaming0909 <604227650@qq.com> Date: Fri, 30 Aug 2024 16:45:43 +0800 Subject: [PATCH] add tests for fix duplicate cols names --- include/libs/nodes/querynodes.h | 4 - include/util/tutil.h | 4 +- source/libs/nodes/src/nodesUtilFuncs.c | 50 +----- source/libs/parser/src/parTranslater.c | 2 - source/libs/planner/src/planLogicCreater.c | 8 +- source/libs/planner/src/planOptimizer.c | 7 +- source/libs/planner/src/planPhysiCreater.c | 13 +- tests/parallel_test/cases.task | 1 + tests/system-test/2-query/nestedQuery2.py | 167 +++++++++++++++++++++ 9 files changed, 189 insertions(+), 67 deletions(-) create mode 100644 tests/system-test/2-query/nestedQuery2.py diff --git a/include/libs/nodes/querynodes.h b/include/libs/nodes/querynodes.h index ab9f9598c0..d93f7fcdaa 100644 --- a/include/libs/nodes/querynodes.h +++ b/include/libs/nodes/querynodes.h @@ -451,7 +451,6 @@ typedef struct SSelectStmt { bool groupSort; bool tagScan; bool joinContains; - bool expandStar; } SSelectStmt; typedef enum ESetOperatorType { SET_OP_TYPE_UNION_ALL = 1, SET_OP_TYPE_UNION } ESetOperatorType; @@ -625,9 +624,6 @@ int32_t nodesCollectColumnsExt(SSelectStmt* pSelect, ESqlClause clause, SSHashOb SNodeList** pCols); int32_t nodesCollectColumnsFromNode(SNode* node, const char* pTableAlias, ECollectColType type, SNodeList** pCols); -int32_t nodesCollectColumnsForTargets(SSelectStmt* pSelect, ESqlClause clause, const char* pTableAlias, ECollectColType type, - SNodeList** pCols); - typedef bool (*FFuncClassifier)(int32_t funcId); int32_t nodesCollectFuncs(SSelectStmt* pSelect, ESqlClause clause, char* tableAlias, FFuncClassifier classifier, SNodeList** pFuncs); int32_t nodesCollectSelectFuncs(SSelectStmt* pSelect, ESqlClause clause, char* tableAlias, FFuncClassifier classifier, SNodeList* pFuncs); diff --git a/include/util/tutil.h b/include/util/tutil.h index fb9bd9f637..6c7517f630 100644 --- a/include/util/tutil.h +++ b/include/util/tutil.h @@ -82,9 +82,7 @@ static FORCE_INLINE void taosEncryptPass_c(uint8_t *inBuf, size_t len, char *tar static FORCE_INLINE int32_t taosHashBinary(char* pBuf, int32_t len) { uint64_t hashVal = MurmurHash3_64(pBuf, len); - uInfo("wjm hash binary for: %s", pBuf); - int32_t ret = sprintf(pBuf, "%" PRIu64, hashVal); - return ret; + return sprintf(pBuf, "%" PRIu64, hashVal); } static FORCE_INLINE int32_t taosCreateMD5Hash(char *pBuf, int32_t len) { diff --git a/source/libs/nodes/src/nodesUtilFuncs.c b/source/libs/nodes/src/nodesUtilFuncs.c index f9f3c6e5c4..8900fb64dc 100644 --- a/source/libs/nodes/src/nodesUtilFuncs.c +++ b/source/libs/nodes/src/nodesUtilFuncs.c @@ -2250,7 +2250,6 @@ typedef struct SCollectColumnsCxt { ECollectColType collectType; SNodeList* pCols; SHashObj* pColHash; - bool collectForTarget; } SCollectColumnsCxt; static EDealRes doCollect(SCollectColumnsCxt* pCxt, SColumnNode* pCol, SNode* pNode) { @@ -2261,14 +2260,17 @@ static EDealRes doCollect(SCollectColumnsCxt* pCxt, SColumnNode* pCol, SNode* pN } else { len = snprintf(name, sizeof(name), "%s.%s", pCol->tableAlias, pCol->colName); } - if (NULL == taosHashGet(pCxt->pColHash, name, len)) { - pCxt->errCode = taosHashPut(pCxt->pColHash, name, len, NULL, 0); + if (pCol->projRefIdx > 0) { + len = taosHashBinary(name, strlen(name)); + len += sprintf(name + len, "_%d", pCol->projRefIdx); + } + SNode** pNodeFound = taosHashGet(pCxt->pColHash, name, len); + if (pNodeFound == NULL) { + pCxt->errCode = taosHashPut(pCxt->pColHash, name, len, &pNode, 
POINTER_BYTES); if (TSDB_CODE_SUCCESS == pCxt->errCode) { SNode* pNew = NULL; pCxt->errCode = nodesCloneNode(pNode, &pNew); if (TSDB_CODE_SUCCESS == pCxt->errCode) { - //((SColumnNode*)pNew)->projRefIdx = pCol->node.projIdx; - if (pCxt->collectForTarget) ((SColumnNode*)pNew)->resIdx = pCol->projRefIdx; pCxt->errCode = nodesListStrictAppend(pCxt->pCols, pNew); } } @@ -2307,44 +2309,6 @@ static EDealRes collectColumnsExt(SNode* pNode, void* pContext) { return DEAL_RES_CONTINUE; } -int32_t nodesCollectColumnsForTargets(SSelectStmt* pSelect, ESqlClause clause, const char* pTableAlias, ECollectColType type, - SNodeList** pCols) { - if (NULL == pSelect || NULL == pCols) { - return TSDB_CODE_FAILED; - } - SNodeList * pList = NULL; - if (!*pCols) { - int32_t code = nodesMakeList(&pList); - if (TSDB_CODE_SUCCESS != code) { - return code; - } - } - SCollectColumnsCxt cxt = { - .errCode = TSDB_CODE_SUCCESS, - .pTableAlias = pTableAlias, - .collectForTarget = true, - .collectType = type, - .pCols = (NULL == *pCols ? pList : *pCols), - .pColHash = taosHashInit(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK)}; - if (NULL == cxt.pCols || NULL == cxt.pColHash) { - return TSDB_CODE_OUT_OF_MEMORY; - } - *pCols = NULL; - nodesWalkSelectStmt(pSelect, clause, collectColumns, &cxt); - taosHashCleanup(cxt.pColHash); - if (TSDB_CODE_SUCCESS != cxt.errCode) { - nodesDestroyList(cxt.pCols); - return cxt.errCode; - } - if (LIST_LENGTH(cxt.pCols) > 0) { - *pCols = cxt.pCols; - } else { - nodesDestroyList(cxt.pCols); - } - - return TSDB_CODE_SUCCESS; -} - int32_t nodesCollectColumns(SSelectStmt* pSelect, ESqlClause clause, const char* pTableAlias, ECollectColType type, SNodeList** pCols) { if (NULL == pSelect || NULL == pCols) { diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index 1e9891b115..1046d52a8d 100755 --- a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -1379,14 +1379,12 @@ static int32_t findAndSetColumn(STranslateContext* pCxt, SColumnNode** pColRef, return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_AMBIGUOUS_COLUMN, pCol->colName); } code = setColumnInfoByExpr(pTempTable, pExpr, pColRef); - //(*pColRef)->projRefIdx = pExpr->projIdx; if (TSDB_CODE_SUCCESS != code) { break; } *pFound = true; } else if (isPrimaryKeyImpl(pNode) && isInternalPrimaryKey(pCol)) { code = setColumnInfoByExpr(pTempTable, pExpr, pColRef); - //(*pColRef)->projRefIdx = pExpr->projIdx; if (TSDB_CODE_SUCCESS != code) break; pCol->isPrimTs = true; *pFound = true; diff --git a/source/libs/planner/src/planLogicCreater.c b/source/libs/planner/src/planLogicCreater.c index 08e22ba1cc..3d13ad4c62 100644 --- a/source/libs/planner/src/planLogicCreater.c +++ b/source/libs/planner/src/planLogicCreater.c @@ -1346,7 +1346,10 @@ static int32_t createSortLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pSelect pSort->node.resultDataOrder = isPrimaryKeySort(pSelect->pOrderByList) ? (pSort->groupSort ? 
DATA_ORDER_LEVEL_IN_GROUP : DATA_ORDER_LEVEL_GLOBAL) : DATA_ORDER_LEVEL_NONE; - code = nodesCollectColumnsForTargets(pSelect, SQL_CLAUSE_ORDER_BY, NULL, COLLECT_COL_TYPE_ALL, &pSort->node.pTargets); + code = nodesCollectColumns(pSelect, SQL_CLAUSE_ORDER_BY, NULL, COLLECT_COL_TYPE_ALL, &pSort->node.pTargets); + if (TSDB_CODE_SUCCESS == code) { + rewriteTargetsWithResId(pSort->node.pTargets); + } if (TSDB_CODE_SUCCESS == code && NULL == pSort->node.pTargets) { SNode* pNew = NULL; code = nodesCloneNode(nodesListGetNode(pCxt->pCurrRoot->pTargets, 0), &pNew); @@ -1467,6 +1470,9 @@ static int32_t createPartitionLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pS code = nodesListMakeStrictAppend(&pPartition->node.pTargets, pNew); } } + if (TSDB_CODE_SUCCESS == code) { + rewriteTargetsWithResId(pPartition->node.pTargets); + } if (TSDB_CODE_SUCCESS == code) { // code = nodesCollectFuncs(pSelect, SQL_CLAUSE_GROUP_BY, NULL, fmIsAggFunc, &pPartition->pAggFuncs); diff --git a/source/libs/planner/src/planOptimizer.c b/source/libs/planner/src/planOptimizer.c index 5743d557bf..f234509561 100644 --- a/source/libs/planner/src/planOptimizer.c +++ b/source/libs/planner/src/planOptimizer.c @@ -4907,16 +4907,13 @@ static EDealRes mergeProjectionsExpr2(SNode** pNode, void* pContext) { int32_t projIdx = 1; FOREACH(pProjection, pChildProj->pProjections) { if (isColRefExpr(pProjCol, (SExprNode*)pProjection)) { - - //} - //if (0 == strcmp(((SColumnNode*)(*pNode))->colName, ((SExprNode*)pProjection)->aliasName)) { SNode* pExpr = NULL; pCxt->errCode = nodesCloneNode(pProjection, &pExpr); if (pExpr == NULL) { return DEAL_RES_ERROR; } snprintf(((SExprNode*)pExpr)->aliasName, sizeof(((SExprNode*)pExpr)->aliasName), "%s", - ((SExprNode*)*pNode)->aliasName);// 保留外层project的aliasname, 外层project的aliasName是被改写过的. 
+ ((SExprNode*)*pNode)->aliasName); nodesDestroyNode(*pNode); *pNode = pExpr; return DEAL_RES_IGNORE_CHILD; @@ -4932,7 +4929,7 @@ static EDealRes mergeProjectionsExpr(SNode** pNode, void* pContext) { if (QUERY_NODE_COLUMN == nodeType(*pNode)) { SNode* pTarget; FOREACH(pTarget, ((SLogicNode*)pChildProj)->pTargets) { - if (nodesEqualNode(pTarget, *pNode)) { // pNode是projectlist里的, aliasName被改写成了expr_#, 而pTarget是根据childProject的projectlist生成的, node里面啥都没有 + if (nodesEqualNode(pTarget, *pNode)) { SNode* pProjection; FOREACH(pProjection, pChildProj->pProjections) { if (0 == strcmp(((SColumnNode*)pTarget)->colName, ((SExprNode*)pProjection)->aliasName)) { diff --git a/source/libs/planner/src/planPhysiCreater.c b/source/libs/planner/src/planPhysiCreater.c index d018f89dc9..2b0f449b77 100644 --- a/source/libs/planner/src/planPhysiCreater.c +++ b/source/libs/planner/src/planPhysiCreater.c @@ -235,12 +235,10 @@ static int32_t buildDataBlockSlots(SPhysiPlanContext* pCxt, SNodeList* pList, SD if (TSDB_CODE_SUCCESS == code) { code = nodesListStrictAppend(pDataBlockDesc->pSlots, createSlotDesc(pCxt, name, pNode, slotId, true, false)); } - qInfo("wjm append slot to hash name: %s, slotId: %d, aliasName: %s", name, slotId, ((SExprNode*)pNode)->aliasName); code = putSlotToHash(name, len, pDataBlockDesc->dataBlockId, slotId, pNode, pHash); if (TSDB_CODE_SUCCESS == code) { if (nodeType(pNode) == QUERY_NODE_COLUMN && ((SColumnNode*)pNode)->resIdx > 0) { - sprintf(name + strlen(name), "%d", ((SColumnNode*)pNode)->resIdx); - qInfo("wjm append slot name to projidx hash: %s, slotId: %d, aliasName: %s", name, slotId, ((SExprNode*)pNode)->aliasName); + sprintf(name + strlen(name), "_%d", ((SColumnNode*)pNode)->resIdx); code = putSlotToHash(name, strlen(name), pDataBlockDesc->dataBlockId, slotId, pNode, pProjIdxDescHash); } } @@ -326,7 +324,6 @@ static int32_t addDataBlockSlotsImpl(SPhysiPlanContext* pCxt, SNodeList* pList, } } - qInfo("wjm add datablock slots for: %s id: %d, aliasName: %s", name, slotId, ((SExprNode*)pNode)->aliasName); taosMemoryFree(name); if (TSDB_CODE_SUCCESS == code) { SNode* pTarget = NULL; @@ -402,13 +399,13 @@ static EDealRes doSetSlotId(SNode* pNode, void* pContext) { SSetSlotIdCxt* pCxt = (SSetSlotIdCxt*)pContext; char *name = NULL; int32_t len = 0; - pCxt->errCode = getSlotKey(pNode, NULL, &name, &len, 0); + pCxt->errCode = getSlotKey(pNode, NULL, &name, &len, 16); if (TSDB_CODE_SUCCESS != pCxt->errCode) { return DEAL_RES_ERROR; } SSlotIndex *pIndex = NULL; if (((SColumnNode*)pNode)->projRefIdx > 0) { - sprintf(name + strlen(name), "%d", ((SColumnNode*)pNode)->projRefIdx); + sprintf(name + strlen(name), "_%d", ((SColumnNode*)pNode)->projRefIdx); pIndex = taosHashGet(pCxt->pLeftProjIdxHash, name, strlen(name)); if (!pIndex) { pIndex = taosHashGet(pCxt->pRightProdIdxHash, name, strlen(name)); @@ -421,7 +418,7 @@ static EDealRes doSetSlotId(SNode* pNode, void* pContext) { } // pIndex is definitely not NULL, otherwise it is a bug if (NULL == pIndex) { - planError("wjm doSetSlotId failed, invalid slot name %s", name); + planError("doSetSlotId failed, invalid slot name %s", name); dumpSlots("left datablock desc", pCxt->pLeftHash); dumpSlots("right datablock desc", pCxt->pRightHash); pCxt->errCode = TSDB_CODE_PLAN_INTERNAL_ERROR; @@ -430,7 +427,6 @@ static EDealRes doSetSlotId(SNode* pNode, void* pContext) { } ((SColumnNode*)pNode)->dataBlockId = pIndex->dataBlockId; ((SColumnNode*)pNode)->slotId = ((SSlotIdInfo*)taosArrayGet(pIndex->pSlotIdsInfo, 0))->slotId; - qInfo("wjm set slotId for %s, 
slotId: %d, aliasName: %s", name, ((SColumnNode*)pNode)->slotId, ((SExprNode*)pNode)->aliasName); taosMemoryFree(name); return DEAL_RES_IGNORE_CHILD; } @@ -499,7 +495,6 @@ static SPhysiNode* makePhysiNode(SPhysiPlanContext* pCxt, SLogicNode* pLogicNode terrno = code; return NULL; } - qInfo("wjm create node: %s", nodesNodeName(type)); TSWAP(pPhysiNode->pLimit, pLogicNode->pLimit); TSWAP(pPhysiNode->pSlimit, pLogicNode->pSlimit); diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task index 3ec1d7c428..9913052a54 100644 --- a/tests/parallel_test/cases.task +++ b/tests/parallel_test/cases.task @@ -142,6 +142,7 @@ ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/tsma2.py -Q 2 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/tsma2.py -Q 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/tsma2.py -Q 4 +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/nestedQuery2.py ,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmqShow.py ,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmqDropStb.py ,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/subscribeStb0.py diff --git a/tests/system-test/2-query/nestedQuery2.py b/tests/system-test/2-query/nestedQuery2.py new file mode 100644 index 0000000000..dce119b1f2 --- /dev/null +++ b/tests/system-test/2-query/nestedQuery2.py @@ -0,0 +1,167 @@ +from random import randrange +import time +import threading +import secrets +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * +from util.common import * +# from tmqCommon import * + +class TDTestCase: + updatecfgDict = {'asynclog': 0, 'ttlUnit': 1, 'ttlPushInterval': 5, 'ratioOfVnodeStreamThrea': 4} + + def __init__(self): + self.vgroups = 4 + self.ctbNum = 10 + self.rowsPerTbl = 10000 + self.duraion = '1h' + + def init(self, conn, logSql, replicaVar=1): + self.replicaVar = int(replicaVar) + tdLog.debug(f"start to excute {__file__}") + tdSql.init(conn.cursor(), False) + + def create_database(self, tsql, dbName, dropFlag=1, vgroups=2, replica=1, duration: str = '1d'): + if dropFlag == 1: + tsql.execute("drop database if exists %s" % (dbName)) + + tsql.execute("create database if not exists %s vgroups %d replica %d duration %s" % ( + dbName, vgroups, replica, duration)) + tdLog.debug("complete to create database %s" % (dbName)) + return + + def create_stable(self, tsql, paraDict): + colString = tdCom.gen_column_type_str( + colname_prefix=paraDict["colPrefix"], column_elm_list=paraDict["colSchema"]) + tagString = tdCom.gen_tag_type_str( + tagname_prefix=paraDict["tagPrefix"], tag_elm_list=paraDict["tagSchema"]) + sqlString = f"create table if not exists %s.%s (%s) tags (%s)" % ( + paraDict["dbName"], paraDict["stbName"], colString, tagString) + tdLog.debug("%s" % (sqlString)) + tsql.execute(sqlString) + return + + def create_ctable(self, tsql=None, dbName='dbx', stbName='stb', ctbPrefix='ctb', ctbNum=1, ctbStartIdx=0): + for i in range(ctbNum): + sqlString = "create table %s.%s%d using %s.%s tags(%d, 'tb%d', 'tb%d', %d, %d, %d)" % (dbName, ctbPrefix, i+ctbStartIdx, dbName, stbName, (i+ctbStartIdx) % 5, i+ctbStartIdx + random.randint( + 1, 100), i+ctbStartIdx + random.randint(1, 100), i+ctbStartIdx + random.randint(1, 100), i+ctbStartIdx + random.randint(1, 100), i+ctbStartIdx + random.randint(1, 100)) + tsql.execute(sqlString) + + tdLog.debug("complete to create %d child tables by %s.%s" % + (ctbNum, dbName, stbName)) + return + + def init_normal_tb(self, tsql, db_name: str, tb_name: str, rows: int, 
start_ts: int, ts_step: int): + sql = 'CREATE TABLE %s.%s (ts timestamp, c1 INT, c2 INT, c3 INT, c4 double, c5 VARCHAR(255))' % ( + db_name, tb_name) + tsql.execute(sql) + sql = 'INSERT INTO %s.%s values' % (db_name, tb_name) + for j in range(rows): + sql += f'(%d, %d,%d,%d,{random.random()},"varchar_%d"),' % (start_ts + j * ts_step + randrange(500), j % + 10 + randrange(200), j % 10, j % 10, j % 10 + randrange(100)) + tsql.execute(sql) + + def insert_data(self, tsql, dbName, ctbPrefix, ctbNum, rowsPerTbl, batchNum, startTs, tsStep): + tdLog.debug("start to insert data ............") + tsql.execute("use %s" % dbName) + pre_insert = "insert into " + sql = pre_insert + + for i in range(ctbNum): + rowsBatched = 0 + sql += " %s.%s%d values " % (dbName, ctbPrefix, i) + for j in range(rowsPerTbl): + if (i < ctbNum/2): + sql += "(%d, %d, %d, %d,%d,%d,%d,true,'binary%d', 'nchar%d') " % (startTs + j*tsStep + randrange( + 500), j % 10 + randrange(100), j % 10 + randrange(200), j % 10, j % 10, j % 10, j % 10, j % 10, j % 10) + else: + sql += "(%d, %d, NULL, %d,NULL,%d,%d,true,'binary%d', 'nchar%d') " % ( + startTs + j*tsStep + randrange(500), j % 10, j % 10, j % 10, j % 10, j % 10, j % 10) + rowsBatched += 1 + if ((rowsBatched == batchNum) or (j == rowsPerTbl - 1)): + tsql.execute(sql) + rowsBatched = 0 + if j < rowsPerTbl - 1: + sql = "insert into %s.%s%d values " % (dbName, ctbPrefix, i) + else: + sql = "insert into " + if sql != pre_insert: + tsql.execute(sql) + tdLog.debug("insert data ............ [OK]") + return + + def init_data(self, db: str = 'test', ctb_num: int = 10, rows_per_ctb: int = 10000, start_ts: int = 1537146000000, ts_step: int = 500): + tdLog.printNoPrefix( + "======== prepare test env include database, stable, ctables, and insert data: ") + paraDict = {'dbName': db, + 'dropFlag': 1, + 'vgroups': 2, + 'stbName': 'meters', + 'colPrefix': 'c', + 'tagPrefix': 't', + 'colSchema': [{'type': 'INT', 'count': 1}, {'type': 'BIGINT', 'count': 1}, {'type': 'FLOAT', 'count': 1}, {'type': 'DOUBLE', 'count': 1}, {'type': 'smallint', 'count': 1}, {'type': 'tinyint', 'count': 1}, {'type': 'bool', 'count': 1}, {'type': 'binary', 'len': 10, 'count': 1}, {'type': 'nchar', 'len': 10, 'count': 1}], + 'tagSchema': [{'type': 'INT', 'count': 1}, {'type': 'nchar', 'len': 20, 'count': 1}, {'type': 'binary', 'len': 20, 'count': 1}, {'type': 'BIGINT', 'count': 1}, {'type': 'smallint', 'count': 1}, {'type': 'DOUBLE', 'count': 1}], + 'ctbPrefix': 't', + 'ctbStartIdx': 0, + 'ctbNum': ctb_num, + 'rowsPerTbl': rows_per_ctb, + 'batchNum': 3000, + 'startTs': start_ts, + 'tsStep': ts_step} + + paraDict['vgroups'] = self.vgroups + paraDict['ctbNum'] = ctb_num + paraDict['rowsPerTbl'] = rows_per_ctb + + tdLog.info("create database") + self.create_database(tsql=tdSql, dbName=paraDict["dbName"], dropFlag=paraDict["dropFlag"], + vgroups=paraDict["vgroups"], replica=self.replicaVar, duration=self.duraion) + + tdLog.info("create stb") + self.create_stable(tsql=tdSql, paraDict=paraDict) + + tdLog.info("create child tables") + self.create_ctable(tsql=tdSql, dbName=paraDict["dbName"], + stbName=paraDict["stbName"], ctbPrefix=paraDict["ctbPrefix"], + ctbNum=paraDict["ctbNum"], ctbStartIdx=paraDict["ctbStartIdx"]) + self.insert_data(tsql=tdSql, dbName=paraDict["dbName"], + ctbPrefix=paraDict["ctbPrefix"], ctbNum=paraDict["ctbNum"], + rowsPerTbl=paraDict["rowsPerTbl"], batchNum=paraDict["batchNum"], + startTs=paraDict["startTs"], tsStep=paraDict["tsStep"]) + self.init_normal_tb(tdSql, paraDict['dbName'], 'norm_tb', + 
paraDict['rowsPerTbl'], paraDict['startTs'], paraDict['tsStep']) + + def test_select_asterisk_from_subquery_with_duplicate_aliasname(self): + sql = "select * from (select c8 as a, c9 as a from t1 order by ts desc limit 10)t;" + tdSql.query(sql, queryTimes=1) + tdSql.checkData(0, 0, "binary9") + tdSql.checkData(0, 1, "nchar9") + sql = "select * from (select c8 as a, c9 as a, ts from t1 order by ts desc limit 10)t order by ts desc;" + tdSql.query(sql, queryTimes=1) + tdSql.checkData(0, 0, "binary9") + tdSql.checkData(0, 1, "nchar9") + sql = "select * from (select c8 as a, c9 as a, ts, t1 from t1 order by ts desc limit 10)t partition by t1 order by ts desc;" + tdSql.query(sql, queryTimes=1) + tdSql.checkData(0, 0, "binary9") + tdSql.checkData(0, 1, "nchar9") + sql = " select * from (select a.c8, b.c8, a.ts, a.t1,b.t1 from t1 a, t3 b where a.ts = b.ts order by a.ts)ttt" + tdSql.query(sql, queryTimes=1) + + tdSql.checkData(0, 3, 1) + tdSql.checkData(0, 4, 3) + + def run(self): + self.init_data() + self.test_select_asterisk_from_subquery_with_duplicate_aliasname() + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + + +event = threading.Event() + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase())
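
Background sketch of the keying idea this patch relies on (hedged: the helper below is hypothetical, not TDengine code). The planner keys collected columns and data-block slots by the alias-qualified column name, so a subquery that reuses an alias, e.g. select c8 as a, c9 as a from t1, appears to have produced identical keys and let both outer columns resolve to the same slot. The change appends the projection reference index to the key (the new "_%d" suffix in doCollect, buildDataBlockSlots and doSetSlotId; doCollect additionally runs the name through taosHashBinary first), so each duplicate alias gets its own entry. A minimal standalone illustration, with the hypothetical name make_slot_key and none of the real hashing details:

/*
 * Standalone sketch only: shows how appending a projection index to an
 * alias-qualified key keeps duplicate aliases from colliding.
 */
#include <stdio.h>
#include <string.h>

/* Build "<tableAlias>.<colName>" and, for disambiguation, "_<projIdx>". */
static int make_slot_key(char *buf, size_t cap, const char *tbl_alias,
                         const char *col_name, int proj_idx) {
  int len = snprintf(buf, cap, "%s.%s", tbl_alias, col_name);
  if (proj_idx > 0) {  /* duplicate aliases get distinct suffixes */
    len += snprintf(buf + len, cap - (size_t)len, "_%d", proj_idx);
  }
  return len;
}

int main(void) {
  char k1[64], k2[64];
  /* select c8 as a, c9 as a from t1: both projections share the alias "t.a" */
  make_slot_key(k1, sizeof(k1), "t", "a", 1);
  make_slot_key(k2, sizeof(k2), "t", "a", 2);
  printf("%s\n%s\n", k1, k2);        /* prints t.a_1 and t.a_2 */
  return strcmp(k1, k2) == 0;        /* exits 0 because the keys now differ */
}

Compiled on its own, the sketch prints t.a_1 and t.a_2 and exits with status 0, which is the property the new nestedQuery2.py cases verify end to end: select * over a subquery with duplicate alias names must return both underlying columns rather than one column twice.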