Merge pull request #27516 from taosdata/case/TD-31711-3.0
udf function returns error for child query
commit e0fa9337a5
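The hunks below extend the Python UDF tests to query child tables as well as the super table. As a minimal standalone sketch of the scenario being fixed (not part of the patch), assuming a running TDengine instance, the taospy connector, and an already registered sf_concat_var UDF; the names db1, meters, and d0 are illustrative stand-ins borrowed from the tests:

    import taos

    # default host/user/password; adjust for your deployment
    conn = taos.connect()
    cur = conn.cursor()

    # querying the UDF through the super table worked before this change
    cur.execute("select sf_concat_var(col12, t12) from db1.meters limit 10")
    print(cur.fetchall())

    # querying the same UDF through a child table is the case that
    # returned an error and that the new checks below exercise
    cur.execute("select sf_concat_var(col12, t12) from db1.d0 limit 10")
    print(cur.fetchall())

    cur.close()
    conn.close()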
@@ -1,110 +0,0 @@
-import taos
-import sys
-import os
-import subprocess
-import glob
-import shutil
-import time
-
-from frame.log import *
-from frame.cases import *
-from frame.sql import *
-from frame.srvCtl import *
-from frame.caseBase import *
-from frame import *
-from frame.autogen import *
-# from frame.server.dnodes import *
-# from frame.server.cluster import *
-
-
-class TDTestCase(TBase):
-    updatecfgDict = {
-        'slowLogScope':"query"
-    }
-
-    def init(self, conn, logSql, replicaVar=3):
-        super(TDTestCase, self).init(conn, logSql, replicaVar=3, db="snapshot", checkColName="c1")
-        self.valgrind = 0
-        self.childtable_count = 10
-        # tdSql.init(conn.cursor())
-        tdSql.init(conn.cursor(), logSql)  # output sql.txt file
-
-    def run(self):
-        tdSql.prepare()
-        autoGen = AutoGen()
-        autoGen.create_db(self.db, 2, 3)
-        tdSql.execute(f"use {self.db}")
-        autoGen.create_stable(self.stb, 5, 10, 8, 8)
-        autoGen.create_child(self.stb, "d", self.childtable_count)
-        autoGen.insert_data(1000)
-        tdSql.execute(f"flush database {self.db}")
-        sc.dnodeStop(3)
-        # clusterDnodes.stoptaosd(1)
-        # clusterDnodes.starttaosd(3)
-        # time.sleep(5)
-        # clusterDnodes.stoptaosd(2)
-        # clusterDnodes.starttaosd(1)
-        # time.sleep(5)
-        autoGen.insert_data(5000, True)
-        self.flushDb(True)
-        # wait flush operation over
-        time.sleep(5)
-
-        # sql = 'show vnodes;'
-        # while True:
-        #     bFinish = True
-        #     param_list = tdSql.query(sql, row_tag=True)
-        #     for param in param_list:
-        #         if param[3] == 'leading' or param[3] == 'following':
-        #             bFinish = False
-        #             break
-        #     if bFinish:
-        #         break
-        self.snapshotAgg()
-        time.sleep(10)
-        sc.dnodeStopAll()
-        for i in range(1, 4):
-            path = clusterDnodes.getDnodeDir(i)
-            dnodesRootDir = os.path.join(path,"data","vnode", "vnode*")
-            dirs = glob.glob(dnodesRootDir)
-            for dir in dirs:
-                if os.path.isdir(dir):
-                    self.remove_directory(os.path.join(dir, "wal"))
-
-        sc.dnodeStart(1)
-        sc.dnodeStart(2)
-        sc.dnodeStart(3)
-        sql = "show vnodes;"
-        time.sleep(10)
-        while True:
-            bFinish = True
-            param_list = tdSql.query(sql, row_tag=True)
-            for param in param_list:
-                if param[3] == 'offline':
-                    tdLog.exit(
-                        "dnode synchronous fail dnode id: %d, vgroup id:%d status offline" % (param[0], param[1]))
-                if param[3] == 'leading' or param[3] == 'following':
-                    bFinish = False
-                    break
-            if bFinish:
-                break
-
-        self.timestamp_step = 1000
-        self.insert_rows = 6000
-        self.checkInsertCorrect()
-        self.checkAggCorrect()
-
-    def remove_directory(self, directory):
-        try:
-            shutil.rmtree(directory)
-            tdLog.debug("delete dir: %s " % (directory))
-        except OSError as e:
-            tdLog.exit("delete fail dir: %s " % (directory))
-
-    def stop(self):
-        tdSql.close()
-        tdLog.success(f"{__file__} successfully executed")
-
-
-tdCases.addLinux(__file__, TDTestCase())
-tdCases.addWindows(__file__, TDTestCase())
@@ -50,8 +50,8 @@
 { "type": "usmallint", "name": "usi"},
 { "type": "uint", "name": "ui" },
 { "type": "ubigint", "name": "ubi"},
-{ "type": "binary", "name": "bin", "len": 32},
-{ "type": "nchar", "name": "nch", "len": 64}
+{ "type": "binary", "name": "bin", "len": 50},
+{ "type": "nchar", "name": "nch", "len": 100}
 ],
 "tags": [
 {"type": "tinyint", "name": "groupid","max": 10,"min": 1},
@@ -172,7 +172,7 @@ class TDTestCase(TBase):
         if compact is not None:
             kw3 = f"s3_compact {compact}"

-        sql = f" create database db1 duration 1h {kw1} {kw2} {kw3}"
+        sql = f" create database db1 vgroups 1 duration 1h {kw1} {kw2} {kw3}"
         tdSql.execute(sql, show=True)
         #sql = f"select name,s3_keeplocal,s3_chunksize,s3_compact from information_schema.ins_databases where name='db1';"
         sql = f"select * from information_schema.ins_databases where name='db1';"
@@ -327,8 +327,6 @@ class TDTestCase(TBase):
         # check insert correct again
         self.checkInsertCorrect()

-        # checkBasic
-        self.checkBasic()

         # check stream correct and drop stream
         #self.checkStreamCorrect()
@@ -338,6 +336,10 @@ class TDTestCase(TBase):
         # insert history disorder data
         self.insertHistory()

+
+        # checkBasic
+        self.checkBasic()
+
         #self.checkInsertCorrect()
         self.snapshotAgg()
         self.doAction()
@@ -50,8 +50,8 @@
 { "type": "usmallint", "name": "usi"},
 { "type": "uint", "name": "ui" },
 { "type": "ubigint", "name": "ubi"},
-{ "type": "binary", "name": "bin", "len": 32},
-{ "type": "nchar", "name": "nch", "len": 64}
+{ "type": "binary", "name": "bin", "len": 50},
+{ "type": "nchar", "name": "nch", "len": 100}
 ],
 "tags": [
 {"type": "tinyint", "name": "groupid","max": 10,"min": 1},
@@ -12,7 +12,7 @@
 #
 ,,y,army,./pytest.sh python3 ./test.py -f multi-level/mlevel_basic.py -N 3 -L 3 -D 2
 ,,y,army,./pytest.sh python3 ./test.py -f db-encrypt/basic.py
-,,n,army,python3 ./test.py -f s3/s3Basic.py -N 3
+,,n,army,python3 ./test.py -f storage/s3/s3Basic.py -N 3
 ,,y,army,./pytest.sh python3 ./test.py -f cluster/snapshot.py -N 3 -L 3 -D 2
 ,,y,army,./pytest.sh python3 ./test.py -f query/function/test_func_elapsed.py
 ,,y,army,./pytest.sh python3 ./test.py -f query/function/test_function.py
@@ -272,12 +272,24 @@ class TDTestCase:
         if val is not None:
             tdLog.exit(f" check {sql} not expect None.")

-        # concat
+        # concat - stable
         sql = f'select sf_concat_var(col12, t12), concat(col12, t12) from {self.stbname} limit 1000'
         self.verify_same_value(sql)
         sql = f'select sf_concat_nch(col13, t13), concat(col13, t13) from {self.stbname} limit 1000'
         self.verify_same_value(sql)

+        # concat - child table
+        sql = f'select sf_concat_var(col12, t12), concat(col12, t12) from {self.tbname}0 limit 1000'
+        self.verify_same_value(sql)
+        sql = f'select sf_concat_nch(col13, t13), concat(col13, t13) from {self.tbname}0 limit 1000'
+        self.verify_same_value(sql)
+
+        # single child
+        sql = f'select sf_concat_nch(col13, t13) from {self.tbname}1'
+        tdSql.query(sql)
+        tdSql.checkRows(5000)
+

     # create aggregate
     def create_aggr_udfpy(self):
@@ -349,6 +361,12 @@ class TDTestCase:
         self.verify_same_value(sql)
         sql = f'select count(col8), af_count_float(col8) from {self.stbname}'
         self.verify_same_value(sql)
+        # child
+        sql = f'select count(col8), af_count_float(col8) from {self.tbname}0'
+        self.verify_same_value(sql)
+        sql = f'select af_count_bigint(col7) from {self.tbname}1'
+        tdSql.query(sql)
+        tdSql.checkData(0, 0, 5000)

         # nest
         sql = f'select a+1000,b+1000 from (select count(col8) as a, af_count_float(col8) as b from {self.stbname})'
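The added aggregate checks can be reproduced outside the test harness the same way. A sketch assuming the af_count_float UDF is registered, with the same illustrative db1 and d0 names as above:

    import taos

    conn = taos.connect()
    cur = conn.cursor()

    # the built-in count() and the Python aggregate UDF should agree
    # on a child table, not only on the super table
    cur.execute("select count(col8), af_count_float(col8) from db1.d0")
    builtin_count, udf_count = cur.fetchall()[0]
    assert builtin_count == udf_count, f"mismatch: {builtin_count} vs {udf_count}"

    cur.close()
    conn.close()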