Merge pull request #15612 from taosdata/test/chr/TD-14699
test:modify test case of tsbs query
commit 369e194bc5

@@ -0,0 +1,290 @@
# from asyncio.windows_events import NULL
import taos
import sys
import datetime
import inspect
import random
# os and time are used directly below (os.system, time.sleep); import them explicitly
# rather than relying on the util star-imports to re-export them.
import os
import time

from util.log import *
from util.sql import *
from util.cases import *
from util.cluster import *
from util.common import *

sys.path.append("./6-cluster/")
from clusterCommonCreate import *
from clusterCommonCheck import clusterComCheck

import threading

class TDTestCase:

    clientCfgDict = {'queryproxy': '1','debugFlag': 135}
    clientCfgDict["debugFlag"] = 131
    updatecfgDict = {'clientCfg': {}}
    updatecfgDict = {'debugFlag': 131}
    updatecfgDict = {'keepColumnName': 1}
    updatecfgDict["clientCfg"] = clientCfgDict
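    # Note: each bare "updatecfgDict = {...}" assignment replaces the previous dict, so the
    # settings that finally take effect are {'keepColumnName': 1, 'clientCfg': clientCfgDict}
    # (with clientCfgDict carrying debugFlag 131). The test framework reads these class
    # attributes when generating the dnode and client configuration for this case.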

    def init(self, conn, logSql):
        tdLog.debug(f"start to execute {__file__}")
        tdSql.init(conn.cursor(), True)

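    # create_ctable batches the "create table ... using <stb> tags(...)" clauses into one SQL
    # statement and flushes it every 1000 child tables; every 10th table lists only
    # (name,fleet,driver,device_version), deliberately leaving its model tag NULL.
    # With ctbPrefix='rct' the generated statement looks roughly like:
    #   create table rct0 using readings (name,fleet,driver,device_version) tags('truck_0', 'South0','Trish0','v2.0')
    #                rct1 using readings tags('truck_1', 'South1','Trish1','H-1','v2.1') ...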
    def create_ctable(self,tsql=None, dbName='dbx',stbName='stb',ctbPrefix='ctb',ctbNum=1):
        tsql.execute("use %s" %dbName)
        pre_create = "create table"
        sql = pre_create
        #tdLog.debug("doing create one stable %s and %d child table in %s ..." %(stbname, count ,dbname))
        for i in range(ctbNum):
            tagValue = 'beijing'
            if (i % 10 == 0):
                sql += " %s%d using %s (name,fleet,driver,device_version) tags('truck_%d', 'South%d','Trish%d','v2.%d')"%(ctbPrefix,i,stbName,i,i,i,i)
            else:
                model = 'H-%d'%i
                sql += " %s%d using %s tags('truck_%d', 'South%d','Trish%d','%s','v2.%d')"%(ctbPrefix,i,stbName,i,i,i,model,i)
            if (i > 0) and (i%1000 == 0):
                tsql.execute(sql)
                sql = pre_create
        if sql != pre_create:
            tsql.execute(sql)

        tdLog.debug("complete to create %d child tables in %s.%s" %(ctbNum, dbName, stbName))
        return

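    # prepareData creates the TSBS IoT-style schema: a "readings" super table (truck telemetry)
    # and a "diagnostics" super table, each with 40 child tables (rct0..rct39 / dct0..dct39),
    # and then inserts the generated rows used by the queries below.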
    def prepareData(self):
        dbname="db_tsbs"
        stabname1="readings"
        stabname2="diagnostics"
        ctbnamePre1="rct"
        ctbnamePre2="dct"
        ctbNums=40
        self.ctbNums=ctbNums
        rowNUms=100
        ts=1451606400000
        tdSql.execute(f"create database {dbname};")
        tdSql.execute(f"use {dbname} ")
        tdSql.execute(f'''
        create table {stabname1} (ts timestamp,latitude double,longitude double,elevation double,velocity double,heading double,grade double,fuel_consumption double,load_capacity double,fuel_capacity double,nominal_fuel_consumption double) tags (name binary(30),fleet binary(30),driver binary(30),model binary(30),device_version binary(30));
        ''')
        tdSql.execute(f'''
        create table {stabname2} (ts timestamp,fuel_state double,current_load double,status bigint,load_capacity double,fuel_capacity double,nominal_fuel_consumption double) tags (name binary(30),fleet binary(30),driver binary(30),model binary(30),device_version binary(30)) ;
        ''')
        self.create_ctable(tsql=tdSql,dbName=dbname,stbName=stabname1,ctbPrefix=ctbnamePre1,ctbNum=ctbNums)
        self.create_ctable(tsql=tdSql,dbName=dbname,stbName=stabname2,ctbPrefix=ctbnamePre2,ctbNum=ctbNums)
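
        # Each child table receives rowNUms rows at a one-minute cadence starting at
        # ts = 1451606400000 (2016-01-01T00:00:00Z), so the time-window queries below
        # operate on a known, fully populated range.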
        for j in range(ctbNums):
            for i in range(rowNUms):
                tdSql.execute(
                    f"insert into rct{j} values ( {ts+i*60000}, {80+i}, {90+i}, {85+i}, {30+i*10}, {1.2*i}, {221+i*2}, {20+i*0.2}, {1500+i*20}, {150+i*2},{5+i} )"
                )
                status= random.randint(0,1)
                tdSql.execute(
                    f"insert into dct{j} values ( {ts+i*60000}, {1+i*0.1},{1400+i*15}, {status},{1500+i*20}, {150+i*2},{5+i} )"
                )
        tdSql.execute("insert into dct9 (ts,fuel_state) values('2021-07-13 14:06:33.123Z',1.2) ;")

    # def check_avg(self ,origin_query , check_query):
    #     avg_result = tdSql.getResult(origin_query)
    #     origin_result = tdSql.getResult(check_query)
    #
    #     check_status = True
    #     for row_index , row in enumerate(avg_result):
    #         for col_index , elem in enumerate(row):
    #             if avg_result[row_index][col_index] != origin_result[row_index][col_index]:
    #                 check_status = False
    #     if not check_status:
    #         tdLog.notice("avg function value has not as expected , sql is \"%s\" "%origin_query )
    #         sys.exit(1)
    #     else:
    #         tdLog.info("avg value check pass , it work as expected ,sql is \"%s\" "%check_query )

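    # createCluster promotes dnodes 2 and 3 to mnodes and creates qnodes on dnodes 1-3;
    # the qnodes are what queryPolicy 2 (set in run()) allows queries to be scheduled on.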
    def createCluster(self):
        tdSql.execute("create mnode on dnode 2")
        tdSql.execute("create mnode on dnode 3")
        tdSql.execute("create qnode on dnode 1")
        tdSql.execute("create qnode on dnode 2")
        tdSql.execute("create qnode on dnode 3")
        time.sleep(10)
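        # The fixed sleep gives the new mnodes and qnodes time to come online. A hedged
        # alternative (sketch only, not part of the original test) would be to poll readiness:
        #   for _ in range(30):
        #       tdSql.query("show qnodes;")
        #       if tdSql.queryRows >= 3:
        #           break
        #       time.sleep(1)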
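    # tsbsIotQuery runs the TSBS IoT benchmark query set (high-load, stationary-trucks,
    # long-driving-sessions, avg-load, daily-activity, breakdown-frequency, ...) against
    # db_tsbs; restartFunc() below drives it from several threads while taosd processes
    # are restarted.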
    def tsbsIotQuery(self,tdSql):
        tdSql.execute("use db_tsbs")

        # test interval and partition
        tdSql.query(" SELECT avg(velocity) as mean_velocity ,name,driver,fleet FROM readings WHERE ts > 1451606400000 AND ts <= 1451606460000 partition BY name,driver,fleet; ")
        # print(tdSql.queryResult)
        parRows=tdSql.queryRows
        tdSql.query(" SELECT avg(velocity) as mean_velocity ,name,driver,fleet FROM readings WHERE ts > 1451606400000 AND ts <= 1451606460000 partition BY name,driver,fleet interval(10m); ")
        tdSql.checkRows(parRows)
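        # The queried window is only 60 seconds wide, so interval(10m) can produce at most
        # one window per (name,driver,fleet) group; both statements should therefore return
        # the same number of rows, which checkRows(parRows) verifies.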

        # # test insert into
        # tdSql.execute("create table testsnode (ts timestamp, c1 float,c2 binary(30),c3 binary(30),c4 binary(30)) ;")
        # tdSql.query("insert into testsnode SELECT ts,avg(velocity) as mean_velocity,name,driver,fleet FROM readings WHERE ts > 1451606400000 AND ts <= 1451606460000 partition BY name,driver,fleet,ts interval(10m);")
        # tdSql.query("insert into testsnode(ts,c1,c2,c3,c4) SELECT ts,avg(velocity) as mean_velocity,name,driver,fleet FROM readings WHERE ts > 1451606400000 AND ts <= 1451606460000 partition BY name,driver,fleet,ts interval(10m);")

        # test partition interval fill
        tdSql.query("SELECT name,floor(avg(velocity)/10)/floor(avg(velocity)/10) AS mv FROM readings WHERE name!='' AND ts > '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by name interval(10m) fill(value,0) ;")

        # test partition interval limit (PRcore-TD-17410)
        tdSql.query("select name,driver from (SELECT name,driver,fleet ,avg(velocity) as mean_velocity FROM readings partition BY name,driver,fleet interval (10m) limit 1);")
        tdSql.checkRows(self.ctbNums)
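        # "limit 1" keeps a single window per (name,driver,fleet) partition, so the outer
        # query is expected to return exactly one row per child table (self.ctbNums, i.e. 40).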

        # test partition interval Pseudo time-column
        tdSql.query("SELECT count(ms1)/144 FROM (SELECT _wstart as ts1,model, fleet,avg(status) AS ms1 FROM diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by model, fleet interval(10m)) WHERE ts1 >= '2016-01-01T00:00:00Z' AND ts1 < '2016-01-05T00:00:01Z' AND ms1<1;")

        # 1. high-load
        tdSql.query("SELECT ts,name,driver,current_load,load_capacity FROM (SELECT last(ts) as ts,name,driver, current_load,load_capacity FROM diagnostics WHERE fleet = 'South' partition by name,driver) WHERE current_load>= (0.9 * load_capacity) partition by name ORDER BY name desc, ts DESC;")
        tdSql.query("SELECT ts,name,driver,current_load,load_capacity FROM (SELECT last(ts) as ts,name,driver, current_load,load_capacity FROM diagnostics WHERE fleet = 'South' partition by name,driver) WHERE current_load>= (0.9 * load_capacity) partition by name ORDER BY name ;")

        # 2. stationary-trucks
        tdSql.query("select name,driver from (SELECT name,driver,fleet ,avg(velocity) as mean_velocity FROM readings WHERE ts > '2016-01-01T15:07:21Z' AND ts <= '2016-01-01T16:17:21Z' partition BY name,driver,fleet interval(10m) LIMIT 1)")
        tdSql.query("select name,driver from (SELECT name,driver,fleet ,avg(velocity) as mean_velocity FROM readings WHERE ts > '2016-01-01T15:07:21Z' AND ts <= '2016-01-01T16:17:21Z' partition BY name,driver,fleet interval(10m) LIMIT 1) WHERE fleet = 'West' AND mean_velocity < 1000 partition BY name")

        # 3. long-driving-sessions
        tdSql.query("SELECT name,driver FROM(SELECT name,driver,count(*) AS ten_min FROM(SELECT _wstart as ts,name,driver,avg(velocity) as mean_velocity FROM readings where ts > '2016-01-01T00:00:34Z' AND ts <= '2016-01-01T04:00:34Z' partition BY name,driver interval(10m)) WHERE mean_velocity > 1 GROUP BY name,driver) WHERE ten_min > 22 ;")

        # 4. long-daily-sessions
        tdSql.query("SELECT name,driver FROM(SELECT name,driver,count(*) AS ten_min FROM(SELECT name,driver,avg(velocity) as mean_velocity FROM readings WHERE fleet ='West' AND ts > '2016-01-01T12:31:37Z' AND ts <= '2016-01-05T12:31:37Z' partition BY name,driver interval(10m) ) WHERE mean_velocity > 1 GROUP BY name,driver) WHERE ten_min > 60")

        # 5. avg-daily-driving-duration
        tdSql.query("select _wstart as ts,fleet,name,driver,count(mv)/6 as hours_driven from ( select _wstart as ts,fleet,name,driver,avg(velocity) as mv from readings where ts > '2016-01-01T00:00:00Z' and ts < '2016-01-05T00:00:01Z' partition by fleet,name,driver interval(10m)) where ts > '2016-01-01T00:00:00Z' and ts < '2016-01-05T00:00:01Z' partition by fleet,name,driver interval(1d) ;")

        # # 6. avg-daily-driving-session
        # # taosc core dumped
        # tdSql.execute("create table random_measure2_1 (ts timestamp,ela float, name binary(40))")
        # tdSql.query("SELECT ts,diff(mv) AS difka FROM (SELECT ts,name,floor(avg(velocity)/10)/floor(avg(velocity)/10) AS mv FROM readings WHERE name!='' AND ts > '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by name,ts interval(10m) fill(value,0)) GROUP BY name,ts;")
        # tdSql.query("select name,diff(mv) AS difka FROM (SELECT ts,name,mv FROM (SELECT _wstart as ts,name,floor(avg(velocity)/10)/floor(avg(velocity)/10) AS mv FROM readings WHERE name!='' AND ts > '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by name interval(10m) fill(value,0))) group BY name ;")
        # tdSql.query("SELECT _wstart,name,floor(avg(velocity)/10)/floor(avg(velocity)/10) AS mv FROM readings WHERE name!='' AND ts > '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by name interval(10m) fill(value,0)")

        # 7. avg-load
        tdSql.query("SELECT fleet, model,avg(ml) AS mean_load_percentage FROM (SELECT fleet, model,current_load/load_capacity AS ml FROM diagnostics partition BY name, fleet, model) partition BY fleet, model order by fleet ;")

        # 8. daily-activity
        tdSql.query(" SELECT model,ms1 FROM (SELECT _wstart as ts1,model, fleet,avg(status) AS ms1 FROM diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by model, fleet interval(10m) fill(value,0)) WHERE ts1 >= '2016-01-01T00:00:00Z' AND ts1 < '2016-01-05T00:00:01Z' AND ms1<1;")
        tdSql.query(" SELECT model,ms1 FROM (SELECT _wstart as ts1,model, fleet,avg(status) AS ms1 FROM diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by model, fleet interval(10m) ) WHERE ts1 >= '2016-01-01T00:00:00Z' AND ts1 < '2016-01-05T00:00:01Z' AND ms1<1;")
        tdSql.query("SELECT _wstart,model,fleet,count(ms1)/144 FROM (SELECT _wstart as ts1,model, fleet,avg(status) AS ms1 FROM diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by model, fleet interval(10m) fill(value,0)) WHERE ts1 >= '2016-01-01T00:00:00Z' AND ts1 < '2016-01-05T00:00:01Z' AND ms1<1 partition by model, fleet interval(1d) ;")
        tdSql.query("SELECT _wstart as ts,model,fleet,count(ms1)/144 FROM (SELECT _wstart as ts1,model, fleet,avg(status) AS ms1 FROM diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by model, fleet interval(10m) ) WHERE ts1 >= '2016-01-01T00:00:00Z' AND ts1 < '2016-01-05T00:00:01Z' AND ms1<1 partition by model, fleet interval(1d) ;")

        # 9. breakdown-frequency
        # NULL --- count(NULL)=0, expect count(NULL)=100
        tdSql.query("SELECT model,state_changed,count(state_changed) FROM (SELECT model,diff(broken_down) AS state_changed FROM (SELECT _wstart,model,cast(cast(floor(2*(sum(nzs)/count(nzs))) as bool) as int) AS broken_down FROM (SELECT ts,model, cast(cast(status as bool) as int) AS nzs FROM diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' ) WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition BY model interval(10m)) partition BY model) where model is null partition BY model,state_changed ")
        parRows=tdSql.queryRows
        assert parRows != 0 , "query result is wrong, query rows %d but expect > 0 " %parRows

        tdSql.query(" SELECT model,state_changed,count(state_changed) FROM (SELECT model,diff(broken_down) AS state_changed FROM (SELECT _wstart,model,cast(cast(floor(2*(sum(nzs)/count(nzs))) as bool) as int) AS broken_down FROM (SELECT ts,model, cast(cast(status as bool) as int) AS nzs FROM diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' ) WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition BY model interval(10m)) partition BY model) where state_changed =1 partition BY model,state_changed ;")

        # it's already supported:
        # 1. last-loc
        tdSql.query("SELECT last_row(ts),latitude,longitude,name,driver FROM readings WHERE fleet='South' and name IS NOT NULL partition BY name,driver order by name ;")

        # 2. low-fuel
        tdSql.query("SELECT last_row(ts),name,driver,fuel_state,driver FROM diagnostics WHERE fuel_state <= 0.1 AND fleet = 'South' and name IS NOT NULL GROUP BY name,driver order by name;")

        # 3. avg-vs-projected-fuel-consumption
        tdSql.query("select avg(fuel_consumption) as avg_fuel_consumption,avg(nominal_fuel_consumption) as nominal_fuel_consumption from readings where velocity > 1 group by fleet")
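
    # restartFunc starts `threadNumbers` threads, each running func_name (tsbsIotQuery here)
    # on its own connection, then restarts taosd for the chosen role (mnode/vnode/dnode)
    # `restartNumbers` times, verifying after every round that all dnodes come back online.
    # paraDict below is not referenced in this function; it mirrors the other cluster cases.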
    def restartFunc(self,func_name,threadNumbers,dnodeNumbers,mnodeNums,restartNumbers,stopRole):
        tdLog.printNoPrefix("======== test case 1: ")
        paraDict = {'dbName':     'db',
                    'dbNumbers':  8,
                    'dropFlag':   1,
                    'event':      '',
                    'vgroups':    2,
                    'replica':    1,
                    'stbName':    'stb',
                    'stbNumbers': 100,
                    'colPrefix':  'c',
                    'tagPrefix':  't',
                    'colSchema':  [{'type': 'INT', 'count':1}, {'type': 'binary', 'len':20, 'count':1}],
                    'tagSchema':  [{'type': 'INT', 'count':1}, {'type': 'binary', 'len':20, 'count':1}],
                    'ctbPrefix':  'ctb',
                    'ctbNum':     1,
                   }

        dnodeNumbers=int(dnodeNumbers)
        mnodeNums=int(mnodeNums)
        vnodeNumbers = int(dnodeNumbers-mnodeNums)

        tdSql.query("show dnodes;")
        tdLog.debug(tdSql.queryResult)
        clusterComCheck.checkDnodes(dnodeNumbers)

        tdLog.info("create database and stable")
        tdDnodes=cluster.dnodes
        stopcount =0
        threads=[]
        for i in range(threadNumbers):
            # each query thread gets its own client connection
            newTdSql=tdCom.newTdSql()
            threads.append(threading.Thread(target=func_name,args=(newTdSql,)))
        for tr in threads:
            tr.start()
tdLog.info("Take turns stopping %s "%stopRole)
|
||||||
|
while stopcount < restartNumbers:
|
||||||
|
tdLog.info(" restart loop: %d"%stopcount )
|
||||||
|
if stopRole == "mnode":
|
||||||
|
for i in range(mnodeNums):
|
||||||
|
tdDnodes[i].stoptaosd()
|
||||||
|
# sleep(10)
|
||||||
|
tdDnodes[i].starttaosd()
|
||||||
|
# sleep(10)
|
||||||
|
elif stopRole == "vnode":
|
||||||
|
for i in range(vnodeNumbers):
|
||||||
|
tdDnodes[i+mnodeNums].stoptaosd()
|
||||||
|
# sleep(10)
|
||||||
|
tdDnodes[i+mnodeNums].starttaosd()
|
||||||
|
# sleep(10)
|
||||||
|
elif stopRole == "dnode":
|
||||||
|
for i in range(dnodeNumbers):
|
||||||
|
tdDnodes[i].stoptaosd()
|
||||||
|
# sleep(10)
|
||||||
|
tdDnodes[i].starttaosd()
|
||||||
|
# sleep(10)
|
||||||
|
|
||||||
|
# dnodeNumbers don't include database of schema
|
||||||
|
if clusterComCheck.checkDnodes(dnodeNumbers):
|
||||||
|
tdLog.info("check dnodes status is ready")
|
||||||
|
else:
|
||||||
|
tdLog.info("check dnodes status is not ready")
|
||||||
|
self.stopThread(threads)
|
||||||
|
tdLog.exit("one or more of dnodes failed to start ")
|
||||||
|
# self.check3mnode()
|
||||||
|
stopcount+=1
|
||||||
|
|
||||||
|
for tr in threads:
|
||||||
|
tr.join()
|
||||||
|
|
||||||
|
|
||||||
|
    def run(self):
        tdLog.printNoPrefix("==========step1:create database and table,insert data ==============")
        self.createCluster()
        self.prepareData()
        queryPolicy=2
        simClientCfg="%s/taos.cfg"%tdDnodes.getSimCfgPath()
        cmd='sed -i "s/^queryPolicy.*/queryPolicy 2/g" %s'%simClientCfg
        os.system(cmd)
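        # queryPolicy 2 is intended to let queries run on qnodes as well as vnodes; editing
        # taos.cfg with sed only affects connections opened afterwards. A hedged in-session
        # alternative, as used by the companion tsbs test touched in this PR, would be:
        #   tdSql.execute('alter local "queryPolicy" "%d"' % queryPolicy)
        #   tdSql.query("show local variables;")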

        # self.tsbsIotQuery()
        self.restartFunc(func_name=self.tsbsIotQuery,threadNumbers=3,dnodeNumbers=5,mnodeNums=3,restartNumbers=3,stopRole='mnode')

    def stop(self):
        tdSql.close()
        tdLog.success(f"{__file__} successfully executed")

tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())

@@ -318,7 +318,6 @@ class TDTestCase:
         os.system(cmd)
         # tdDnodes.stop(1)
         # tdDnodes.start(1)
-        tdSql.execute("reset query cache")
         tdSql.execute('alter local "queryPolicy" "%d"'%queryPolicy)
         tdSql.query("show local variables;")
         for i in range(tdSql.queryRows):

@@ -1,3 +1,4 @@
+# from asyncio.windows_events import NULL
 import taos
 import sys
 import datetime

@@ -21,38 +22,99 @@ class TDTestCase:
         tdLog.debug(f"start to excute {__file__}")
         tdSql.init(conn.cursor(), True)
 
-    def prepareData(self):
-        database="db_tsbs"
-        ts=1451606400000
-        tdSql.execute(f"create database {database};")
-        tdSql.execute(f"use {database} ")
-        tdSql.execute('''
-        create table readings (ts timestamp,latitude double,longitude double,elevation double,velocity double,heading double,grade double,fuel_consumption double,load_capacity double,fuel_capacity double,nominal_fuel_consumption double) tags (name binary(30),fleet binary(30),driver binary(30),model binary(30),device_version binary(30));
-        ''')
-        tdSql.execute('''
-        create table diagnostics (ts timestamp,fuel_state double,current_load double,status bigint,load_capacity double,fuel_capacity double,nominal_fuel_consumption double) tags (name binary(30),fleet binary(30),driver binary(30),model binary(30),device_version binary(30)) ;
-        ''')
-        for i in range(10):
-            if i == 1 or i == 2 :
-                tdLog.debug(f"create table rct{i} using readings (name,fleet,driver,model,device_version) tags ('truck_{i}','South{i}','Trish{i}', NULL,'v2.3')")
-                tdSql.execute(f"create table rct{i} using readings (name,fleet,driver,model,device_version) tags ('truck_{i}','South{i}','Trish{i}', NULL,'v2.3')")
-            else :
-                tdSql.execute(f"create table rct{i} using readings (name,fleet,driver,model,device_version) tags ('truck_{i}','South{i}','Trish{i}','H-{i}','v2.3')")
-            if i == 1 or i == 2 :
-                tdSql.execute(f"create table dct{i} using diagnostics (name,fleet,driver,model,device_version) tags ('truck_{i}','South{i}','Trish{i}',NULL ,'v2.3')")
-            else:
-                tdSql.execute(f"create table dct{i} using diagnostics (name,fleet,driver,model,device_version) tags ('truck_{i}','South{i}','Trish{i}','H-{i}','v2.3')")
-        for j in range(10):
-            for i in range(100):
-                tdSql.execute(
-                    f"insert into rct{j} values ( {ts+i*60000}, {80+i}, {90+i}, {85+i}, {30+i*10}, {1.2*i}, {221+i*2}, {20+i*0.2}, {1500+i*20}, {150+i*2},{5+i} )"
-                )
-                status= random.randint(0,1)
-                tdSql.execute(
-                    f"insert into dct{j} values ( {ts+i*60000}, {1+i*0.1},{1400+i*15}, {status},{1500+i*20}, {150+i*2},{5+i} )"
-                )
-        tdSql.execute("insert into dct9 (ts,fuel_state) values('2021-07-13 14:06:33.123Z',1.2) ;")
-
+    def create_ctable(self,tsql=None, dbName='db',stbName='stb',ctbPrefix='ctb',ctbNum=1):
+        tsql.execute("use %s" %dbName)
+        pre_create = "create table"
+        sql = pre_create
+        #tdLog.debug("doing create one stable %s and %d child table in %s ..." %(stbname, count ,dbname))
+        for i in range(ctbNum):
+            tagValue = 'beijing'
+            if (i % 10 == 0):
+                sql += " %s%d using %s (name,fleet,driver,device_version) tags('truck_%d', 'South%d','Trish%d','v2.%d')"%(ctbPrefix,i,stbName,i,i,i,i)
+            else:
+                model = 'H-%d'%i
+                sql += " %s%d using %s tags('truck_%d', 'South%d','Trish%d','%s','v2.%d')"%(ctbPrefix,i,stbName,i,i,i,model,i)
+            if (i > 0) and (i%1000 == 0):
+                tsql.execute(sql)
+                sql = pre_create
+        if sql != pre_create:
+            tsql.execute(sql)
+
+        tdLog.debug("complete to create %d child tables in %s.%s" %(ctbNum, dbName, stbName))
+        return
+
+    def insertData(self,startTs,tsql=None, dbName='db',stbName='stb',ctbPrefix='ctb',ctbNum=1,rowsPerTbl=100,batchNum=1000):
+        tsql.execute("use %s" %dbName)
+        pre_insert = "insert into "
+        sql = pre_insert
+        if startTs is None:
+            t = time.time()
+            startTs = int(round(t * 1000))
+        for i in range(ctbNum):
+            sql += " %s%d values "%(ctbPrefix,i)
+            for j in range(rowsPerTbl):
+                if(ctbPrefix=="rct"):
+                    sql += f"({startTs+j*60000}, {80+j}, {90+j}, {85+j}, {30+j*10}, {1.2*j}, {221+j*2}, {20+j*0.2}, {1500+j*20}, {150+j*2},{5+j}) "
+                elif ( ctbPrefix=="dct"):
+                    status= random.randint(0,1)
+                    sql += f"( {startTs+j*60000}, {1+j*0.1},{1400+j*15}, {status},{1500+j*20}, {150+j*2},{5+j} ) "
+                # tdLog.debug("1insert sql:%s"%sql)
+                if (j > 0) and ((j%batchNum == 0) or (j == rowsPerTbl - 1)):
+                    # tdLog.debug("2insert sql:%s"%sql)
+                    tsql.execute(sql)
+                    if j < rowsPerTbl - 1:
+                        sql = "insert into %s%d values " %(ctbPrefix,i)
+                    else:
+                        sql = "insert into "
+        if sql != pre_insert:
+            # tdLog.debug("3insert sql:%s"%sql)
+            tsql.execute(sql)
+        tdLog.debug("insert data ............ [OK]")
+        return
+
+    def prepareData(self):
+        dbname="db_tsbs"
+        stabname1="readings"
+        stabname2="diagnostics"
+        ctbnamePre1="rct"
+        ctbnamePre2="dct"
+        ctbNums=40
+        self.ctbNums=ctbNums
+        rowNUms=200
+        ts=1451606400000
+        tdSql.execute(f"create database {dbname};")
+        tdSql.execute(f"use {dbname} ")
+        tdSql.execute(f'''
+        create table {stabname1} (ts timestamp,latitude double,longitude double,elevation double,velocity double,heading double,grade double,fuel_consumption double,load_capacity double,fuel_capacity double,nominal_fuel_consumption double) tags (name binary(30),fleet binary(30),driver binary(30),model binary(30),device_version binary(30));
+        ''')
+        tdSql.execute(f'''
+        create table {stabname2} (ts timestamp,fuel_state double,current_load double,status bigint,load_capacity double,fuel_capacity double,nominal_fuel_consumption double) tags (name binary(30),fleet binary(30),driver binary(30),model binary(30),device_version binary(30)) ;
+        ''')
+        self.create_ctable(tsql=tdSql,dbName=dbname,stbName=stabname1,ctbPrefix=ctbnamePre1,ctbNum=ctbNums)
+        self.create_ctable(tsql=tdSql,dbName=dbname,stbName=stabname2,ctbPrefix=ctbnamePre2,ctbNum=ctbNums)
+        self.insertData(tsql=tdSql,dbName=dbname,stbName=stabname1,ctbPrefix=ctbnamePre1,ctbNum=ctbNums,rowsPerTbl=rowNUms,startTs=ts,batchNum=1000)
+        self.insertData(tsql=tdSql,dbName=dbname,stbName=stabname2,ctbPrefix=ctbnamePre2,ctbNum=ctbNums,rowsPerTbl=rowNUms,startTs=ts,batchNum=1000)
+        # for i in range(ctbNum):
+        #     if i %10 == 0 :
+        #         # tdLog.debug(f"create table rct{i} using readings (name,fleet,driver,model,device_version) tags ('truck_{i}','South{i}','Trish{i}', NULL,'v2.3')")
+        #         tdSql.execute(f"create table rct{i} using readings (name,fleet,driver,model,device_version) tags ('truck_{i}','South{i}','Trish{i}', NULL,'v2.3')")
+        #     else :
+        #         tdSql.execute(f"create table rct{i} using readings (name,fleet,driver,model,device_version) tags ('truck_{i}','South{i}','Trish{i}','H-{i}','v2.3')")
+        #     if i %10 == 0 :
+        #         tdSql.execute(f"create table dct{i} using diagnostics (name,fleet,driver,model,device_version) tags ('truck_{i}','South{i}','Trish{i}',NULL ,'v2.3')")
+        #     else:
+        #         tdSql.execute(f"create table dct{i} using diagnostics (name,fleet,driver,model,device_version) tags ('truck_{i}','South{i}','Trish{i}','H-{i}','v2.3')")
+        # for j in range(ctbNums):
+        #     for i in range(rowNUms):
+        #         tdSql.execute(
+        #             f"insert into rct{j} values ( {ts+i*60000}, {80+i}, {90+i}, {85+i}, {30+i*10}, {1.2*i}, {221+i*2}, {20+i*0.2}, {1500+i*20}, {150+i*2},{5+i} )"
+        #         )
+        #         status= random.randint(0,1)
+        #         tdSql.execute(
+        #             f"insert into dct{j} values ( {ts+i*60000}, {1+i*0.1},{1400+i*15}, {status},{1500+i*20}, {150+i*2},{5+i} )"
+        #         )
+        # tdSql.execute("insert into dct9 (ts,fuel_state) values('2021-07-13 14:06:33.123Z',1.2) ;")
+
     # def check_avg(self ,origin_query , check_query):
     #     avg_result = tdSql.getResult(origin_query)
     #     origin_result = tdSql.getResult(check_query)

@@ -75,7 +137,6 @@ class TDTestCase:
 
         # test interval and partition
         tdSql.query(" SELECT avg(velocity) as mean_velocity ,name,driver,fleet FROM readings WHERE ts > 1451606400000 AND ts <= 1451606460000 partition BY name,driver,fleet; ")
-        print(tdSql.queryResult)
         parRows=tdSql.queryRows
         tdSql.query(" SELECT avg(velocity) as mean_velocity ,name,driver,fleet FROM readings WHERE ts > 1451606400000 AND ts <= 1451606460000 partition BY name,driver,fleet interval(10m); ")
         tdSql.checkRows(parRows)

@@ -94,7 +155,7 @@ class TDTestCase:
 
         # test partition interval limit (PRcore-TD-17410)
         tdSql.query("select name,driver from (SELECT name,driver,fleet ,avg(velocity) as mean_velocity FROM readings partition BY name,driver,fleet interval (10m) limit 1);")
-        tdSql.checkRows(10)
+        tdSql.checkRows(self.ctbNums)
 
         # test partition interval Pseudo time-column
         tdSql.query("SELECT count(ms1)/144 FROM (SELECT _wstart as ts1,model, fleet,avg(status) AS ms1 FROM diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by model, fleet interval(10m)) WHERE ts1 >= '2016-01-01T00:00:00Z' AND ts1 < '2016-01-05T00:00:01Z' AND ms1<1;")

@@ -136,17 +197,27 @@ class TDTestCase:
 
         tdSql.query("SELECT _wstart,model,fleet,count(ms1)/144 FROM (SELECT _wstart as ts1,model, fleet,avg(status) AS ms1 FROM diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by model, fleet interval(10m) fill(value,0)) WHERE ts1 >= '2016-01-01T00:00:00Z' AND ts1 < '2016-01-05T00:00:01Z' AND ms1<1 partition by model, fleet interval(1d) ;")
 
-        tdSql.query("SELECT _wstart,model,fleet,count(ms1)/144 FROM (SELECT _wstart as ts1,model, fleet,avg(status) AS ms1 FROM diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by model, fleet interval(10m) ) WHERE ts1 >= '2016-01-01T00:00:00Z' AND ts1 < '2016-01-05T00:00:01Z' AND ms1<1 partition by model, fleet interval(1d) ;")
+        tdSql.query("SELECT _wstart as ts,model,fleet,count(ms1)/144 FROM (SELECT _wstart as ts1,model, fleet,avg(status) AS ms1 FROM diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by model, fleet interval(10m) ) WHERE ts1 >= '2016-01-01T00:00:00Z' AND ts1 < '2016-01-05T00:00:01Z' AND ms1<1 partition by model, fleet interval(1d) ;")
 
 
         # 9. breakdown-frequency
         # NULL ---count(NULL)=0 expect count(NULL)= 100
         tdSql.query("SELECT model,state_changed,count(state_changed) FROM (SELECT model,diff(broken_down) AS state_changed FROM (SELECT _wstart,model,cast(cast(floor(2*(sum(nzs)/count(nzs))) as bool) as int) AS broken_down FROM (SELECT ts,model, cast(cast(status as bool) as int) AS nzs FROM diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' ) WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition BY model interval(10m)) partition BY model) where model is null partition BY model,state_changed ")
         parRows=tdSql.queryRows
-        assert parRows != 0 , "query result is wrong"
+        assert parRows != 0 , "query result is wrong, query rows %d but expect > 0 " %parRows
 
 
         tdSql.query(" SELECT model,state_changed,count(state_changed) FROM (SELECT model,diff(broken_down) AS state_changed FROM (SELECT _wstart,model,cast(cast(floor(2*(sum(nzs)/count(nzs))) as bool) as int) AS broken_down FROM (SELECT ts,model, cast(cast(status as bool) as int) AS nzs FROM diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' ) WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition BY model interval(10m)) partition BY model) where state_changed =1 partition BY model,state_changed ;")
+        sql="select model,ctc from (SELECT model,count(state_changed) as ctc FROM (SELECT model,diff(broken_down) AS state_changed FROM (SELECT model,cast(cast(floor(2*(sum(nzs)/count(nzs))) as bool) as int) AS broken_down FROM (SELECT ts,model, cast(cast(status as bool) as int) AS nzs FROM diagnostics WHERE ts >= 1451606400000 AND ts < 1451952001000 ) WHERE ts >= 1451606400000 AND ts < 1451952001000 partition BY model interval(10m)) partition BY model) WHERE state_changed = 1 partition BY model )where model is null;"
+
+        # for i in range(2):
+        #     tdSql.query("%s"%sql)
+        #     quertR1=tdSql.queryResult
+        #     for j in range(50):
+        #         tdSql.query("%s"%sql)
+        #         quertR2=tdSql.queryResult
+        #         assert quertR1 == quertR2 , "%s != %s ,The results of multiple queries are different" %(quertR1,quertR2)
+
 
         #it's already supported:
         # last-loc