fix: modify cases.task dir

This commit is contained in:
Alex Duan 2024-06-05 18:40:44 +08:00
parent 64c577465c
commit ce13165cd0
30 changed files with 7809 additions and 20 deletions

View File

@ -0,0 +1,55 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import time
import taos
import frame
import frame.etool
from frame.log import *
from frame.cases import *
from frame.sql import *
from frame.caseBase import *
from frame import *
class TDTestCase(TBase):

    def alterSupportVnodes(self):
        """Alter dnode 1's 'supportVnodes' option and verify the value is applied.

        Checks column 3 of `show dnodes` (the supportVnodes column) after
        each alteration.
        """
        tdLog.info(f"test function of altering supportVnodes")
        for value in ("128", "64"):
            tdSql.execute(f"alter dnode 1 'supportVnodes' '{value}'")
            # brief pause so the dnode has time to apply the new option
            time.sleep(1)
            tdSql.query('show dnodes')
            tdSql.checkData(0, 3, value)

    # run
    def run(self):
        tdLog.debug(f"start to excute {__file__}")
        # TS-4721
        self.alterSupportVnodes()
        tdLog.success(f"{__file__} successfully executed")


tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())

View File

@ -0,0 +1,110 @@
import taos
import sys
import os
import subprocess
import glob
import shutil
import time
from frame.log import *
from frame.cases import *
from frame.sql import *
from frame.srvCtl import *
from frame.caseBase import *
from frame import *
from frame.autogen import *
# from frame.server.dnodes import *
# from frame.server.cluster import *
class TDTestCase(TBase):
    # Server-side config override: record slow logs for queries only.
    updatecfgDict = {
        'slowLogScope':"query"
    }

    def init(self, conn, logSql, replicaVar=3):
        # 3-replica "snapshot" database; checkColName feeds the base-class checks.
        super(TDTestCase, self).init(conn, logSql, replicaVar=3, db="snapshot", checkColName="c1")
        self.valgrind = 0
        self.childtable_count = 10
        # tdSql.init(conn.cursor())
        tdSql.init(conn.cursor(), logSql)  # output sql.txt file

    def run(self):
        """Wipe every vnode's WAL and verify replicas recover via snapshot sync."""
        tdSql.prepare()
        # Build schema and seed data: 10 child tables, 1000 rows each.
        autoGen = AutoGen()
        autoGen.create_db(self.db, 2, 3)
        tdSql.execute(f"use {self.db}")
        autoGen.create_stable(self.stb, 5, 10, 8, 8)
        autoGen.create_child(self.stb, "d", self.childtable_count)
        autoGen.insert_data(1000)
        tdSql.execute(f"flush database {self.db}")
        # Stop one dnode, then keep writing so it falls behind the others.
        sc.dnodeStop(3)
        # clusterDnodes.stoptaosd(1)
        # clusterDnodes.starttaosd(3)
        # time.sleep(5)
        # clusterDnodes.stoptaosd(2)
        # clusterDnodes.starttaosd(1)
        # time.sleep(5)
        autoGen.insert_data(5000, True)
        self.flushDb(True)
        # wait flush operation over
        time.sleep(5)
        # sql = 'show vnodes;'
        # while True:
        #     bFinish = True
        #     param_list = tdSql.query(sql, row_tag=True)
        #     for param in param_list:
        #         if param[3] == 'leading' or param[3] == 'following':
        #             bFinish = False
        #             break
        #     if bFinish:
        #         break
        self.snapshotAgg()
        time.sleep(10)
        sc.dnodeStopAll()
        # Delete each vnode's WAL directory so recovery can only come from
        # snapshot replication.
        # NOTE(review): clusterDnodes is referenced here but its import
        # (frame.server.dnodes / frame.server.cluster) is commented out above;
        # confirm `from frame import *` actually exports it.
        for i in range(1, 4):
            path = clusterDnodes.getDnodeDir(i)
            dnodesRootDir = os.path.join(path,"data","vnode", "vnode*")
            dirs = glob.glob(dnodesRootDir)
            for dir in dirs:
                if os.path.isdir(dir):
                    self.remove_directory(os.path.join(dir, "wal"))
        sc.dnodeStart(1)
        sc.dnodeStart(2)
        sc.dnodeStart(3)
        sql = "show vnodes;"
        time.sleep(10)
        # Poll vnode status until synchronization finishes; abort if any
        # vnode reports offline.
        while True:
            bFinish = True
            param_list = tdSql.query(sql, row_tag=True)
            for param in param_list:
                if param[3] == 'offline':
                    tdLog.exit(
                        "dnode synchronous fail dnode id: %d, vgroup id:%d status offline" % (param[0], param[1]))
                if param[3] == 'leading' or param[3] == 'following':
                    bFinish = False
                    break
            if bFinish:
                break
        # Verify nothing was lost: 6000 rows = 1000 + 5000 inserted above.
        self.timestamp_step = 1000
        self.insert_rows = 6000
        self.checkInsertCorrect()
        self.checkAggCorrect()

    def remove_directory(self, directory):
        # Delete a directory tree; a failure aborts the whole test run.
        try:
            shutil.rmtree(directory)
            tdLog.debug("delete dir: %s " % (directory))
        except OSError as e:
            tdLog.exit("delete fail dir: %s " % (directory))

    def stop(self):
        tdSql.close()
        tdLog.success(f"{__file__} successfully executed")


tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())

View File

@ -0,0 +1,61 @@
{
"filetype": "insert",
"cfgdir": "/etc/taos",
"host": "127.0.0.1",
"port": 6030,
"user": "root",
"password": "taosdata",
"connection_pool_size": 8,
"num_of_records_per_req": 3000,
"prepared_rand": 3000,
"thread_count": 2,
"create_table_thread_count": 1,
"confirm_parameter_prompt": "no",
"databases": [
{
"dbinfo": {
"name": "db",
"drop": "yes",
"vgroups": 2,
"replica": 3,
"duration":"1d",
"wal_retention_period": 1,
"wal_retention_size": 1,
"keep": "3d,6d,30d"
},
"super_tables": [
{
"name": "stb",
"child_table_exists": "no",
"childtable_count": 10,
"insert_rows": 100000,
"childtable_prefix": "d",
"insert_mode": "taosc",
"timestamp_step": 10000,
"start_timestamp":"now-12d",
"columns": [
{ "type": "bool", "name": "bc"},
{ "type": "float", "name": "fc", "min": 100, "max": 100},
{ "type": "double", "name": "dc", "min": 200, "max": 200},
{ "type": "tinyint", "name": "ti"},
{ "type": "smallint", "name": "si" },
{ "type": "int", "name": "ic" },
{ "type": "bigint", "name": "bi" },
{ "type": "utinyint", "name": "uti"},
{ "type": "usmallint", "name": "usi"},
{ "type": "uint", "name": "ui" },
{ "type": "ubigint", "name": "ubi"},
{ "type": "binary", "name": "bin", "len": 16},
{ "type": "nchar", "name": "nch", "len": 32}
],
"tags": [
{"type": "tinyint", "name": "groupid","max": 10,"min": 1},
{"name": "location","type": "binary", "len": 16, "values":
["San Francisco", "Los Angles", "San Diego", "San Jose", "Palo Alto", "Campbell", "Mountain View","Sunnyvale", "Santa Clara", "Cupertino"]
}
]
}
]
}
]
}

View File

@ -0,0 +1,145 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import time
import random
import taos
import frame
import frame.etool
from frame.log import *
from frame.cases import *
from frame.sql import *
from frame.caseBase import *
from frame import *
from frame.srvCtl import *
class TDTestCase(TBase):
    # Server-side config overrides: lossy compression for float/double with
    # very tight precision, AdtFse on, count() always returns a row, and
    # slow-log recording for inserts.
    updatecfgDict = {
        "countAlwaysReturnValue" : "1",
        "lossyColumns" : "float,double",
        "fPrecision" : "0.000000001",
        "dPrecision" : "0.00000000000000001",
        "ifAdtFse" : "1",
        'slowLogScope' : "insert"
    }

    def insertData(self):
        """Load data via taosBenchmark (snapshot.json) and create the count-check table."""
        tdLog.info(f"insert data.")
        # taosBenchmark run
        jfile = etool.curFile(__file__, "snapshot.json")
        etool.benchMark(json=jfile)
        tdSql.execute(f"use {self.db}")
        # set insert data information
        self.childtable_count = 10
        self.insert_rows = 100000
        self.timestamp_step = 10000
        # create count check table (left empty on purpose; see doAction)
        sql = f"create table {self.db}.ta(ts timestamp, age int) tags(area int)"
        tdSql.execute(sql)

    def checkFloatDouble(self):
        """Verify lossy-compressed float/double columns kept their exact values.

        snapshot.json writes fc=100 and dc=200 for every row, so any other
        value means compression corrupted the data.
        """
        sql = f"select * from {self.db}.{self.stb} where fc!=100"
        tdSql.query(sql)
        tdSql.checkRows(0)
        sql = f"select * from {self.db}.{self.stb} where dc!=200"
        tdSql.query(sql)
        tdSql.checkRows(0)
        sql = f"select avg(fc) from {self.db}.{self.stb}"
        tdSql.checkFirstValue(sql, 100)
        sql = f"select avg(dc) from {self.db}.{self.stb}"
        tdSql.checkFirstValue(sql, 200)

    def alterReplica3(self):
        """Raise the database to replica 3 while bouncing dnodes 2 and 3.

        Returns True when the alter transaction completes (tdLog.exit aborts
        the run otherwise).
        """
        sql = f"alter database {self.db} replica 3"
        tdSql.execute(sql, show=True)
        time.sleep(2)
        sc.dnodeStop(2)
        sc.dnodeStop(3)
        time.sleep(5)
        sc.dnodeStart(2)
        sc.dnodeStart(3)
        if self.waitTransactionZero() is False:
            tdLog.exit(f"{sql} transaction not finished")
            return False
        return True

    def doAction(self):
        """Run vgroup/replica maintenance operations over the loaded data."""
        tdLog.info(f"do action.")
        self.flushDb()
        # split vgroups
        self.splitVGroups()
        self.trimDb()
        self.checkAggCorrect()
        # balance vgroups
        self.balanceVGroupLeader()
        # replica to 1
        self.alterReplica(1)
        self.checkAggCorrect()
        self.compactDb()
        self.alterReplica3()
        vgids = self.getVGroup(self.db)
        selid = random.choice(vgids)
        self.balanceVGroupLeaderOn(selid)
        # check count always return value
        sql = f"select count(*) from {self.db}.ta"
        tdSql.query(sql)
        # countAlwaysReturnValue=1, so count on the empty table still yields one row
        tdSql.checkRows(1)

    # run
    def run(self):
        tdLog.debug(f"start to excute {__file__}")
        # insert data
        self.insertData()
        # check insert data correct
        self.checkInsertCorrect()
        # check float double value ok
        self.checkFloatDouble()
        # save
        self.snapshotAgg()
        # do action
        self.doAction()
        # check save agg result correct
        self.checkAggCorrect()
        # check insert correct again
        self.checkInsertCorrect()
        # check float double value ok
        self.checkFloatDouble()
        tdLog.success(f"{__file__} successfully executed")


tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())

View File

@ -0,0 +1,62 @@
{
"filetype": "insert",
"cfgdir": "/etc/taos",
"host": "127.0.0.1",
"port": 6030,
"user": "root",
"password": "taosdata",
"connection_pool_size": 8,
"num_of_records_per_req": 3000,
"prepared_rand": 3000,
"thread_count": 2,
"create_table_thread_count": 1,
"confirm_parameter_prompt": "no",
"continue_if_fail": "yes",
"databases": [
{
"dbinfo": {
"name": "db",
"drop": "yes",
"vgroups": 2,
"replica": 3,
"duration":"1d",
"wal_retention_period": 1,
"wal_retention_size": 1,
"keep": "3d,6d,30d"
},
"super_tables": [
{
"name": "stb",
"child_table_exists": "no",
"childtable_count": 10,
"insert_rows": 100000000,
"childtable_prefix": "d",
"insert_mode": "taosc",
"timestamp_step": 10000,
"start_timestamp":"now-12d",
"columns": [
{ "type": "bool", "name": "bc"},
{ "type": "float", "name": "fc" },
{ "type": "double", "name": "dc"},
{ "type": "tinyint", "name": "ti"},
{ "type": "smallint", "name": "si" },
{ "type": "int", "name": "ic" },
{ "type": "bigint", "name": "bi" },
{ "type": "utinyint", "name": "uti"},
{ "type": "usmallint", "name": "usi"},
{ "type": "uint", "name": "ui" },
{ "type": "ubigint", "name": "ubi"},
{ "type": "binary", "name": "bin", "len": 16},
{ "type": "nchar", "name": "nch", "len": 32}
],
"tags": [
{"type": "tinyint", "name": "groupid","max": 10,"min": 1},
{"name": "location","type": "binary", "len": 16, "values":
["San Francisco", "Los Angles", "San Diego", "San Jose", "Palo Alto", "Campbell", "Mountain View","Sunnyvale", "Santa Clara", "Cupertino"]
}
]
}
]
}
]
}

View File

@ -0,0 +1,136 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import time
import random
import taos
import frame
import frame.etool
import json
import threading
from frame.log import *
from frame.cases import *
from frame.sql import *
from frame.caseBase import *
from frame import *
from frame.autogen import *
from frame.srvCtl import *
class TDTestCase(TBase):
    # Server-side config override for this case.
    updatecfgDict = {
        'slowLogScope' : "others"
    }

    def init(self, conn, logSql, replicaVar=1):
        """Prepare benchmark config: db with 1 vgroup / replica 1, 100k rows."""
        tdLog.debug(f"start to init {__file__}")
        self.replicaVar = int(replicaVar)
        tdSql.init(conn.cursor(), logSql)  # output sql.txt file
        self.configJsonFile('splitVgroupByLearner.json', 'db', 1, 1, 'splitVgroupByLearner.json', 100000)

    def configJsonFile(self, fileName, dbName, vgroups, replica, newFileName='', insert_rows=100000,
                       timestamp_step=10000):
        """Rewrite a taosBenchmark JSON config with the given db/vgroup/replica/row settings.

        When newFileName is empty the original file is overwritten in place.
        """
        tdLog.debug(f"configJsonFile {fileName}")
        filePath = etool.curFile(__file__, fileName)
        with open(filePath, 'r') as f:
            data = json.load(f)
        if len(newFileName) == 0:
            newFileName = fileName
        data['databases'][0]['dbinfo']['name'] = dbName
        data['databases'][0]['dbinfo']['vgroups'] = vgroups
        data['databases'][0]['dbinfo']['replica'] = replica
        data['databases'][0]['super_tables'][0]['insert_rows'] = insert_rows
        data['databases'][0]['super_tables'][0]['timestamp_step'] = timestamp_step
        json_data = json.dumps(data)
        filePath = etool.curFile(__file__, newFileName)
        with open(filePath, "w") as file:
            file.write(json_data)
        tdLog.debug(f"configJsonFile {json_data}")

    def splitVgroupThread(self, configFile, event):
        """After the first insert round completes, raise replica to 3 and split a vgroup."""
        # self.insertData(configFile)
        event.wait()  # set by dbInsertThread when the first insert round is done
        time.sleep(5)
        tdLog.debug("splitVgroupThread start")
        tdSql.execute('ALTER DATABASE db REPLICA 3')
        time.sleep(5)
        tdSql.execute('use db')
        rowLen = tdSql.query('show vgroups')
        if rowLen > 0:
            vgroupId = tdSql.getData(0, 0)
            tdLog.debug(f"splitVgroupThread vgroupId:{vgroupId}")
            tdSql.execute(f"split vgroup {vgroupId}")
        else:
            tdLog.exit("get vgroupId fail!")
        # self.configJsonFile(configFile, 'db1', 1, 1, configFile, 100000000)
        # self.insertData(configFile)

    def dnodeNodeStopThread(self, event):
        """Alternately stop/start dnodes 2 and 3 while the other threads work."""
        event.wait()
        tdLog.debug("dnodeNodeStopThread start")
        time.sleep(10)
        on = 2
        for i in range(5):
            # even iterations bounce dnode 2, odd ones dnode 3
            if i % 2 == 0:
                on = 2
            else:
                on = 3
            sc.dnodeStop(on)
            time.sleep(5)
            sc.dnodeStart(on)
            time.sleep(5)

    def dbInsertThread(self, configFile, event):
        """Run two benchmark insert rounds; signal `event` after the first one."""
        tdLog.debug(f"dbInsertThread start {configFile}")
        self.insertData(configFile)
        event.set()
        tdLog.debug(f"dbInsertThread first end {event}")
        self.configJsonFile(configFile, 'db', 2, 3, configFile, 100000)
        self.insertData(configFile)

    def insertData(self, configFile):
        """Run taosBenchmark with the given JSON config file."""
        tdLog.info(f"insert data.")
        # taosBenchmark run
        jfile = etool.curFile(__file__, configFile)
        etool.benchMark(json=jfile)

    # run
    def run(self):
        tdLog.debug(f"start to excute {__file__}")
        event = threading.Event()
        t1 = threading.Thread(target=self.splitVgroupThread, args=('splitVgroupByLearner.json', event))
        t2 = threading.Thread(target=self.dbInsertThread, args=('splitVgroupByLearner.json', event))
        # FIX: Thread's `args` must be a sequence. The original `args=(event)`
        # is just `event` (no tuple), which makes the thread raise TypeError
        # when it starts, so dnodeNodeStopThread never ran.
        t3 = threading.Thread(target=self.dnodeNodeStopThread, args=(event,))
        t1.start()
        t2.start()
        t3.start()
        tdLog.debug("threading started!!!!!")
        t1.join()
        t2.join()
        t3.join()
        tdLog.success(f"{__file__} successfully executed")

    def stop(self):
        tdSql.close()
        tdLog.success(f"{__file__} successfully executed")


tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())

View File

@ -0,0 +1,193 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import time
import random
import taos
import frame.etool
from frame.log import *
from frame.sql import *
from frame.cases import *
from frame.caseBase import *
from frame.srvCtl import *
from frame import *
class TDTestCase(TBase):
    # Server-side config overrides exercising unit-suffixed values (K/M/G/T/P).
    # FIX: the original literal listed 'streamMax' twice ('1M', then '1P');
    # a Python dict keeps only the last duplicate, so '1P' was the value
    # actually applied -- the dead '1M' entry is removed here.
    updatecfgDict = {
        'queryMaxConcurrentTables': '2K',
        'totalMemoryKB': '1G',
        'streamMax': '1P',
        'streamBufferSize':'1T',
        'slowLogScope':"query"
    }

    def insertData(self):
        """Insert test data with taosBenchmark: 10 child tables x 10000 rows."""
        tdLog.info(f"insert data.")
        # set insert data information
        self.childtable_count = 10
        self.insert_rows = 10000
        self.timestamp_step = 1000
        # taosBenchmark run
        etool.benchMark(command = f"-d {self.db} -t {self.childtable_count} -n {self.insert_rows} -v 2 -y")

    def checkQueryOK(self, rets):
        # Successful `taos -s` output carries "Query OK, ..." on its
        # next-to-last line; abort the case otherwise.
        if rets[-2][:9] != "Query OK,":
            tdLog.exit(f"check taos -s return unexpect: {rets}")

    def doTaos(self):
        """Check the taos CLI: `alter local` options, info flags, and bad input."""
        tdLog.info(f"check taos command options...")
        # local command
        options = [
            "DebugFlag 143",
            "enableCoreFile 1",
            "fqdn 127.0.0.1",
            "firstEp 127.0.0.1",
            "locale ENG",
            "metaCacheMaxSize 10000",
            "minimalTmpDirGB 5",
            "minimalLogDirGB 1",
            "secondEp 127.0.0.2",
            "smlChildTableName smltbname",
            "smlAutoChildTableNameDelimiter autochild",
            "smlTagName tagname",
            "smlTsDefaultName tsdef",
            "serverPort 6030",
            "slowLogScope insert",
            "timezone tz",
            "tempDir /var/tmp"
        ]
        # exec
        for option in options:
            rets = etool.runBinFile("taos", f"-s \"alter local '{option}'\";")
            self.checkQueryOK(rets)
        # error
        etool.runBinFile("taos", f"-s \"alter local 'nocmd check'\";")
        # help
        rets = etool.runBinFile("taos", "--help")
        self.checkListNotEmpty(rets)
        # b r w s
        sql = f"select * from {self.db}.{self.stb} limit 10"
        rets = etool.runBinFile("taos", f'-B -r -w 100 -s "{sql}" ')
        self.checkListNotEmpty(rets)
        # -C
        rets = etool.runBinFile("taos", "-C")
        self.checkListNotEmpty(rets)
        # -t
        rets = etool.runBinFile("taos", "-t")
        self.checkListNotEmpty(rets)
        # -v
        rets = etool.runBinFile("taos", "-V")
        self.checkListNotEmpty(rets)
        # -?
        rets = etool.runBinFile("taos", "-?")
        self.checkListNotEmpty(rets)
        # TSDB_FQDN_LEN = 128
        # FIX: str.rjust returns the padded string rather than modifying it in
        # place; the original discarded the result, so these over-length
        # argument tests actually ran with an 18-char name instead of 230.
        lname = "testhostnamelength".rjust(230, 'a')
        # except test
        sql = f"show vgroups;"
        etool.exeBinFile("taos", f'-h {lname} -s "{sql}" ', wait=False)
        etool.exeBinFile("taos", f'-u {lname} -s "{sql}" ', wait=False)
        etool.exeBinFile("taos", f'-d {lname} -s "{sql}" ', wait=False)
        etool.exeBinFile("taos", f'-a {lname} -s "{sql}" ', wait=False)
        etool.exeBinFile("taos", f'-p{lname} -s "{sql}" ', wait=False)
        etool.exeBinFile("taos", f'-w -s "{sql}" ', wait=False)
        etool.exeBinFile("taos", f'abc', wait=False)
        etool.exeBinFile("taos", f'-V', wait=False)
        etool.exeBinFile("taos", f'-?', wait=False)
        # others
        etool.exeBinFile("taos", f'-N 200 -l 2048 -s "{sql}" ', wait=False)

    def doTaosd(self):
        """Check taosd CLI options, then taos running as a network-test server."""
        tdLog.info(f"check taosd command options...")
        idx = 1  # dnode1
        cfg = sc.dnodeCfgPath(idx)
        # -s
        sdb = "./sdb.json"
        eos.delFile(sdb)
        etool.exeBinFile("taosd", f"-s -c {cfg}")
        # -C
        etool.exeBinFile("taosd", "-C")
        # -k
        etool.exeBinFile("taosd", "-k", False)
        # -V
        rets = etool.runBinFile("taosd", "-V")
        self.checkListNotEmpty(rets)
        # --help
        rets = etool.runBinFile("taosd", "--help")
        self.checkListNotEmpty(rets)
        # except input
        etool.exeBinFile("taosd", "-c")
        etool.exeBinFile("taosd", "-e")
        # stop taosd
        sc.dnodeStop(idx)
        # other
        etool.exeBinFile("taosd", f"-dm -c {cfg}", False)
        sc.dnodeStop(idx)
        etool.exeBinFile("taosd", "-a http://192.168.1.10")
        #exe
        etool.exeBinFile("taosd", f"-E abc -c {cfg}", False)
        sc.dnodeStop(idx)
        etool.exeBinFile("taosd", f"-e def -c {cfg}", False)
        # stop taosd test taos as server
        sc.dnodeStop(idx)
        etool.exeBinFile("taos", f'-n server', wait=False)
        time.sleep(3)
        eos.exe("pkill -9 taos")
        # call enter password
        etool.exeBinFile("taos", f'-p', wait=False)
        time.sleep(1)
        eos.exe("pkill -9 taos")

    # run
    def run(self):
        tdLog.debug(f"start to excute {__file__}")
        # insert data
        self.insertData()
        # do taos
        self.doTaos()
        # do action
        self.doTaosd()
        tdLog.success(f"{__file__} successfully executed")


tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())

View File

@ -0,0 +1,63 @@
import taos
import sys
import os
import subprocess
import glob
import shutil
import time
from frame.log import *
from frame.cases import *
from frame.sql import *
from frame.srvCtl import *
from frame.caseBase import *
from frame import *
from frame.autogen import *
# from frame.server.dnodes import *
# from frame.server.cluster import *
class TDTestCase(TBase):

    def init(self, conn, logSql, replicaVar=1):
        # Single-replica setup; the base class receives the shared check column.
        super(TDTestCase, self).init(conn, logSql, replicaVar=1, checkColName="c1")
        self.valgrind = 0
        self.db = "test"
        self.stb = "meters"
        self.childtable_count = 10
        tdSql.init(conn.cursor(), logSql)

    def create_encrypt_db(self):
        """Create an sm4-encrypted database, load data, and verify it reads back."""
        tdSql.execute("create encrypt_key '1234567890'")
        gen = AutoGen()
        gen.create_db(self.db, 2, 1, "ENCRYPT_ALGORITHM 'sm4'")
        tdSql.execute(f"use {self.db}")
        gen.create_stable(self.stb, 2, 3, 8, 8)
        gen.create_child(self.stb, "d", self.childtable_count)
        gen.insert_data(1000)
        # every child table got 1000 rows
        tdSql.query(f"select * from {self.db}.{self.stb}")
        tdSql.checkRows(1000 * self.childtable_count)
        self.timestamp_step = 1000
        self.insert_rows = 1000
        self.checkInsertCorrect()

    def create_encrypt_db_error(self):
        """Statements that must be rejected: bad key lengths, encryption with no key."""
        for bad_sql in (
            "create encrypt_key '123'",
            "create encrypt_key '12345678abcdefghi'",
            "create database test ENCRYPT_ALGORITHM 'sm4'",
        ):
            tdSql.error(bad_sql)

    def run(self):
        self.create_encrypt_db_error()
        self.create_encrypt_db()

    def stop(self):
        tdSql.close()
        tdLog.success(f"{__file__} successfully executed")


tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())

28
tests/army/frame/util.py Normal file
View File

@ -0,0 +1,28 @@
###################################################################
# Copyright (c) 2023 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
#
# about tools funciton extension
#
import sys
import os
import time
import datetime
import psutil
# cpu frequent as random
def cpuRand(max):
    """Return a pseudo-random integer in [0, max).

    Uses the fractional digits of the current CPU frequency as a cheap
    entropy source and reduces them modulo *max*.
    """
    frac_digits = str(psutil.cpu_freq().current).split(".")[1]
    return int(frac_digits) % max

View File

@ -0,0 +1,86 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import time
import random
import taos
import frame
import frame.etool
from frame.log import *
from frame.cases import *
from frame.sql import *
from frame.caseBase import *
from frame import *
class TDTestCase(TBase):

    def checkGeometry(self):
        """Insert WKT samples of every geometry kind and verify row counts."""
        tdLog.info(f"check geometry")
        tdSql.execute("create database db_geometry;")
        tdSql.execute("use db_geometry;")
        tdSql.execute("create table t_ge (ts timestamp, id int, c1 GEOMETRY(512));")
        # (timestamp, id, WKT literal) -- one row per geometry variant
        samples = [
            (1717122943000, 1, "MULTIPOINT ((0 0), (1 1))"),
            (1717122944000, 1, "MULTIPOINT (0 0, 1 1)"),
            (1717122945000, 2, "POINT (0 0)"),
            (1717122946000, 2, "POINT EMPTY"),
            (1717122947000, 3, "LINESTRING (0 0, 0 1, 1 2)"),
            (1717122948000, 3, "LINESTRING EMPTY"),
            (1717122949000, 4, "POLYGON ((0 0, 1 0, 1 1, 0 1, 0 0))"),
            (1717122950000, 4, "POLYGON ((0 0, 4 0, 4 4, 0 4, 0 0), (1 1, 1 2, 2 2, 2 1, 1 1))"),
            (1717122951000, 4, "POLYGON EMPTY"),
            (1717122952000, 5, "MULTILINESTRING ((0 0, 1 1), (2 2, 3 3))"),
            (1717122953000, 6, "MULTIPOLYGON (((1 1, 1 3, 3 3, 3 1, 1 1)), ((4 3, 6 3, 6 1, 4 1, 4 3)))"),
            (1717122954000, 7, "GEOMETRYCOLLECTION (MULTIPOINT((0 0), (1 1)), POINT(3 4), LINESTRING(2 3, 3 4))"),
        ]
        for ts, gid, wkt in samples:
            tdSql.execute(f"insert into t_ge values({ts}, {gid}, '{wkt}');")
        # total row count, then the expected count for each id
        tdSql.query("select * from t_ge;")
        tdSql.checkRows(len(samples))
        for gid, expected in ((1, 2), (2, 2), (3, 2), (4, 3), (5, 1), (6, 1), (7, 1)):
            tdSql.query(f"select * from t_ge where id={gid};")
            tdSql.checkRows(expected)

    def checkDataType(self):
        """Entry point for datatype checks; currently geometry only."""
        tdLog.info(f"check datatype")
        self.checkGeometry()

    # run
    def run(self):
        tdLog.debug(f"start to excute {__file__}")
        # check insert datatype
        self.checkDataType()
        tdLog.success(f"{__file__} successfully executed")


tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())

View File

@ -0,0 +1,182 @@
from frame.log import *
from frame.cases import *
from frame.sql import *
from frame.caseBase import *
from frame import *
from frame.eos import *
import random
import string
class TDTestCase(TBase):
    """Add test case to test column and tag boundary for task TD-28586
    """
    def init(self, conn, logSql, replicaVar=1):
        self.replicaVar = int(replicaVar)
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor())
        # define the max properties of column and tag
        self.super_table_max_column_num = 4096
        self.max_tag_num = 128
        self.max_tag_length = 16382     # max binary/varchar tag length (bytes)
        self.max_column_length = 65517  # max binary/varchar column length (bytes)
        self.child_table_num = 1
        self.insert_round_num = 700     # insert rounds per child table
        self.row_num_per_round = 15     # rows per insert statement
        self.start_ts = 1704082431000   # base timestamp (ms)

    def prepare_data(self):
        # database
        tdSql.execute("create database db;")
        tdSql.execute("use db;")

    def test_binary_boundary(self):
        # Max-length binary column/tag: create, bulk insert, then spot-check.
        # create tables
        tdSql.execute(f"create table st_binary (ts timestamp, c1 binary({self.max_column_length})) tags (t1 binary({self.max_tag_length}));")
        for i in range(self.child_table_num):
            # create child table with max column and tag length
            tag = ''.join(random.sample(string.ascii_lowercase, 1)) * self.max_tag_length
            tdSql.execute(f"create table ct_binary{i+1} using st_binary tags('{tag}');")
            # insert data
            for j in range(self.insert_round_num):
                sql = "insert into ct_binary%s values" % (i+1)
                for k in range(self.row_num_per_round):
                    # NOTE(review): the offset is j*insert_round_num + k*row_num_per_round,
                    # not j*row_num_per_round + k; the offsets stay distinct
                    # (700j + 15k cannot collide for k < 15) but are unevenly
                    # spaced -- confirm this is intentional.
                    sql += "(%s, '%s')," % (str(self.start_ts + (j * self.insert_round_num + k * self.row_num_per_round + 1)), 'a' * self.max_column_length)
                tdSql.execute(sql)
                tdLog.info(f"Insert {self.row_num_per_round} rows data into ct_binary{i+1} {j+1} times successfully")
        tdSql.execute("flush database db;")
        # check the data
        # NOTE(review): `tag` below is the value left over from the last loop
        # iteration; correct while child_table_num == 1, wrong if it is raised.
        for i in range(self.child_table_num):
            tdSql.query(f"select * from ct_binary{i+1};")
            tdSql.checkRows(10500)  # 700 rounds * 15 rows
            row_num = random.randint(0, 9999)
            tdSql.checkData(row_num, 1, 'a' * self.max_column_length)
            tdSql.query(f"show tags from ct_binary{i+1};")
            tdSql.checkData(0, 5, tag)

    def test_varchar_boundary(self):
        # Same flow as test_binary_boundary, using varchar and payload 'b'.
        # create tables
        tdSql.execute(f"create table st_varchar (ts timestamp, c1 varchar({self.max_column_length})) tags (t1 varchar({self.max_tag_length}));")
        for i in range(self.child_table_num):
            # create child table with max column and tag length
            tag = ''.join(random.sample(string.ascii_lowercase, 1)) * self.max_tag_length
            tdSql.execute(f"create table ct_varchar{i+1} using st_varchar tags('{tag}');")
            # insert data
            for j in range(self.insert_round_num):
                sql = "insert into ct_varchar%s values" % (i+1)
                for k in range(self.row_num_per_round):
                    sql += "(%s, '%s')," % (str(self.start_ts + (j * self.insert_round_num + k * self.row_num_per_round + 1)), 'b' * self.max_column_length)
                tdSql.execute(sql)
                tdLog.info(f"Insert {self.row_num_per_round} rows data into ct_varchar{i+1} {j+1} times successfully")
        tdSql.execute("flush database db;")
        # check the data
        for i in range(self.child_table_num):
            tdSql.query(f"select * from ct_varchar{i+1};")
            tdSql.checkRows(10500)
            row_num = random.randint(0, 9999)
            tdSql.checkData(row_num, 1, 'b' * self.max_column_length)
            tdSql.query(f"show tags from ct_varchar{i+1};")
            tdSql.checkData(0, 5, tag)

    def gen_chinese_string(self, length):
        # Random string of `length` CJK characters from the common-ideograph range.
        start = 0x4e00
        end = 0x9fa5
        chinese_string = ''
        for _ in range(length):
            chinese_string += chr(random.randint(start, end))
        return chinese_string

    def test_nchar_boundary(self):
        # nchar stores UCS-4, so byte limits translate to length // 4 characters.
        max_nchar_column_length = self.max_column_length // 4
        max_nchar_tag_length = self.max_tag_length // 4
        # create tables
        tdSql.execute(f"create table st_nchar (ts timestamp, c1 nchar({max_nchar_column_length})) tags (t1 nchar({max_nchar_tag_length}));")
        for i in range(self.child_table_num):
            # create child table with max column and tag length
            tag = self.gen_chinese_string(max_nchar_tag_length)
            column = self.gen_chinese_string(max_nchar_column_length)
            tdSql.execute(f"create table ct_nchar{i+1} using st_nchar tags('{tag}');")
            # insert data
            for j in range(self.insert_round_num):
                sql = "insert into ct_nchar%s values" % (i+1)
                for k in range(self.row_num_per_round):
                    sql += "(%s, '%s')," % (str(self.start_ts + (j * self.insert_round_num + k * self.row_num_per_round + 1)), column)
                tdSql.execute(sql)
                tdLog.info(f"Insert {self.row_num_per_round} rows data into ct_nchar{i+1} {j+1} times successfully")
        tdSql.execute("flush database db;")
        # check the data
        for i in range(self.child_table_num):
            tdSql.query(f"select * from ct_nchar{i+1};")
            tdSql.checkRows(10500)
            row_num = random.randint(0, 9999)
            tdSql.checkData(row_num, 1, column)
            tdSql.query(f"show tags from ct_nchar{i+1};")
            tdSql.checkData(0, 5, tag)

    def test_varbinary_boundary(self):
        # varbinary payloads are passed hex-encoded ('\x...'), fewer rows per
        # statement to stay under the SQL length limit.
        row_num_per_round = 8
        # create tables
        tdSql.execute(f"create table st_varbinary (ts timestamp, c1 varbinary({self.max_column_length})) tags (t1 varbinary({self.max_tag_length}));")
        for i in range(self.child_table_num):
            # create child table with max column and tag length
            tag = (''.join(random.sample(string.ascii_lowercase, 1)) * self.max_tag_length).encode().hex()
            column = (''.join(random.sample(string.ascii_lowercase, 1)) * self.max_column_length).encode().hex()
            tdSql.execute("create table ct_varbinary%s using st_varbinary tags('%s');" % (str(i+1), '\\x' + tag))
            # insert data
            for j in range(self.insert_round_num):
                sql = "insert into ct_varbinary%s values" % (i+1)
                for k in range(row_num_per_round):
                    # NOTE(review): the loop bound is the local row_num_per_round (8)
                    # but the offset still uses self.row_num_per_round (15) -- confirm.
                    sql += "(%s, '%s')," % (str(self.start_ts + (j * self.insert_round_num + k * self.row_num_per_round + 1)), '\\x' + column)
                tdSql.execute(sql)
                tdLog.info(f"Insert {row_num_per_round} rows data into ct_varbinary{i+1} {j+1} times successfully")
        tdSql.execute("flush database db;")
        # check the data
        for i in range(self.child_table_num):
            tdSql.query(f"select * from ct_varbinary{i+1};")
            tdSql.checkRows(5600)  # 700 rounds * 8 rows
            row_num = random.randint(0, 5599)
            tdSql.checkData(row_num, 1, bytes.fromhex(column))
            tdSql.query(f"show tags from ct_varbinary{i+1};")
            tdSql.checkData(0, 5, '\\x' + tag.upper())

    def test_json_tag_boundary(self):
        row_num_per_round = 8
        max_json_tag_length = 4095
        max_json_tag_key_length = 256
        # create tables
        tdSql.execute(f"create table st_json_tag (ts timestamp, c1 varbinary({self.max_column_length})) tags (t1 json);")
        for i in range(self.child_table_num):
            # create child table with max column and tag length
            tag_key = ''.join(random.sample(string.ascii_lowercase, 1)) * max_json_tag_key_length
            # the -7 leaves room for the JSON punctuation of {"key":"value"}
            tag_value = ''.join(random.sample(string.ascii_lowercase, 1)) * (max_json_tag_length - max_json_tag_key_length - 7)
            column = (''.join(random.sample(string.ascii_lowercase, 1)) * self.max_column_length).encode().hex()
            tdSql.execute("create table ct_json_tag%s using st_json_tag tags('%s');" % (str(i+1), f'{{"{tag_key}":"{tag_value}"}}'))
            # insert data
            for j in range(self.insert_round_num):
                sql = "insert into ct_json_tag%s values" % (i+1)
                for k in range(row_num_per_round):
                    sql += "(%s, '%s')," % (str(self.start_ts + (j * self.insert_round_num + k * self.row_num_per_round + 1)), '\\x' + column)
                tdSql.execute(sql)
                tdLog.info(f"Insert {row_num_per_round} rows data into ct_json_tag{i+1} {j+1} times successfully")
        tdSql.execute("flush database db;")
        # check the data
        for i in range(self.child_table_num):
            # filter through the json tag to prove the boundary-length tag works
            tdSql.query(f"select * from ct_json_tag{i+1} where t1->'{tag_key}' = '{tag_value}';")
            tdSql.checkRows(5600)
            row_num = random.randint(0, 5599)
            tdSql.checkData(row_num, 1, bytes.fromhex(column))

    def run(self):
        self.prepare_data()
        self.test_binary_boundary()
        self.test_varchar_boundary()
        self.test_nchar_boundary()
        self.test_varbinary_boundary()
        self.test_json_tag_boundary()

    def stop(self):
        tdSql.execute("drop database db;")
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)


tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())

View File

@ -0,0 +1,58 @@
{
"filetype": "insert",
"cfgdir": "/etc/taos",
"host": "127.0.0.1",
"port": 6030,
"user": "root",
"password": "taosdata",
"connection_pool_size": 8,
"num_of_records_per_req": 2000,
"thread_count": 2,
"create_table_thread_count": 1,
"confirm_parameter_prompt": "no",
"databases": [
{
"dbinfo": {
"name": "db",
"drop": "yes",
"vgroups": 2,
"replica": 1,
"duration":"1d",
"keep": "3d,6d,30d"
},
"super_tables": [
{
"name": "stb",
"child_table_exists": "no",
"childtable_count": 4,
"insert_rows": 1000000,
"childtable_prefix": "d",
"insert_mode": "taosc",
"timestamp_step": 1000,
"start_timestamp":"now-12d",
"columns": [
{ "type": "bool", "name": "bc"},
{ "type": "float", "name": "fc" },
{ "type": "double", "name": "dc"},
{ "type": "tinyint", "name": "ti", "values":["1"]},
{ "type": "smallint", "name": "si" },
{ "type": "int", "name": "ic" },
{ "type": "bigint", "name": "bi" },
{ "type": "utinyint", "name": "uti"},
{ "type": "usmallint", "name": "usi"},
{ "type": "uint", "name": "ui" },
{ "type": "ubigint", "name": "ubi"},
{ "type": "binary", "name": "bin", "len": 32},
{ "type": "nchar", "name": "nch", "len": 64}
],
"tags": [
{"type": "tinyint", "name": "groupid","max": 10,"min": 1},
{"name": "location","type": "binary", "len": 16, "values":
["San Francisco", "Los Angles", "San Diego", "San Jose", "Palo Alto", "Campbell", "Mountain View","Sunnyvale", "Santa Clara", "Cupertino"]
}
]
}
]
}
]
}

View File

@ -0,0 +1,76 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import time
import taos
import frame
import frame.etool
from frame.log import *
from frame.cases import *
from frame.sql import *
from frame.caseBase import *
from frame import *
class TDTestCase(TBase):
    """Multi-level storage basic case: load data with taosBenchmark, then
    flush/trim/compact the database and verify the data stays correct."""

    def insertData(self):
        """Run taosBenchmark with mlevel_basic.json and record the insert shape."""
        tdLog.info(f"insert data.")
        # taosBenchmark run (renamed from `json` — do not shadow the stdlib module name)
        jfile = etool.curFile(__file__, "mlevel_basic.json")
        etool.benchMark(json=jfile)
        tdSql.execute(f"use {self.db}")
        # set insert data information (must mirror mlevel_basic.json)
        self.childtable_count = 4
        self.insert_rows = 1000000
        self.timestamp_step = 1000

    def doAction(self):
        """Exercise flush, trim and compact on the freshly written database."""
        tdLog.info(f"do action.")
        self.flushDb()
        self.trimDb()
        self.compactDb()

    # run
    def run(self):
        """Case entry: insert, verify, snapshot aggregates, act, re-verify."""
        tdLog.debug(f"start to excute {__file__}")
        # insert data
        self.insertData()
        # check insert data correct
        self.checkInsertCorrect()
        # save aggregate snapshot for later comparison
        self.snapshotAgg()
        # do action
        self.doAction()
        # check saved agg result still correct
        self.checkAggCorrect()
        # check insert correct again
        self.checkInsertCorrect()
        tdLog.success(f"{__file__} successfully executed")
# register this case with the framework for both platforms
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())

View File

@ -0,0 +1,77 @@
from frame.log import *
from frame.cases import *
from frame.sql import *
from frame.caseBase import *
from frame import *
from frame.eos import *
import random
import string
class TDTestCase(TBase):
    """Add test case to verify the complicated query accuracy
    """
    def init(self, conn, logSql, replicaVar=1):
        # frame entry point: record replica count and bind the sql helper
        self.replicaVar = int(replicaVar)
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor())
    def prepare_data(self):
        """Create db_ts4806 with one super table and three children; load
        10,000 rows each into ct_1 and ct_2 (ct_3 stays empty)."""
        # database for case TS-4806
        tdSql.execute("create database db_ts4806;")
        tdSql.execute("use db_ts4806;")
        # super table
        tdSql.execute("create table st (ts timestamp, adl float, bdl float, cdl float, ady float, bdy float, cdy float) \
                      tags(pt_radio float, ct_ratio float, rated_cap float, ta_id varchar(128), id varchar(128), area_code \
                      varchar(128), zdy_flag int, elec_cust_name bigint,bureau_code bigint, fl_name varchar(32), classify_id \
                      varchar(128));")
        # child table
        tdSql.execute("create table ct_1 using st tags(1.2, 1.3, 3.4, '271000276', '30000001', '10001', 1, 10001, 2000001, 'beijing', '13169');")
        tdSql.execute("create table ct_2 using st tags(2.1, 1.2, 3.3, '271000277', '30000002', '10002', 1, 10002, 2000002, 'shanghai', '13141');")
        tdSql.execute("create table ct_3 using st tags(3.1, 4.2, 5.3, '271000278', '30000003', '10003', 0, 10003, 2000003, 'guangzhou', '13151');")
        # insert data for ts4806
        start_ts = 1705783972000
        # ten value patterns; `index` picks which pattern each 1000-row chunk uses
        data = [
            (1.1, 2.2, 3.3, 1.1, 2.2, 3.3),
            (1.2, 2.3, 3.4, 1.2, 2.3, 3.4),
            (1.3, 2.4, 3.5, 1.3, 2.4, 3.5),
            (1.4, 2.5, 3.6, 1.4, 2.5, 3.6),
            (1.5, 2.6, 3.7, 1.5, 2.6, 3.7),
            (1.6, 2.7, 3.8, 1.6, 2.7, 3.8),
            (1.7, 2.8, 3.9, 1.7, 2.8, 3.9),
            (1.8, 2.9, 4.0, 1.8, 2.9, 4.0),
            (1.9, 4.2, 4.1, 1.9, 3.0, 4.1),
            (1.2, 3.1, 4.2, 2.0, 3.1, 4.2)
        ]
        index = [1, 2, 5, 0, 7, 3, 8, 4, 6, 9]
        for ct in ['ct_1', 'ct_2']:
            for i in range(10):
                # one multi-value insert per 1000-row chunk, rows 1s apart
                sql = f"insert into {ct} values"
                for j in range(1000):
                    sql += f"({start_ts + i * 1000 * 1000 + j * 1000}, {','.join([str(item) for item in data[index[i]]])}),"
                sql += ";"
                tdSql.execute(sql)
    def test_ts4806(self):
        """TS-4806 regression: event_window whose start/end conditions are CASE
        expressions, combined with partition by tag and a HAVING filter."""
        tdSql.execute("use db_ts4806;")
        # NOTE(review): the upper-bound literal ' 2024-01-21 07:39:31.000' has a
        # leading space inside the quotes — confirm the parser tolerates it
        tdSql.query("select _wstart, cj.id, count(*) from st cj where cj.ts >= '2024-01-21 04:52:52.000' and cj.ts <= ' 2024-01-21 07:39:31.000' \
                    and cj.zdy_flag = 1 and cj.id in ('30000001', '30000002') partition by cj.id event_window start with \
                    (CASE WHEN cj.adl >= cj.bdl AND cj.adl >= cj.cdl THEN cj.adl WHEN cj.bdl >= cj.adl AND cj.bdl >= cj.cdl \
                    THEN cj.bdl ELSE cj.cdl END) * cj.ct_ratio * 0.4 * 1.732 / cj.rated_cap > 1 end with (CASE WHEN cj.adl >= \
                    cj.bdl AND cj.adl >= cj.cdl THEN cj.adl WHEN cj.bdl >= cj.adl AND cj.bdl >= cj.cdl THEN cj.bdl ELSE cj.cdl \
                    END) * cj.ct_ratio * 0.4 * 1.732 / cj.rated_cap <= 1 HAVING count(*) >= 4 order by _wstart, cj.id;")
        tdSql.checkRows(5)
        tdSql.checkData(4, 1, '30000002')
        tdSql.checkData(4, 2, 1001)
    def run(self):
        # build fixtures then run the regression check
        self.prepare_data()
        self.test_ts4806()
    def stop(self):
        # cleanup: drop the case database and release the connection
        tdSql.execute("drop database db_ts4806;")
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)
# register this case with the framework for both platforms
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())

View File

@ -0,0 +1,61 @@
{
"filetype": "insert",
"cfgdir": "/etc/taos",
"host": "127.0.0.1",
"port": 6030,
"user": "root",
"password": "taosdata",
"connection_pool_size": 8,
"num_of_records_per_req": 4000,
"prepared_rand": 10000,
"thread_count": 3,
"create_table_thread_count": 1,
"confirm_parameter_prompt": "no",
"databases": [
{
"dbinfo": {
"name": "db",
"drop": "no",
"vgroups": 3,
"replica": 3,
"duration":"3d",
"wal_retention_period": 1,
"wal_retention_size": 1,
"stt_trigger": 1
},
"super_tables": [
{
"name": "stb",
"child_table_exists": "yes",
"childtable_count": 6,
"insert_rows": 50000,
"childtable_prefix": "d",
"insert_mode": "taosc",
"timestamp_step": 60000,
"start_timestamp":1700000000000,
"columns": [
{ "type": "bool", "name": "bc"},
{ "type": "float", "name": "fc" },
{ "type": "double", "name": "dc"},
{ "type": "tinyint", "name": "ti"},
{ "type": "smallint", "name": "si" },
{ "type": "int", "name": "ic" },
{ "type": "bigint", "name": "bi" },
{ "type": "utinyint", "name": "uti"},
{ "type": "usmallint", "name": "usi"},
{ "type": "uint", "name": "ui" },
{ "type": "ubigint", "name": "ubi"},
{ "type": "binary", "name": "bin", "len": 8},
{ "type": "nchar", "name": "nch", "len": 16}
],
"tags": [
{"type": "tinyint", "name": "groupid","max": 10,"min": 1},
{"name": "location","type": "binary", "len": 16, "values":
["San Francisco", "Los Angles", "San Diego", "San Jose", "Palo Alto", "Campbell", "Mountain View","Sunnyvale", "Santa Clara", "Cupertino"]
}
]
}
]
}
]
}

View File

@ -0,0 +1,70 @@
import taos
import sys
from frame.log import *
from frame.cases import *
from frame.sql import *
from frame.caseBase import *
from frame import *
class TDTestCase(TBase):
    """Regression case: first() over an interval with fill(prev) must back-fill
    windows whose source rows are null from the previous non-null value."""
    updatecfgDict = {
        'slowLogScope':"all"
    }

    def init(self, conn, logSql, replicaVar=1):
        """Bind the sql helper; logSql makes executed statements land in sql.txt."""
        self.replicaVar = int(replicaVar)
        tdLog.debug(f"start to excute {__file__}")
        tdSql.init(conn.cursor(), logSql) # output sql.txt file

    def run(self):
        """Create one child table, insert six rows (null at 10:38 and 10:40),
        then check fill(prev) yields 5.0 for every window in range."""
        dbname = "db"
        stbname = "ocloud_point"
        tbname = "ocloud_point_170658_3837620225_1701134595725266945"
        tdSql.prepare()
        tdLog.printNoPrefix("==========step1:create table")
        tdSql.execute(
            f'''create stable if not exists {dbname}.{stbname}
            (wstart timestamp, point_value float) tags (location binary(64), groupId int)
            '''
        )
        tdSql.execute(
            f'''create table if not exists {dbname}.{tbname} using {dbname}.{stbname} tags("California.SanFrancisco", 2)'''
        )
        # one row per minute 10:35..10:40; 10:38 and 10:40 are null so fill(prev)
        # has gaps to back-fill
        sqls = []
        for i in range(35, 41):
            if i == 38 or i == 40:
                sqls.append(f"insert into {dbname}.{tbname} values('2023-12-26 10:{i}:00.000', null)")
            else:
                sqls.append(f"insert into {dbname}.{tbname} values('2023-12-26 10:{i}:00.000', 5.0)")
        tdSql.executes(sqls)
        tdLog.printNoPrefix("==========step3:fill data")
        # NOTE(review): interval(1M) — uppercase M is months, not minutes; confirm intended
        sql = f"select first(point_value) as pointValue from {dbname}.{tbname} where wstart between '2023-12-26 10:35:00' and '2023-12-26 10:40:00' interval(1M) fill(prev) order by wstart desc limit 100"
        # expect six rows, each filled with 5 (nulls replaced by the previous value)
        data = [[5] for _ in range(6)]
        tdSql.checkDataMem(sql, data)

    def stop(self):
        """Close the connection and report success."""
        tdSql.close()
        tdLog.success(f"{__file__} successfully executed")
# register this case with the framework for both platforms
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())

View File

@ -0,0 +1,447 @@
from frame.log import *
from frame.cases import *
from frame.sql import *
from frame.caseBase import *
from frame import *
from frame.eos import *
class TDTestCase(TBase):
    """Verify the elapsed function
    """
    def init(self, conn, logSql, replicaVar=1):
        # frame entry point: record replica count and bind the sql helper
        self.replicaVar = int(replicaVar)
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor())
        self.dbname = 'db'
        # fixture layout: three supers, six children (two real + one empty per
        # non-empty super) and three plain tables
        self.table_dic = {
            "super_table": ["st1", "st2", "st_empty"],
            "child_table": ["ct1_1", "ct1_2", "ct1_empty", "ct2_1", "ct2_2", "ct2_empty"],
            "tags_value": [("2023-03-01 15:00:00", 1, 'bj'), ("2023-03-01 15:10:00", 2, 'sh'), ("2023-03-01 15:20:00", 3, 'sz'), ("2023-03-01 15:00:00", 4, 'gz'), ("2023-03-01 15:10:00", 5, 'cd'), ("2023-03-01 15:20:00", 6, 'hz')],
            "common_table": ["t1", "t2", "t_empty"]
        }
        self.start_ts = 1677654000000 # 2023-03-01 15:00:00.000
        self.row_num = 100
    def prepareData(self):
        """Create the database plus all fixture tables, then load 100 rows
        (1 second apart) into each non-empty table."""
        # db
        tdSql.execute(f"create database {self.dbname};")
        tdSql.execute(f"use {self.dbname};")
        tdLog.debug(f"Create database {self.dbname}")
        # common table
        for common_table in self.table_dic["common_table"]:
            tdSql.execute(f"create table {common_table} (ts timestamp, c_ts timestamp, c_int int, c_bigint bigint, c_double double, c_nchar nchar(16));")
            tdLog.debug("Create common table %s" % common_table)
        # super table
        for super_table in self.table_dic["super_table"]:
            tdSql.execute(f"create stable {super_table} (ts timestamp, c_ts timestamp, c_int int, c_bigint bigint, c_double double, c_nchar nchar(16)) tags (t1 timestamp, t2 int, t3 binary(16));")
            tdLog.debug("Create super table %s" % super_table)
        # child table: ct1_* hang off st1, ct2_* off st2, tags taken positionally
        for i in range(len(self.table_dic["child_table"])):
            if self.table_dic["child_table"][i].startswith("ct1"):
                tdSql.execute("create table {} using {} tags('{}', {}, '{}');".format(self.table_dic["child_table"][i], "st1", self.table_dic["tags_value"][i][0], self.table_dic["tags_value"][i][1], self.table_dic["tags_value"][i][2]))
            elif self.table_dic["child_table"][i].startswith("ct2"):
                tdSql.execute("create table {} using {} tags('{}', {}, '{}');".format(self.table_dic["child_table"][i], "st2", self.table_dic["tags_value"][i][0], self.table_dic["tags_value"][i][1], self.table_dic["tags_value"][i][2]))
        # insert data
        table_list = ["t1", "t2", "ct1_1", "ct1_2", "ct2_1", "ct2_2"]
        for t in table_list:
            sql = "insert into {} values".format(t)
            for i in range(self.row_num):
                sql += "({}, {}, {}, {}, {}, '{}'),".format(self.start_ts + i * 1000, self.start_ts + i * 1000, 32767+i, 65535+i, i, t + str(i))
            sql += ";"
            tdSql.execute(sql)
            tdLog.debug("Insert data into table %s" % t)
    def test_normal_query(self):
        """elapsed() basics: single-timestamp groups, per-table elapsed, empty
        tables, and second/minute/hour time units."""
        # only one timestamp per group -> elapsed is 0
        tdSql.query("select elapsed(ts) from t1 group by c_ts;")
        tdSql.checkRows(self.row_num)
        tdSql.checkData(0, 0, 0)
        tdSql.query("select elapsed(ts, 1m) from t1 group by c_ts;")
        tdSql.checkRows(self.row_num)
        tdSql.checkData(0, 0, 0)
        # child table with group by: 100 rows 1s apart -> 99000 ms
        tdSql.query("select elapsed(ts) from ct1_2 group by tbname;")
        tdSql.checkRows(1)
        tdSql.checkData(0, 0, 99000)
        # empty super table
        tdSql.query("select elapsed(ts, 1s) from st_empty group by tbname;")
        tdSql.checkRows(0)
        # empty child table
        tdSql.query("select elapsed(ts, 1s) from ct1_empty group by tbname;")
        tdSql.checkRows(0)
        # empty common table
        tdSql.query("select elapsed(ts, 1s) from t_empty group by tbname;")
        tdSql.checkRows(0)
        # unit as second
        tdSql.query("select elapsed(ts, 1s) from st2 group by tbname;")
        tdSql.checkRows(2)
        tdSql.checkData(0, 0, 99)
        # unit as minute
        tdSql.query("select elapsed(ts, 1m) from st2 group by tbname;")
        tdSql.checkRows(2)
        tdSql.checkData(0, 0, 1.65)
        # unit as hour
        tdSql.query("select elapsed(ts, 1h) from st2 group by tbname;")
        tdSql.checkRows(2)
        tdSql.checkData(0, 0, 0.0275)
    def test_query_with_filter(self):
        """elapsed() under ts/column/tag filters; each entry pairs a query with
        its expected result rows."""
        end_ts = 1677654000000 + 1000 * 99
        query_list = [
            {
                "sql": "select elapsed(ts, 1s) from st1 where ts >= 1677654000000 group by tbname;",
                "res": [(99.0, ), (99.0, )]
            },
            {
                "sql": "select elapsed(ts, 1s) from st1 where ts >= 1677654000000 and c_ts >= 1677654000000 group by tbname;",
                "res": [(99.0, ), (99.0, )]
            },
            {
                "sql": "select elapsed(ts, 1s) from st1 where ts >= 1677654000000 and c_ts >= 1677654000000 and t1='2023-03-01 15:10:00.000' group by tbname;",
                "res": [(99.0, )]
            },
            {
                "sql": "select elapsed(ts, 1s) from st_empty where ts >= 1677654000000 and c_ts >= 1677654000000 and t1='2023-03-01 15:10:00.000' group by tbname;",
                "res": []
            },
            {
                "sql": "select elapsed(ts, 1s) from ct1_1 where ts >= 1677654000000 group by tbname;",
                "res": [(99.0, )]
            },
            {
                "sql": "select elapsed(ts, 1s) from ct1_2 where ts >= 1677654000000 and c_ts >= 1677654000000 group by tbname;",
                "res": [(99.0, )]
            },
            {
                "sql": "select elapsed(ts, 1s) from ct1_empty where ts >= 1677654000000 and c_ts >= 1677654000000 group by tbname;",
                "res": []
            },
            {
                "sql": "select elapsed(ts, 1s) from t1 where ts >= 1677654000000 group by tbname;",
                "res": [(99.0, )]
            },
            {
                "sql": "select elapsed(ts, 1s) from t2 where ts >= 1677654000000 and c_ts >= 1677654000000 group by tbname;",
                "res": [(99.0, )]
            },
            {
                "sql": "select elapsed(ts, 1s) from t_empty where ts >= 1677654000000 and c_ts >= 1677654000000 group by tbname;",
                "res": []
            },
            {
                "sql": "select elapsed(ts, 1s) from st2 where ts >= 1677654000000 and c_ts > {} group by tbname;".format(end_ts),
                "res": []
            },
            {
                "sql": "select elapsed(ts, 1s) from st2 where ts >= 1677654000000 and c_ts > {} and t1='2023-03-01 15:10:00' group by tbname;".format(end_ts),
                "res": []
            },
            {
                "sql": "select elapsed(ts, 1s) from st2 where ts >= 1677654000000 and c_int < 1 group by tbname;",
                "res": []
            },
            {
                "sql": "select elapsed(ts, 1s) from st2 where ts >= 1677654000000 and c_int >= 1 and t1='2023-03-01 15:10:00' group by tbname;",
                "res": [(99,)]
            },
            {
                "sql": "select elapsed(ts, 1s) from st2 where ts >= 1677654000000 and c_int <> 1 and t1='2023-03-01 15:10:00' group by tbname;",
                "res": [(99,)]
            },
            {
                "sql": "select elapsed(ts, 1s) from st2 where ts >= 1677654000000 and c_nchar like 'ct2_%' and t1='2023-03-01 15:10:00' group by tbname;",
                "res": [(99,)]
            },
            {
                "sql": "select elapsed(ts, 1s) from st2 where ts >= 1677654000000 and c_nchar like 'ct1_%' and t1='2023-03-01 15:10:00' group by tbname;",
                "res": []
            },
            {
                "sql": "select elapsed(ts, 1s) from st2 where ts >= 1677654000000 and c_nchar match '^ct2_' and t1='2023-03-01 15:10:00' group by tbname;",
                "res": [(99,)]
            },
            {
                "sql": "select elapsed(ts, 1s) from st2 where ts >= 1677654000000 and c_nchar nmatch '^ct1_' and t1='2023-03-01 15:10:00' group by tbname;",
                "res": [(99,)]
            },
            {
                "sql": "select elapsed(ts, 1s) from st2 where ts >= 1677654000000 and t3 like 'g%' group by tbname;",
                "res": [(99,)]
            }
        ]
        sql_list = []
        res_list = []
        for item in query_list:
            sql_list.append(item["sql"])
            res_list.append(item["res"])
        tdSql.queryAndCheckResult(sql_list, res_list)
    def test_query_with_other_function(self):
        """elapsed() combined with other aggregates in one select list."""
        query_list = [
            {
                "sql": "select avg(c_int), count(*), elapsed(ts, 1s), leastsquares(c_int, 0, 1), spread(c_bigint), sum(c_int), hyperloglog(c_int) from st1;",
                "res": [(32816.5, 200, 99.0, '{slop:0.499962, intercept:32766.753731}', 99.0, 6563300, 100)]
            },
            {
                "sql": "select twa(c_int) * elapsed(ts, 1s) from ct1_1;",
                "res": [(3.248833500000000e+06,)]
            }
        ]
        sql_list = []
        res_list = []
        for item in query_list:
            sql_list.append(item["sql"])
            res_list.append(item["res"])
        tdSql.queryAndCheckResult(sql_list, res_list)
    def test_query_with_join(self):
        """elapsed() over inner joins of super/child/common tables, including
        joins against empty tables (expected empty result)."""
        query_list = [
            {
                "sql": "select elapsed(st1.ts, 1s) from st1, st2 where st1.ts = st2.ts;",
                "res": [(99,)]
            },
            {
                "sql": "select elapsed(st1.ts, 1s) from st1, st_empty where st1.ts = st_empty.ts and st1.c_ts = st_empty.c_ts;",
                "res": []
            },
            {
                "sql": "select elapsed(st1.ts, 1s) from st1, ct1_1 where st1.ts = ct1_1.ts;",
                "res": [(99,)]
            },
            {
                "sql": "select elapsed(ct1.ts, 1s) from ct1_1 ct1, ct1_2 ct2 where ct1.ts = ct2.ts;",
                "res": [(99,)]
            },
            {
                "sql": "select elapsed(ct1.ts, 1s) from ct1_1 ct1, ct1_empty ct2 where ct1.ts = ct2.ts;",
                "res": []
            },
            {
                "sql": "select elapsed(st1.ts, 1s) from st1, ct1_empty where st1.ts = ct1_empty.ts;",
                "res": []
            },
            {
                "sql": "select elapsed(st1.ts, 1s) from st1, t1 where st1.ts = t1.ts;",
                "res": [(99,)]
            },
            {
                "sql": "select elapsed(st1.ts, 1s) from st1, t_empty where st1.ts = t_empty.ts;",
                "res": []
            },
            {
                "sql": "select elapsed(ct1.ts, 1s) from ct1_1 ct1, t1 t2 where ct1.ts = t2.ts;",
                "res": [(99,)]
            },
            {
                "sql": "select elapsed(ct1.ts, 1s) from ct1_1 ct1, t_empty t2 where ct1.ts = t2.ts;",
                "res": []
            },
            {
                "sql": "select elapsed(st1.ts, 1s) from st1, st2, st_empty where st1.ts=st2.ts and st2.ts=st_empty.ts;",
                "res": []
            }
        ]
        sql_list = []
        res_list = []
        for item in query_list:
            sql_list.append(item["sql"])
            res_list.append(item["res"])
        tdSql.queryAndCheckResult(sql_list, res_list)
    def test_query_with_union(self):
        """elapsed() under union / union all, with and without group by and
        interval fill."""
        query_list = [
            {
                "sql": "select elapsed(ts, 1s) from st1 union select elapsed(ts, 1s) from st2;",
                "res": [(99,)]
            },
            {
                "sql": "select elapsed(ts, 1s) from st1 union all select elapsed(ts, 1s) from st2;",
                "res": [(99,),(99,)]
            },
            {
                "sql": "select elapsed(ts, 1s) from st1 union all select elapsed(ts, 1s) from st_empty;",
                "res": [(99,)]
            },
            {
                "sql": "select elapsed(ts, 1s) from ct1_1 union all select elapsed(ts, 1s) from ct1_2;",
                "res": [(99,),(99,)]
            },
            {
                "sql": "select elapsed(ts, 1s) from ct1_1 union select elapsed(ts, 1s) from ct1_2;",
                "res": [(99,)]
            },
            {
                "sql": "select elapsed(ts, 1s) from ct1_1 union select elapsed(ts, 1s) from ct1_empty;",
                "res": [(99,)]
            },
            {
                "sql": "select elapsed(ts, 1s) from st1 where ts < '2023-03-01 15:05:00.000' union select elapsed(ts, 1s) from ct1_1 where ts >= '2023-03-01 15:01:00.000';",
                "res": [(39,),(99,)]
            },
            {
                "sql": "select elapsed(ts, 1s) from ct1_empty union select elapsed(ts, 1s) from t_empty;",
                "res": []
            },
            {
                "sql": "select elapsed(ts, 1s) from st1 group by tbname union select elapsed(ts, 1s) from st2 group by tbname;",
                "res": [(99,)]
            },
            {
                "sql": "select elapsed(ts, 1s) from st1 group by tbname union all select elapsed(ts, 1s) from st2 group by tbname;",
                "res": [(99,),(99,),(99,),(99,)]
            },
            {
                "sql": "select elapsed(ts, 1s) from st_empty group by tbname union all select elapsed(ts, 1s) from st2 group by tbname;",
                "res": [(99,),(99,)]
            },
            {
                "sql": "select elapsed(ts, 1s) from t1 where ts between '2023-03-01 15:00:00.000' and '2023-03-01 15:01:40.000' interval(10s) fill(next) union select elapsed(ts, 1s) from st2 where ts between '2023-03-01 15:00:00.000' and '2023-03-01 15:01:49.000' interval(5s) fill(prev);",
                "res": [(9,), (None,), (4,), (5,),(10,)]
            },
            {
                "sql": "select elapsed(ts, 1s) from st1 group by tbname union select elapsed(ts, 1s) from st2 group by tbname union select elapsed(ts, 1s) from st_empty group by tbname;",
                "res": [(99,)]
            }
        ]
        sql_list = []
        res_list = []
        for item in query_list:
            sql_list.append(item["sql"])
            res_list.append(item["res"])
        tdSql.queryAndCheckResult(sql_list, res_list)
    def test_query_with_window(self):
        """elapsed() inside interval and session windows.
        NOTE(review): this method is never called from run(), and the first two
        expected-result lists below are malformed (see inline notes)."""
        query_list = [
            {
                "sql": "select elapsed(ts, 1s) from st1 where ts between '2023-03-01 15:00:00.000' and '2023-03-01 15:00:20.000' interval(10s) fill(next);",
                # NOTE(review): "(10,)()" calls a tuple -> TypeError when evaluated;
                # a comma between the tuples is almost certainly missing
                "res": [(10,),(10,)()]
            },
            {
                "sql": "select elapsed(ts, 1s) from (select * from st1 where ts between '2023-03-01 15:00:00.000' and '2023-03-01 15:01:20.000' and c_int > 100) where ts >= '2023-03-01 15:01:00.000' and ts < '2023-03-01 15:02:00.000' interval(10s) fill(prev);",
                # NOTE(review): same problem — "(10,)(10,)()" is a chain of tuple
                # calls, not a list of tuples; commas missing
                "res": [(10,)(10,)(),(),(),()]
            },
            {
                "sql": "select elapsed(ts, 1s) from st1 where ts between '2023-03-01 15:00:00.000' and '2023-03-01 15:00:20.000' session(ts, 2s);",
                "res": [(20,)]
            },
            {
                "sql": "select elapsed(ts, 1s) from st_empty where ts between '2023-03-01 15:00:00.000' and '2023-03-01 15:00:20.000' session(ts, 2s);",
                "res": []
            }
        ]
        sql_list = []
        res_list = []
        for item in query_list:
            sql_list.append(item["sql"])
            res_list.append(item["res"])
        tdSql.queryAndCheckResult(sql_list, res_list)
    def test_nested_query(self):
        """elapsed() over subqueries and subqueries over elapsed().
        NOTE(review): not invoked from run() — confirm whether intentional."""
        query_list = [
            {
                "sql": "select elapsed(ts, 1s) from (select * from st1 where c_int > 10 and ts between '2023-03-01 15:00:00.000' and '2023-03-01 15:01:40.000');",
                "res": [(99,)]
            },
            {
                "sql": "select sum(v) from (select elapsed(ts, 1s) as v from st1 where ts between '2023-03-01 15:00:00.000' and '2023-03-01 15:00:20.000' interval(10s) fill(next));",
                "res": [(20,)]
            },
            {
                "sql": "select avg(v) from (select elapsed(ts, 1s) as v from st2 group by tbname order by v);",
                "res": [(99,)]
            },
            {
                "sql": "select elapsed(ts, 1s) from (select * from st1 where ts between '2023-03-01 15:00:00.000' and '2023-03-01 15:01:40.000') where c_int > 10;",
                "res": [(99,)]
            },
            {
                "sql": "select elapsed(ts, 1s) from (select * from st1 where c_int > 10 and ts between '2023-03-01 15:00:00.000' and '2023-03-01 15:01:40.000') where c_int < 20;",
                "res": []
            }
        ]
        sql_list = []
        res_list = []
        for item in query_list:
            sql_list.append(item["sql"])
            res_list.append(item["res"])
        tdSql.queryAndCheckResult(sql_list, res_list)
    def test_abnormal_query(self):
        """Negative cases: bad parameters, unsupported function nesting and
        other constructs that must be rejected."""
        # incorrect parameter
        table_list = self.table_dic["super_table"] + self.table_dic["child_table"] + self.table_dic["common_table"]
        incorrect_parameter_list = ["()", "(null)", "(*)", "(c_ts)", "(c_ts, 1s)", "(c_int)", "(c_bigint)", "(c_double)", "(c_nchar)", "(ts, null)",
                                    "(ts, *)", "(2024-01-09 17:00:00)", "(2024-01-09 17:00:00, 1s)", "(t1)", "(t1, 1s)", "(t2)", "(t3)"]
        for table in table_list:
            for param in incorrect_parameter_list:
                if table.startswith("st"):
                    tdSql.error("select elapsed{} from {} group by tbname order by ts;".format(param, table))
                else:
                    tdSql.error("select elapsed{} from {};".format(param, table))
                    tdSql.error("select elapsed{} from {} group by ".format(param, table))
        # query with unsupported function, like leastsquares/diff/derivative/top/bottom/last_row/interp
        unsupported_sql_list = [
            "select elapsed(leastsquares(c_int, 1, 2)) from st1 group by tbname;",
            "select elapsed(diff(ts)) from st1;",
            "select elapsed(derivative(ts, 1s, 1)) from st1 group by tbname order by ts;",
            "select elapsed(top(ts, 5)) from st1 group by tbname order by ts;",
            "select top(elapsed(ts), 5) from st1 group by tbname order by ts;",
            "select elapsed(bottom(ts)) from st1 group by tbname order by ts;",
            "select bottom(elapsed(ts)) from st1 group by tbname order by ts;",
            "select elapsed(last_row(ts)) from st1 group by tbname order by ts;",
            "select elapsed(interp(ts, 0)) from st1 group by tbname order by ts;"
        ]
        tdSql.errors(unsupported_sql_list)
        # nested aggregate function
        nested_sql_list = [
            "select avg(elapsed(ts, 1s)) from st1 group by tbname order by ts;",
            "select elapsed(avg(ts), 1s) from st1 group by tbname order by ts;",
            "select elapsed(sum(ts), 1s) from st1 group by tbname order by ts;",
            "select elapsed(count(ts), 1s) from st1 group by tbname order by ts;",
            "select elapsed(min(ts), 1s) from st1 group by tbname order by ts;",
            "select elapsed(max(ts), 1s) from st1 group by tbname order by ts;",
            "select elapsed(first(ts), 1s) from st1 group by tbname order by ts;",
            "select elapsed(last(ts), 1s) from st1 group by tbname order by ts;"
        ]
        tdSql.errors(nested_sql_list)
        # other error
        other_sql_list = [
            "select elapsed(ts, 1s) from t1 where ts between '2023-03-01 15:00:00.000' and '2023-03-01 15:01:40.000' interval(10s) fill(next) union select elapsed(ts, 1s) from st2 where ts between '2023-03-01 15:00:00.000' and '2023-03-01 15:01:49.000' interval(5s) fill(prev) group by tbname;",
            "select elapsed(time ,1s) from (select elapsed(ts,1s) time from st1);",
            "select elapsed(ts , 1s) from (select elapsed(ts, 1s) ts from st2);",
            "select elapsed(time, 1s) from (select elapsed(ts, 1s) time from st1 group by tbname);",
            "select elapsed(ts , 1s) from (select elapsed(ts, 1s) ts from st2 group by tbname);",
            "select elapsed(ts, 1s) from (select * from st1 where ts between '2023-03-01 15:00:00.000' and '2023-03-01 15:01:40.000' interval(10s) fill(next)) where c_int > 10;"
        ]
        tdSql.errors(other_sql_list)
    def run(self):
        # NOTE(review): test_query_with_window and test_nested_query are defined
        # above but not called here — confirm whether that is intentional (their
        # expected-result lists currently contain malformed tuples)
        self.prepareData()
        self.test_normal_query()
        self.test_query_with_filter()
        self.test_query_with_other_function()
        self.test_query_with_join()
        self.test_query_with_union()
        self.test_abnormal_query()
    def stop(self):
        # release the connection and report success
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)
# register this case with the framework for both platforms
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())

View File

@ -0,0 +1,61 @@
{
"filetype": "insert",
"cfgdir": "/etc/taos",
"host": "127.0.0.1",
"port": 6030,
"user": "root",
"password": "taosdata",
"connection_pool_size": 8,
"num_of_records_per_req": 4000,
"prepared_rand": 10000,
"thread_count": 3,
"create_table_thread_count": 1,
"confirm_parameter_prompt": "no",
"databases": [
{
"dbinfo": {
"name": "db",
"drop": "yes",
"vgroups": 3,
"replica": 3,
"duration":"3d",
"wal_retention_period": 1,
"wal_retention_size": 1,
"stt_trigger": 1
},
"super_tables": [
{
"name": "stb",
"child_table_exists": "no",
"childtable_count": 6,
"insert_rows": 100000,
"childtable_prefix": "d",
"insert_mode": "taosc",
"timestamp_step": 30000,
"start_timestamp":1700000000000,
"columns": [
{ "type": "bool", "name": "bc"},
{ "type": "float", "name": "fc" },
{ "type": "double", "name": "dc"},
{ "type": "tinyint", "name": "ti"},
{ "type": "smallint", "name": "si" },
{ "type": "int", "name": "ic" },
{ "type": "bigint", "name": "bi" },
{ "type": "utinyint", "name": "uti"},
{ "type": "usmallint", "name": "usi"},
{ "type": "uint", "name": "ui" },
{ "type": "ubigint", "name": "ubi"},
{ "type": "binary", "name": "bin", "len": 8},
{ "type": "nchar", "name": "nch", "len": 16}
],
"tags": [
{"type": "tinyint", "name": "groupid","max": 10,"min": 1},
{"name": "location","type": "binary", "len": 16, "values":
["San Francisco", "Los Angles", "San Diego", "San Jose", "Palo Alto", "Campbell", "Mountain View","Sunnyvale", "Santa Clara", "Cupertino"]
}
]
}
]
}
]
}

View File

@ -0,0 +1,447 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import time
import random
import taos
import frame
import frame.etool
from frame.log import *
from frame.cases import *
from frame.sql import *
from frame.caseBase import *
from frame import *
class TDTestCase(TBase):
    # server-side config overrides applied before the case starts:
    # slow-query logging disabled, SMA optimization on, larger query buffer
    updatecfgDict = {
        "keepColumnName" : "1",
        "ttlChangeOnWrite" : "1",
        "querySmaOptimize" : "1",
        "slowLogScope" : "none",
        "queryBufferSize" : 10240
    }
def insertData(self):
    """Load data twice with taosBenchmark: an ordered pass driven by
    query_basic.json, then (after a flush) a disordered pass driven by
    cquery_basic.json."""
    tdLog.info(f"insert data.")
    # first taosBenchmark pass
    cfg = etool.curFile(__file__, "query_basic.json")
    etool.benchMark(json = cfg)
    tdSql.execute(f"use {self.db}")
    tdSql.execute("select database();")
    # shape of the inserted data, mirrored from query_basic.json
    self.childtable_count = 6
    self.insert_rows = 100000
    self.timestamp_step = 30000
    self.start_timestamp = 1700000000000
    # flush, then write again out of order
    self.flushDb()
    cfg = etool.curFile(__file__, "cquery_basic.json")
    etool.benchMark(json = cfg)
def genTime(self, preCnt, cnt):
    """Map a row offset (preCnt) and a row count (cnt) onto the
    corresponding (start, end) timestamp pair of the inserted series."""
    base = self.start_timestamp + self.timestamp_step * preCnt
    return (base, base + cnt * self.timestamp_step)
def doWindowQuery(self):
    """Time-range predicate checks on count(ts): 'between' vs explicit
    comparisons, then overlapping / disjoint OR and AND range combinations."""
    pre = f"select count(ts) from {self.stb} "
    # case1 operator "in" "and" is same
    cnt = 6000
    s,e = self.genTime(12000, cnt)
    sql1 = f"{pre} where ts between {s} and {e} "
    sql2 = f"{pre} where ts >= {s} and ts <={e} "
    # both bounds inclusive -> cnt + 1 rows per child table
    expectCnt = (cnt + 1) * self.childtable_count
    tdSql.checkFirstValue(sql1, expectCnt)
    tdSql.checkFirstValue(sql2, expectCnt)
    # case2 no overlap "or" left
    cnt1 = 120
    s1, e1 = self.genTime(4000, cnt1)
    cnt2 = 3000
    s2, e2 = self.genTime(10000, cnt2)
    sql = f"{pre} where (ts >= {s1} and ts < {e1}) or (ts >= {s2} and ts < {e2})"
    expectCnt = (cnt1 + cnt2) * self.childtable_count
    tdSql.checkFirstValue(sql, expectCnt)
    # case3 overlap "or" right
    cnt1 = 300
    s1, e1 = self.genTime(17000, cnt1)
    cnt2 = 8000
    s2, e2 = self.genTime(70000, cnt2)
    sql = f"{pre} where (ts > {s1} and ts <= {e1}) or (ts > {s2} and ts <= {e2})"
    expectCnt = (cnt1 + cnt2) * self.childtable_count
    tdSql.checkFirstValue(sql, expectCnt)
    # case4 overlap "or": second range starts 500 rows into the first
    cnt1 = 1000
    s1, e1 = self.genTime(9000, cnt1)
    cnt2 = 1000
    s2, e2 = self.genTime(9000 + 500 , cnt2)
    sql = f"{pre} where (ts > {s1} and ts <= {e1}) or (ts > {s2} and ts <= {e2})"
    expectCnt = (cnt1 + 500) * self.childtable_count # expect=1500
    tdSql.checkFirstValue(sql, expectCnt)
    # case5 overlap "or" boundary hollow->solid
    cnt1 = 3000
    s1, e1 = self.genTime(45000, cnt1)
    cnt2 = 2000
    s2, e2 = self.genTime(45000 + cnt1 , cnt2)
    sql = f"{pre} where (ts > {s1} and ts <= {e1}) or (ts > {s2} and ts <= {e2})"
    expectCnt = (cnt1+cnt2) * self.childtable_count
    tdSql.checkFirstValue(sql, expectCnt)
    # case6 overlap "or" boundary solid->solid (shared endpoint counted once)
    cnt1 = 300
    s1, e1 = self.genTime(55000, cnt1)
    cnt2 = 500
    s2, e2 = self.genTime(55000 + cnt1 , cnt2)
    sql = f"{pre} where (ts >= {s1} and ts <= {e1}) or (ts >= {s2} and ts <= {e2})"
    expectCnt = (cnt1+cnt2+1) * self.childtable_count
    tdSql.checkFirstValue(sql, expectCnt)
    # case7 overlap "and": intersection is half of the first range
    cnt1 = 1000
    s1, e1 = self.genTime(40000, cnt1)
    cnt2 = 1000
    s2, e2 = self.genTime(40000 + 500 , cnt2)
    sql = f"{pre} where (ts > {s1} and ts <= {e1}) and (ts > {s2} and ts <= {e2})"
    expectCnt = cnt1/2 * self.childtable_count
    tdSql.checkFirstValue(sql, expectCnt)
    # case8 overlap "and" boundary hollow->solid solid->hollow (one shared row)
    cnt1 = 3000
    s1, e1 = self.genTime(45000, cnt1)
    cnt2 = 2000
    s2, e2 = self.genTime(45000 + cnt1 , cnt2)
    sql = f"{pre} where (ts > {s1} and ts <= {e1}) and (ts >= {s2} and ts < {e2})"
    expectCnt = 1 * self.childtable_count
    tdSql.checkFirstValue(sql, expectCnt)
    # case9 no overlap "and" -> empty intersection
    cnt1 = 6000
    s1, e1 = self.genTime(20000, cnt1)
    cnt2 = 300
    s2, e2 = self.genTime(70000, cnt2)
    sql = f"{pre} where (ts > {s1} and ts <= {e1}) and (ts >= {s2} and ts <= {e2})"
    expectCnt = 0
    tdSql.checkFirstValue(sql, expectCnt)
    # case10 cnt1 contains cnt2, "and" -> inner range wins
    cnt1 = 5000
    s1, e1 = self.genTime(25000, cnt1)
    cnt2 = 400
    s2, e2 = self.genTime(28000, cnt2)
    sql = f"{pre} where (ts > {s1} and ts <= {e1}) and (ts >= {s2} and ts < {e2})"
    expectCnt = cnt2 * self.childtable_count
    tdSql.checkFirstValue(sql, expectCnt)
def queryMax(self, colname):
    """Return max(colname) over the super table."""
    tdSql.query(f"select max({colname}) from {self.stb}")
    return tdSql.getData(0, 0)
def checkMax(self):
    """Exercise max() tracking across inserts and deletes.

    Inserts four rows above the current max of column ``ui`` (to drive
    tsdbRetrieveDatablockSMA2 coverage), then repeatedly verifies max(),
    deletes the current-max row(s) and expects max() to step back down.
    """
    colname = "ui"
    # renamed from `max` to avoid shadowing the builtin max()
    maxVal = self.queryMax(colname)
    # insert four rows strictly above the current max
    sql = f"insert into d0(ts, {colname}) values"
    for i in range(1, 5):
        sql += f" (now + {i}s, {maxVal+i})"
    tdSql.execute(sql)
    self.flushDb()
    expectMax = maxVal + 4
    for i in range(1, 5):
        realMax = self.queryMax(colname)
        if realMax != expectMax:
            tdLog.exit(f"Max value not expect. expect:{expectMax} real:{realMax}")
        # collect the timestamp(s) of the current-max row(s)
        sql = f"select ts from d0 where ui={expectMax}"
        tdSql.query(sql)
        tss = tdSql.getColData(0)
        for ts in tss:
            # delete the max row; max() should drop by one next iteration
            sql = f"delete from d0 where ts = '{ts}'"
            tdSql.execute(sql)
            expectMax -= 1
    self.checkInsertCorrect()
def doQuery(self):
    """Run the query-side checks: window queries, max()/delete handling,
    _group_key consistency, equivalence pairs (tail/unique/top) and the
    distribution statistics of the super table.
    """
    tdLog.info(f"do query.")
    self.doWindowQuery()
    # max
    self.checkMax()
    # __group_key: the _group_key column must equal the partition/group column
    sql = f"select count(*),_group_key(uti),uti from {self.stb} partition by uti"
    tdSql.query(sql)
    # column index 1 value same with 2
    tdSql.checkSameColumn(1, 2)
    sql = f"select count(*),_group_key(usi),usi from {self.stb} group by usi limit 100;"
    tdSql.query(sql)
    tdSql.checkSameColumn(1, 2)
    # tail(n, offset) must match "order by ts desc limit n offset"
    sql1 = "select ts,ui from d0 order by ts desc limit 5 offset 2;"
    sql2 = "select ts,tail(ui,5,2) from d0;"
    self.checkSameResult(sql1, sql2)
    # unique must match distinct
    sql1 = "select distinct uti from d0 order by uti;"
    sql2 = "select UNIQUE(uti) from d0 order by uti asc;"
    self.checkSameResult(sql1, sql2)
    # top(n) must match "order by ... desc limit n"
    sql1 = "select top(bi,10) from stb;"
    sql2 = "select bi from stb where bi is not null order by bi desc limit 10;"
    self.checkSameResult(sql1, sql2)
    # distributed expect values
    expects = {
        "Block_Rows"   : 6*100000,
        "Total_Tables" : 6,
        "Total_Vgroups": 3
    }
    self.waitTransactionZero()
    reals = self.getDistributed(self.stb)
    # iterate items() instead of keys()+indexing
    for k, v in expects.items():
        if int(reals[k]) != v:
            tdLog.exit(f"distribute {k} expect: {v} real: {reals[k]}")
def checkNull(self):
    """Insert rows with NULL / string data and verify NULL propagation,
    substr/cast/to_iso8601 behaviour, and constant-expression forms of the
    aggregate and selection functions."""
    # abs unique concat_ws: insert a ts-only row so every data column is NULL
    ts = self.start_timestamp + 1
    sql = f"insert into {self.db}.d0(ts) values({ts})"
    tdSql.execute(sql)
    sql = f'''select abs(fc),
            unique(ic),
            concat_ws(',',bin,nch),
            timetruncate(bi,1s,0),
            timediff(ic,bi,1s),
            to_timestamp(nch,'yyyy-mm-dd hh:mi:ss.ms.us.ns')
            from {self.db}.d0 where ts={ts}'''
    tdSql.query(sql)
    # all expressions over NULL inputs must return NULL ("None");
    # NOTE(review): column 5 (to_timestamp) is not asserted here
    tdSql.checkData(0, 0, "None")
    tdSql.checkData(0, 1, "None")
    tdSql.checkData(0, 2, "None")
    tdSql.checkData(0, 3, "None")
    tdSql.checkData(0, 4, "None")
    # substr from position 1 (start) must equal the whole column
    sql1 = f"select substr(bin,1) from {self.db}.d0 order by ts desc limit 100"
    sql2 = f"select bin from {self.db}.d0 order by ts desc limit 100"
    self.checkSameResult(sql1, sql2)
    # substr error: input pos of zero is rejected
    sql = f"select substr(bin,0,3) from {self.db}.d0 order by ts desc limit 100"
    tdSql.error(sql)
    # cast: a numeric string casts cleanly to every numeric type
    nch = 99
    sql = f"insert into {self.db}.d0(ts, nch) values({ts}, '{nch}')"
    tdSql.execute(sql)
    sql = f"select cast(nch as tinyint), \
            cast(nch as tinyint unsigned), \
            cast(nch as smallint), \
            cast(nch as smallint unsigned), \
            cast(nch as int unsigned), \
            cast(nch as bigint unsigned), \
            cast(nch as float), \
            cast(nch as double), \
            cast(nch as bool) \
            from {self.db}.d0 where ts={ts}"
    row = [nch, nch, nch, nch, nch, nch, nch, nch, True]
    tdSql.checkDataMem(sql, [row])
    # cast of a non-numeric string yields zero
    ts += 1
    sql = f"insert into {self.db}.d0(ts, nch) values({ts}, 'abcd')"
    tdSql.execute(sql)
    sql = f"select cast(nch as tinyint) from {self.db}.d0 where ts={ts}"
    tdSql.checkFirstValue(sql, 0)
    # iso8601: same instant rendered with Z / +08 / -08 offsets
    sql = f'select ts,to_iso8601(ts,"Z"),to_iso8601(ts,"+08"),to_iso8601(ts,"-08") from {self.db}.d0 where ts={self.start_timestamp}'
    row = ['2023-11-15 06:13:20.000','2023-11-14T22:13:20.000Z','2023-11-15T06:13:20.000+08','2023-11-14T14:13:20.000-08']
    tdSql.checkDataMem(sql, [row])
    # constant expression functions
    # count: the two extra rows inserted above are included
    sql = f"select count(1),count(null) from {self.db}.d0"
    tdSql.checkDataMem(sql, [[self.insert_rows+2, 0]])
    # expected single row for the constant-argument aggregates below
    row = [10, 11.0, "None", 2]
    # sum
    sql = "select sum(1+9),sum(1.1 + 9.9),sum(null),sum(4/2);"
    tdSql.checkDataMem(sql, [row])
    # min
    sql = "select min(1+9),min(1.1 + 9.9),min(null),min(4/2);"
    tdSql.checkDataMem(sql, [row])
    # max
    sql = "select max(1+9),max(1.1 + 9.9),max(null),max(4/2);"
    tdSql.checkDataMem(sql, [row])
    # avg
    sql = "select avg(1+9),avg(1.1 + 9.9),avg(null),avg(4/2);"
    tdSql.checkDataMem(sql, [row])
    # stddev of a single constant is zero
    sql = "select stddev(1+9),stddev(1.1 + 9.9),stddev(null),stddev(4/2);"
    tdSql.checkDataMem(sql, [[0, 0.0, "None", 0]])
    # leastsquares
    sql = "select leastsquares(100,2,1), leastsquares(100.2,2.1,1);"
    tdSql.query(sql)
    # derivative
    sql = "select derivative(190999,38.3,1);"
    tdSql.checkFirstValue(sql, 0.0)
    # irate
    sql = "select irate(0);"
    tdSql.checkFirstValue(sql, 0.0)
    # diff
    sql = "select diff(0);"
    tdSql.checkFirstValue(sql, 0.0)
    # twa
    sql = "select twa(10);"
    tdSql.checkFirstValue(sql, 10.0)
    # mavg
    sql = "select mavg(5,10);"
    tdSql.checkFirstValue(sql, 5)
    # mavg (repeated check, kept from original)
    sql = "select mavg(5,10);"
    tdSql.checkFirstValue(sql, 5)
    # csum
    sql = "select csum(4+9);"
    tdSql.checkFirstValue(sql, 13)
    # tail: multiple tail() calls in one select are rejected
    sql = "select tail(1+9,1),tail(1.1 + 9.9,2),tail(null,3),tail(8/4,3);"
    tdSql.error(sql)
    sql = "select tail(4+9, 3);"
    tdSql.checkFirstValue(sql, 13)
    sql = "select tail(null, 1);"
    tdSql.checkFirstValue(sql, "None")
    # top: NULL argument is rejected
    sql = "select top(4+9, 3);"
    tdSql.checkFirstValue(sql, 13)
    sql = "select top(9.9, 3);"
    tdSql.checkFirstValue(sql, 9.9)
    sql = "select top(null, 1);"
    tdSql.error(sql)
    # bottom
    sql = "select bottom(4+9, 3);"
    tdSql.checkFirstValue(sql, 13)
    sql = "select bottom(9.9, 3);"
    tdSql.checkFirstValue(sql, 9.9)
    # statecount/stateduration over all comparison operators;
    # vals[i] is the expected result of "99 <op> 100" (1 true, -1 false)
    ops = ['GE', 'GT', 'LE', 'LT', 'EQ', 'NE']
    vals = [-1, -1, 1, 1, -1, 1]
    cnt = len(ops)
    for i in range(cnt):
        # statecount
        sql = f"select statecount(99,'{ops[i]}',100);"
        tdSql.checkFirstValue(sql, vals[i])
        sql = f"select statecount(9.9,'{ops[i]}',11.1);"
        tdSql.checkFirstValue(sql, vals[i])
        # stateduration
        sql = f"select stateduration(99,'{ops[i]}',100,1s);"
        #tdSql.checkFirstValue(sql, vals[i]) bug need fix
        tdSql.execute(sql)
        sql = f"select stateduration(9.9,'{ops[i]}',11.1,1s);"
        #tdSql.checkFirstValue(sql, vals[i]) bug need fix
        tdSql.execute(sql)
    # unknown operator name is rejected
    sql = "select statecount(9,'EQAAAA',10);"
    tdSql.error(sql)
    # histogram: crash checks over all three bin descriptions
    sqls = [
        'select histogram(200,"user_input","[10, 50, 200]",0);',
        'select histogram(22.2,"user_input","[1.01, 5.01, 200.1]",0);',
        'select histogram(200,"linear_bin",\'{"start": 0.0,"width": 5.0, "count": 5, "infinity": true}\',0)',
        'select histogram(200.2,"linear_bin",\'{"start": 0.0,"width": 5.01, "count": 5, "infinity": true}\',0)',
        'select histogram(200,"log_bin",\'{"start":1.0, "factor": 2.0, "count": 5, "infinity": true}\',0)',
        'select histogram(200.2,"log_bin",\'{"start":1.0, "factor": 2.0, "count": 5, "infinity": true}\',0)'
    ]
    tdSql.executes(sqls)
    # error checks: malformed bin JSON must be rejected
    sql = 'select histogram(200.2,"log_bin",\'start":1.0, "factor: 2.0, "count": 5, "infinity": true}\',0)'
    tdSql.error(sql)
    sql = 'select histogram("200.2","log_bin",\'start":1.0, "factor: 2.0, "count": 5, "infinity": true}\',0)'
    tdSql.error(sql)
    # first last
    sql = "select first(100-90-1),last(2*5),first(11.1),last(22.2)"
    tdSql.checkDataMem(sql, [[9, 10, 11.1, 22.2]])
    # sample
    sql = "select sample(6, 1);"
    tdSql.checkFirstValue(sql, 6)
    # spread of a single constant is zero
    sql = "select spread(12);"
    tdSql.checkFirstValue(sql, 0)
    # percentile
    sql = "select percentile(10.1,100);"
    tdSql.checkFirstValue(sql, 10.1)
    sql = "select percentile(10, 0);"
    tdSql.checkFirstValue(sql, 10)
    sql = "select percentile(100, 60, 70, 80);"
    tdSql.execute(sql)
    # apercentile
    sql = "select apercentile(10.1,100);"
    tdSql.checkFirstValue(sql, 10.1)
# run
def run(self):
    """Case entry point: populate data, validate it, then run the query and NULL checks."""
    tdLog.debug(f"start to excute {__file__}")
    # populate, then verify the inserted data before querying
    self.insertData()
    self.checkInsertCorrect()
    self.checkConsistency("usi")
    # query-side checks
    self.doQuery()
    # NULL / constant-expression checks
    self.checkNull()
    tdLog.success(f"{__file__} successfully executed")
# register this case with both the Linux and Windows test runners
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())

189
tests/army/query/show.py Normal file
View File

@ -0,0 +1,189 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import time
import random
import taos
import frame
import frame.etool
from frame.log import *
from frame.cases import *
from frame.sql import *
from frame.caseBase import *
from frame import *
from frame.autogen import *
class TDTestCase(TBase):
    """Verify SHOW statements: SHOW VIEWS with LIKE patterns, SHOW TAGS
    (TD-29904 / TD-30030) and assorted other SHOW commands."""
    # no server-side config overrides needed for this case
    updatecfgDict = {
    }

    def insertData(self):
        """Create the database, super/child tables, insert data and create ten views."""
        tdLog.info(f"create table and insert data.")
        self.stb = "stb"
        self.db = "db"
        self.childtable_count = 10
        self.insert_rows = 10000
        # nanosecond-precision db; "fillone" generates deterministic data
        self.autoGen = AutoGen(startTs = 1600000000000*1000*1000, batch=500, genDataMode = "fillone")
        self.autoGen.create_db(self.db, 2, 3, "precision 'ns'")
        self.autoGen.create_stable(stbname = self.stb, tag_cnt = 5, column_cnt = 20, binary_len = 10, nchar_len = 5)
        self.autoGen.create_child(self.stb, "child", self.childtable_count)
        self.autoGen.insert_data(self.insert_rows, True)
        tdLog.info("create view.")
        tdSql.execute(f"use {self.db}")
        # ten views over different column subsets; the names feed the LIKE checks below
        sqls = [
            "create view viewc0c1 as select c0,c1 from stb ",
            "create view viewc0c1c2 as select c0,c1,c2 from stb ",
            "create view viewc0c3 as select c0,c3 from stb where c3=1",
            "create view viewc0c4c5 as select c4,c5 from stb ",
            "create view viewc0c6 as select c0,c1,c6 from stb ",
            "create view viewc0c7 as select c0,c1 from stb ",
            "create view viewc0c7c8 as select c0,c7,c8 from stb where c8>0",
            "create view viewc0c3c1 as select c0,c3,c1 from stb ",
            "create view viewc2c4 as select c2,c4 from stb ",
            "create view viewc2c5 as select c2,c5 from stb ",
        ]
        tdSql.executes(sqls)

    def checkView(self):
        """Check SHOW VIEWS LIKE pattern matching ('%' and '_' wildcards)."""
        tdLog.info(f"check view like.")
        # like: '%' matches any suffix — all ten views start with "view"
        sql = f"show views like 'view%'"
        tdSql.query(sql)
        tdSql.checkRows(10)
        # '_' matches exactly one character
        sql = f"show views like 'vie_c0c1c2'"
        tdSql.query(sql)
        tdSql.checkRows(1)
        tdSql.checkData(0,0,"viewc0c1c2")
        sql = f"show views like '%c2c_'"
        tdSql.query(sql)
        tdSql.checkRows(2)
        tdSql.checkData(0,0, "viewc2c4")
        tdSql.checkData(1,0, "viewc2c5")
        sql = f"show views like '%' "
        tdSql.query(sql)
        tdSql.checkRows(10)
        # patterns that must match zero views
        sql = "show views like '_' "
        tdSql.query(sql)
        tdSql.checkRows(0)
        sql = "show views like 'a%' "
        tdSql.query(sql)
        tdSql.checkRows(0)

    def doQuery(self):
        """Sanity-check the total inserted row count."""
        tdLog.info(f"do query.")
        # __group_key
        sql = f"select count(*) from {self.stb} "
        tdSql.query(sql)
        # total rows = rows per child table * number of child tables
        allRows = self.insert_rows * self.childtable_count
        tdSql.checkFirstValue(sql, allRows)

    def checkShowTags(self):
        """SHOW TAGS behaviour on child tables, views and normal tables."""
        # verification for TD-29904: unknown table must error
        tdSql.error("show tags from t100000", expectErrInfo='Fail to get table info, error: Table does not exist')
        # all three addressing forms of a child table return its 5 tags
        sql = "show tags from child1"
        tdSql.query(sql)
        tdSql.checkRows(5)
        sql = f"show tags from child1 from {self.db}"
        tdSql.query(sql)
        tdSql.checkRows(5)
        sql = f"show tags from {self.db}.child1"
        tdSql.query(sql)
        tdSql.checkRows(5)
        # verification for TD-30030: SHOW TAGS must be rejected on views and normal tables
        tdSql.execute("create table t100 (ts timestamp, pk varchar(20) primary key, c1 varchar(100)) tags (id int)")
        tdSql.execute("insert into ct1 using t100 tags(1) values('2024-05-17 14:58:52.902', 'a1', '100')")
        tdSql.execute("insert into ct1 using t100 tags(1) values('2024-05-17 14:58:52.902', 'a2', '200')")
        tdSql.execute("insert into ct1 using t100 tags(1) values('2024-05-17 14:58:52.902', 'a3', '300')")
        tdSql.execute("insert into ct2 using t100 tags(2) values('2024-05-17 14:58:52.902', 'a2', '200')")
        tdSql.execute("create view v100 as select * from t100")
        tdSql.execute("create view v200 as select * from ct1")
        tdSql.error("show tags from v100", expectErrInfo="Tags can only applied to super table and child table")
        tdSql.error("show tags from v200", expectErrInfo="Tags can only applied to super table and child table")
        tdSql.execute("create table t200 (ts timestamp, pk varchar(20) primary key, c1 varchar(100))")
        tdSql.error("show tags from t200", expectErrInfo="Tags can only applied to super table and child table")

    def checkShow(self):
        """Exercise misc SHOW commands; unsupported ones must error."""
        # not support
        sql = "show accounts;"
        tdSql.error(sql)
        # check result
        sql = "SHOW CLUSTER;"
        tdSql.query(sql)
        tdSql.checkRows(1)
        sql = "SHOW COMPACTS;"
        tdSql.query(sql)
        tdSql.checkRows(0)
        sql = "SHOW COMPACT 1;"
        tdSql.query(sql)
        tdSql.checkRows(0)
        sql = "SHOW CLUSTER MACHINES;"
        tdSql.query(sql)
        tdSql.checkRows(1)
        # run to check crash — results are not asserted
        sqls = [
            "show scores;",
            "SHOW CLUSTER VARIABLES",
            # "SHOW BNODES;",
        ]
        tdSql.executes(sqls)
        self.checkShowTags()

    # run
    def run(self):
        """Case entry point."""
        tdLog.debug(f"start to excute {__file__}")
        # insert data
        self.insertData()
        # check view
        self.checkView()
        # do action
        self.doQuery()
        # check show
        self.checkShow()
        tdLog.success(f"{__file__} successfully executed")
# register this case with both the Linux and Windows test runners
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())

View File

@ -0,0 +1,94 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import frame.etool
from frame.log import *
from frame.cases import *
from frame.sql import *
from frame.caseBase import *
from frame import *
from frame.autogen import *
class TDTestCase(TBase):
    """Regression test for TS-30189: UNION ALL of DIFF-filtered subqueries
    combined with a monthly-interval LAST() aggregation."""

    def ts_30189(self):
        """Build the TS-30189 dataset and verify the merged result set."""
        tdLog.info("create database ts_30189")
        tdSql.execute(f"create database ts_30189")
        tdSql.execute(f"use ts_30189")
        # two devices; device 201000008 has one negative faev step (23:12:28.180)
        sqls = [
            "CREATE STABLE `demo` (`_ts` TIMESTAMP, `faev` DOUBLE) TAGS (`deviceid` VARCHAR(256))",
            "CREATE TABLE demo_201000008 USING demo (deviceid) TAGS ('201000008')",
            "CREATE TABLE demo_K201000258 USING demo (deviceid) TAGS ('K201000258')",
            "INSERT INTO demo_201000008 (_ts,faev) VALUES ('2023-11-30 23:59:27.255', 51412.900999999998021)",
            "INSERT INTO demo_201000008 (_ts,faev) VALUES ('2023-12-04 23:11:28.179', 51458.900999999998021)",
            "INSERT INTO demo_201000008 (_ts,faev) VALUES ('2023-12-04 23:12:28.180', 51458.800999999999476)",
            "INSERT INTO demo_201000008 (_ts,faev) VALUES ('2023-12-31 23:59:36.108', 52855.400999999998021)",
            "INSERT INTO demo_K201000258 (_ts,faev) VALUES ('2023-11-30 23:59:00.365', 258839.234375000000000)",
            "INSERT INTO demo_K201000258 (_ts,faev) VALUES ('2023-12-28 05:00:00.381', 272188.843750000000000)",
            "INSERT INTO demo_K201000258 (_ts,faev) VALUES ('2023-12-28 05:01:00.600', 13.909012794494629)",
            "INSERT INTO demo_K201000258 (_ts,faev) VALUES ('2023-12-31 23:59:00.366', 1886.711303710937500)",
        ]
        tdSql.executes(sqls)
        # union of: negative diffs (ascending), positive diffs over the
        # descending order, and per-month LAST() rows — device 201000008 only
        sql1 = '''
            SELECT ts, deviceid, faev FROM (
                (
                    SELECT deviceid, ts, faev FROM (
                        SELECT deviceid, _ts AS ts, faev, DIFF(ROUND(faev*1000)/1000) AS diff_faev
                        FROM demo
                        WHERE deviceid in ('201000008') AND _ts >= '2023-12-01 00:00:00' AND _ts < '2024-01-01 00:00:00'
                        PARTITION BY deviceid
                    ) WHERE diff_faev < 0
                ) UNION ALL
                (
                    SELECT deviceid, ts, faev FROM (
                        SELECT deviceid, ts, faev, DIFF(ROUND(faev*1000)/1000) as diff_faev
                        FROM ( SELECT deviceid, _ts as ts , faev FROM demo
                               WHERE deviceid in ('201000008') AND _ts >= '2023-12-01 00:00:00' AND _ts < '2024-01-01 00:00:00'
                               ORDER BY ts desc ) PARTITION BY deviceid
                    ) WHERE diff_faev > 0
                )
                UNION ALL
                (
                    SELECT deviceid, LAST(_ts) AS ts, LAST(faev) AS faev FROM demo
                    WHERE deviceid in ('201000008') AND _ts >= '2023-11-01 00:00:00' AND _ts < '2024-01-01 00:00:00'
                    PARTITION BY deviceid INTERVAL(1n)
                )
            ) order by ts
            '''
        tdSql.query(sql1)
        tdSql.checkRows(4)
        # expected merged rows, ordered by ts
        row1 = ['2023-11-30 23:59:27.255', "201000008", 51412.900999999998021]
        row2 = ['2023-12-04 23:11:28.179', "201000008", 51458.900999999998021]
        row3 = ['2023-12-04 23:12:28.180', "201000008", 51458.800999999999476]
        row4 = ['2023-12-31 23:59:36.108', "201000008", 52855.400999999998021]
        rows = [row1, row2, row3, row4]
        tdSql.checkDataMem(sql1, rows)

    # run
    def run(self):
        """Case entry point."""
        tdLog.debug(f"start to excute {__file__}")
        # TS-30189
        self.ts_30189()
        tdLog.success(f"{__file__} successfully executed")
# register this case with both the Linux and Windows test runners
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())

View File

@ -0,0 +1,672 @@
from frame.log import *
from frame.cases import *
from frame.sql import *
from frame.caseBase import *
from frame import *
from frame.eos import *
class TDTestCase(TBase):
"""Add test case to improve the compare.c coverage
"""
def init(self, conn, logSql, replicaVar=1):
    """Initialise the case: record the replica count and bind the SQL helper to the connection."""
    self.replicaVar = int(replicaVar)
    tdLog.debug(f"start to execute {__file__}")
    tdSql.init(conn.cursor())
def prepare_data(self):
    """Create database `db` and table `rt` with one column per comparable type,
    then insert six rows covering the comparison edge cases."""
    # database
    tdSql.execute("create database db;")
    tdSql.execute("use db;")
    # create table: one column for each signed/unsigned/float/string type
    tdSql.execute("create table rt(ts timestamp, c_int8 tinyint, c_uint8 tinyint unsigned, c_bool bool, c_int16 smallint, c_uint16 smallint unsigned, c_int32 int, c_uint32 int unsigned, c_float float, c_int64 bigint, c_uint64 bigint unsigned, c_double double, c_binary binary(16), c_varchar varchar(16), c_nchar nchar(16), c_varbinary varbinary(16));")
    # insert data: row 3 has a NULL float, row 6 a negative smallint
    sql = "insert into rt values \
        ('2024-05-08 12:00:00.000', 1, 2, true, 1, 2, 1, 2, 1.1, 111111, 222222, 111111.111111, 'a', 'a', 'a', \"0x01\"), \
        ('2024-05-08 12:00:01.000', 2, 1, false, 2, 1, 2, 1, 2.2, 222222, 111111, 222222.222222, 'b', 'b', 'b', \"0x02\"), \
        ('2024-05-08 12:00:02.000', 3, 3, true, 3, 3, 3, 3, null, 333333, 333333, 3.1111111, 'c', 'c', 'c', \"0x03\"), \
        ('2024-05-08 12:00:03.000', 4, 4, false, 4, 4, 4, 4, 4.4, 444444, 222222, 444444.444444, 'd', 'd', 'd', \"0x04\"), \
        ('2024-05-08 12:00:04.000', 5, 5, true, 5, 5, 5, 5, 5.5, 2, 3, 555555.555555, 'e', 'e', 'e', \"0x05\"), \
        ('2024-05-08 12:00:05.000', 6, 6, false, -5, 5, 5, 5, 5.0, 6, 6, 5, 'e', 'e', 'e', \"0x06\");"
    tdSql.execute(sql)
def test_notin(self):
    """NOT IN filters across value widths and string types (setChkNotIn* coverage)."""
    checks = [
        # setChkNotInBytes1
        ("select * from rt where c_int8 not in (6, 7);", 5),
        ("select * from rt where c_int8 not in (1, 2);", 4),
        ("select * from rt where c_bool not in (true);", 3),
        # setChkNotInBytes8
        ("select * from rt where c_int64 not in (6666666, 7777777);", 6),
        ("select * from rt where c_uint64 not in (5555555555);", 6),
        ("select * from rt where c_double not in (111111.111111, 222222.222222);", 4),
        # setChkNotInString
        ("select * from rt where c_binary not in ('f', 'g', 'h');", 6),
        ("select * from rt where c_varchar not in ('a', 'b', 'c');", 3),
        ("select * from rt where c_nchar not in ('d', 'e', 'f');", 3),
        ("select * from rt where c_varbinary not in ('0x01', '0x02');", 4),
    ]
    for sql, expected in checks:
        tdSql.query(sql)
        tdSql.checkRows(expected)
def test_compare_value(self):
    """Row-count checks for compareUint16Val / compareFloatVal / compareDoubleVal."""
    # compareUint16Val and the NULL float row
    plain = [
        ("select * from rt where c_uint16 = 5;", 2),
        ("select * from rt where c_uint16 < 5;", 4),
        # compareFloatVal: exactly one row carries a NULL float
        ("select * from rt where c_float is null;", 1),
    ]
    for sql, expected in plain:
        tdSql.query(sql)
        tdSql.checkRows(expected)
    # 1-minute self join; compare the same column of both sides
    join = ("select * from rt t1 left join rt t2 on timetruncate(t1.ts, 1m) = "
            "timetruncate(t2.ts, 1m) where t1.{0} {1} t2.{0};")
    for op, expected in (("<", 10), (">", 10), ("=", 5)):
        tdSql.query(join.format("c_float", op))
        tdSql.checkRows(expected)
    # compareDoubleVal
    for op, expected in (("=", 6), ("<", 15), (">", 15)):
        tdSql.query(join.format("c_double", op))
        tdSql.checkRows(expected)
def test_compareInt8Int16(self):
    """Row counts for c_int8 vs c_int16 over the 1-minute self join."""
    tmpl = ("select * from rt t1 left join rt t2 on timetruncate(t1.ts, 1m) = "
            "timetruncate(t2.ts, 1m) where t1.c_int8 {} t2.c_int16;")
    for op, rows in ((">", 21), ("<", 10), ("=", 5)):
        tdSql.query(tmpl.format(op))
        tdSql.checkRows(rows)
def test_compareInt8Int32(self):
    """Row counts for c_int8 vs c_int32 over the 1-minute self join."""
    tmpl = ("select * from rt t1 left join rt t2 on timetruncate(t1.ts, 1m) = "
            "timetruncate(t2.ts, 1m) where t1.c_int8 {} t2.c_int32;")
    for op, rows in (("<", 14), (">", 16), ("=", 6)):
        tdSql.query(tmpl.format(op))
        tdSql.checkRows(rows)
def test_compareInt8Int64(self):
    """Row counts for c_int8 vs c_int64 over the 1-minute self join."""
    tmpl = ("select * from rt t1 left join rt t2 on timetruncate(t1.ts, 1m) = "
            "timetruncate(t2.ts, 1m) where t1.c_int8 {} t2.c_int64;")
    for op, rows in (("<", 30), (">", 4), ("=", 2)):
        tdSql.query(tmpl.format(op))
        tdSql.checkRows(rows)
def test_compareInt8Double(self):
    """Row counts for c_int8 vs c_double over the 1-minute self join."""
    tmpl = ("select * from rt t1 left join rt t2 on timetruncate(t1.ts, 1m) = "
            "timetruncate(t2.ts, 1m) where t1.c_int8 {} t2.c_double;")
    for op, rows in (("<", 31), (">", 4), ("=", 1)):
        tdSql.query(tmpl.format(op))
        tdSql.checkRows(rows)
def test_compareInt8Uint8(self):
    """Row counts for c_int8 vs c_uint8 over the 1-minute self join."""
    tmpl = ("select * from rt t1 left join rt t2 on timetruncate(t1.ts, 1m) = "
            "timetruncate(t2.ts, 1m) where t1.c_int8 {} t2.c_uint8;")
    for op, rows in (("<", 15), (">", 15), ("=", 6)):
        tdSql.query(tmpl.format(op))
        tdSql.checkRows(rows)
def test_compareInt8Uint16(self):
    """Row counts for c_int8 vs c_uint16 over the 1-minute self join."""
    tmpl = ("select * from rt t1 left join rt t2 on timetruncate(t1.ts, 1m) = "
            "timetruncate(t2.ts, 1m) where t1.c_int8 {} t2.c_uint16;")
    for op, rows in (("<", 14), (">", 16), ("=", 6)):
        tdSql.query(tmpl.format(op))
        tdSql.checkRows(rows)
def test_compareInt8Uint32(self):
    """Row counts for c_int8 vs c_uint32 over the 1-minute self join."""
    tmpl = ("select * from rt t1 left join rt t2 on timetruncate(t1.ts, 1m) = "
            "timetruncate(t2.ts, 1m) where t1.c_int8 {} t2.c_uint32;")
    for op, rows in (("<", 14), (">", 16), ("=", 6)):
        tdSql.query(tmpl.format(op))
        tdSql.checkRows(rows)
def test_compareInt8Uint64(self):
    """Row counts for c_int8 vs c_uint64 over the 1-minute self join."""
    tmpl = ("select * from rt t1 left join rt t2 on timetruncate(t1.ts, 1m) = "
            "timetruncate(t2.ts, 1m) where t1.c_int8 {} t2.c_uint64;")
    for op, rows in (("<", 31), (">", 3), ("=", 2)):
        tdSql.query(tmpl.format(op))
        tdSql.checkRows(rows)
def test_compareInt16Int32(self):
    """Row counts for c_int16 vs c_int32 over the 1-minute self join."""
    tmpl = ("select * from rt t1 left join rt t2 on timetruncate(t1.ts, 1m) = "
            "timetruncate(t2.ts, 1m) where t1.c_int16 {} t2.c_int32;")
    for op, rows in (("<", 20), (">", 10), ("=", 6)):
        tdSql.query(tmpl.format(op))
        tdSql.checkRows(rows)
def test_compareInt16Int64(self):
    # Row counts for c_int16 vs c_int64 over the 1-minute self join.
    tdSql.query("select * from rt t1 left join rt t2 on timetruncate(t1.ts, 1m) = timetruncate(t2.ts, 1m) where t1.c_int16 < t2.c_int64;")
    tdSql.checkRows(32)
    tdSql.query("select * from rt t1 left join rt t2 on timetruncate(t1.ts, 1m) = timetruncate(t2.ts, 1m) where t1.c_int16 > t2.c_int64;")
    tdSql.checkRows(3)
    # NOTE(review): this equality query compares c_int16 with c_int32, not
    # c_int64 — looks like a copy-paste from test_compareInt16Int32; confirm
    # whether c_int64 was intended (expected row count would need re-deriving).
    tdSql.query("select * from rt t1 left join rt t2 on timetruncate(t1.ts, 1m) = timetruncate(t2.ts, 1m) where t1.c_int16 = t2.c_int32;")
    tdSql.checkRows(6)
def test_compareInt16Double(self):
    """Row counts for c_int16 vs c_double over the 1-minute self join."""
    tmpl = ("select * from rt t1 left join rt t2 on timetruncate(t1.ts, 1m) = "
            "timetruncate(t2.ts, 1m) where t1.c_int16 {} t2.c_double;")
    for op, rows in (("<", 33), (">", 2), ("=", 1)):
        tdSql.query(tmpl.format(op))
        tdSql.checkRows(rows)
def test_compareInt16Uint8(self):
    """Row counts for c_int16 vs c_uint8 over the 1-minute self join."""
    tmpl = ("select * from rt t1 left join rt t2 on timetruncate(t1.ts, 1m) = "
            "timetruncate(t2.ts, 1m) where t1.c_int16 {} t2.c_uint8;")
    for op, rows in (("<", 21), (">", 10), ("=", 5)):
        tdSql.query(tmpl.format(op))
        tdSql.checkRows(rows)
def test_compareInt16Uint16(self):
    """Row counts for c_int16 vs c_uint16 over the 1-minute self join."""
    tmpl = ("select * from rt t1 left join rt t2 on timetruncate(t1.ts, 1m) = "
            "timetruncate(t2.ts, 1m) where t1.c_int16 {} t2.c_uint16;")
    for op, rows in (("<", 20), (">", 10), ("=", 6)):
        tdSql.query(tmpl.format(op))
        tdSql.checkRows(rows)
def test_compareInt16Uint32(self):
    """Row counts for c_int16 vs c_uint32 over the 1-minute self join."""
    tmpl = ("select * from rt t1 left join rt t2 on timetruncate(t1.ts, 1m) = "
            "timetruncate(t2.ts, 1m) where t1.c_int16 {} t2.c_uint32;")
    for op, rows in (("<", 20), (">", 10), ("=", 6)):
        tdSql.query(tmpl.format(op))
        tdSql.checkRows(rows)
def test_compareInt16Uint64(self):
    """Row counts for c_int16 vs c_uint64 over the 1-minute self join."""
    tmpl = ("select * from rt t1 left join rt t2 on timetruncate(t1.ts, 1m) = "
            "timetruncate(t2.ts, 1m) where t1.c_int16 {} t2.c_uint64;")
    for op, rows in (("<", 33), (">", 2), ("=", 1)):
        tdSql.query(tmpl.format(op))
        tdSql.checkRows(rows)
def test_compareInt32Int16(self):
    """Row counts for c_int32 vs c_int16 over the 1-minute self join."""
    tmpl = ("select * from rt t1 left join rt t2 on timetruncate(t1.ts, 1m) = "
            "timetruncate(t2.ts, 1m) where t1.c_int32 {} t2.c_int16;")
    for op, rows in (("<", 10), (">", 20), ("=", 6)):
        tdSql.query(tmpl.format(op))
        tdSql.checkRows(rows)
def test_compareInt32Int64(self):
    """Row counts for c_int32 vs c_int64 over the 1-minute self join."""
    tmpl = ("select * from rt t1 left join rt t2 on timetruncate(t1.ts, 1m) = "
            "timetruncate(t2.ts, 1m) where t1.c_int32 {} t2.c_int64;")
    for op, rows in (("<", 31), (">", 4), ("=", 1)):
        tdSql.query(tmpl.format(op))
        tdSql.checkRows(rows)
def test_compareInt32Float(self):
    """Row counts for c_int32 vs c_float over the 1-minute self join."""
    tmpl = ("select * from rt t1 left join rt t2 on timetruncate(t1.ts, 1m) = "
            "timetruncate(t2.ts, 1m) where t1.c_int32 {} t2.c_float;")
    for op, rows in (("<", 17), (">", 11), ("=", 2)):
        tdSql.query(tmpl.format(op))
        tdSql.checkRows(rows)
def test_compareInt32Uint8(self):
    """Row counts for c_int32 vs c_uint8 over the 1-minute self join."""
    tmpl = ("select * from rt t1 left join rt t2 on timetruncate(t1.ts, 1m) = "
            "timetruncate(t2.ts, 1m) where t1.c_int32 {} t2.c_uint8;")
    for op, rows in (("<", 16), (">", 14), ("=", 6)):
        tdSql.query(tmpl.format(op))
        tdSql.checkRows(rows)
def test_compareInt32Uint16(self):
    """Row counts for c_int32 vs c_uint16 over the 1-minute self join."""
    tmpl = ("select * from rt t1 left join rt t2 on timetruncate(t1.ts, 1m) = "
            "timetruncate(t2.ts, 1m) where t1.c_int32 {} t2.c_uint16;")
    for op, rows in (("<", 14), (">", 14), ("=", 8)):
        tdSql.query(tmpl.format(op))
        tdSql.checkRows(rows)
def test_compareInt32Uint32(self):
    """Row counts for c_int32 vs c_uint32 over the 1-minute self join."""
    tmpl = ("select * from rt t1 left join rt t2 on timetruncate(t1.ts, 1m) = "
            "timetruncate(t2.ts, 1m) where t1.c_int32 {} t2.c_uint32;")
    for op, rows in (("<", 14), (">", 14), ("=", 8)):
        tdSql.query(tmpl.format(op))
        tdSql.checkRows(rows)
def test_compareInt32Uint64(self):
    """Row counts for c_int32 vs c_uint64 over the 1-minute self join."""
    tmpl = ("select * from rt t1 left join rt t2 on timetruncate(t1.ts, 1m) = "
            "timetruncate(t2.ts, 1m) where t1.c_int32 {} t2.c_uint64;")
    for op, rows in (("<", 32), (">", 3), ("=", 1)):
        tdSql.query(tmpl.format(op))
        tdSql.checkRows(rows)
def test_compareInt64Uint8(self):
    """Row counts for c_int64 vs c_uint8 over the 1-minute self join."""
    tmpl = ("select * from rt t1 left join rt t2 on timetruncate(t1.ts, 1m) = "
            "timetruncate(t2.ts, 1m) where t1.c_int64 {} t2.c_uint8;")
    for op, rows in (("<", 4), (">", 30), ("=", 2)):
        tdSql.query(tmpl.format(op))
        tdSql.checkRows(rows)
def test_compareInt64Uint16(self):
    """Row counts for c_int64 vs c_uint16 over the 1-minute self join."""
    tmpl = ("select * from rt t1 left join rt t2 on timetruncate(t1.ts, 1m) = "
            "timetruncate(t2.ts, 1m) where t1.c_int64 {} t2.c_uint16;")
    for op, rows in (("<", 4), (">", 31), ("=", 1)):
        tdSql.query(tmpl.format(op))
        tdSql.checkRows(rows)
def test_compareInt64Uint32(self):
    """Row counts for t1.c_int64 <, >, = t2.c_uint32 over the minute-aligned self-join of rt."""
    sql = "select * from rt t1 left join rt t2 on timetruncate(t1.ts, 1m) = timetruncate(t2.ts, 1m) where t1.c_int64 %s t2.c_uint32;"
    for op, rows in (("<", 4), (">", 31), ("=", 1)):
        tdSql.query(sql % op)
        tdSql.checkRows(rows)
def test_compareFloatInt32(self):
    """Row counts for t1.c_float <, >, = t2.c_int32 over the minute-aligned self-join of rt."""
    sql = "select * from rt t1 left join rt t2 on timetruncate(t1.ts, 1m) = timetruncate(t2.ts, 1m) where t1.c_float %s t2.c_int32;"
    for op, rows in (("<", 11), (">", 17), ("=", 2)):
        tdSql.query(sql % op)
        tdSql.checkRows(rows)
def test_compareFloatUint8(self):
    """Row counts for t1.c_float <, >, = t2.c_uint8 over the minute-aligned self-join of rt."""
    sql = "select * from rt t1 left join rt t2 on timetruncate(t1.ts, 1m) = timetruncate(t2.ts, 1m) where t1.c_float %s t2.c_uint8;"
    for op, rows in (("<", 13), (">", 16), ("=", 1)):
        tdSql.query(sql % op)
        tdSql.checkRows(rows)
def test_compareFloatUint16(self):
    """Row counts for t1.c_float <, >, = t2.c_uint16 over the minute-aligned self-join of rt."""
    sql = "select * from rt t1 left join rt t2 on timetruncate(t1.ts, 1m) = timetruncate(t2.ts, 1m) where t1.c_float %s t2.c_uint16;"
    for op, rows in (("<", 11), (">", 17), ("=", 2)):
        tdSql.query(sql % op)
        tdSql.checkRows(rows)
def test_compareFloatUint32(self):
    """Row counts for t1.c_float <, >, = t2.c_uint32 over the minute-aligned self-join of rt."""
    sql = "select * from rt t1 left join rt t2 on timetruncate(t1.ts, 1m) = timetruncate(t2.ts, 1m) where t1.c_float %s t2.c_uint32;"
    for op, rows in (("<", 11), (">", 17), ("=", 2)):
        tdSql.query(sql % op)
        tdSql.checkRows(rows)
def test_compareFloatUint64(self):
    """Row counts for t1.c_float <, >, = t2.c_uint64 over the minute-aligned self-join of rt."""
    sql = "select * from rt t1 left join rt t2 on timetruncate(t1.ts, 1m) = timetruncate(t2.ts, 1m) where t1.c_float %s t2.c_uint64;"
    for op, rows in (("<", 27), (">", 3), ("=", 0)):
        tdSql.query(sql % op)
        tdSql.checkRows(rows)
def test_compareDoubleUint8(self):
    """Row counts for t1.c_double <, >, = t2.c_uint8 over the minute-aligned self-join of rt."""
    sql = "select * from rt t1 left join rt t2 on timetruncate(t1.ts, 1m) = timetruncate(t2.ts, 1m) where t1.c_double %s t2.c_uint8;"
    for op, rows in (("<", 4), (">", 31), ("=", 1)):
        tdSql.query(sql % op)
        tdSql.checkRows(rows)
def test_compareDoubleUint16(self):
    """Row counts for t1.c_double <, >, = t2.c_uint16 over the minute-aligned self-join of rt."""
    sql = "select * from rt t1 left join rt t2 on timetruncate(t1.ts, 1m) = timetruncate(t2.ts, 1m) where t1.c_double %s t2.c_uint16;"
    for op, rows in (("<", 3), (">", 31), ("=", 2)):
        tdSql.query(sql % op)
        tdSql.checkRows(rows)
def test_compareDoubleUint32(self):
    """Row counts for t1.c_double <, >, = t2.c_uint32 over the minute-aligned self-join of rt."""
    sql = "select * from rt t1 left join rt t2 on timetruncate(t1.ts, 1m) = timetruncate(t2.ts, 1m) where t1.c_double %s t2.c_uint32;"
    for op, rows in (("<", 3), (">", 31), ("=", 2)):
        tdSql.query(sql % op)
        tdSql.checkRows(rows)
def test_compareDoubleUint64(self):
    """Row counts for t1.c_double <, >, = t2.c_uint64 over the minute-aligned self-join of rt."""
    sql = "select * from rt t1 left join rt t2 on timetruncate(t1.ts, 1m) = timetruncate(t2.ts, 1m) where t1.c_double %s t2.c_uint64;"
    for op, rows in (("<", 14), (">", 22), ("=", 0)):
        tdSql.query(sql % op)
        tdSql.checkRows(rows)
def test_compareUint8Int16(self):
    """Row counts for t1.c_uint8 <, >, = t2.c_int16 over the minute-aligned self-join of rt."""
    sql = "select * from rt t1 left join rt t2 on timetruncate(t1.ts, 1m) = timetruncate(t2.ts, 1m) where t1.c_uint8 %s t2.c_int16;"
    for op, rows in (("<", 10), (">", 21), ("=", 5)):
        tdSql.query(sql % op)
        tdSql.checkRows(rows)
def test_compareUint8Int32(self):
    """Row counts for t1.c_uint8 <, >, = t2.c_int32 over the minute-aligned self-join of rt."""
    sql = "select * from rt t1 left join rt t2 on timetruncate(t1.ts, 1m) = timetruncate(t2.ts, 1m) where t1.c_uint8 %s t2.c_int32;"
    for op, rows in (("<", 14), (">", 16), ("=", 6)):
        tdSql.query(sql % op)
        tdSql.checkRows(rows)
def test_compareUint8Int64(self):
    """Row counts for t1.c_uint8 <, >, = t2.c_int64 over the minute-aligned self-join of rt."""
    sql = "select * from rt t1 left join rt t2 on timetruncate(t1.ts, 1m) = timetruncate(t2.ts, 1m) where t1.c_uint8 %s t2.c_int64;"
    for op, rows in (("<", 30), (">", 4), ("=", 2)):
        tdSql.query(sql % op)
        tdSql.checkRows(rows)
def test_compareUint8Float(self):
    """Row counts for t1.c_uint8 <, >, = t2.c_float over the minute-aligned self-join of rt."""
    sql = "select * from rt t1 left join rt t2 on timetruncate(t1.ts, 1m) = timetruncate(t2.ts, 1m) where t1.c_uint8 %s t2.c_float;"
    for op, rows in (("<", 16), (">", 13), ("=", 1)):
        tdSql.query(sql % op)
        tdSql.checkRows(rows)
def test_compareUint8Double(self):
    """Row counts for t1.c_uint8 <, >, = t2.c_double over the minute-aligned self-join of rt."""
    sql = "select * from rt t1 left join rt t2 on timetruncate(t1.ts, 1m) = timetruncate(t2.ts, 1m) where t1.c_uint8 %s t2.c_double;"
    for op, rows in (("<", 31), (">", 4), ("=", 1)):
        tdSql.query(sql % op)
        tdSql.checkRows(rows)
def test_compareUint8Uint16(self):
    """Row counts for t1.c_uint8 <, >, = t2.c_uint16 over the minute-aligned self-join of rt."""
    sql = "select * from rt t1 left join rt t2 on timetruncate(t1.ts, 1m) = timetruncate(t2.ts, 1m) where t1.c_uint8 %s t2.c_uint16;"
    for op, rows in (("<", 14), (">", 16), ("=", 6)):
        tdSql.query(sql % op)
        tdSql.checkRows(rows)
def test_compareUint8Uint32(self):
    """Row counts for t1.c_uint8 <, >, = t2.c_uint32 over the minute-aligned self-join of rt."""
    sql = "select * from rt t1 left join rt t2 on timetruncate(t1.ts, 1m) = timetruncate(t2.ts, 1m) where t1.c_uint8 %s t2.c_uint32;"
    for op, rows in (("<", 14), (">", 16), ("=", 6)):
        tdSql.query(sql % op)
        tdSql.checkRows(rows)
def test_compareUint8Uint64(self):
    """Row counts for t1.c_uint8 <, >, = t2.c_uint64 over the minute-aligned self-join of rt."""
    sql = "select * from rt t1 left join rt t2 on timetruncate(t1.ts, 1m) = timetruncate(t2.ts, 1m) where t1.c_uint8 %s t2.c_uint64;"
    for op, rows in (("<", 31), (">", 3), ("=", 2)):
        tdSql.query(sql % op)
        tdSql.checkRows(rows)
def test_compareUint16Int16(self):
    """Row counts for t1.c_uint16 <, >, = t2.c_int16 over the minute-aligned self-join of rt."""
    sql = "select * from rt t1 left join rt t2 on timetruncate(t1.ts, 1m) = timetruncate(t2.ts, 1m) where t1.c_uint16 %s t2.c_int16;"
    for op, rows in (("<", 10), (">", 20), ("=", 6)):
        tdSql.query(sql % op)
        tdSql.checkRows(rows)
def test_compareUint16Int32(self):
    """Row counts for t1.c_uint16 <, >, = t2.c_int32 over the minute-aligned self-join of rt."""
    sql = "select * from rt t1 left join rt t2 on timetruncate(t1.ts, 1m) = timetruncate(t2.ts, 1m) where t1.c_uint16 %s t2.c_int32;"
    for op, rows in (("<", 14), (">", 14), ("=", 8)):
        tdSql.query(sql % op)
        tdSql.checkRows(rows)
def test_compareUint16Int64(self):
    """Row counts for t1.c_uint16 <, >, = t2.c_int64 over the minute-aligned self-join of rt."""
    sql = "select * from rt t1 left join rt t2 on timetruncate(t1.ts, 1m) = timetruncate(t2.ts, 1m) where t1.c_uint16 %s t2.c_int64;"
    for op, rows in (("<", 31), (">", 4), ("=", 1)):
        tdSql.query(sql % op)
        tdSql.checkRows(rows)
def test_compareUint16Float(self):
    """Row counts for t1.c_uint16 <, >, = t2.c_float over the minute-aligned self-join of rt."""
    sql = "select * from rt t1 left join rt t2 on timetruncate(t1.ts, 1m) = timetruncate(t2.ts, 1m) where t1.c_uint16 %s t2.c_float;"
    for op, rows in (("<", 17), (">", 11), ("=", 2)):
        tdSql.query(sql % op)
        tdSql.checkRows(rows)
def test_compareUint16Double(self):
    """Row counts for t1.c_uint16 <, >, = t2.c_double over the minute-aligned self-join of rt."""
    sql = "select * from rt t1 left join rt t2 on timetruncate(t1.ts, 1m) = timetruncate(t2.ts, 1m) where t1.c_uint16 %s t2.c_double;"
    for op, rows in (("<", 31), (">", 3), ("=", 2)):
        tdSql.query(sql % op)
        tdSql.checkRows(rows)
def test_compareUint16Uint8(self):
    """Row counts for t1.c_uint16 <, >, = t2.c_uint8 over the minute-aligned self-join of rt."""
    sql = "select * from rt t1 left join rt t2 on timetruncate(t1.ts, 1m) = timetruncate(t2.ts, 1m) where t1.c_uint16 %s t2.c_uint8;"
    for op, rows in (("<", 16), (">", 14), ("=", 6)):
        tdSql.query(sql % op)
        tdSql.checkRows(rows)
def test_compareUint16Uint32(self):
    """Row counts for t1.c_uint16 <, >, = t2.c_uint32 over the minute-aligned self-join of rt."""
    sql = "select * from rt t1 left join rt t2 on timetruncate(t1.ts, 1m) = timetruncate(t2.ts, 1m) where t1.c_uint16 %s t2.c_uint32;"
    for op, rows in (("<", 14), (">", 14), ("=", 8)):
        tdSql.query(sql % op)
        tdSql.checkRows(rows)
def test_compareUint32Int32(self):
    """Row counts for t1.c_uint32 <, >, = t2.c_int32 over the minute-aligned self-join of rt."""
    sql = "select * from rt t1 left join rt t2 on timetruncate(t1.ts, 1m) = timetruncate(t2.ts, 1m) where t1.c_uint32 %s t2.c_int32;"
    for op, rows in (("<", 14), (">", 14), ("=", 8)):
        tdSql.query(sql % op)
        tdSql.checkRows(rows)
def test_compareUint32Int64(self):
    """Row counts for t1.c_uint32 <, >, = t2.c_int64 over the minute-aligned self-join of rt."""
    sql = "select * from rt t1 left join rt t2 on timetruncate(t1.ts, 1m) = timetruncate(t2.ts, 1m) where t1.c_uint32 %s t2.c_int64;"
    for op, rows in (("<", 31), (">", 4), ("=", 1)):
        tdSql.query(sql % op)
        tdSql.checkRows(rows)
def test_compareUint32Float(self):
    """Row counts for t1.c_uint32 <, >, = t2.c_float over the minute-aligned self-join of rt."""
    sql = "select * from rt t1 left join rt t2 on timetruncate(t1.ts, 1m) = timetruncate(t2.ts, 1m) where t1.c_uint32 %s t2.c_float;"
    for op, rows in (("<", 17), (">", 11), ("=", 2)):
        tdSql.query(sql % op)
        tdSql.checkRows(rows)
def test_compareUint32Double(self):
    """Row counts for t1.c_uint32 <, >, = t2.c_double over the minute-aligned self-join of rt."""
    sql = "select * from rt t1 left join rt t2 on timetruncate(t1.ts, 1m) = timetruncate(t2.ts, 1m) where t1.c_uint32 %s t2.c_double;"
    for op, rows in (("<", 31), (">", 3), ("=", 2)):
        tdSql.query(sql % op)
        tdSql.checkRows(rows)
def test_compareUint32Uint8(self):
    """Row counts for t1.c_uint32 <, >, = t2.c_uint8 over the minute-aligned self-join of rt."""
    sql = "select * from rt t1 left join rt t2 on timetruncate(t1.ts, 1m) = timetruncate(t2.ts, 1m) where t1.c_uint32 %s t2.c_uint8;"
    for op, rows in (("<", 16), (">", 14), ("=", 6)):
        tdSql.query(sql % op)
        tdSql.checkRows(rows)
def test_compareUint32Uint16(self):
    """Row counts for t1.c_uint32 <, >, = t2.c_uint16 over the minute-aligned self-join of rt."""
    sql = "select * from rt t1 left join rt t2 on timetruncate(t1.ts, 1m) = timetruncate(t2.ts, 1m) where t1.c_uint32 %s t2.c_uint16;"
    for op, rows in (("<", 14), (">", 14), ("=", 8)):
        tdSql.query(sql % op)
        tdSql.checkRows(rows)
def test_compareUint32Uint64(self):
    """Row counts for t1.c_uint32 <, >, = t2.c_uint64 over the minute-aligned self-join of rt."""
    sql = "select * from rt t1 left join rt t2 on timetruncate(t1.ts, 1m) = timetruncate(t2.ts, 1m) where t1.c_uint32 %s t2.c_uint64;"
    for op, rows in (("<", 32), (">", 3), ("=", 1)):
        tdSql.query(sql % op)
        tdSql.checkRows(rows)
def test_compareUint64Int16(self):
    """Row counts for t1.c_uint64 <, >, = t2.c_int16 over the minute-aligned self-join of rt."""
    sql = "select * from rt t1 left join rt t2 on timetruncate(t1.ts, 1m) = timetruncate(t2.ts, 1m) where t1.c_uint64 %s t2.c_int16;"
    for op, rows in (("<", 2), (">", 33), ("=", 1)):
        tdSql.query(sql % op)
        tdSql.checkRows(rows)
def test_compareUint64Int32(self):
    """Row counts for t1.c_uint64 <, >, = t2.c_int32 over the minute-aligned self-join of rt."""
    sql = "select * from rt t1 left join rt t2 on timetruncate(t1.ts, 1m) = timetruncate(t2.ts, 1m) where t1.c_uint64 %s t2.c_int32;"
    for op, rows in (("<", 3), (">", 32), ("=", 1)):
        tdSql.query(sql % op)
        tdSql.checkRows(rows)
def test_compareUint64Int64(self):
    """Row counts for t1.c_uint64 <, >, = t2.c_int64 over the minute-aligned self-join of rt."""
    sql = "select * from rt t1 left join rt t2 on timetruncate(t1.ts, 1m) = timetruncate(t2.ts, 1m) where t1.c_uint64 %s t2.c_int64;"
    for op, rows in (("<", 17), (">", 14), ("=", 5)):
        tdSql.query(sql % op)
        tdSql.checkRows(rows)
def test_compareUint64Float(self):
    """Row counts for t1.c_uint64 <, >, = t2.c_float over the minute-aligned self-join of rt."""
    sql = "select * from rt t1 left join rt t2 on timetruncate(t1.ts, 1m) = timetruncate(t2.ts, 1m) where t1.c_uint64 %s t2.c_float;"
    for op, rows in (("<", 3), (">", 27), ("=", 0)):
        tdSql.query(sql % op)
        tdSql.checkRows(rows)
def test_compareUint64Double(self):
    """Row counts for t1.c_uint64 <, >, = t2.c_double over the minute-aligned self-join of rt."""
    sql = "select * from rt t1 left join rt t2 on timetruncate(t1.ts, 1m) = timetruncate(t2.ts, 1m) where t1.c_uint64 %s t2.c_double;"
    for op, rows in (("<", 22), (">", 14), ("=", 0)):
        tdSql.query(sql % op)
        tdSql.checkRows(rows)
def test_compareUint64Uint8(self):
    """Row counts for t1.c_uint64 <, >, = t2.c_uint8 over the minute-aligned self-join of rt."""
    sql = "select * from rt t1 left join rt t2 on timetruncate(t1.ts, 1m) = timetruncate(t2.ts, 1m) where t1.c_uint64 %s t2.c_uint8;"
    for op, rows in (("<", 3), (">", 31), ("=", 2)):
        tdSql.query(sql % op)
        tdSql.checkRows(rows)
def test_compareUint64Uint16(self):
    """Row counts for t1.c_uint64 <, >, = t2.c_uint16 over the minute-aligned self-join of rt."""
    sql = "select * from rt t1 left join rt t2 on timetruncate(t1.ts, 1m) = timetruncate(t2.ts, 1m) where t1.c_uint64 %s t2.c_uint16;"
    for op, rows in (("<", 3), (">", 32), ("=", 1)):
        tdSql.query(sql % op)
        tdSql.checkRows(rows)
def test_compareUint64Uint32(self):
    """Row counts for t1.c_uint64 <, >, = t2.c_uint32 over the minute-aligned self-join of rt."""
    sql = "select * from rt t1 left join rt t2 on timetruncate(t1.ts, 1m) = timetruncate(t2.ts, 1m) where t1.c_uint64 %s t2.c_uint32;"
    for op, rows in (("<", 3), (">", 32), ("=", 1)):
        tdSql.query(sql % op)
        tdSql.checkRows(rows)
def run(self):
    """Prepare fixture data, then drive every comparison sub-test.

    The dispatch table preserves the exact invocation order of the
    original hand-written call list.
    """
    self.prepare_data()
    self.test_notin()
    self.test_compare_value()
    # Each entry expands to self.test_compare<suffix>().
    suffixes = (
        "Int8Int16", "Int8Int32", "Int8Int64", "Int8Double",
        "Int8Uint8", "Int8Uint16", "Int8Uint32", "Int8Uint64",
        "Int16Int32", "Int16Int64", "Int16Double", "Int16Uint8",
        "Int16Uint16", "Int16Uint32", "Int16Uint64",
        "Int32Int16", "Int32Int64", "Int32Float", "Int32Uint8",
        "Int32Uint16", "Int32Uint32", "Int32Uint64",
        "Int64Uint8", "Int64Uint16", "Int64Uint32",
        "FloatInt32", "FloatUint8", "FloatUint16", "FloatUint32", "FloatUint64",
        "DoubleUint8", "DoubleUint16", "DoubleUint32", "DoubleUint64",
        "Uint8Int16", "Uint8Int32", "Uint8Int64", "Uint8Float", "Uint8Double",
        "Uint8Uint16", "Uint8Uint32", "Uint8Uint64",
        "Uint16Int16", "Uint16Int32", "Uint16Int64", "Uint16Float",
        "Uint16Double", "Uint16Uint8", "Uint16Uint32",
        "Uint32Int32", "Uint32Int64", "Uint32Float", "Uint32Double",
        "Uint32Uint8", "Uint32Uint16", "Uint32Uint64",
        "Uint64Int16", "Uint64Int32", "Uint64Int64", "Uint64Float",
        "Uint64Double", "Uint64Uint8", "Uint64Uint16", "Uint64Uint32",
    )
    for suffix in suffixes:
        getattr(self, "test_compare" + suffix)()
def stop(self):
    """Release the SQL connection and log successful completion."""
    tdSql.close()
    tdLog.success(f"{__file__} successfully executed")
# Register this case with the framework's case list for both platforms.
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,66 @@
{
"filetype": "insert",
"cfgdir": "/etc/taos",
"host": "127.0.0.1",
"port": 6030,
"user": "root",
"password": "taosdata",
"connection_pool_size": 8,
"num_of_records_per_req": 4000,
"prepared_rand": 500,
"thread_count": 4,
"create_table_thread_count": 1,
"confirm_parameter_prompt": "no",
"databases": [
{
"dbinfo": {
"name": "db",
"drop": "yes",
"vgroups": 2,
"replica": 1,
"duration":"10d",
"s3_keeplocal":"30d",
"s3_chunksize":"131072",
"tsdb_pagesize":"1",
"s3_compact":"1",
"wal_retention_size":"1",
"wal_retention_period":"1",
"flush_each_batch":"no",
"keep": "3650d"
},
"super_tables": [
{
"name": "stb",
"child_table_exists": "no",
"childtable_count": 6,
"insert_rows": 2000000,
"childtable_prefix": "d",
"insert_mode": "taosc",
"timestamp_step": 1000,
"start_timestamp": 1600000000000,
"columns": [
{ "type": "bool", "name": "bc"},
{ "type": "float", "name": "fc" },
{ "type": "double", "name": "dc"},
{ "type": "tinyint", "name": "ti"},
{ "type": "smallint", "name": "si" },
{ "type": "int", "name": "ic" ,"max": 1,"min": 1},
{ "type": "bigint", "name": "bi" },
{ "type": "utinyint", "name": "uti"},
{ "type": "usmallint", "name": "usi"},
{ "type": "uint", "name": "ui" },
{ "type": "ubigint", "name": "ubi"},
{ "type": "binary", "name": "bin", "len": 32},
{ "type": "nchar", "name": "nch", "len": 64}
],
"tags": [
{"type": "tinyint", "name": "groupid","max": 10,"min": 1},
{"name": "location","type": "binary", "len": 16, "values":
["San Francisco", "Los Angles", "San Diego", "San Jose", "Palo Alto", "Campbell", "Mountain View","Sunnyvale", "Santa Clara", "Cupertino"]
}
]
}
]
}
]
}

357
tests/army/s3/s3Basic.py Normal file
View File

@ -0,0 +1,357 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import time
import random
import taos
import frame
import frame.etool
import frame.eos
from frame.log import *
from frame.cases import *
from frame.sql import *
from frame.caseBase import *
from frame.srvCtl import *
from frame import *
from frame.eos import *
#
# 192.168.1.52 MINIO S3
#
'''
s3EndPoint http://192.168.1.52:9000
s3AccessKey 'zOgllR6bSnw2Ah3mCNel:cdO7oXAu3Cqdb1rUdevFgJMi0LtRwCXdWKQx4bhX'
s3BucketName ci-bucket
s3UploadDelaySec 60
for test:
"s3AccessKey" : "fGPPyYjzytw05nw44ViA:vK1VcwxgSOykicx6hk8fL1x15uEtyDSFU3w4hTaZ"
"s3BucketName": "test-bucket"
'''
class TDTestCase(TBase):
    """S3 tiered-storage basic test.

    Inserts data with taosBenchmark, migrates it to S3, verifies local
    .data file sizes, exercises s3_* database options (valid and invalid),
    a fill-history stream, disordered history inserts, and deletes.
    """

    # Randomize the bucket name so concurrent CI runs do not collide.
    index = util.cpuRand(20) + 1
    bucketName = f"ci-bucket{index}"
    updatecfgDict = {
        "supportVnodes": "1000",
        's3EndPoint': 'http://192.168.1.52:9000',
        's3AccessKey': 'zOgllR6bSnw2Ah3mCNel:cdO7oXAu3Cqdb1rUdevFgJMi0LtRwCXdWKQx4bhX',
        's3BucketName': f'{bucketName}',
        's3PageCacheSize': '10240',
        "s3UploadDelaySec": "10",
        's3MigrateIntervalSec': '600',
        's3MigrateEnabled': '1'
    }
    tdLog.info(f"assign bucketName is {bucketName}\n")
    # Local .data files must stay under 128MB plus a 10MB buffer.
    # Fixed: was (128 + 10) * 1014 * 1024 — 1014 was a typo for 1024.
    maxFileSize = (128 + 10) * 1024 * 1024  # add 10M buffer

    def insertData(self):
        """Load fixture rows via taosBenchmark and record the fixture shape."""
        tdLog.info(f"insert data.")
        # taosBenchmark run
        json = etool.curFile(__file__, "s3Basic.json")
        etool.benchMark(json=json)

        tdSql.execute(f"use {self.db}")
        # values come from s3Basic.json
        self.childtable_count = 6
        self.insert_rows = 2000000
        self.timestamp_step = 1000

    def createStream(self, sname):
        """Create a fill-history count stream named `sname` into table stm1."""
        sql = f"create stream {sname} fill_history 1 into stm1 as select count(*) from {self.db}.{self.stb} interval(10s);"
        tdSql.execute(sql)

    def migrateDbS3(self):
        """Trigger an explicit S3 migration for the test database."""
        sql = f"s3migrate database {self.db}"
        tdSql.execute(sql, show=True)

    def checkDataFile(self, lines, maxFileSize):
        """Count .data files larger than `maxFileSize`.

        `lines` is `ls -l` output, e.g.:
        -rwxrwxrwx 1 root root 41652224 Apr 17 14:47 vnode2/tsdb/v2f1974ver47.3.data
        Returns the number of oversized files.
        """
        overCnt = 0
        for line in lines:
            cols = line.split()
            fileSize = int(cols[4])   # 5th ls -l column is the byte size
            fileName = cols[8]        # 9th column is the path
            if fileSize > maxFileSize:
                tdLog.info(f"error, {fileSize} over max size({maxFileSize}) {fileName}\n")
                overCnt += 1
            else:
                tdLog.info(f"{fileName}({fileSize}) check size passed.")
        return overCnt

    def checkUploadToS3(self):
        """Poll until local .data files are gone or all fit under maxFileSize.

        Retries up to 200 times (3s apart), restarting dnode 1 once at
        loop 3 and re-issuing s3migrate each round; exits the case if
        oversized files remain.
        """
        rootPath = sc.clusterRootPath()
        cmd = f"ls -l {rootPath}/dnode*/data/vnode/vnode*/tsdb/*.data"
        tdLog.info(cmd)
        loop = 0
        rets = []
        overCnt = 0
        while loop < 200:
            time.sleep(3)

            # check upload to s3
            rets = eos.runRetList(cmd)
            cnt = len(rets)
            if cnt == 0:
                overCnt = 0
                tdLog.info("All data file upload to server over.")
                break
            overCnt = self.checkDataFile(rets, self.maxFileSize)
            if overCnt == 0:
                tdLog.info(f"All data files({len(rets)}) size bellow {self.maxFileSize}, check upload to s3 ok.")
                break
            tdLog.info(f"loop={loop} no upload {overCnt} data files wait 3s retry ...")
            # one-shot dnode restart to kick a stalled migration
            if loop == 3:
                sc.dnodeStop(1)
                time.sleep(2)
                sc.dnodeStart(1)
            loop += 1
            # migrate again for the next round
            self.migrateDbS3()

        # fail the case if oversized files never drained
        if overCnt > 0:
            tdLog.exit(f"s3 have {overCnt} files over size.")

    def doAction(self):
        """Flush the db, migrate to S3, and verify the upload completed."""
        tdLog.info(f"do action.")
        self.flushDb(show=True)

        self.migrateDbS3()

        # check upload to s3
        self.checkUploadToS3()

    def checkStreamCorrect(self):
        """Wait up to 120s for the stream count to reach 100000/100001."""
        sql = f"select count(*) from {self.db}.stm1"
        count = 0
        for i in range(120):
            tdSql.query(sql)
            count = tdSql.getData(0, 0)
            if count == 100000 or count == 100001:
                return True
            time.sleep(1)
        tdLog.exit(f"stream count is not expect . expect = 100000 or 100001 real={count} . sql={sql}")

    def checkCreateDb(self, keepLocal, chunkSize, compact):
        """Create db1 with the given s3 options (None = omit) and verify
        information_schema.ins_databases reflects them, then drop db1."""
        kw1 = kw2 = kw3 = ""
        if keepLocal is not None:
            kw1 = f"s3_keeplocal {keepLocal}"
        if chunkSize is not None:
            kw2 = f"s3_chunksize {chunkSize}"
        if compact is not None:
            kw3 = f"s3_compact {compact}"

        sql = f" create database db1 duration 1h {kw1} {kw2} {kw3}"
        tdSql.execute(sql, show=True)
        sql = f"select * from information_schema.ins_databases where name='db1';"
        tdSql.query(sql)
        # columns 29/30/31 -> s3_chunksize / s3_keeplocal / s3_compact
        if chunkSize is not None:
            tdSql.checkData(0, 29, chunkSize)
        if keepLocal is not None:
            # keeplocal is reported in minutes (keepLocal is in days)
            keepLocalm = keepLocal * 24 * 60
            tdSql.checkData(0, 30, f"{keepLocalm}m")
        if compact is not None:
            tdSql.checkData(0, 31, compact)
        sql = "drop database db1"
        tdSql.execute(sql)

    def checkExcept(self):
        """Out-of-range s3 option values must all be rejected."""
        sqls = [
            f"create database db2 s3_keeplocal -1",
            f"create database db2 s3_keeplocal 0",
            f"create database db2 s3_keeplocal 365001",
            f"create database db2 s3_chunksize -1",
            f"create database db2 s3_chunksize 0",
            f"create database db2 s3_chunksize 900000000",
            f"create database db2 s3_compact -1",
            f"create database db2 s3_compact 100",
            f"create database db2 duration 1d s3_keeplocal 1d"
        ]
        tdSql.errors(sqls)

    def checkBasic(self):
        """Sweep valid s3 option combinations, run taosd --checks3, then
        verify invalid values are rejected."""
        # create db with every keep/chunk/compact combination (None = default)
        keeps = [1, 256, 1024, 365000, None]
        chunks = [131072, 600000, 820000, 1048576, None]
        comps = [0, 1, None]
        for keep in keeps:
            for chunk in chunks:
                for comp in comps:
                    self.checkCreateDb(keep, chunk, comp)

        # taosd --checks3 smoke test against dnode 1's config
        idx = 1
        taosd = sc.taosdFile(idx)
        cfg = sc.dnodeCfgPath(idx)
        cmd = f"{taosd} -c {cfg} --checks3"
        eos.exe(cmd)

        # invalid option values
        self.checkExcept()

    def preDb(self, vgroups):
        """Create and drop a throwaway database 1-2 times with a random
        vgroup count to shake out vnode assignment before the real run.
        NOTE(review): the `vgroups` parameter is currently unused — kept
        for interface compatibility."""
        cnt = int(time.time()) % 2 + 1
        for _ in range(cnt):
            vg = util.cpuRand(9) + 1
            sql = f"create database predb vgroups {vg}"
            tdSql.execute(sql, show=True)
            # drop inside the loop so the next create does not collide
            sql = "drop database predb"
            tdSql.execute(sql, show=True)

    def insertHistory(self):
        """Insert disordered history rows (s3Basic1.json) and update the
        expected fixture shape."""
        tdLog.info(f"insert history data.")
        json = etool.curFile(__file__, "s3Basic1.json")
        etool.benchMark(json=json)

        # s3Basic1.json halves the step and adds a quarter of the rows;
        # use integer division to keep insert_rows an int
        self.insert_rows += self.insert_rows // 4
        self.timestamp_step = 500

    def checkDelete(self):
        """Delete single rows and a 100k-row range, verifying counts."""
        # delete 100 individual history rows (odd 500ms offsets)
        start = 1600000000000
        drows = 200
        for i in range(1, drows, 2):
            sql = f"from {self.db}.{self.stb} where ts = {start + i*500}"
            tdSql.execute("delete " + sql, show=True)
            tdSql.query("select * " + sql)
            tdSql.checkRows(0)

        # flush/compact, then verify the remaining row count
        self.flushDb()
        self.compactDb()
        self.insert_rows -= drows // 2
        sql = f"select count(*) from {self.db}.{self.stb}"
        tdSql.checkAgg(sql, self.insert_rows * self.childtable_count)

        # delete a 100k-row range starting at row 100000
        drows = 100000
        sdel = start + 100000 * self.timestamp_step
        edel = start + 100000 * self.timestamp_step + drows * self.timestamp_step
        sql = f"from {self.db}.{self.stb} where ts >= {sdel} and ts < {edel}"
        tdSql.execute("delete " + sql, show=True)
        tdSql.query("select * " + sql)
        tdSql.checkRows(0)

        self.insert_rows -= drows
        sql = f"select count(*) from {self.db}.{self.stb}"
        tdSql.checkAgg(sql, self.insert_rows * self.childtable_count)

    def run(self):
        """Full scenario: insert, stream, migrate, verify, history, delete."""
        tdLog.debug(f"start to excute {__file__}")
        self.sname = "stream1"
        if eos.isArm64Cpu():
            # S3 checks are skipped on arm64 CI machines
            tdLog.success(f"{__file__} arm64 ignore executed")
        else:
            self.preDb(10)
            # insert data
            self.insertData()
            # create stream
            self.createStream(self.sname)
            # save aggregates for later comparison
            self.snapshotAgg()
            # flush + migrate + verify upload
            self.doAction()
            # check saved agg result is unchanged
            self.checkAggCorrect()
            # check insert correct again
            self.checkInsertCorrect()
            # option sweep + --checks3 + error cases
            self.checkBasic()
            # drop stream
            self.dropStream(self.sname)
            # insert history disorder data and re-verify
            self.insertHistory()
            self.snapshotAgg()
            self.doAction()
            self.checkAggCorrect()
            self.checkInsertCorrect(difCnt=self.childtable_count*1499999)
            self.checkDelete()
            self.doAction()
            # drop database and free s3 file
            self.dropDb()

        tdLog.success(f"{__file__} successfully executed")
# Register this case with the framework for both Linux and Windows runs.
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())

View File

@ -0,0 +1,66 @@
{
"filetype": "insert",
"cfgdir": "/etc/taos",
"host": "127.0.0.1",
"port": 6030,
"user": "root",
"password": "taosdata",
"connection_pool_size": 8,
"num_of_records_per_req": 5000,
"prepared_rand": 500,
"thread_count": 4,
"create_table_thread_count": 1,
"confirm_parameter_prompt": "no",
"databases": [
{
"dbinfo": {
"name": "db",
"drop": "no",
"vgroups": 2,
"replica": 1,
"duration":"10d",
"s3_keeplocal":"30d",
"s3_chunksize":"131072",
"tsdb_pagesize":"1",
"s3_compact":"1",
"wal_retention_size":"1",
"wal_retention_period":"1",
"flush_each_batch":"no",
"keep": "3650d"
},
"super_tables": [
{
"name": "stb",
"child_table_exists": "yes",
"childtable_count": 6,
"insert_rows": 1000000,
"childtable_prefix": "d",
"insert_mode": "taosc",
"timestamp_step": 500,
"start_timestamp": 1600000000000,
"columns": [
{ "type": "bool", "name": "bc"},
{ "type": "float", "name": "fc" },
{ "type": "double", "name": "dc"},
{ "type": "tinyint", "name": "ti"},
{ "type": "smallint", "name": "si" },
{ "type": "int", "name": "ic" ,"max": 1,"min": 1},
{ "type": "bigint", "name": "bi" },
{ "type": "utinyint", "name": "uti"},
{ "type": "usmallint", "name": "usi"},
{ "type": "uint", "name": "ui" },
{ "type": "ubigint", "name": "ubi"},
{ "type": "binary", "name": "bin", "len": 32},
{ "type": "nchar", "name": "nch", "len": 64}
],
"tags": [
{"type": "tinyint", "name": "groupid","max": 10,"min": 1},
{"name": "location","type": "binary", "len": 16, "values":
["San Francisco", "Los Angles", "San Diego", "San Jose", "Palo Alto", "Campbell", "Mountain View","Sunnyvale", "Santa Clara", "Cupertino"]
}
]
}
]
}
]
}

View File

@ -0,0 +1,290 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import time
import random
import taos
import frame
import frame.etool
from frame.log import *
from frame.cases import *
from frame.sql import *
from frame.caseBase import *
from frame import *
from frame.autogen import *
class TDTestCase(TBase):
    """Verify column-level storage options (ENCODE / COMPRESS / LEVEL):
    create-table syntax, negative cases, defaults, ALTER support, and data
    correctness after writes.
    """

    updatecfgDict = {
        "compressMsgSize" : "100",
    }

    # supported compress algorithms
    compresses = ["lz4","tsz","zlib","zstd","disabled","xz"]
    # supported compress levels
    levels = ["high","medium","low"]
    # default compress algorithm
    defCompress = "lz4"
    # default compress level
    defLevel = "medium"
    # the 17 column data types exercised by this case
    dtypes = [ "tinyint","tinyint unsigned","smallint","smallint unsigned","int","int unsigned",
               "bigint","bigint unsigned","timestamp","bool","float","double","binary(16)","nchar(16)",
               "varchar(16)","varbinary(16)"]

    def combineValid(self, datatype, encode, compress):
        """Return True when (datatype, encode, compress) is a legal combo.

        'tsz' compression only applies to float/double columns.
        """
        if datatype != "float" and datatype != "double":
            if compress == "tsz":
                return False
        return True

    def genAllSqls(self, stbName, max):
        """Generate create-table SQLs covering every valid combination of
        datatype x encode x compress x level, packing at most *max* columns
        per statement. Returns the list of SQL strings.
        """
        # legal encode algorithms per datatype family
        encodes = [
            [["tinyint","tinyint unsigned","smallint","smallint unsigned","int","int unsigned","bigint","bigint unsigned"], ["simple8B"]],
            [["timestamp","bigint","bigint unsigned"], ["Delta-i"]],
            [["bool"], ["Bit-packing"]],
            [["float","double"], ["Delta-d"]]
        ]

        c = 0  # column index within the current statement
        t = 0  # generated table index
        sqls = []
        sql = ""
        # loop over all combinations, appending completed statements
        for lines in encodes:
            for datatype in lines[0]:
                for encode in lines[1]:
                    for compress in self.compresses:
                        for level in self.levels:
                            if sql == "":
                                # start a new create statement
                                sql = f"create table {self.db}.st{t} (ts timestamp"
                            else:
                                if self.combineValid(datatype, encode, compress):
                                    sql += f", c{c} {datatype} ENCODE '{encode}' COMPRESS '{compress}' LEVEL '{level}'"
                                    c += 1

                            if c >= max:
                                # close the statement and start a new one
                                sql += f") tags(groupid int) "
                                sqls.append(sql)
                                # reset
                                sql = ""
                                c = 0
                                t += 1

        # close the trailing partially-filled statement, if any
        if c > 0:
            sql += f") tags(groupid int) "
            sqls.append(sql)
        return sqls

    def errorCreate(self):
        """Negative tests: invalid encode/compress/level options must fail."""
        # FIX: the original list was missing commas between the last two
        # strings, so implicit string concatenation merged them into one
        # malformed SQL and only two statements were actually tested.
        sqls = [
            f"create table terr(ts timestamp, c0 int ENCODE 'simple8B' COMPRESS 'tsz' LEVEL 'high') ",
            f"create table terr(ts timestamp, bi bigint encode 'bit-packing') tags (area int);",
            f"create table terr(ts timestamp, ic int encode 'delta-d') tags (area int);"
        ]
        tdSql.errors(sqls)

        for dtype in self.dtypes:
            # invalid encode name
            sql = f"create table terr(ts timestamp, c0 {dtype} ENCODE 'abc') "
            tdSql.error(sql)
            # invalid compress name
            sql = f"create table terr(ts timestamp, c0 {dtype} COMPRESS 'def') "
            tdSql.error(sql)
            # invalid level name
            sql = f"create table terr(ts timestamp, c0 {dtype} LEVEL 'hig') "
            tdSql.error(sql)

            # tsz is only legal for float/double
            if dtype != "float" and dtype != "double":
                sql = f"create table terr(ts timestamp, c0 {dtype} COMPRESS 'tsz') "
                tdSql.error(sql)

    def defaultCorrect(self):
        """Check default encode/compress/level reported by DESCRIBE."""
        # get default encode compress level
        sql = f"describe {self.db}.{self.stb}"
        tdSql.query(sql)

        # expected default encode per column position; see AutoGen.types
        defEncodes = [ "delta-i","delta-i","simple8b","simple8b","simple8b","simple8b","simple8b","simple8b",
                       "simple8b","simple8b","delta-d","delta-d","bit-packing",
                       "disabled","disabled","disabled","disabled"]

        count = tdSql.getRows()
        for i in range(count):
            node = tdSql.getData(i, 3)
            if node == "TAG":
                break
            # columns 4/5/6 are encode / compress / level
            tdSql.checkData(i, 4, defEncodes[i])
            tdSql.checkData(i, 5, self.defCompress)
            tdSql.checkData(i, 6, self.defLevel)

        # geometry columns must default to encode 'disabled'
        sql = f"create table {self.db}.ta(ts timestamp, pos geometry(64)) "
        tdSql.execute(sql)
        sql = f"describe {self.db}.ta"
        tdSql.query(sql)
        tdSql.checkData(1, 4, "disabled")

        tdLog.info("check default encode compress and level successfully.")

    def checkDataDesc(self, tbname, row, col, value):
        """Assert that DESCRIBE of *tbname* shows *value* at (row, col)."""
        sql = f"describe {tbname}"
        tdSql.query(sql)
        tdSql.checkData(row, col, value)

    def writeData(self, count):
        """Insert *count* generated rows (disorder mode) via AutoGen."""
        self.autoGen.insert_data(count, True)

    def checkAlter(self):
        """ALTER encode/compress/level on existing columns and keep writing
        data to make sure the table stays usable after each change."""
        tbname = f"{self.db}.{self.stb}"

        # alter encode (describe column index 4)
        comp = "delta-i"
        sql = f"alter table {tbname} modify column c7 ENCODE '{comp}';"
        tdSql.execute(sql, show=True)
        self.checkDataDesc(tbname, 8, 4, comp)
        self.writeData(1000)

        sql = f"alter table {tbname} modify column c8 ENCODE '{comp}';"
        tdSql.execute(sql, show=True)
        self.checkDataDesc(tbname, 9, 4, comp)
        self.writeData(1000)

        # alter compress (describe column index 5)
        comps = self.compresses[2:]
        comps.append(self.compresses[0]) # add lz4
        for comp in comps:
            for i in range(self.colCnt - 1):
                col = f"c{i}"
                sql = f"alter table {tbname} modify column {col} COMPRESS '{comp}';"
                tdSql.execute(sql, show=True)
                self.checkDataDesc(tbname, i + 1, 5, comp)
                self.writeData(1000)

        # alter float(c9) / double(c10) to tsz
        comp = "tsz"
        sql = f"alter table {tbname} modify column c9 COMPRESS '{comp}';"
        tdSql.execute(sql)
        self.checkDataDesc(tbname, 10, 5, comp)
        self.writeData(10000)
        sql = f"alter table {tbname} modify column c10 COMPRESS '{comp}';"
        tdSql.execute(sql)
        self.checkDataDesc(tbname, 11, 5, comp)
        self.writeData(10000)

        # alter level (describe column index 6)
        for level in self.levels:
            for i in range(self.colCnt - 1):
                col = f"c{i}"
                sql = f"alter table {tbname} modify column {col} LEVEL '{level}';"
                tdSql.execute(sql)
                self.writeData(1000)

        # alter statements that must fail
        sqls = [
            "alter table nodb.nostb modify column ts LEVEL 'high';",
            "alter table db.stb modify column ts encode 'simple8b';",
            "alter table db.stb modify column c1 compress 'errorcompress';",
            "alter table db.stb modify column c2 level 'errlevel';",
            "alter table db.errstb modify column c3 compress 'xz';"
        ]
        tdSql.errors(sqls)

    def validCreate(self):
        """Execute every generated valid create-table statement."""
        sqls = self.genAllSqls(self.stb, 50)
        tdSql.executes(sqls, show=True)

    def checkSqlSyntax(self):
        """Drive all syntax checks: positive, negative, defaults, alter."""
        # create tables: positive cases
        self.validCreate()
        # create tables: negative cases
        self.errorCreate()
        # default values are reported correctly
        self.defaultCorrect()
        # alter and keep writing
        self.checkAlter()

    def checkCorrect(self):
        """Page through all rows and verify data via AutoGen.dataCorrect."""
        tbname = f"{self.db}.{self.stb}"
        # total row count
        sql = f"select count(*) from {tbname}"
        count = tdSql.getFirstValue(sql)
        step = 100000
        offset = 0

        while offset < count:
            sql = f"select * from {tbname} limit {step} offset {offset}"
            tdSql.query(sql)
            self.autoGen.dataCorrect(tdSql.res, tdSql.getRows(), step)
            offset += step
            tdLog.info(f"check data correct rows={offset}")

        tdLog.info(f"check {tbname} rows {count} data correct successfully.")

    def run(self):
        """Case entry: build db/stable/children, write, then verify."""
        tdLog.debug(f"start to excute {__file__}")

        # create db and stable
        self.autoGen = AutoGen(step = 10, genDataMode = "fillts")
        self.autoGen.create_db(self.db, 2, 3)
        tdSql.execute(f"use {self.db}")
        self.colCnt = 17
        self.autoGen.create_stable(self.stb, 5, self.colCnt, 32, 32)
        self.childCnt = 4
        self.autoGen.create_child(self.stb, "d", self.childCnt)
        self.autoGen.insert_data(1000)

        # sql syntax
        self.checkSqlSyntax()

        # write, flush, write again
        self.writeData(1000)
        self.flushDb()
        self.writeData(1000)

        # check data correct
        self.checkCorrect()

        tdLog.success(f"{__file__} successfully executed")
# Register this case with the framework for both Linux and Windows runs.
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())

View File

@ -0,0 +1,66 @@
{
"filetype": "insert",
"cfgdir": "/etc/taos",
"host": "127.0.0.1",
"port": 6030,
"user": "root",
"password": "taosdata",
"connection_pool_size": 8,
"num_of_records_per_req": 3000,
"prepared_rand": 3000,
"thread_count": 2,
"create_table_thread_count": 1,
"confirm_parameter_prompt": "no",
"databases": [
{
"dbinfo": {
"name": "db",
"drop": "yes",
"vgroups": 2,
"replica": 3,
"wal_retention_period": 10,
"wal_retention_size": 100,
"keep": "60d,120d,365d",
"stt_trigger": 1,
"wal_level": 2,
"WAL_FSYNC_PERIOD": 3300,
"cachemodel": "'last_value'",
"TABLE_PREFIX":1,
"comp": 1
},
"super_tables": [
{
"name": "stb",
"child_table_exists": "no",
"childtable_count": 10,
"insert_rows": 100000,
"childtable_prefix": "d",
"insert_mode": "taosc",
"timestamp_step": 1000,
"start_timestamp":"now-360d",
"columns": [
{ "type": "bool", "name": "bc","max": 1,"min": 1},
{ "type": "float", "name": "fc" ,"max": 101,"min": 101},
{ "type": "double", "name": "dc" ,"max": 102,"min": 102},
{ "type": "tinyint", "name": "ti" ,"max": 103,"min": 103},
{ "type": "smallint", "name": "si" ,"max": 104,"min": 104},
{ "type": "int", "name": "ic" ,"max": 105,"min": 105},
{ "type": "bigint", "name": "bi" ,"max": 106,"min": 106},
{ "type": "utinyint", "name": "uti","max": 107,"min": 107},
{ "type": "usmallint", "name": "usi","max": 108,"min": 108},
{ "type": "uint", "name": "ui" ,"max": 109,"min": 109},
{ "type": "ubigint", "name": "ubi","max": 110,"min": 110},
{ "type": "binary", "name": "bin", "len": 16},
{ "type": "nchar", "name": "nch", "len": 32}
],
"tags": [
{"type": "tinyint", "name": "groupid","max": 100,"min": 100},
{"name": "location","type": "binary", "len": 16, "values":
["San Francisco", "Los Angles", "San Diego", "San Jose", "Palo Alto", "Campbell", "Mountain View","Sunnyvale", "Santa Clara", "Cupertino"]
}
]
}
]
}
]
}

View File

@ -0,0 +1,140 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import time
import random
import taos
import frame
import frame.etool
from frame.log import *
from frame.cases import *
from frame.sql import *
from frame.caseBase import *
from frame import *
class TDTestCase(TBase):
    """Single-stage compression case: load fixed-value benchmark data, check
    every column holds its configured constant, then verify NULL rows after
    a timestamp-only insert.
    """

    updatecfgDict = {
        "compressMsgSize" : "100",
    }

    def insertData(self):
        """Load the database via taosBenchmark and record the expected
        table/row geometry (must match oneStageComp.json)."""
        tdLog.info(f"insert data.")
        cfg = etool.curFile(__file__, "oneStageComp.json")
        etool.benchMark(json=cfg)

        tdSql.execute(f"use {self.db}")
        # geometry mirrors oneStageComp.json
        self.childtable_count = 10
        self.insert_rows = 100000
        self.timestamp_step = 1000

    def checkColValueCorrect(self):
        """Every column was inserted with a fixed constant; verify no row
        deviates from it after a flush."""
        tdLog.info(f"do action.")
        self.flushDb()

        total = self.insert_rows * self.childtable_count

        # bc is always 1 -> no row may differ
        tdSql.query("select * from stb where bc!=1")
        tdSql.checkRows(0)

        # fc is checked with equality: all rows must match 101
        tdSql.query("select * from stb where fc=101")
        tdSql.checkRows(total)

        # remaining columns: assert zero rows deviate from the constant
        constants = [("dc", 102), ("ti", 103), ("si", 104), ("ic", 105),
                     ("bi", 106), ("uti", 107), ("usi", 108), ("ui", 109),
                     ("ubi", 110)]
        for col, val in constants:
            tdSql.query(f"select * from stb where {col}!={val}")
            tdSql.checkRows(0)

    def insertNull(self):
        """Insert six timestamp-only rows (all data columns NULL), flush and
        trim, then verify NULL counts alongside the original data."""
        # six rows carrying only a timestamp
        tdSql.execute("insert into d0(ts) values(now) (now + 1s) (now + 2s) (now + 3s) (now + 4s) (now + 5s)")
        self.flushDb()
        self.trimDb()

        total = self.insert_rows * self.childtable_count
        expectations = [
            ("select * from stb where bc!=1",      0),      # no stray values
            ("select * from stb where bc is null", 6),      # new NULL rows
            ("select * from stb where bc=1",       total),  # old rows intact
            ("select * from stb where usi is null", 6),     # NULL in usi too
        ]
        for sql, rows in expectations:
            tdSql.query(sql)
            tdSql.checkRows(rows)

    def run(self):
        """Case entry: insert, verify, snapshot aggregates, verify again."""
        tdLog.debug(f"start to excute {__file__}")

        # load benchmark data
        self.insertData()
        # basic insert sanity
        self.checkInsertCorrect()
        # snapshot aggregates for later comparison
        self.snapshotAgg()
        # verify every column's constant value
        self.checkColValueCorrect()
        # aggregates unchanged by the checks
        self.checkAggCorrect()
        # NULL-row scenario
        self.insertNull()

        tdLog.success(f"{__file__} successfully executed")
# Register this case with the framework for both Linux and Windows runs.
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())

View File

@ -10,26 +10,26 @@
#
# army-test
#
,,y,army,./pytest.sh python3 ./test.py -f enterprise/multi-level/mlevel_basic.py -N 3 -L 3 -D 2
,,y,army,./pytest.sh python3 ./test.py -f enterprise/db-encrypt/basic.py
,,n,army,python3 ./test.py -f enterprise/s3/s3Basic.py -N 3
,,y,army,./pytest.sh python3 ./test.py -f community/cluster/snapshot.py -N 3 -L 3 -D 2
,,y,army,./pytest.sh python3 ./test.py -f community/query/function/test_func_elapsed.py
,,y,army,./pytest.sh python3 ./test.py -f community/query/test_join.py
,,y,army,./pytest.sh python3 ./test.py -f community/query/test_compare.py
,,y,army,./pytest.sh python3 ./test.py -f community/insert/test_column_tag_boundary.py
,,y,army,./pytest.sh python3 ./test.py -f community/query/fill/fill_desc.py -N 3 -L 3 -D 2
,,y,army,./pytest.sh python3 ./test.py -f community/cluster/incSnapshot.py -N 3
,,y,army,./pytest.sh python3 ./test.py -f community/query/query_basic.py -N 3
,,y,army,./pytest.sh python3 ./test.py -f community/query/accuracy/test_query_accuracy.py
,,y,army,./pytest.sh python3 ./test.py -f community/insert/insert_basic.py -N 3
,,y,army,./pytest.sh python3 ./test.py -f community/cluster/splitVgroupByLearner.py -N 3
,,n,army,python3 ./test.py -f community/cmdline/fullopt.py
,,n,army,python3 ./test.py -f community/query/show.py -N 3
,,n,army,python3 ./test.py -f enterprise/alter/alterConfig.py -N 3
,,y,army,./pytest.sh python3 ./test.py -f community/query/subquery/subqueryBugs.py -N 3
,,y,army,./pytest.sh python3 ./test.py -f community/storage/oneStageComp.py -N 3 -L 3 -D 1
,,y,army,./pytest.sh python3 ./test.py -f community/storage/compressBasic.py -N 3
,,y,army,./pytest.sh python3 ./test.py -f multi-level/mlevel_basic.py -N 3 -L 3 -D 2
,,y,army,./pytest.sh python3 ./test.py -f db-encrypt/basic.py
,,n,army,python3 ./test.py -f s3/s3Basic.py -N 3
,,y,army,./pytest.sh python3 ./test.py -f cluster/snapshot.py -N 3 -L 3 -D 2
,,y,army,./pytest.sh python3 ./test.py -f query/function/test_func_elapsed.py
,,y,army,./pytest.sh python3 ./test.py -f query/test_join.py
,,y,army,./pytest.sh python3 ./test.py -f query/test_compare.py
,,y,army,./pytest.sh python3 ./test.py -f insert/test_column_tag_boundary.py
,,y,army,./pytest.sh python3 ./test.py -f query/fill/fill_desc.py -N 3 -L 3 -D 2
,,y,army,./pytest.sh python3 ./test.py -f cluster/incSnapshot.py -N 3
,,y,army,./pytest.sh python3 ./test.py -f query/query_basic.py -N 3
,,y,army,./pytest.sh python3 ./test.py -f query/accuracy/test_query_accuracy.py
,,y,army,./pytest.sh python3 ./test.py -f insert/insert_basic.py -N 3
,,y,army,./pytest.sh python3 ./test.py -f cluster/splitVgroupByLearner.py -N 3
,,n,army,python3 ./test.py -f cmdline/fullopt.py
,,n,army,python3 ./test.py -f query/show.py -N 3
,,n,army,python3 ./test.py -f alter/alterConfig.py -N 3
,,y,army,./pytest.sh python3 ./test.py -f query/subquery/subqueryBugs.py -N 3
,,y,army,./pytest.sh python3 ./test.py -f storage/oneStageComp.py -N 3 -L 3 -D 1
,,y,army,./pytest.sh python3 ./test.py -f storage/compressBasic.py -N 3
#
# system test