feat: s3Basic.py add

parent 4b6286a662
commit a29244f955
@@ -8,7 +8,7 @@
     "connection_pool_size": 8,
     "num_of_records_per_req": 4000,
     "prepared_rand": 1000,
-    "thread_count": 2,
+    "thread_count": 4,
     "create_table_thread_count": 1,
     "confirm_parameter_prompt": "no",
     "databases": [
@@ -16,22 +16,28 @@
         "dbinfo": {
             "name": "db",
             "drop": "yes",
-            "vgroups": 2,
+            "vgroups": 4,
             "replica": 1,
-            "duration":"15d",
-            "flush_each_batch":"yes",
-            "keep": "60d,100d,200d"
+            "duration":"10d",
+            "s3_keeplocal":"30d",
+            "s3_chunksize":"131072",
+            "tsdb_pagesize":"1",
+            "s3_compact":"1",
+            "wal_retention_size":"1",
+            "wal_retention_period":"1",
+            "flush_each_batch":"no",
+            "keep": "3650d"
         },
         "super_tables": [
             {
                 "name": "stb",
                 "child_table_exists": "no",
-                "childtable_count": 2,
+                "childtable_count": 20,
                 "insert_rows": 2000000,
                 "childtable_prefix": "d",
                 "insert_mode": "taosc",
                 "timestamp_step": 1000,
-                "start_timestamp":"now-90d",
+                "start_timestamp": 1600000000000,
                 "columns": [
                     { "type": "bool", "name": "bc"},
                     { "type": "float", "name": "fc" },
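Note: the dbinfo hunk above replaces tiered local retention (keep "60d,100d,200d") with a flat 3650d keep plus S3 tiering options, so local retention is governed by s3_keeplocal. The same three options are exercised over SQL by checkCreateDb in s3Basic.py below; a minimal standalone sketch of that statement assembly (helper name build_create_db is hypothetical):

    def build_create_db(name, keep_local=None, chunk_size=None, compact=None):
        # Mirrors checkCreateDb below: append each s3 clause only when given.
        parts = [f"create database {name} duration 1h"]
        if keep_local is not None:
            parts.append(f"s3_keeplocal {keep_local}")   # local retention, in days
        if chunk_size is not None:
            parts.append(f"s3_chunksize {chunk_size}")
        if compact is not None:
            parts.append(f"s3_compact {compact}")
        return " ".join(parts)

    assert build_create_db("db1", 30, 131072, 1) == \
        "create database db1 duration 1h s3_keeplocal 30 s3_chunksize 131072 s3_compact 1"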
@@ -0,0 +1,307 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import time
+import random
+
+import taos
+import frame
+import frame.etool
+import frame.eos
+
+from frame.log import *
+from frame.cases import *
+from frame.sql import *
+from frame.caseBase import *
+from frame.srvCtl import *
+from frame import *
+from frame.eos import *
+
+
+#
+# 192.168.1.52 MINIO S3
+#
+
+'''
+s3EndPoint       http://192.168.1.52:9000
+s3AccessKey      'zOgllR6bSnw2Ah3mCNel:cdO7oXAu3Cqdb1rUdevFgJMi0LtRwCXdWKQx4bhX'
+s3BucketName     ci-bucket
+s3UploadDelaySec 60
+'''
+
+
+class TDTestCase(TBase):
+    updatecfgDict = {
+        's3EndPoint': 'http://192.168.1.52:9000',
+        's3AccessKey': 'zOgllR6bSnw2Ah3mCNel:cdO7oXAu3Cqdb1rUdevFgJMi0LtRwCXdWKQx4bhX',
+        's3BucketName': 'ci-bucket',
+        's3PageCacheSize': '10240',
+        's3UploadDelaySec': '10',
+        's3MigrateIntervalSec': '600',
+        's3MigrateEnabled': '1'
+    }
+
+    maxFileSize = 128 * 1024 * 1024  # 128 MB
+
+    def insertData(self):
+        tdLog.info(f"insert data.")
+        # taosBenchmark run
+        json = etool.curFile(__file__, "s3Basic.json")
+        etool.benchMark(json=json)
+
+        tdSql.execute(f"use {self.db}")
+        # values come from s3Basic.json
+        self.childtable_count = 20
+        self.insert_rows = 2000000
+        self.timestamp_step = 1000
+
+    def createStream(self, sname):
+        sql = f"create stream {sname} fill_history 1 into stm1 as select count(*) from {self.db}.{self.stb} interval(10s);"
+        tdSql.execute(sql)
+
+    def migrateDbS3(self):
+        sql = f"s3migrate database {self.db}"
+        tdSql.execute(sql, show=True)
+
+    def checkDataFile(self, lines, maxFileSize):
+        # ls -l
+        # -rwxrwxrwx 1 root root 41652224 Apr 17 14:47 vnode2/tsdb/v2f1974ver47.3.data
+        overCnt = 0
+        for line in lines:
+            cols = line.split()
+            fileSize = int(cols[4])
+            fileName = cols[8]
+            if fileSize > maxFileSize:
+                tdLog.info(f"error, over maxFileSize({maxFileSize}), {line}")
+                overCnt += 1
+            else:
+                tdLog.info(f"file = {fileName} size = {fileSize} ok")
+
+        return overCnt
+
+    def checkUploadToS3(self):
+        rootPath = sc.clusterRootPath()
+        cmd = f"ls -l {rootPath}/dnode*/data/vnode/vnode*/tsdb/*.data"
+        tdLog.info(cmd)
+        loop = 0
+        rets = []
+        overCnt = 0
+        while loop < 180:
+            time.sleep(3)
+
+            # check upload to s3
+            rets = eos.runRetList(cmd)
+            cnt = len(rets)
+            if cnt == 0:
+                overCnt = 0
+                tdLog.info("All data files uploaded to server.")
+                break
+            overCnt = self.checkDataFile(rets, self.maxFileSize)
+            if overCnt == 0:
+                tdLog.info(f"All data files({len(rets)}) size below {self.maxFileSize}, check upload to s3 ok.")
+                break
+
+            tdLog.info(f"loop={loop} {cnt} data files not uploaded, wait 3s and retry ...")
+            if loop == 3:
+                sc.dnodeStop(1)
+                time.sleep(2)
+                sc.dnodeStart(1)
+            loop += 1
+            # migrate
+            self.migrateDbS3()
+
+        # check can pass
+        if overCnt > 0:
+            tdLog.exit(f"s3 has {overCnt} files over size.")
+
+    def doAction(self):
+        tdLog.info(f"do action.")
+
+        self.flushDb(show=True)
+        #self.compactDb(show=True)
+
+        # trigger migration to s3
+        self.migrateDbS3()
+
+        # check upload to s3
+        self.checkUploadToS3()
+
+    def checkStreamCorrect(self):
+        sql = f"select count(*) from {self.db}.stm1"
+        count = 0
+        for i in range(120):
+            tdSql.query(sql)
+            count = tdSql.getData(0, 0)
+            if count == 100000 or count == 100001:
+                return True
+            time.sleep(1)
+
+        tdLog.exit(f"stream count is not expected. expect=100000 or 100001 real={count}. sql={sql}")
+
+    def checkCreateDb(self, keepLocal, chunkSize, compact):
+        # keyword
+        kw1 = kw2 = kw3 = ""
+        if keepLocal is not None:
+            kw1 = f"s3_keeplocal {keepLocal}"
+        if chunkSize is not None:
+            kw2 = f"s3_chunksize {chunkSize}"
+        if compact is not None:
+            kw3 = f"s3_compact {compact}"
+
+        sql = f"create database db1 duration 1h {kw1} {kw2} {kw3}"
+        tdSql.execute(sql, show=True)
+        #sql = f"select name,s3_keeplocal,s3_chunksize,s3_compact from information_schema.ins_databases where name='db1';"
+        sql = f"select * from information_schema.ins_databases where name='db1';"
+        tdSql.query(sql)
+        # columns 29 30 31 -> chunksize keeplocal compact
+        if chunkSize is not None:
+            tdSql.checkData(0, 29, chunkSize)
+        if keepLocal is not None:
+            keepLocalm = keepLocal * 24 * 60
+            tdSql.checkData(0, 30, f"{keepLocalm}m")
+        if compact is not None:
+            tdSql.checkData(0, 31, compact)
+        sql = "drop database db1"
+        tdSql.execute(sql)
+
+    def checkExcept(self):
+        # these SQLs must fail
+        sqls = [
+            f"create database db2 s3_keeplocal -1",
+            f"create database db2 s3_keeplocal 0",
+            f"create database db2 s3_keeplocal 365001",
+            f"create database db2 s3_chunksize -1",
+            f"create database db2 s3_chunksize 0",
+            f"create database db2 s3_chunksize 900000000",
+            f"create database db2 s3_compact -1",
+            f"create database db2 s3_compact 100",
+            f"create database db2 duration 1d s3_keeplocal 1d"
+        ]
+        tdSql.errors(sqls)
+
+    def checkBasic(self):
+        # create db
+        keeps = [1, 256, 1024, 365000, None]
+        chunks = [131072, 600000, 820000, 1048576, None]
+        comps = [0, 1, None]
+
+        for keep in keeps:
+            for chunk in chunks:
+                for comp in comps:
+                    self.checkCreateDb(keep, chunk, comp)
+
+        # --checks3
+        idx = 1
+        taosd = sc.taosdFile(idx)
+        cfg = sc.dnodeCfgPath(idx)
+        cmd = f"{taosd} -c {cfg} --checks3"
+        output, error = eos.run(cmd)
+        #print(output)
+
+        tips = [
+            "put object s3test.txt: success",
+            "listing bucket ci-bucket: success",
+            "get object s3test.txt: success",
+            "delete object s3test.txt: success"
+        ]
+        pos = 0
+        for tip in tips:
+            pos = output.find(tip, pos)
+            #if pos == -1:
+            #    tdLog.exit(f"checks3 failed not found {tip}. cmd={cmd} output={output}")
+
+        # except
+        self.checkExcept()
+
+    def preDb(self, vgroups):
+        vg = random.randint(1, vgroups)
+        sql = f"create database predb vgroups {vg}"
+        tdSql.execute(sql, show=True)
+
+    # history
+    def insertHistory(self):
+        tdLog.info(f"insert history data.")
+        # taosBenchmark run
+        json = etool.curFile(__file__, "s3Basic1.json")
+        etool.benchMark(json=json)
+
+        # values come from s3Basic1.json
+        self.childtable_count = 20
+        self.insert_rows = 2000000 + 2000000/2
+        self.timestamp_step = 500
+
+    # run
+    def run(self):
+        tdLog.debug(f"start to execute {__file__}")
+        self.sname = "stream1"
+        if eos.isArm64Cpu():
+            tdLog.success(f"{__file__} arm64 ignore executed")
+        else:
+            self.preDb(10)
+
+            # insert data
+            self.insertData()
+
+            # create stream
+            self.createStream(self.sname)
+
+            # check insert data correct
+            self.checkInsertCorrect()
+
+            # save
+            self.snapshotAgg()
+
+            # do action
+            self.doAction()
+
+            # check saved agg result correct
+            self.checkAggCorrect()
+
+            # check insert correct again
+            self.checkInsertCorrect()
+
+            # checkBasic
+            #self.checkBasic()
+
+            # check stream correct and drop stream
+            #self.checkStreamCorrect()
+
+            # drop stream
+            self.dropStream(self.sname)
+
+            # insert history disorder data
+            self.insertHistory()
+            self.checkInsertCorrect()
+            self.snapshotAgg()
+            self.doAction()
+            self.checkAggCorrect()
+            self.checkInsertCorrect(difCnt=20*999999)
+
+            # drop database and free s3 file
+            #self.dropDb()
+
+        tdLog.success(f"{__file__} successfully executed")
+
+
+tdCases.addLinux(__file__, TDTestCase())
+tdCases.addWindows(__file__, TDTestCase())
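Note: checkUploadToS3 above shells out to ls -l and reads fixed columns (file size at index 4, name at index 8) to confirm that every remaining .data file has shrunk below maxFileSize after migration. A standalone sketch of that parsing logic, runnable outside the test frame:

    MAX_FILE_SIZE = 128 * 1024 * 1024  # 128 MB, same bound as s3Basic.py

    def count_oversize(ls_lines, max_size=MAX_FILE_SIZE):
        # Flag files whose on-disk size still exceeds the post-migration maximum.
        over = 0
        for line in ls_lines:
            cols = line.split()
            if len(cols) < 9:          # skip "total" or blank lines defensively
                continue
            size, name = int(cols[4]), cols[8]
            if size > max_size:
                print(f"over maxFileSize({max_size}): {name} size={size}")
                over += 1
        return over

    sample = ["-rwxrwxrwx 1 root root 41652224 Apr 17 14:47 vnode2/tsdb/v2f1974ver47.3.data"]
    assert count_oversize(sample) == 0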
@@ -0,0 +1,66 @@
+{
+    "filetype": "insert",
+    "cfgdir": "/etc/taos",
+    "host": "127.0.0.1",
+    "port": 6030,
+    "user": "root",
+    "password": "taosdata",
+    "connection_pool_size": 8,
+    "num_of_records_per_req": 4000,
+    "prepared_rand": 1000,
+    "thread_count": 4,
+    "create_table_thread_count": 1,
+    "confirm_parameter_prompt": "no",
+    "databases": [
+        {
+            "dbinfo": {
+                "name": "db",
+                "drop": "no",
+                "vgroups": 4,
+                "replica": 1,
+                "duration":"10d",
+                "s3_keeplocal":"30d",
+                "s3_chunksize":"131072",
+                "tsdb_pagesize":"1",
+                "s3_compact":"1",
+                "wal_retention_size":"1",
+                "wal_retention_period":"1",
+                "flush_each_batch":"no",
+                "keep": "3650d"
+            },
+            "super_tables": [
+                {
+                    "name": "stb",
+                    "child_table_exists": "yes",
+                    "childtable_count": 20,
+                    "insert_rows": 2000000,
+                    "childtable_prefix": "d",
+                    "insert_mode": "taosc",
+                    "timestamp_step": 500,
+                    "start_timestamp": 1600000000000,
+                    "columns": [
+                        { "type": "bool", "name": "bc"},
+                        { "type": "float", "name": "fc" },
+                        { "type": "double", "name": "dc"},
+                        { "type": "tinyint", "name": "ti"},
+                        { "type": "smallint", "name": "si" },
+                        { "type": "int", "name": "ic" ,"max": 1,"min": 1},
+                        { "type": "bigint", "name": "bi" },
+                        { "type": "utinyint", "name": "uti"},
+                        { "type": "usmallint", "name": "usi"},
+                        { "type": "uint", "name": "ui" },
+                        { "type": "ubigint", "name": "ubi"},
+                        { "type": "binary", "name": "bin", "len": 32},
+                        { "type": "nchar", "name": "nch", "len": 64}
+                    ],
+                    "tags": [
+                        {"type": "tinyint", "name": "groupid","max": 10,"min": 1},
+                        {"name": "location","type": "binary", "len": 16, "values":
+                            ["San Francisco", "Los Angles", "San Diego", "San Jose", "Palo Alto", "Campbell", "Mountain View","Sunnyvale", "Santa Clara", "Cupertino"]
+                        }
+                    ]
+                }
+            ]
+        }
+    ]
+}
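Note: s3Basic1.json above re-runs the benchmark against the existing tables (drop "no", child_table_exists "yes") from the same start_timestamp but at half the step (500 ms vs 1000 ms), interleaving history rows into the first half of each child table. Assuming rows with duplicate timestamps are upserted, the totals used by insertHistory follow directly; a sketch of the arithmetic behind insert_rows = 2000000 + 2000000/2:

    first_rows, first_step = 2_000_000, 1000   # original insert (s3Basic.json)
    hist_rows,  hist_step  = 2_000_000, 500    # history insert (s3Basic1.json)

    # History rows at even multiples of 500 ms collide with existing 1000 ms
    # rows over the overlapped range, so only the odd half add new timestamps.
    hist_span  = hist_rows * hist_step         # 1,000,000,000 ms covered by history
    overlapped = hist_span // first_step       # 1,000,000 original rows overlapped
    new_rows   = hist_rows - overlapped        # 1,000,000 genuinely new timestamps
    assert first_rows + new_rows == 3_000_000  # matches 2000000 + 2000000/2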
@@ -1,157 +0,0 @@
-###################################################################
-# Copyright (c) 2016 by TAOS Technologies, Inc.
-# All rights reserved.
-#
-# This file is proprietary and confidential to TAOS Technologies.
-# No part of this file may be reproduced, stored, transmitted,
-# disclosed or used in any form or by any means other than as
-# expressly provided by the written permission from Jianhui Tao
-#
-###################################################################
-
-# -*- coding: utf-8 -*-
-
-import sys
-import time
-
-import taos
-import frame
-import frame.etool
-import frame.eos
-
-from frame.log import *
-from frame.cases import *
-from frame.sql import *
-from frame.caseBase import *
-from frame.srvCtl import *
-from frame import *
-from frame.eos import *
-
-
-#
-# 192.168.1.52 MINIO S3
-#
-
-'''
-s3EndPoint       http://192.168.1.52:9000
-s3AccessKey      'zOgllR6bSnw2Ah3mCNel:cdO7oXAu3Cqdb1rUdevFgJMi0LtRwCXdWKQx4bhX'
-s3BucketName     ci-bucket
-s3UploadDelaySec 60
-'''
-
-
-class TDTestCase(TBase):
-    updatecfgDict = {
-        's3EndPoint': 'http://192.168.1.52:9000',
-        's3AccessKey': 'zOgllR6bSnw2Ah3mCNel:cdO7oXAu3Cqdb1rUdevFgJMi0LtRwCXdWKQx4bhX',
-        's3BucketName': 'ci-bucket',
-        's3BlockSize': '10240',
-        's3BlockCacheSize': '320',
-        's3PageCacheSize': '10240',
-        's3UploadDelaySec':'60'
-    }
-
-    def insertData(self):
-        tdLog.info(f"insert data.")
-        # taosBenchmark run
-        json = etool.curFile(__file__, "s3_basic.json")
-        etool.benchMark(json=json)
-
-        tdSql.execute(f"use {self.db}")
-        # come from s3_basic.json
-        self.childtable_count = 2
-        self.insert_rows = 2000000
-        self.timestamp_step = 1000
-
-    def createStream(self, sname):
-        sql = f"create stream {sname} fill_history 1 into stm1 as select count(*) from {self.db}.{self.stb} interval(10s);"
-        tdSql.execute(sql)
-
-    def doAction(self):
-        tdLog.info(f"do action.")
-
-        self.flushDb()
-        self.compactDb()
-
-        # sleep 70s
-        tdLog.info(f"wait 65s ...")
-        time.sleep(65)
-        self.trimDb(True)
-
-        rootPath = sc.clusterRootPath()
-        cmd = f"ls {rootPath}/dnode1/data2*/vnode/vnode*/tsdb/*.data"
-        tdLog.info(cmd)
-        loop = 0
-        rets = []
-        while loop < 180:
-            time.sleep(3)
-            rets = eos.runRetList(cmd)
-            cnt = len(rets)
-            if cnt == 0:
-                tdLog.info("All data file upload to server over.")
-                break
-            self.trimDb(True)
-            tdLog.info(f"loop={loop} no upload {cnt} data files wait 3s retry ...")
-            if loop == 0:
-                sc.dnodeStop(1)
-                time.sleep(2)
-                sc.dnodeStart(1)
-            loop += 1
-
-        if len(rets) > 0:
-            tdLog.exit(f"s3 can not upload all data to server. data files cnt={len(rets)} list={rets}")
-
-    def checkStreamCorrect(self):
-        sql = f"select count(*) from {self.db}.stm1"
-        count = 0
-        for i in range(120):
-            tdSql.query(sql)
-            count = tdSql.getData(0, 0)
-            if count == 100000 or count == 100001:
-                return True
-            time.sleep(1)
-
-        tdLog.exit(f"stream count is not expect . expect = 100000 or 100001 real={count} . sql={sql}")
-
-    # run
-    def run(self):
-        tdLog.debug(f"start to excute {__file__}")
-        self.sname = "stream1"
-        if eos.isArm64Cpu():
-            tdLog.success(f"{__file__} arm64 ignore executed")
-        else:
-            # insert data
-            self.insertData()
-
-            # creat stream
-            self.createStream(self.sname)
-
-            # check insert data correct
-            self.checkInsertCorrect()
-
-            # save
-            self.snapshotAgg()
-
-            # do action
-            self.doAction()
-
-            # check save agg result correct
-            self.checkAggCorrect()
-
-            # check insert correct again
-            self.checkInsertCorrect()
-
-            # check stream correct and drop stream
-            #self.checkStreamCorrect()
-
-            # drop stream
-            self.dropStream(self.sname)
-
-            # drop database and free s3 file
-            self.dropDb()
-
-        tdLog.success(f"{__file__} successfully executed")
-
-
-tdCases.addLinux(__file__, TDTestCase())
-tdCases.addWindows(__file__, TDTestCase())
@@ -126,7 +126,7 @@ class TBase:
     #
 
     # basic
-    def checkInsertCorrect(self):
+    def checkInsertCorrect(self, difCnt = 0):
         # check count
         sql = f"select count(*) from {self.stb}"
         tdSql.checkAgg(sql, self.childtable_count * self.insert_rows)
@@ -136,9 +136,8 @@ class TBase:
         tdSql.checkAgg(sql, self.childtable_count)
 
         # check step
-        sql = f"select * from (select diff(ts) as dif from {self.stb} partition by tbname order by ts desc) where dif != {self.timestamp_step}"
-        tdSql.query(sql)
-        tdSql.checkRows(0)
+        sql = f"select count(*) from (select diff(ts) as dif from {self.stb} partition by tbname order by ts desc) where dif != {self.timestamp_step}"
+        tdSql.checkAgg(sql, difCnt)
 
     # save agg result
     def snapshotAgg(self):
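Note: the difCnt parameter added above tolerates a known number of off-step timestamp diffs instead of demanding zero. After the history insert, each child table holds 2,000,000 rows at the new 500 ms step followed by a 1,000,000-row tail still spaced 1000 ms apart, so 999,999 consecutive diffs per table deviate from the step; s3Basic.py therefore passes difCnt=20*999999. A sketch of that expectation:

    childtable_count = 20
    tail_rows = 1_000_000              # rows still at the old 1000 ms spacing
    off_step_diffs = tail_rows - 1     # diffs inside the tail deviate from 500 ms
    assert childtable_count * off_step_diffs == 20 * 999999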
@@ -146,6 +146,10 @@ class TDDnodes:
         if index < 1 or index > 10:
             tdLog.exit("index:%d should on a scale of [1, 10]" % (index))
 
+    def taosdFile(self, index):
+        self.check(index)
+        return self.dnodes[index - 1].getPath()
+
     def StopAllSigint(self):
         tdLog.info("stop all dnodes sigint, asan:%d" % self.asan)
         if self.asan:
@@ -658,6 +658,7 @@ class TDSql:
     def checkAgg(self, sql, expectCnt):
         self.query(sql)
         self.checkData(0, 0, expectCnt)
+        tdLog.info(f"{sql} expect {expectCnt} ok.")
 
     # expect first value
     def checkFirstValue(self, sql, expect):
@@ -62,6 +62,15 @@ class srvCtl:
             return clusterDnodes.getDnodesRootDir()
 
         return tdDnodes.getDnodesRootDir()
 
+    # get taosd path
+    def taosdFile(self, idx):
+        if clusterDnodes.getModel() == 'cluster':
+            return clusterDnodes.taosdFile(idx)
+
+        return tdDnodes.taosdFile(idx)
+
+
     # return dnode data files list
     def dnodeDataFiles(self, idx):
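Note: the taosdFile helpers added above (srvCtl delegating to TDDnodes) give tests the path of a dnode's taosd binary; checkBasic in s3Basic.py uses them to run taosd's built-in S3 connectivity self-test. The call path as used in this commit:

    idx = 1
    taosd = sc.taosdFile(idx)     # new helper: taosd binary path for dnode idx
    cfg = sc.dnodeCfgPath(idx)    # existing frame helper used alongside it
    output, error = eos.run(f"{taosd} -c {cfg} --checks3")
    # --checks3 exercises put/list/get/delete against the configured bucket and
    # prints markers such as "put object s3test.txt: success" on success.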
@@ -114,7 +114,7 @@ if __name__ == "__main__":
     level = 1
     disk = 1
 
-    opts, args = getopt.gnu_getopt(sys.argv[1:], 'f:p:m:l:scghrd:k:e:N:M:Q:C:RWU:n:i:aP:L:D:', [
+    opts, args = getopt.gnu_getopt(sys.argv[1:], 'f:p:m:l:scghrd:k:e:N:M:Q:C:RWU:n:i:aPL:D:', [
         'file=', 'path=', 'master', 'logSql', 'stop', 'cluster', 'valgrind', 'help', 'restart', 'updateCfgDict', 'killv', 'execCmd','dnodeNums','mnodeNums',
         'queryPolicy','createDnodeNums','restful','websocket','adaptercfgupdate','replicaVar','independentMnode',"asan",'previous','level','disk'])
     for key, value in opts:
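Note: in the getopt string above, dropping the colon after P ('aP:L:D:' to 'aPL:D:') makes -P a plain boolean flag; with getopt, a trailing colon marks an option as taking an argument, so -P no longer consumes one.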
@@ -11,7 +11,7 @@
 # army-test
 #
 ,,y,army,./pytest.sh python3 ./test.py -f enterprise/multi-level/mlevel_basic.py -N 3 -L 3 -D 2
-,,y,army,./pytest.sh python3 ./test.py -f enterprise/s3/s3_basic.py -L 3 -D 1
+,,y,army,./pytest.sh python3 ./test.py -f enterprise/s3/s3Basic.py -L 3 -D 1
 ,,y,army,./pytest.sh python3 ./test.py -f community/cluster/snapshot.py -N 3 -L 3 -D 2
 ,,y,army,./pytest.sh python3 ./test.py -f community/query/function/test_func_elapsed.py
 ,,y,army,./pytest.sh python3 ./test.py -f community/query/fill/fill_desc.py -N 3 -L 3 -D 2