Merge pull request #24796 from taosdata/coverage/TD-28602-3.0

coverage: add taosd stop/start steps to snapshot.py and s3_basic.py
This commit is contained in:
Alex Duan 2024-02-23 14:31:37 +08:00 committed by GitHub
commit 7767a30d58
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
3 changed files with 39 additions and 10 deletions

View File

@ -25,6 +25,7 @@ from frame.cases import *
from frame.sql import *
from frame.caseBase import *
from frame import *
from frame.srvCtl import *
class TDTestCase(TBase):
@ -65,6 +66,21 @@ class TDTestCase(TBase):
sql = f"select avg(dc) from {self.db}.{self.stb}"
tdSql.checkFirstValue(sql, 200)
def alterReplica3(self):
sql = f"alter database {self.db} replica 3"
tdSql.execute(sql, show=True)
time.sleep(2)
sc.dnodeStop(2)
sc.dnodeStop(3)
time.sleep(5)
sc.dnodeStart(2)
sc.dnodeStart(3)
if self.waitTransactionZero() is False:
tdLog.exit(f"{sql} transaction not finished")
return False
return True
def doAction(self):
tdLog.info(f"do action.")
self.flushDb()
@ -81,7 +97,7 @@ class TDTestCase(TBase):
self.alterReplica(1)
self.checkAggCorrect()
self.compactDb()
self.alterReplica(3)
self.alterReplica3()
vgids = self.getVGroup(self.db)
selid = random.choice(vgids)

View File

@ -28,12 +28,12 @@ from frame import *
from frame.eos import *
#
# 192.168.1.52 MINIO S3 API KEY: MQCEIoaPGUs1mhXgpUAu:XTgpN2dEMInnYgqN4gj3G5zgb39ROtsisKKy0GFa
# 192.168.1.52 MINIO S3
#
'''
s3EndPoint http://192.168.1.52:9000
s3AccessKey MQCEIoaPGUs1mhXgpUAu:XTgpN2dEMInnYgqN4gj3G5zgb39ROtsisKKy0GFa
s3AccessKey 'zOgllR6bSnw2Ah3mCNel:cdO7oXAu3Cqdb1rUdevFgJMi0LtRwCXdWKQx4bhX'
s3BucketName ci-bucket
s3UploadDelaySec 60
'''
@ -42,7 +42,7 @@ s3UploadDelaySec 60
class TDTestCase(TBase):
updatecfgDict = {
's3EndPoint': 'http://192.168.1.52:9000',
's3AccessKey': 'MQCEIoaPGUs1mhXgpUAu:XTgpN2dEMInnYgqN4gj3G5zgb39ROtsisKKy0GFa',
's3AccessKey': 'zOgllR6bSnw2Ah3mCNel:cdO7oXAu3Cqdb1rUdevFgJMi0LtRwCXdWKQx4bhX',
's3BucketName': 'ci-bucket',
's3BlockSize': '10240',
's3BlockCacheSize': '320',
@ -78,14 +78,27 @@ class TDTestCase(TBase):
self.trimDb(True)
rootPath = sc.clusterRootPath()
cmd = f"ls {rootPath}/dnode1/data20/vnode/vnode*/tsdb/*.data"
cmd = f"ls {rootPath}/dnode1/data2*/vnode/vnode*/tsdb/*.data"
tdLog.info(cmd)
loop = 0
while len(eos.runRetList(cmd)) > 0 and loop < 40:
time.sleep(5)
rets = []
while loop < 180:
time.sleep(3)
rets = eos.runRetList(cmd)
cnt = len(rets)
if cnt == 0:
tdLog.info("All data file upload to server over.")
break
self.trimDb(True)
tdLog.info(f"loop={loop} no upload {cnt} data files wait 3s retry ...")
if loop == 0:
sc.dnodeStop(1)
time.sleep(2)
sc.dnodeStart(1)
loop += 1
tdLog.info(f"loop={loop} wait 5s...")
if len(rets) > 0:
tdLog.exit(f"s3 can not upload all data to server. data files cnt={len(rets)} list={rets}")
def checkStreamCorrect(self):
sql = f"select count(*) from {self.db}.stm1"

View File

@ -33,14 +33,14 @@ class srvCtl:
# control server
#
# start
# start idx base is 1
def dnodeStart(self, idx):
if clusterDnodes.getModel() == 'cluster':
return clusterDnodes.starttaosd(idx)
return tdDnodes.starttaosd(idx)
# stop
# stop idx base is 1
def dnodeStop(self, idx):
if clusterDnodes.getModel() == 'cluster':
return clusterDnodes.stoptaosd(idx)