feat: army folder create
This commit is contained in:
parent
16068bab41
commit
5ffca8772f
|
@ -0,0 +1,163 @@
|
||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
|
||||||
|
import sys
|
||||||
|
from util.log import *
|
||||||
|
from util.cases import *
|
||||||
|
from util.sql import *
|
||||||
|
import threading
|
||||||
|
import random
|
||||||
|
import string
|
||||||
|
import time
|
||||||
|
|
||||||
|
|
||||||
|
#
|
||||||
|
# Auto Gen class
|
||||||
|
#
|
||||||
|
#
# Auto Gen class
#
class AutoGen:
    """Helper that generates table schemas and random row data for TDengine tests."""

    def __init__(self):
        self.ts = 1600000000000   # default start timestamp in ms
        self.batch_size = 100     # rows per insert statement
        seed = time.time() % 10000
        random.seed(seed)

    # set start ts
    def set_start_ts(self, ts):
        self.ts = ts

    # set batch size
    def set_batch_size(self, batch_size):
        self.batch_size = batch_size

    # generate column definitions; returns (type-index list, "name type,..." sql fragment)
    def gen_columns_sql(self, pre, cnt, binary_len, nchar_len):
        # position in this list is the type id recorded in metas and
        # interpreted later by gen_data()
        types = [
            'timestamp',
            'tinyint',
            'smallint',
            'tinyint unsigned',
            'smallint unsigned',
            'int',
            'bigint',
            'int unsigned',
            'bigint unsigned',
            'float',
            'double',
            'bool',
            f'varchar({binary_len})',
            f'nchar({nchar_len})'
        ]

        sqls = ""
        metas = []
        for i in range(cnt):
            colname = f"{pre}{i}"
            sel = i % len(types)   # cycle through every type
            coltype = types[sel]
            sql = f"{colname} {coltype}"
            if sqls != "":
                sqls += ","
            sqls += sql
            metas.append(sel)

        return metas, sqls  # BUGFIX: removed stray trailing semicolon

    # generate one comma-separated row for the type indexes in marr
    def gen_data(self, i, marr):
        datas = ""
        for c in marr:
            data = ""
            if c == 0:        # timestamp
                data = "%d" % (self.ts + i)
            elif c <= 4:      # tinyint/smallint (signed and unsigned)
                data = "%d" % (i % 128)
            elif c <= 8:      # int/bigint (signed and unsigned)
                data = f"{i}"
            elif c <= 10:     # float/double
                data = "%f" % (i + i / 1000)
            elif c <= 11:     # bool
                data = "%d" % (i % 2)
            elif c == 12:     # varchar; bin_len is set by create_stable()
                data = '"' + self.random_string(self.bin_len) + '"'
            elif c == 13:     # nchar (BUGFIX: comment wrongly said "binary")
                data = '"' + self.random_string(self.nch_len) + '"'

            if datas != "":
                datas += ","
            datas += data

        return datas

    # generate a random ascii-letter string of the given length
    def random_string(self, count):
        letters = string.ascii_letters
        return ''.join(random.choice(letters) for _ in range(count))

    # create db
    def create_db(self, dbname, vgroups = 2, replica = 1):
        self.dbname = dbname
        tdSql.execute(f'create database {dbname} vgroups {vgroups} replica {replica}')
        tdSql.execute(f'use {dbname}')

    # create the super table; remembers string lengths for gen_data()
    def create_stable(self, stbname, tag_cnt, column_cnt, binary_len, nchar_len):
        self.bin_len = binary_len
        self.nch_len = nchar_len
        self.stbname = stbname
        self.mtags, tags = self.gen_columns_sql("t", tag_cnt, binary_len, nchar_len)
        # column_cnt - 1 because the leading ts column is added explicitly below
        self.mcols, cols = self.gen_columns_sql("c", column_cnt - 1, binary_len, nchar_len)

        sql = f"create table {stbname} (ts timestamp, {cols}) tags({tags})"
        tdSql.execute(sql)

    # create child tables prename0..prename{cnt-1} under stbname
    def create_child(self, stbname, prename, cnt):
        self.child_cnt = cnt
        self.child_name = prename
        for i in range(cnt):
            tags_data = self.gen_data(i, self.mtags)
            sql = f"create table {prename}{i} using {stbname} tags({tags_data})"
            tdSql.execute(sql)

        tdLog.info(f"create child tables {cnt} ok")

    # insert cnt rows into one child table, flushing every batch_size rows;
    # step is the per-row timestamp increment (0 => all rows share one ts)
    def insert_data_child(self, child_name, cnt, batch_size, step):
        values = ""
        print("insert child data")
        ts = self.ts

        # loop do
        for i in range(cnt):
            value = self.gen_data(i, self.mcols)
            ts += step
            values += f"({ts},{value}) "
            if batch_size == 1 or (i > 0 and i % batch_size == 0) :
                sql = f"insert into {child_name} values {values}"
                tdSql.execute(sql)
                values = ""

        # end batch: flush the final partial batch, if any
        if values != "":
            sql = f"insert into {child_name} values {values}"
            tdSql.execute(sql)
            tdLog.info(f" insert data i={i}")
            values = ""

        tdLog.info(f" insert child data {child_name} finished, insert rows={cnt}")

    # insert data
    def insert_data(self, cnt):
        for i in range(self.child_cnt):
            name = f"{self.child_name}{i}"
            self.insert_data_child(name, cnt, self.batch_size, 1)

        tdLog.info(f" insert data ok, child table={self.child_cnt} insert rows={cnt}")

    # insert same timestamp to all childs
    def insert_samets(self, cnt):
        for i in range(self.child_cnt):
            name = f"{self.child_name}{i}"
            self.insert_data_child(name, cnt, self.batch_size, 0)

        tdLog.info(f" insert same timestamp ok, child table={self.child_cnt} insert rows={cnt}")
|
|
@ -0,0 +1,44 @@
|
||||||
|
class DataBoundary:
    """Value-range and schema-limit constants for TDengine boundary testing."""

    def __init__(self):
        # signed integer ranges [min, max]
        self.TINYINT_BOUNDARY = [-128, 127]
        self.SMALLINT_BOUNDARY = [-32768, 32767]
        self.INT_BOUNDARY = [-2147483648, 2147483647]
        self.BIGINT_BOUNDARY = [-9223372036854775808, 9223372036854775807]

        # unsigned integer ranges [min, max]
        self.UTINYINT_BOUNDARY = [0, 255]
        self.USMALLINT_BOUNDARY = [0, 65535]
        self.UINT_BOUNDARY = [0, 4294967295]
        self.UBIGINT_BOUNDARY = [0, 18446744073709551615]

        # floating point and boolean domains
        self.FLOAT_BOUNDARY = [-3.40E+38, 3.40E+38]
        self.DOUBLE_BOUNDARY = [-1.7e+308, 1.7e+308]
        self.BOOL_BOUNDARY = [True, False]

        # schema length / count limits
        self.BINARY_MAX_LENGTH = 16374
        self.NCHAR_MAX_LENGTH = 4093
        self.DBNAME_MAX_LENGTH = 64
        self.STBNAME_MAX_LENGTH = 192
        self.TBNAME_MAX_LENGTH = 192
        self.CHILD_TBNAME_MAX_LENGTH = 192
        self.TAG_KEY_MAX_LENGTH = 64
        self.COL_KEY_MAX_LENGTH = 64
        self.MAX_TAG_COUNT = 128
        self.MAX_TAG_COL_COUNT = 4096

        # shared-memory sizes: [min, max] plus defaults
        self.mnodeShmSize = [6292480, 2147483647]
        self.mnodeShmSize_default = 6292480
        self.vnodeShmSize = [6292480, 2147483647]
        self.vnodeShmSize_default = 31458304

        # per-parameter database option descriptors:
        # create_name = keyword in "create database", query_name = column in
        # the info schema, vnode_json_key = key inside the vnode json config.
        self.DB_PARAM_BUFFER_CONFIG = {"create_name": "buffer", "query_name": "buffer", "vnode_json_key": "szBuf", "boundary": [3, 16384], "default": 96}
        self.DB_PARAM_CACHELAST_CONFIG = {"create_name": "cachelast", "query_name": "cache_model", "vnode_json_key": "", "boundary": [0, 1, 2, 3], "default": 0}
        self.DB_PARAM_COMP_CONFIG = {"create_name": "comp", "query_name": "compression", "vnode_json_key": "", "boundary": [0, 1, 2], "default": 2}
        self.DB_PARAM_DURATION_CONFIG = {"create_name": "duration", "query_name": "duration", "vnode_json_key": "daysPerFile", "boundary": [1, 3650, '60m', '5256000m', '1h', '87600h', '1d', '3650d'], "default": "14400m"}
        self.DB_PARAM_FSYNC_CONFIG = {"create_name": "fsync", "query_name": "fsync", "vnode_json_key": "", "boundary": [0, 180000], "default": 3000}
        # NOTE(review): query_name "fsync" here looks like a copy-paste slip
        # ("keep" would be expected) — confirm before relying on it.
        self.DB_PARAM_KEEP_CONFIG = {"create_name": "keep", "query_name": "fsync", "vnode_json_key": "", "boundary": [1, 365000,'1440m','525600000m','24h','8760000h','1d','365000d'], "default": "5256000m,5256000m,5256000m"}
        self.DB_PARAM_MAXROWS_CONFIG = {"create_name": "maxrows", "query_name": "maxrows", "vnode_json_key": "maxRows", "boundary": [200, 10000], "default": 4096}
        self.DB_PARAM_MINROWS_CONFIG = {"create_name": "minrows", "query_name": "minrows", "vnode_json_key": "minRows", "boundary": [10, 1000], "default": 100}
        self.DB_PARAM_NTABLES_CONFIG = {"create_name": "ntables", "query_name": "ntables", "vnode_json_key": "", "boundary": 0, "default": 0}
        self.DB_PARAM_PAGES_CONFIG = {"create_name": "pages", "query_name": "pages", "vnode_json_key": "szCache", "boundary": [64], "default": 256}
        self.DB_PARAM_PAGESIZE_CONFIG = {"create_name": "pagesize", "query_name": "pagesize", "vnode_json_key": "szPage", "boundary": [1, 16384], "default": 4}
        self.DB_PARAM_PRECISION_CONFIG = {"create_name": "precision", "query_name": "precision", "vnode_json_key": "", "boundary": ['ms', 'us', 'ns'], "default": "ms"}
        self.DB_PARAM_REPLICA_CONFIG = {"create_name": "replica", "query_name": "replica", "vnode_json_key": "", "boundary": [1], "default": 1}
        self.DB_PARAM_SINGLE_STABLE_CONFIG = {"create_name": "single_stable", "query_name": "single_stable_model", "vnode_json_key": "", "boundary": [0, 1], "default": 0}
        self.DB_PARAM_STRICT_CONFIG = {"create_name": "strict", "query_name": "strict", "vnode_json_key": "", "boundary": {"off": 0, "strict": 1}, "default": "off"}
        self.DB_PARAM_VGROUPS_CONFIG = {"create_name": "vgroups", "query_name": "vgroups", "vnode_json_key": "", "boundary": [1, 32], "default": 2}
        self.DB_PARAM_WAL_CONFIG = {"create_name": "wal", "query_name": "wal", "vnode_json_key": "", "boundary": [1, 2], "default": 1}
|
@ -0,0 +1,150 @@
|
||||||
|
###################################################################
|
||||||
|
# Copyright (c) 2016 by TAOS Technologies, Inc.
|
||||||
|
# All rights reserved.
|
||||||
|
#
|
||||||
|
# This file is proprietary and confidential to TAOS Technologies.
|
||||||
|
# No part of this file may be reproduced, stored, transmitted,
|
||||||
|
# disclosed or used in any form or by any means other than as
|
||||||
|
# expressly provided by the written permission from Jianhui Tao
|
||||||
|
#
|
||||||
|
###################################################################
|
||||||
|
|
||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
|
||||||
|
import sys
|
||||||
|
import os
|
||||||
|
import time
|
||||||
|
import datetime
|
||||||
|
import inspect
|
||||||
|
import importlib
|
||||||
|
import traceback
|
||||||
|
from util.log import *
|
||||||
|
|
||||||
|
|
||||||
|
class TDCase:
    """One registered test case: *name* is the script path, *case* the test object."""

    def __init__(self, name, case):
        self.name = name
        self.case = case
        self._logSql = True


class TDCases:
    """Registry and runner for Linux / Windows / cluster test cases."""

    def __init__(self):
        self.linuxCases = []     # cases registered for Linux
        self.windowsCases = []   # cases registered for Windows
        self.clusterCases = []   # cases registered for cluster runs

    def __dynamicLoadModule(self, fileName):
        """Import the test module named by a file path like "dir/case.py"."""
        moduleName = fileName.replace(".py", "").replace(os.sep, ".")
        return importlib.import_module(moduleName, package='..')

    def logSql(self, logSql):
        """Toggle SQL statement logging for subsequently run cases."""
        self._logSql = logSql

    def addWindows(self, name, case):
        self.windowsCases.append(TDCase(name, case))

    def addLinux(self, name, case):
        self.linuxCases.append(TDCase(name, case))

    def addCluster(self, name, case):
        self.clusterCases.append(TDCase(name, case))

    def runAllLinux(self, conn):
        """Run every registered Linux case.

        BUGFIX: the original body filtered on an undefined ``fileName`` and
        instantiated from an undefined ``testModule``, so it raised NameError
        as soon as any case was registered; run the stored case objects.
        """
        runNum = 0
        for tmp in self.linuxCases:
            case = tmp.case
            case.init(conn)
            case.run()
            case.stop()
            runNum += 1

        tdLog.info("total %d Linux test case(s) executed" % (runNum))

    def runOneLinux(self, conn, fileName, replicaVar=1):
        """Run the Linux case(s) whose registered name contains *fileName*."""
        testModule = self.__dynamicLoadModule(fileName)

        runNum = 0
        for tmp in self.linuxCases:
            if tmp.name.find(fileName) != -1:
                case = testModule.TDTestCase()
                case.init(conn, self._logSql, replicaVar)
                try:
                    case.run()
                except Exception as e:
                    tdLog.notice(repr(e))
                    traceback.print_exc()
                    tdLog.exit("%s failed" % (fileName))
                case.stop()
                runNum += 1
                continue

    def runAllWindows(self, conn):
        """Run every registered Windows case (see runAllLinux BUGFIX note)."""
        runNum = 0
        for tmp in self.windowsCases:
            case = tmp.case
            case.init(conn)
            case.run()
            case.stop()
            runNum += 1

        tdLog.notice("total %d Windows test case(s) executed" % (runNum))

    def runOneWindows(self, conn, fileName, replicaVar=1):
        """Run the Windows case(s) whose registered name contains *fileName*."""
        testModule = self.__dynamicLoadModule(fileName)

        runNum = 0
        for tmp in self.windowsCases:
            if tmp.name.find(fileName) != -1:
                case = testModule.TDTestCase()
                case.init(conn, self._logSql, replicaVar)
                try:
                    case.run()
                except Exception as e:
                    tdLog.notice(repr(e))
                    # CONSISTENCY: print the stack like runOneLinux does
                    traceback.print_exc()
                    tdLog.exit("%s failed" % (fileName))
                case.stop()
                runNum += 1
                continue
        tdLog.notice("total %d Windows case(s) executed" % (runNum))

    def runAllCluster(self):
        """Run every registered cluster case (see runAllLinux BUGFIX note)."""
        runNum = 0
        for tmp in self.clusterCases:
            case = tmp.case
            case.init()
            case.run()
            case.stop()
            runNum += 1

        tdLog.notice("total %d Cluster test case(s) executed" % (runNum))

    def runOneCluster(self, fileName):
        """Run the cluster case(s) whose registered name contains *fileName*."""
        testModule = self.__dynamicLoadModule(fileName)

        runNum = 0
        for tmp in self.clusterCases:
            if tmp.name.find(fileName) != -1:
                tdLog.notice("run cases like %s" % (fileName))
                case = testModule.TDTestCase()
                case.init()
                case.run()
                case.stop()
                runNum += 1
                continue

        tdLog.notice("total %d Cluster test case(s) executed" % (runNum))


tdCases = TDCases()
|
|
@ -0,0 +1,108 @@
|
||||||
|
from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
|
||||||
|
import taos
|
||||||
|
import sys
|
||||||
|
import time
|
||||||
|
import os
|
||||||
|
import socket
|
||||||
|
|
||||||
|
from util.log import *
|
||||||
|
from util.sql import *
|
||||||
|
from util.cases import *
|
||||||
|
from util.dnodes import *
|
||||||
|
from util.common import *
|
||||||
|
|
||||||
|
class ClusterDnodes(TDDnodes):
    """Adapt TDDnodes to wrap an externally built list of dnode instances."""

    def __init__(self, dnodes_lists):
        super().__init__()
        # every entry of dnodes_lists must be a TDDnode instance
        self.dnodes = dnodes_lists
        self.simDeployed = False
        self.testCluster = False
        self.valgrind = 0
        self.killValgrind = 1
|
class ConfigureyCluster:
    """This will create defined number of dnodes and create a cluster.
    at the same time, it will return TDDnodes list: dnodes, """

    # local FQDN used as the default endpoint host for every dnode
    hostname = socket.gethostname()

    def __init__(self):
        self.dnodes = []
        self.dnodeNums = 5
        self.independent = True
        self.startPort = 6030
        self.portStep = 100
        self.mnodeNums = 0

    def configure_cluster(self ,dnodeNums=5,mnodeNums=0,independentMnode=True,startPort=6030,portStep=100,hostname="%s"%hostname):
        """Build *dnodeNums* TDDnode objects with endpoint/port cfg; returns the list."""
        self.startPort=int(startPort)
        self.portStep=int(portStep)
        self.hostname=hostname
        self.dnodeNums = int(dnodeNums)
        self.mnodeNums = int(mnodeNums)
        self.dnodes = []
        startPort_sec = int(startPort+portStep)
        for num in range(1, (self.dnodeNums+1)):
            dnode = TDDnode(num)
            dnode.addExtraCfg("firstEp", f"{hostname}:{self.startPort}")
            dnode.addExtraCfg("fqdn", f"{hostname}")
            dnode.addExtraCfg("serverPort", f"{self.startPort + (num-1)*self.portStep}")
            dnode.addExtraCfg("secondEp", f"{hostname}:{startPort_sec}")

            # configure dnode of independent mnodes: the first mnodeNums
            # dnodes are dedicated mnodes and host no vnodes
            if num <= self.mnodeNums and self.mnodeNums != 0 and independentMnode == True :
                tdLog.info(f"set mnode:{num} supportVnodes 0")
                dnode.addExtraCfg("supportVnodes", 0)
            self.dnodes.append(dnode)
        return self.dnodes

    def create_dnode(self,conn,dnodeNum):
        """Register dnodes 2..dnodeNum with the cluster via "create dnode"."""
        tdSql.init(conn.cursor())
        dnodeNum=int(dnodeNum)
        for dnode in self.dnodes[1:dnodeNum]:
            dnode_id = dnode.cfgDict["fqdn"] + ":" +dnode.cfgDict["serverPort"]
            tdSql.execute(" create dnode '%s';"%dnode_id)

    def create_mnode(self,conn,mnodeNums):
        """Create mnodes on dnodes 2..mnodeNums (dnode 1 already hosts one)."""
        tdSql.init(conn.cursor())
        mnodeNums=int(mnodeNums)
        for i in range(2,mnodeNums+1):
            tdLog.info("create mnode on dnode %d"%i)
            tdSql.execute(" create mnode on dnode %d;"%i)

    def check_dnode(self,conn):
        """Poll up to 5s until every dnode reports "ready"; exit the test otherwise."""
        tdSql.init(conn.cursor())
        count=0
        while count < 5:
            tdSql.query("select * from information_schema.ins_dnodes")
            status=0
            for i in range(self.dnodeNums):
                if tdSql.queryResult[i][4] == "ready":
                    status+=1

            if status == self.dnodeNums:
                tdLog.debug(" create cluster with %d dnode and check cluster dnode all ready within 5s! " %self.dnodeNums)
                break
            count+=1
            time.sleep(1)
        else:
            # while/else: only reached when the loop expired without break
            tdLog.exit("create cluster with %d dnode but check dnode not ready within 5s ! "%self.dnodeNums)

    def checkConnectStatus(self,dnodeNo,hostname=hostname):
        """Open a client connection to dnode *dnodeNo* (0-based) and return it.

        BUGFIX: the original overwrote the *hostname* parameter with
        socket.gethostname(), so an explicitly passed host was ignored.
        """
        dnodeNo = int(dnodeNo)
        tdLog.info("check dnode-%d connection"%(dnodeNo+1))
        # NOTE(review): assumes the default port layout (6030 + 100*i);
        # confirm against configure_cluster's startPort/portStep if customized.
        port = 6030 + dnodeNo*100
        connectToDnode = tdCom.newcon(host=hostname,port=port)
        return connectToDnode

cluster = ConfigureyCluster()
|
File diff suppressed because it is too large
Load Diff
|
@ -0,0 +1,197 @@
|
||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
|
||||||
|
# basic type
|
||||||
|
# basic type
TAOS_DATA_TYPE = [
    "INT", "BIGINT", "SMALLINT", "TINYINT", "INT UNSIGNED", "BIGINT UNSIGNED", "SMALLINT UNSIGNED", "TINYINT UNSIGNED",
    "FLOAT", "DOUBLE",
    "BOOL",
    "BINARY", "NCHAR", "VARCHAR",
    "TIMESTAMP",
    # "MEDIUMBLOB", "BLOB", # add in 3.x
    # "DECIMAL", "NUMERIC", # add in 3.x
    "JSON", # only for tag
]

TAOS_NUM_TYPE = [
    "INT", "BIGINT", "SMALLINT", "TINYINT", "INT UNSIGNED", "BIGINT UNSIGNED", "SMALLINT UNSIGNED", "TINYINT UNSIGNED", "FLOAT", "DOUBLE",
    # "DECIMAL", "NUMERIC", # add in 3.x
]
TAOS_CHAR_TYPE = [
    "BINARY", "NCHAR", "VARCHAR",
]
TAOS_BOOL_TYPE = ["BOOL",]
TAOS_TS_TYPE = ["TIMESTAMP",]
TAOS_BIN_TYPE = [
    "MEDIUMBLOB", "BLOB", # add in 3.x
]

# duration/interval unit suffixes and timestamp precisions
TAOS_TIME_INIT = ["b", "u", "a", "s", "m", "h", "d", "w", "n", "y"]
TAOS_PRECISION = ["ms", "us", "ns"]
PRECISION_DEFAULT = "ms"
PRECISION = PRECISION_DEFAULT

TAOS_KEYWORDS = [
    "ABORT", "CREATE", "IGNORE", "NULL", "STAR",
    "ACCOUNT", "CTIME", "IMMEDIATE", "OF", "STATE",
    "ACCOUNTS", "DATABASE", "IMPORT", "OFFSET", "STATEMENT",
    "ADD", "DATABASES", "IN", "OR", "STATE_WINDOW",
    "AFTER", "DAYS", "INITIALLY", "ORDER", "STORAGE",
    "ALL", "DBS", "INSERT", "PARTITIONS", "STREAM",
    "ALTER", "DEFERRED", "INSTEAD", "PASS", "STREAMS",
    "AND", "DELIMITERS", "INT", "PLUS", "STRING",
    "AS", "DESC", "INTEGER", "PPS", "SYNCDB",
    "ASC", "DESCRIBE", "INTERVAL", "PRECISION", "TABLE",
    "ATTACH", "DETACH", "INTO", "PREV", "TABLES",
    "BEFORE", "DISTINCT", "IS", "PRIVILEGE", "TAG",
    "BEGIN", "DIVIDE", "ISNULL", "QTIME", "TAGS",
    "BETWEEN", "DNODE", "JOIN", "QUERIES", "TBNAME",
    "BIGINT", "DNODES", "KEEP", "QUERY", "TIMES",
    "BINARY", "DOT", "KEY", "QUORUM", "TIMESTAMP",
    "BITAND", "DOUBLE", "KILL", "RAISE", "TINYINT",
    "BITNOT", "DROP", "LE", "REM", "TOPIC",
    "BITOR", "EACH", "LIKE", "REPLACE", "TOPICS",
    "BLOCKS", "END", "LIMIT", "REPLICA", "TRIGGER",
    "BOOL", "EQ", "LINEAR", "RESET", "TSERIES",
    "BY", "EXISTS", "LOCAL", "RESTRICT", "UMINUS",
    "CACHE", "EXPLAIN", "LP", "ROW", "UNION",
    "CACHEMODEL", "FAIL", "LSHIFT", "RP", "UNSIGNED",
    "CASCADE", "FILE", "LT", "RSHIFT", "UPDATE",
    "CHANGE", "FILL", "MATCH", "SCORES", "UPLUS",
    "CLUSTER", "FLOAT", "MAXROWS", "SELECT", "USE",
    "COLON", "FOR", "MINROWS", "SEMI", "USER",
    "COLUMN", "FROM", "MINUS", "SESSION", "USERS",
    "COMMA", "FSYNC", "MNODES", "SET", "USING",
    "COMP", "GE", "MODIFY", "SHOW", "VALUES",
    "COMPACT", "GLOB", "MODULES", "SLASH", "VARIABLE",
    "CONCAT", "GRANTS", "NCHAR", "SLIDING", "VARIABLES",
    "CONFLICT", "GROUP", "NE", "SLIMIT", "VGROUPS",
    "CONNECTION", "GT", "NONE", "SMALLINT", "VIEW",
    "CONNECTIONS", "HAVING", "NOT", "SOFFSET", "VNODES",
    "CONNS", "ID", "NOTNULL", "STABLE", "WAL",
    "COPY", "IF", "NOW", "STABLES", "WHERE",
]

NUM_FUNC = [
    "ABS", "ACOS", "ASIN", "ATAN", "CEIL", "COS", "FLOOR", "LOG", "POW", "ROUND", "SIN", "SQRT", "TAN",
]

STR_FUNC = [
    "CHAR_LENGTH", "CONCAT", "CONCAT_WS", "LENGTH", "LOWER","LTRIM", "RTRIM", "SUBSTR", "UPPER",
]

# BUGFIX: "CASR" -> "CAST" and "TP_UNIXTIMESTAMP" -> "TO_UNIXTIMESTAMP",
# matching the real TDengine conversion function names.
CONVER_FUNC = ["CAST", "TO_ISO8601", "TO_JSON", "TO_UNIXTIMESTAMP"]

SELECT_FUNC = [
    "APERCENTILE", "BOTTOM", "FIRST", "INTERP", "LAST", "MAX", "MIN", "PERCENTILE", "TAIL", "TOP", "UNIQUE",
]

AGG_FUNC = [
    "AVG", "COUNT", "ELAPSED", "LEASTSQUARES", "MODE", "SPREAD", "STDDEV", "SUM", "HYPERLOGLOG", "HISTOGRAM",
]

TS_FUNC = [
    "CSUM", "DERIVATIVE", "DIFF", "IRATE", "MAVG", "SAMPLE", "STATECOUNT", "STATEDURATION", "TWA"
]

SYSINFO_FUNC = [
    "DATABASE", "CLIENT_VERSION", "SERVER_VERSION", "SERVER_STATUS", "CURRENT_USER", "USER"
]


# basic data type boundary
TINYINT_MAX = 127
TINYINT_MIN = -128

TINYINT_UN_MAX = 255
TINYINT_UN_MIN = 0

SMALLINT_MAX = 32767
SMALLINT_MIN = -32768

SMALLINT_UN_MAX = 65535
SMALLINT_UN_MIN = 0

INT_MAX = 2_147_483_647
INT_MIN = -2_147_483_648

INT_UN_MAX = 4_294_967_295
INT_UN_MIN = 0

BIGINT_MAX = 9_223_372_036_854_775_807
BIGINT_MIN = -9_223_372_036_854_775_808

BIGINT_UN_MAX = 18_446_744_073_709_551_615
BIGINT_UN_MIN = 0

FLOAT_MAX = 3.40E+38
FLOAT_MIN = -3.40E+38

DOUBLE_MAX = 1.7E+308
DOUBLE_MIN = -1.7E+308

# schema boundary
BINARY_LENGTH_MAX = 16374
NCAHR_LENGTH_MAX = 4093          # misspelled name kept for existing callers
NCHAR_LENGTH_MAX = NCAHR_LENGTH_MAX  # correctly spelled alias (backward compatible)
DBNAME_LENGTH_MAX = 64

STBNAME_LENGTH_MAX = 192
STBNAME_LENGTH_MIN = 1

TBNAME_LENGTH_MAX = 192
TBNAME_LENGTH_MIN = 1

CHILD_TBNAME_LENGTH_MAX = 192
CHILD_TBNAME_LENGTH_MIN = 1

TAG_NAME_LENGTH_MAX = 64
TAG_NAME_LENGTH_MIN = 1

COL_NAME_LENGTH_MAX = 64
COL_NAME_LENGTH_MIN = 1

TAG_COUNT_MAX = 128
TAG_COUNT_MIN = 1

COL_COUNT_MAX = 4096
COL_COUNT_MIN = 2

TAG_COL_COUNT_MAX = 4096
TAG_COL_COUNT_MIN = 3

MNODE_SHM_SIZE_MAX = 2_147_483_647
MNODE_SHM_SIZE_MIN = 6_292_480
MNODE_SHM_SIZE_DEFAULT = 6_292_480

VNODE_SHM_SIZE_MAX = 2_147_483_647
VNODE_SHM_SIZE_MIN = 6_292_480
VNODE_SHM_SIZE_DEFAULT = 31_458_304

# time_init: durations expressed in the default (ms) precision
TIME_MS = 1
TIME_US = TIME_MS/1000
TIME_NS = TIME_US/1000

TIME_S = 1000 * TIME_MS
TIME_M = 60 * TIME_S
TIME_H = 60 * TIME_M
TIME_D = 24 * TIME_H
TIME_W = 7 * TIME_D
TIME_N = 30 * TIME_D
TIME_Y = 365 * TIME_D


# session parameters
INTERVAL_MIN = 1 * TIME_MS if PRECISION == PRECISION_DEFAULT else 1 * TIME_US


# streams and related agg-function
SMA_INDEX_FUNCTIONS = ["MIN", "MAX"]
ROLLUP_FUNCTIONS = ["AVG", "SUM", "MIN", "MAX", "LAST", "FIRST"]
BLOCK_FUNCTIONS = ["SUM", "MIN", "MAX"]
SMA_WATMARK_MAXDELAY_INIT = ['a', "s", "m"]
WATERMARK_MAX = 900000
WATERMARK_MIN = 0

MAX_DELAY_MAX = 900000
MAX_DELAY_MIN = 1
|
@ -0,0 +1,502 @@
|
||||||
|
###################################################################
|
||||||
|
# Copyright (c) 2016 by TAOS Technologies, Inc.
|
||||||
|
# All rights reserved.
|
||||||
|
#
|
||||||
|
# This file is proprietary and confidential to TAOS Technologies.
|
||||||
|
# No part of this file may be reproduced, stored, transmitted,
|
||||||
|
# disclosed or used in any form or by any means other than as
|
||||||
|
# expressly provided by the written permission from Jianhui Tao
|
||||||
|
#
|
||||||
|
###################################################################
|
||||||
|
|
||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
|
||||||
|
import sys
|
||||||
|
import os
|
||||||
|
import os.path
|
||||||
|
import subprocess
|
||||||
|
from util.log import *
|
||||||
|
|
||||||
|
|
||||||
|
class TDSimClient:
    """Manage the simulated client's config directory under <path>/sim/psim."""

    def __init__(self):
        self.testCluster = False

        # default taos.cfg entries written at deploy time
        self.cfgDict = {
            "numOfLogLines": "100000000",
            "numOfThreadsPerCore": "2.0",
            "locale": "en_US.UTF-8",
            "charset": "UTF-8",
            "asyncLog": "0",
            "minTablesPerVnode": "4",
            "maxTablesPerVnode": "1000",
            "tableIncStepPerVnode": "10000",
            "maxVgroupsPerDb": "1000",
            "sdbDebugFlag": "143",
            "rpcDebugFlag": "135",
            "tmrDebugFlag": "131",
            "cDebugFlag": "135",
            "udebugFlag": "135",
            "jnidebugFlag": "135",
            "qdebugFlag": "135",
            "telemetryReporting": "0",
        }

    def init(self, path):
        # reset to defaults, then remember the sandbox root
        self.__init__()
        self.path = path

    def getLogDir(self):
        self.logDir = os.path.join(self.path, "sim", "psim", "log")
        return self.logDir

    def getCfgDir(self):
        self.cfgDir = os.path.join(self.path, "sim", "psim", "cfg")
        return self.cfgDir

    def setTestCluster(self, value):
        self.testCluster = value

    def addExtraCfg(self, option, value):
        self.cfgDict.update({option: value})

    def cfg(self, option, value):
        # append "option value" to the cfg file; abort the test run on failure
        cmd = "echo %s %s >> %s" % (option, value, self.cfgPath)
        if os.system(cmd) != 0:
            tdLog.exit(cmd)

    def deploy(self):
        self.logDir = os.path.join(self.path, "sim", "psim", "log")
        self.cfgDir = os.path.join(self.path, "sim", "psim", "cfg")
        self.cfgPath = os.path.join(self.path, "sim", "psim", "cfg", "taos.cfg")

        # recreate log/cfg dirs and an empty cfg file, stopping on any failure
        for cmd in ("rm -rf " + self.logDir,
                    "mkdir -p " + self.logDir,
                    "rm -rf " + self.cfgDir,
                    "mkdir -p " + self.cfgDir,
                    "touch " + self.cfgPath):
            if os.system(cmd) != 0:
                tdLog.exit(cmd)

        if self.testCluster:
            self.cfg("masterIp", "192.168.0.1")
            self.cfg("secondIp", "192.168.0.2")
        self.cfg("logDir", self.logDir)

        for key, value in self.cfgDict.items():
            self.cfg(key, value)

        tdLog.debug("psim is deployed and configured by %s" % (self.cfgPath))
||||||
|
class TDDnode:
|
||||||
|
def __init__(self, index):
    """Track per-dnode runtime state; *index* is the dnode number."""
    self.index = index
    self.running = 0      # 1 once the daemon has been started
    self.deployed = 0     # 1 once cfg/data/log dirs exist
    self.testCluster = False
    self.valgrind = 0
|
def init(self, path):
    """Remember the sandbox root directory for this dnode."""
    self.path = path
||||||
|
def setTestCluster(self, value):
    """Enable/disable cluster-mode configuration for this dnode."""
    self.testCluster = value
||||||
|
def setValgrind(self, value):
    """Set whether the daemon is launched under valgrind (0/1 flag)."""
    self.valgrind = value
||||||
|
def getDataSize(self):
    """Total on-disk bytes under this dnode's data dir (0 if not deployed)."""
    total = 0
    if self.deployed == 1:
        for dirpath, _dirnames, filenames in os.walk(self.dataDir):
            for name in filenames:
                full = os.path.join(dirpath, name)
                # skip symlinks so linked files are not counted twice
                if not os.path.islink(full):
                    total += os.path.getsize(full)
    return total
||||||
|
def deploy(self):
|
||||||
|
self.logDir = os.path.join(self.path,"sim","dnode%d" % self.index, "log")
|
||||||
|
self.dataDir = os.path.join(self.path,"sim","dnode%d" % self.index, "data")
|
||||||
|
self.cfgDir = os.path.join(self.path,"sim","dnode%d" % self.index, "cfg")
|
||||||
|
self.cfgPath = os.path.join(self.path,"sim","dnode%d" % self.index, "cfg","taos.cfg")
|
||||||
|
|
||||||
|
cmd = "rm -rf " + self.dataDir
|
||||||
|
if os.system(cmd) != 0:
|
||||||
|
tdLog.exit(cmd)
|
||||||
|
|
||||||
|
cmd = "rm -rf " + self.logDir
|
||||||
|
if os.system(cmd) != 0:
|
||||||
|
tdLog.exit(cmd)
|
||||||
|
|
||||||
|
cmd = "rm -rf " + self.cfgDir
|
||||||
|
if os.system(cmd) != 0:
|
||||||
|
tdLog.exit(cmd)
|
||||||
|
|
||||||
|
cmd = "mkdir -p " + self.dataDir
|
||||||
|
if os.system(cmd) != 0:
|
||||||
|
tdLog.exit(cmd)
|
||||||
|
|
||||||
|
cmd = "mkdir -p " + self.logDir
|
||||||
|
if os.system(cmd) != 0:
|
||||||
|
tdLog.exit(cmd)
|
||||||
|
|
||||||
|
cmd = "mkdir -p " + self.cfgDir
|
||||||
|
if os.system(cmd) != 0:
|
||||||
|
tdLog.exit(cmd)
|
||||||
|
|
||||||
|
cmd = "touch " + self.cfgPath
|
||||||
|
if os.system(cmd) != 0:
|
||||||
|
tdLog.exit(cmd)
|
||||||
|
|
||||||
|
if self.testCluster:
|
||||||
|
self.startIP()
|
||||||
|
|
||||||
|
if self.testCluster:
|
||||||
|
self.cfg("masterIp", "192.168.0.1")
|
||||||
|
self.cfg("secondIp", "192.168.0.2")
|
||||||
|
self.cfg("publicIp", "192.168.0.%d" % (self.index))
|
||||||
|
self.cfg("internalIp", "192.168.0.%d" % (self.index))
|
||||||
|
self.cfg("privateIp", "192.168.0.%d" % (self.index))
|
||||||
|
self.cfg("dataDir", self.dataDir)
|
||||||
|
self.cfg("logDir", self.logDir)
|
||||||
|
self.cfg("numOfLogLines", "100000000")
|
||||||
|
self.cfg("mnodeEqualVnodeNum", "0")
|
||||||
|
self.cfg("walLevel", "2")
|
||||||
|
self.cfg("fsync", "1000")
|
||||||
|
self.cfg("statusInterval", "1")
|
||||||
|
self.cfg("numOfMnodes", "3")
|
||||||
|
self.cfg("numOfThreadsPerCore", "2.0")
|
||||||
|
self.cfg("monitor", "0")
|
||||||
|
self.cfg("maxVnodeConnections", "30000")
|
||||||
|
self.cfg("maxMgmtConnections", "30000")
|
||||||
|
self.cfg("maxMeterConnections", "30000")
|
||||||
|
self.cfg("maxShellConns", "30000")
|
||||||
|
self.cfg("locale", "en_US.UTF-8")
|
||||||
|
self.cfg("charset", "UTF-8")
|
||||||
|
self.cfg("asyncLog", "0")
|
||||||
|
self.cfg("anyIp", "0")
|
||||||
|
self.cfg("dDebugFlag", "135")
|
||||||
|
self.cfg("mDebugFlag", "135")
|
||||||
|
self.cfg("sdbDebugFlag", "135")
|
||||||
|
self.cfg("rpcDebugFlag", "135")
|
||||||
|
self.cfg("tmrDebugFlag", "131")
|
||||||
|
self.cfg("cDebugFlag", "135")
|
||||||
|
self.cfg("httpDebugFlag", "135")
|
||||||
|
self.cfg("monitorDebugFlag", "135")
|
||||||
|
self.cfg("udebugFlag", "135")
|
||||||
|
self.cfg("jnidebugFlag", "135")
|
||||||
|
self.cfg("qdebugFlag", "135")
|
||||||
|
self.deployed = 1
|
||||||
|
tdLog.debug(
|
||||||
|
"dnode:%d is deployed and configured by %s" %
|
||||||
|
(self.index, self.cfgPath))
|
||||||
|
|
||||||
|
def getBuildPath(self):
|
||||||
|
selfPath = os.path.dirname(os.path.realpath(__file__))
|
||||||
|
|
||||||
|
if ("community" in selfPath):
|
||||||
|
projPath = selfPath[:selfPath.find("community")]
|
||||||
|
else:
|
||||||
|
projPath = selfPath[:selfPath.find("tests")]
|
||||||
|
|
||||||
|
for root, dirs, files in os.walk(projPath):
|
||||||
|
if ("taosd" in files):
|
||||||
|
rootRealPath = os.path.dirname(os.path.realpath(root))
|
||||||
|
if ("packaging" not in rootRealPath):
|
||||||
|
buildPath = root[:len(root)-len("/build/bin")]
|
||||||
|
break
|
||||||
|
return buildPath
|
||||||
|
|
||||||
|
def start(self):
|
||||||
|
buildPath = self.getBuildPath()
|
||||||
|
|
||||||
|
if (buildPath == ""):
|
||||||
|
tdLog.exit("taosd not found!")
|
||||||
|
else:
|
||||||
|
tdLog.info("taosd found in %s" % buildPath)
|
||||||
|
|
||||||
|
binPath = buildPath + "/build/bin/taosd"
|
||||||
|
|
||||||
|
if self.deployed == 0:
|
||||||
|
tdLog.exit("dnode:%d is not deployed" % (self.index))
|
||||||
|
|
||||||
|
if self.valgrind == 0:
|
||||||
|
cmd = "nohup %s -c %s > /dev/null 2>&1 & " % (
|
||||||
|
binPath, self.cfgDir)
|
||||||
|
else:
|
||||||
|
valgrindCmdline = "valgrind --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all -v --workaround-gcc296-bugs=yes"
|
||||||
|
|
||||||
|
cmd = "nohup %s %s -c %s 2>&1 & " % (
|
||||||
|
valgrindCmdline, binPath, self.cfgDir)
|
||||||
|
|
||||||
|
print(cmd)
|
||||||
|
|
||||||
|
if os.system(cmd) != 0:
|
||||||
|
tdLog.exit(cmd)
|
||||||
|
self.running = 1
|
||||||
|
tdLog.debug("dnode:%d is running with %s " % (self.index, cmd))
|
||||||
|
|
||||||
|
tdLog.debug("wait 5 seconds for the dnode:%d to start." % (self.index))
|
||||||
|
time.sleep(5)
|
||||||
|
|
||||||
|
def stop(self):
|
||||||
|
if self.valgrind == 0:
|
||||||
|
toBeKilled = "taosd"
|
||||||
|
else:
|
||||||
|
toBeKilled = "valgrind.bin"
|
||||||
|
|
||||||
|
if self.running != 0:
|
||||||
|
psCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}'" % toBeKilled
|
||||||
|
processID = subprocess.check_output(
|
||||||
|
psCmd, shell=True).decode("utf-8")
|
||||||
|
|
||||||
|
while(processID):
|
||||||
|
killCmd = "kill -INT %s > /dev/null 2>&1" % processID
|
||||||
|
os.system(killCmd)
|
||||||
|
time.sleep(1)
|
||||||
|
processID = subprocess.check_output(
|
||||||
|
psCmd, shell=True).decode("utf-8")
|
||||||
|
for port in range(6030, 6041):
|
||||||
|
fuserCmd = "fuser -k -n tcp %d" % port
|
||||||
|
os.system(fuserCmd)
|
||||||
|
if self.valgrind:
|
||||||
|
time.sleep(2)
|
||||||
|
|
||||||
|
self.running = 0
|
||||||
|
tdLog.debug("dnode:%d is stopped by kill -INT" % (self.index))
|
||||||
|
|
||||||
|
def forcestop(self):
|
||||||
|
if self.valgrind == 0:
|
||||||
|
toBeKilled = "taosd"
|
||||||
|
else:
|
||||||
|
toBeKilled = "valgrind.bin"
|
||||||
|
|
||||||
|
if self.running != 0:
|
||||||
|
psCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}'" % toBeKilled
|
||||||
|
processID = subprocess.check_output(
|
||||||
|
psCmd, shell=True).decode("utf-8")
|
||||||
|
|
||||||
|
while(processID):
|
||||||
|
killCmd = "kill -KILL %s > /dev/null 2>&1" % processID
|
||||||
|
os.system(killCmd)
|
||||||
|
time.sleep(1)
|
||||||
|
processID = subprocess.check_output(
|
||||||
|
psCmd, shell=True).decode("utf-8")
|
||||||
|
for port in range(6030, 6041):
|
||||||
|
fuserCmd = "fuser -k -n tcp %d" % port
|
||||||
|
os.system(fuserCmd)
|
||||||
|
if self.valgrind:
|
||||||
|
time.sleep(2)
|
||||||
|
|
||||||
|
self.running = 0
|
||||||
|
tdLog.debug("dnode:%d is stopped by kill -KILL" % (self.index))
|
||||||
|
|
||||||
|
def startIP(self):
|
||||||
|
cmd = "sudo ifconfig lo:%d 192.168.0.%d up" % (self.index, self.index)
|
||||||
|
if os.system(cmd) != 0:
|
||||||
|
tdLog.exit(cmd)
|
||||||
|
|
||||||
|
def stopIP(self):
|
||||||
|
cmd = "sudo ifconfig lo:%d 192.168.0.%d down" % (
|
||||||
|
self.index, self.index)
|
||||||
|
if os.system(cmd) != 0:
|
||||||
|
tdLog.exit(cmd)
|
||||||
|
|
||||||
|
def cfg(self, option, value):
|
||||||
|
cmd = "echo %s %s >> %s" % (option, value, self.cfgPath)
|
||||||
|
if os.system(cmd) != 0:
|
||||||
|
tdLog.exit(cmd)
|
||||||
|
|
||||||
|
def getDnodeRootDir(self, index):
|
||||||
|
dnodeRootDir = os.path.join(self.path,"sim","psim","dnode%d" % index)
|
||||||
|
return dnodeRootDir
|
||||||
|
|
||||||
|
def getDnodesRootDir(self):
|
||||||
|
dnodesRootDir = os.path.join(self.path,"sim","psim")
|
||||||
|
return dnodesRootDir
|
||||||
|
|
||||||
|
|
||||||
|
class TDDnodes:
    """Manager for a fixed pool of ten TDDnode instances plus one sim client.

    Public methods address dnodes by 1-based index; check() validates the
    index against the actual pool size.
    """

    def __init__(self):
        # pool of 10 dnodes addressed by 1-based index
        # (fix: replaces ten copy-pasted append(TDDnode(i)) calls)
        self.dnodes = [TDDnode(i) for i in range(1, 11)]
        self.simDeployed = False

    def _killAll(self, name):
        """TERM-kill every process matching *name* until ps reports none left."""
        psCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}'" % name
        processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
        while(processID):
            killCmd = "kill -TERM %s > /dev/null 2>&1" % processID
            os.system(killCmd)
            time.sleep(1)
            processID = subprocess.check_output(
                psCmd, shell=True).decode("utf-8")

    def init(self, path):
        """Kill leftover servers, resolve the sim root, and init every node."""
        self._killAll("taosd")
        self._killAll("valgrind.bin")

        binPath = os.path.dirname(os.path.realpath(__file__))
        binPath = binPath + "/../../../debug/"
        tdLog.debug("binPath %s" % (binPath))
        binPath = os.path.realpath(binPath)
        tdLog.debug("binPath real path %s" % (binPath))

        if path == "":
            # default: two levels above the debug build directory
            self.path = os.path.abspath(binPath + "../../")
        else:
            self.path = os.path.realpath(path)

        for dnode in self.dnodes:
            dnode.init(self.path)

        self.sim = TDSimClient()
        self.sim.init(self.path)

    def setTestCluster(self, value):
        self.testCluster = value

    def setValgrind(self, value):
        self.valgrind = value

    def deploy(self, index):
        self.sim.setTestCluster(self.testCluster)

        # the sim client is shared by all dnodes: deploy it only once
        if (self.simDeployed == False):
            self.sim.deploy()
            self.simDeployed = True

        self.check(index)
        self.dnodes[index - 1].setTestCluster(self.testCluster)
        self.dnodes[index - 1].setValgrind(self.valgrind)
        self.dnodes[index - 1].deploy()

    def cfg(self, index, option, value):
        self.check(index)
        self.dnodes[index - 1].cfg(option, value)

    def start(self, index):
        self.check(index)
        self.dnodes[index - 1].start()

    def stop(self, index):
        self.check(index)
        self.dnodes[index - 1].stop()

    def getDataSize(self, index):
        self.check(index)
        return self.dnodes[index - 1].getDataSize()

    def forcestop(self, index):
        self.check(index)
        self.dnodes[index - 1].forcestop()

    def startIP(self, index):
        self.check(index)

        if self.testCluster:
            self.dnodes[index - 1].startIP()

    def stopIP(self, index):
        self.check(index)

        if self.dnodes[index - 1].testCluster:
            self.dnodes[index - 1].stopIP()

    def check(self, index):
        """Abort unless *index* addresses an existing dnode.

        Fix: the upper bound is derived from the pool size instead of a
        hard-coded 10, so it stays correct if the pool ever changes.
        """
        if index < 1 or index > len(self.dnodes):
            tdLog.exit("index:%d should on a scale of [1, %d]" %
                       (index, len(self.dnodes)))

    def stopAll(self):
        """Stop every dnode, then clean up any stray taosd/valgrind processes."""
        tdLog.info("stop all dnodes")
        for dnode in self.dnodes:
            dnode.stop()

        psCmd = "ps -ef | grep -w taosd | grep 'root' | grep -v grep | awk '{print $2}'"
        processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
        if processID:
            # a root-owned taosd was started via systemd; stop it that way
            cmd = "sudo systemctl stop taosd"
            os.system(cmd)
        self._killAll("taosd")
        self._killAll("valgrind.bin")

    def getDnodesRootDir(self):
        dnodesRootDir = "%s/sim" % (self.path)
        return dnodesRootDir

    def getSimCfgPath(self):
        return self.sim.getCfgDir()

    def getSimLogPath(self):
        return self.sim.getLogDir()

    def addSimExtraCfg(self, option, value):
        self.sim.addExtraCfg(option, value)
||||||
|
|
||||||
|
# Shared module-level singleton; test cases import and drive this instance.
tdDnodes = TDDnodes()
|
|
@ -0,0 +1,500 @@
|
||||||
|
###################################################################
|
||||||
|
# Copyright (c) 2016 by TAOS Technologies, Inc.
|
||||||
|
# All rights reserved.
|
||||||
|
#
|
||||||
|
# This file is proprietary and confidential to TAOS Technologies.
|
||||||
|
# No part of this file may be reproduced, stored, transmitted,
|
||||||
|
# disclosed or used in any form or by any means other than as
|
||||||
|
# expressly provided by the written permission from Jianhui Tao
|
||||||
|
#
|
||||||
|
###################################################################
|
||||||
|
|
||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
|
||||||
|
import sys
|
||||||
|
import os
|
||||||
|
import os.path
|
||||||
|
import subprocess
|
||||||
|
from util.log import *
|
||||||
|
|
||||||
|
|
||||||
|
class TDSimClient:
    """Deploys and configures the simulator client under <path>/sim/psim."""

    def __init__(self):
        self.testCluster = False

        # default taos.cfg options written for the sim client
        self.cfgDict = {
            "numOfLogLines": "100000000",
            "numOfThreadsPerCore": "2.0",
            "locale": "en_US.UTF-8",
            "charset": "UTF-8",
            "asyncLog": "0",
            "anyIp": "0",
            "sdbDebugFlag": "135",
            "rpcDebugFlag": "135",
            "tmrDebugFlag": "131",
            "cDebugFlag": "135",
            "udebugFlag": "135",
            "jnidebugFlag": "135",
            "qdebugFlag": "135",
            "telemetryReporting": "0",
        }

    def init(self, path):
        """Reset this instance to defaults, then remember the sim root."""
        # re-running init fully resets state (including cfgDict)
        self.__init__()
        self.path = path

    def getLogDir(self):
        """Compute, cache, and return the psim log directory."""
        self.logDir = os.path.join(self.path, "sim", "psim", "log")
        return self.logDir

    def getCfgDir(self):
        """Compute, cache, and return the psim cfg directory."""
        self.cfgDir = os.path.join(self.path, "sim", "psim", "cfg")
        return self.cfgDir

    def setTestCluster(self, value):
        self.testCluster = value

    def addExtraCfg(self, option, value):
        """Add or override one option in the cfg dict before deploy()."""
        self.cfgDict.update({option: value})

    def _run(self, cmd):
        """Execute a shell step; every failure is fatal to the test run."""
        if os.system(cmd) != 0:
            tdLog.exit(cmd)

    def cfg(self, option, value):
        """Append one `option value` line to taos.cfg."""
        cmd = "echo %s %s >> %s" % (option, value, self.cfgPath)
        if os.system(cmd) != 0:
            tdLog.exit(cmd)

    def deploy(self):
        """Recreate the psim directory tree and write out taos.cfg."""
        self.logDir = os.path.join(self.path, "sim", "psim", "log")
        self.cfgDir = os.path.join(self.path, "sim", "psim", "cfg")
        self.cfgPath = os.path.join(self.path, "sim", "psim", "cfg", "taos.cfg")

        # same shell steps, same order, as individual statements before
        for step in ("rm -rf " + self.logDir,
                     "mkdir -p " + self.logDir,
                     "rm -rf " + self.cfgDir,
                     "mkdir -p " + self.cfgDir,
                     "touch " + self.cfgPath):
            self._run(step)

        if self.testCluster:
            self.cfg("masterIp", "192.168.0.1")
            self.cfg("secondIp", "192.168.0.2")
        self.cfg("logDir", self.logDir)

        for key, value in self.cfgDict.items():
            self.cfg(key, value)

        tdLog.debug("psim is deployed and configured by %s" % (self.cfgPath))
||||||
|
|
||||||
|
class TDDnode:
    """One simulated TDengine server (dnode) rooted at <path>/sim/dnode<index>."""

    # options written to taos.cfg for every dnode, in this order
    _COMMON_CFG = (
        ("numOfLogLines", "100000000"),
        ("mnodeEqualVnodeNum", "0"),
        ("walLevel", "2"),
        ("fsync", "1000"),
        ("statusInterval", "1"),
        ("numOfMnodes", "3"),
        ("numOfThreadsPerCore", "2.0"),
        ("monitor", "0"),
        ("maxVnodeConnections", "30000"),
        ("maxMgmtConnections", "30000"),
        ("maxMeterConnections", "30000"),
        ("maxShellConns", "30000"),
        ("locale", "en_US.UTF-8"),
        ("charset", "UTF-8"),
        ("asyncLog", "0"),
        ("anyIp", "0"),
        ("dDebugFlag", "135"),
        ("mDebugFlag", "135"),
        ("sdbDebugFlag", "135"),
        ("rpcDebugFlag", "135"),
        ("tmrDebugFlag", "131"),
        ("cDebugFlag", "135"),
        ("httpDebugFlag", "135"),
        ("monitorDebugFlag", "135"),
        ("udebugFlag", "135"),
        ("jnidebugFlag", "135"),
        ("qdebugFlag", "135"),
    )

    def __init__(self, index):
        self.index = index          # 1-based dnode id
        self.running = 0            # 1 while the server process is up
        self.deployed = 0           # 1 once deploy() has built the dir tree
        self.testCluster = False
        self.valgrind = 0           # non-zero: run taosd under valgrind

    def init(self, path):
        """Remember the sim root directory; no filesystem work happens here."""
        self.path = path

    def setTestCluster(self, value):
        self.testCluster = value

    def setValgrind(self, value):
        self.valgrind = value

    def getDataSize(self):
        """Return total bytes of regular files under dataDir (0 if not deployed)."""
        totalSize = 0
        if self.deployed == 1:
            for dirpath, dirnames, filenames in os.walk(self.dataDir):
                for f in filenames:
                    fp = os.path.join(dirpath, f)
                    # skip symlinks so targets are not counted twice
                    if not os.path.islink(fp):
                        totalSize = totalSize + os.path.getsize(fp)
        return totalSize

    def _exec(self, cmd):
        """Run a shell command; abort the whole test run if it fails."""
        if os.system(cmd) != 0:
            tdLog.exit(cmd)

    def deploy(self):
        """Recreate log/data/cfg directories from scratch and write taos.cfg."""
        base = os.path.join(self.path, "sim", "dnode%d" % self.index)
        self.logDir = os.path.join(base, "log")
        self.dataDir = os.path.join(base, "data")
        self.cfgDir = os.path.join(base, "cfg")
        self.cfgPath = os.path.join(base, "cfg", "taos.cfg")

        # wipe then recreate, preserving the original command order
        for d in (self.dataDir, self.logDir, self.cfgDir):
            self._exec("rm -rf " + d)
        for d in (self.dataDir, self.logDir, self.cfgDir):
            self._exec("mkdir -p " + d)
        self._exec("touch " + self.cfgPath)

        if self.testCluster:
            self.startIP()

        if self.testCluster:
            self.cfg("masterIp", "192.168.0.1")
            self.cfg("secondIp", "192.168.0.2")
            self.cfg("publicIp", "192.168.0.%d" % (self.index))
            self.cfg("internalIp", "192.168.0.%d" % (self.index))
            self.cfg("privateIp", "192.168.0.%d" % (self.index))
        self.cfg("dataDir", self.dataDir)
        self.cfg("logDir", self.logDir)
        for option, value in self._COMMON_CFG:
            self.cfg(option, value)

        self.deployed = 1
        tdLog.debug(
            "dnode:%d is deployed and configured by %s" %
            (self.index, self.cfgPath))

    def getBuildPath(self):
        """Locate the build directory that contains the taosd binary.

        Returns "" when taosd cannot be found anywhere under the project
        root.  (Fix: buildPath was previously unbound in that case, so the
        final `return buildPath` raised UnboundLocalError instead of letting
        start() report "taosd not found!".)
        """
        selfPath = os.path.dirname(os.path.realpath(__file__))

        if "community" in selfPath:
            projPath = selfPath[:selfPath.find("community")]
        else:
            projPath = selfPath[:selfPath.find("tests")]

        buildPath = ""  # fix: defined even when the walk finds nothing
        for root, dirs, files in os.walk(projPath):
            if "taosd" in files:
                rootRealPath = os.path.dirname(os.path.realpath(root))
                # ignore binaries staged under packaging/
                if "packaging" not in rootRealPath:
                    buildPath = root[:len(root) - len("/build/bin")]
                    break
        return buildPath

    def start(self):
        """Launch taosd (optionally under valgrind) and wait for it to come up."""
        buildPath = self.getBuildPath()

        if buildPath == "":
            tdLog.exit("taosd not found!")
        else:
            tdLog.info("taosd found in %s" % buildPath)

        binPath = buildPath + "/build/bin/taosd"

        if self.deployed == 0:
            tdLog.exit("dnode:%d is not deployed" % (self.index))

        if self.valgrind == 0:
            cmd = "nohup %s -c %s --random-file-fail-factor 0 > /dev/null 2>&1 & " % (
                binPath, self.cfgDir)
        else:
            valgrindCmdline = "valgrind --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all -v --workaround-gcc296-bugs=yes"

            cmd = "nohup %s %s -c %s 2>&1 & " % (
                valgrindCmdline, binPath, self.cfgDir)

        print(cmd)

        if os.system(cmd) != 0:
            tdLog.exit(cmd)
        self.running = 1
        tdLog.debug("dnode:%d is running with %s " % (self.index, cmd))

        tdLog.debug("wait 5 seconds for the dnode:%d to start." % (self.index))
        time.sleep(5)

    def _terminate(self, signame):
        """Kill the server process with `kill -<signame>` and free its TCP ports.

        Shared body of stop()/forcestop(), which previously duplicated it.
        """
        if self.valgrind == 0:
            toBeKilled = "taosd"
        else:
            toBeKilled = "valgrind.bin"

        if self.running != 0:
            psCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}'" % toBeKilled
            processID = subprocess.check_output(
                psCmd, shell=True).decode("utf-8")

            # loop until ps reports no matching process
            while(processID):
                killCmd = "kill -%s %s > /dev/null 2>&1" % (signame, processID)
                os.system(killCmd)
                time.sleep(1)
                processID = subprocess.check_output(
                    psCmd, shell=True).decode("utf-8")
            # make sure the service ports are released as well
            for port in range(6030, 6041):
                fuserCmd = "fuser -k -n tcp %d" % port
                os.system(fuserCmd)
            if self.valgrind:
                time.sleep(2)

            self.running = 0
            tdLog.debug("dnode:%d is stopped by kill -%s" % (self.index, signame))

    def stop(self):
        """Gracefully stop the dnode (SIGINT)."""
        self._terminate("INT")

    def forcestop(self):
        """Hard-kill the dnode (SIGKILL)."""
        self._terminate("KILL")

    def startIP(self):
        cmd = "sudo ifconfig lo:%d 192.168.0.%d up" % (self.index, self.index)
        if os.system(cmd) != 0:
            tdLog.exit(cmd)

    def stopIP(self):
        cmd = "sudo ifconfig lo:%d 192.168.0.%d down" % (
            self.index, self.index)
        if os.system(cmd) != 0:
            tdLog.exit(cmd)

    def cfg(self, option, value):
        """Append one `option value` line to this dnode's taos.cfg."""
        cmd = "echo %s %s >> %s" % (option, value, self.cfgPath)
        if os.system(cmd) != 0:
            tdLog.exit(cmd)

    def getDnodeRootDir(self, index):
        dnodeRootDir = os.path.join(self.path, "sim", "psim", "dnode%d" % index)
        return dnodeRootDir

    def getDnodesRootDir(self):
        dnodesRootDir = os.path.join(self.path, "sim", "psim")
        return dnodesRootDir
||||||
|
|
||||||
|
class TDDnodes:
    """Manager for a fixed pool of ten TDDnode instances plus one sim client.

    Public methods address dnodes by 1-based index; check() validates the
    index against the actual pool size.
    """

    def __init__(self):
        # pool of 10 dnodes addressed by 1-based index
        # (fix: replaces ten copy-pasted append(TDDnode(i)) calls)
        self.dnodes = [TDDnode(i) for i in range(1, 11)]
        self.simDeployed = False

    def _killAll(self, name):
        """TERM-kill every process matching *name* until ps reports none left."""
        psCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}'" % name
        processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
        while(processID):
            killCmd = "kill -TERM %s > /dev/null 2>&1" % processID
            os.system(killCmd)
            time.sleep(1)
            processID = subprocess.check_output(
                psCmd, shell=True).decode("utf-8")

    def init(self, path):
        """Kill leftover servers, resolve the sim root, and init every node."""
        self._killAll("taosd")
        self._killAll("valgrind.bin")

        binPath = os.path.dirname(os.path.realpath(__file__))
        binPath = binPath + "/../../../debug/"
        tdLog.debug("binPath %s" % (binPath))
        binPath = os.path.realpath(binPath)
        tdLog.debug("binPath real path %s" % (binPath))

        if path == "":
            # default: two levels above the debug build directory
            self.path = os.path.abspath(binPath + "../../")
        else:
            self.path = os.path.realpath(path)

        for dnode in self.dnodes:
            dnode.init(self.path)

        self.sim = TDSimClient()
        self.sim.init(self.path)

    def setTestCluster(self, value):
        self.testCluster = value

    def setValgrind(self, value):
        self.valgrind = value

    def deploy(self, index):
        self.sim.setTestCluster(self.testCluster)

        # the sim client is shared by all dnodes: deploy it only once
        if (self.simDeployed == False):
            self.sim.deploy()
            self.simDeployed = True

        self.check(index)
        self.dnodes[index - 1].setTestCluster(self.testCluster)
        self.dnodes[index - 1].setValgrind(self.valgrind)
        self.dnodes[index - 1].deploy()

    def cfg(self, index, option, value):
        self.check(index)
        self.dnodes[index - 1].cfg(option, value)

    def start(self, index):
        self.check(index)
        self.dnodes[index - 1].start()

    def stop(self, index):
        self.check(index)
        self.dnodes[index - 1].stop()

    def getDataSize(self, index):
        self.check(index)
        return self.dnodes[index - 1].getDataSize()

    def forcestop(self, index):
        self.check(index)
        self.dnodes[index - 1].forcestop()

    def startIP(self, index):
        self.check(index)

        if self.testCluster:
            self.dnodes[index - 1].startIP()

    def stopIP(self, index):
        self.check(index)

        if self.dnodes[index - 1].testCluster:
            self.dnodes[index - 1].stopIP()

    def check(self, index):
        """Abort unless *index* addresses an existing dnode.

        Fix: the upper bound is derived from the pool size instead of a
        hard-coded 10, so it stays correct if the pool ever changes.
        """
        if index < 1 or index > len(self.dnodes):
            tdLog.exit("index:%d should on a scale of [1, %d]" %
                       (index, len(self.dnodes)))

    def stopAll(self):
        """Stop every dnode, then clean up any stray taosd/valgrind processes."""
        tdLog.info("stop all dnodes")
        for dnode in self.dnodes:
            dnode.stop()

        psCmd = "ps -ef | grep -w taosd | grep 'root' | grep -v grep | awk '{print $2}'"
        processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
        if processID:
            # a root-owned taosd was started via systemd; stop it that way
            cmd = "sudo systemctl stop taosd"
            os.system(cmd)
        self._killAll("taosd")
        self._killAll("valgrind.bin")

    def getDnodesRootDir(self):
        dnodesRootDir = "%s/sim" % (self.path)
        return dnodesRootDir

    def getSimCfgPath(self):
        return self.sim.getCfgDir()

    def getSimLogPath(self):
        return self.sim.getLogDir()

    def addSimExtraCfg(self, option, value):
        self.sim.addExtraCfg(option, value)
||||||
|
|
||||||
|
# Shared module-level singleton; test cases import and drive this instance.
tdDnodes = TDDnodes()
|
|
@ -0,0 +1,497 @@
|
||||||
|
###################################################################
|
||||||
|
# Copyright (c) 2016 by TAOS Technologies, Inc.
|
||||||
|
# All rights reserved.
|
||||||
|
#
|
||||||
|
# This file is proprietary and confidential to TAOS Technologies.
|
||||||
|
# No part of this file may be reproduced, stored, transmitted,
|
||||||
|
# disclosed or used in any form or by any means other than as
|
||||||
|
# expressly provided by the written permission from Jianhui Tao
|
||||||
|
#
|
||||||
|
###################################################################
|
||||||
|
|
||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
|
||||||
|
import sys
|
||||||
|
import os
|
||||||
|
import os.path
|
||||||
|
import subprocess
|
||||||
|
from util.log import *
|
||||||
|
|
||||||
|
|
||||||
|
class TDSimClient:
|
||||||
|
def __init__(self):
|
||||||
|
self.testCluster = False
|
||||||
|
|
||||||
|
self.cfgDict = {
|
||||||
|
"numOfLogLines": "100000000",
|
||||||
|
"locale": "en_US.UTF-8",
|
||||||
|
"charset": "UTF-8",
|
||||||
|
"asyncLog": "0",
|
||||||
|
"rpcDebugFlag": "135",
|
||||||
|
"tmrDebugFlag": "131",
|
||||||
|
"cDebugFlag": "135",
|
||||||
|
"udebugFlag": "135",
|
||||||
|
"jnidebugFlag": "135",
|
||||||
|
"qdebugFlag": "135",
|
||||||
|
"telemetryReporting": "0",
|
||||||
|
}
|
||||||
|
|
||||||
|
def init(self, path):
|
||||||
|
self.__init__()
|
||||||
|
self.path = path
|
||||||
|
|
||||||
|
def getLogDir(self):
|
||||||
|
self.logDir = os.path.join(self.path,"sim","psim","log")
|
||||||
|
return self.logDir
|
||||||
|
|
||||||
|
def getCfgDir(self):
|
||||||
|
self.cfgDir = os.path.join(self.path,"sim","psim","cfg")
|
||||||
|
return self.cfgDir
|
||||||
|
|
||||||
|
def setTestCluster(self, value):
|
||||||
|
self.testCluster = value
|
||||||
|
|
||||||
|
def addExtraCfg(self, option, value):
|
||||||
|
self.cfgDict.update({option: value})
|
||||||
|
|
||||||
|
def cfg(self, option, value):
|
||||||
|
cmd = "echo %s %s >> %s" % (option, value, self.cfgPath)
|
||||||
|
if os.system(cmd) != 0:
|
||||||
|
tdLog.exit(cmd)
|
||||||
|
|
||||||
|
def deploy(self):
|
||||||
|
self.logDir = os.path.join(self.path,"sim","psim","log")
|
||||||
|
self.cfgDir = os.path.join(self.path,"sim","psim","cfg")
|
||||||
|
self.cfgPath = os.path.join(self.path,"sim","psim","cfg","taos.cfg")
|
||||||
|
|
||||||
|
cmd = "rm -rf " + self.logDir
|
||||||
|
if os.system(cmd) != 0:
|
||||||
|
tdLog.exit(cmd)
|
||||||
|
|
||||||
|
cmd = "mkdir -p " + self.logDir
|
||||||
|
if os.system(cmd) != 0:
|
||||||
|
tdLog.exit(cmd)
|
||||||
|
|
||||||
|
cmd = "rm -rf " + self.cfgDir
|
||||||
|
if os.system(cmd) != 0:
|
||||||
|
tdLog.exit(cmd)
|
||||||
|
|
||||||
|
cmd = "mkdir -p " + self.cfgDir
|
||||||
|
if os.system(cmd) != 0:
|
||||||
|
tdLog.exit(cmd)
|
||||||
|
|
||||||
|
cmd = "touch " + self.cfgPath
|
||||||
|
if os.system(cmd) != 0:
|
||||||
|
tdLog.exit(cmd)
|
||||||
|
|
||||||
|
if self.testCluster:
|
||||||
|
self.cfg("masterIp", "192.168.0.1")
|
||||||
|
self.cfg("secondIp", "192.168.0.2")
|
||||||
|
self.cfg("logDir", self.logDir)
|
||||||
|
|
||||||
|
for key, value in self.cfgDict.items():
|
||||||
|
self.cfg(key, value)
|
||||||
|
|
||||||
|
tdLog.debug("psim is deployed and configured by %s" % (self.cfgPath))
|
||||||
|
|
||||||
|
|
||||||
|
class TDDnode:
|
||||||
|
def __init__(self, index):
|
||||||
|
self.index = index
|
||||||
|
self.running = 0
|
||||||
|
self.deployed = 0
|
||||||
|
self.testCluster = False
|
||||||
|
self.valgrind = 0
|
||||||
|
|
||||||
|
def init(self, path):
|
||||||
|
self.path = path
|
||||||
|
|
||||||
|
def setTestCluster(self, value):
|
||||||
|
self.testCluster = value
|
||||||
|
|
||||||
|
def setValgrind(self, value):
|
||||||
|
self.valgrind = value
|
||||||
|
|
||||||
|
def getDataSize(self):
|
||||||
|
totalSize = 0
|
||||||
|
|
||||||
|
if (self.deployed == 1):
|
||||||
|
for dirpath, dirnames, filenames in os.walk(self.dataDir):
|
||||||
|
for f in filenames:
|
||||||
|
fp = os.path.join(dirpath, f)
|
||||||
|
|
||||||
|
if not os.path.islink(fp):
|
||||||
|
totalSize = totalSize + os.path.getsize(fp)
|
||||||
|
|
||||||
|
return totalSize
|
||||||
|
|
||||||
|
def deploy(self):
|
||||||
|
self.logDir = os.path.join(self.path,"sim","dnode%d" % self.index, "log")
|
||||||
|
self.dataDir = os.path.join(self.path,"sim","dnode%d" % self.index, "data")
|
||||||
|
self.cfgDir = os.path.join(self.path,"sim","dnode%d" % self.index, "cfg")
|
||||||
|
self.cfgPath = os.path.join(self.path,"sim","dnode%d" % self.index, "cfg","taos.cfg")
|
||||||
|
|
||||||
|
cmd = "rm -rf " + self.dataDir
|
||||||
|
if os.system(cmd) != 0:
|
||||||
|
tdLog.exit(cmd)
|
||||||
|
|
||||||
|
cmd = "rm -rf " + self.logDir
|
||||||
|
if os.system(cmd) != 0:
|
||||||
|
tdLog.exit(cmd)
|
||||||
|
|
||||||
|
cmd = "rm -rf " + self.cfgDir
|
||||||
|
if os.system(cmd) != 0:
|
||||||
|
tdLog.exit(cmd)
|
||||||
|
|
||||||
|
cmd = "mkdir -p " + self.dataDir
|
||||||
|
if os.system(cmd) != 0:
|
||||||
|
tdLog.exit(cmd)
|
||||||
|
|
||||||
|
cmd = "mkdir -p " + self.logDir
|
||||||
|
if os.system(cmd) != 0:
|
||||||
|
tdLog.exit(cmd)
|
||||||
|
|
||||||
|
cmd = "mkdir -p " + self.cfgDir
|
||||||
|
if os.system(cmd) != 0:
|
||||||
|
tdLog.exit(cmd)
|
||||||
|
|
||||||
|
cmd = "touch " + self.cfgPath
|
||||||
|
if os.system(cmd) != 0:
|
||||||
|
tdLog.exit(cmd)
|
||||||
|
|
||||||
|
if self.testCluster:
|
||||||
|
self.startIP()
|
||||||
|
|
||||||
|
if self.testCluster:
|
||||||
|
self.cfg("masterIp", "192.168.0.1")
|
||||||
|
self.cfg("secondIp", "192.168.0.2")
|
||||||
|
self.cfg("publicIp", "192.168.0.%d" % (self.index))
|
||||||
|
self.cfg("internalIp", "192.168.0.%d" % (self.index))
|
||||||
|
self.cfg("privateIp", "192.168.0.%d" % (self.index))
|
||||||
|
self.cfg("dataDir", self.dataDir)
|
||||||
|
self.cfg("logDir", self.logDir)
|
||||||
|
self.cfg("numOfLogLines", "100000000")
|
||||||
|
self.cfg("mnodeEqualVnodeNum", "0")
|
||||||
|
self.cfg("walLevel", "2")
|
||||||
|
self.cfg("fsync", "1000")
|
||||||
|
self.cfg("statusInterval", "1")
|
||||||
|
self.cfg("numOfMnodes", "3")
|
||||||
|
self.cfg("numOfThreadsPerCore", "2.0")
|
||||||
|
self.cfg("monitor", "0")
|
||||||
|
self.cfg("maxVnodeConnections", "30000")
|
||||||
|
self.cfg("maxMgmtConnections", "30000")
|
||||||
|
self.cfg("maxMeterConnections", "30000")
|
||||||
|
self.cfg("maxShellConns", "30000")
|
||||||
|
self.cfg("locale", "en_US.UTF-8")
|
||||||
|
self.cfg("charset", "UTF-8")
|
||||||
|
self.cfg("asyncLog", "0")
|
||||||
|
self.cfg("anyIp", "0")
|
||||||
|
self.cfg("dDebugFlag", "135")
|
||||||
|
self.cfg("mDebugFlag", "135")
|
||||||
|
self.cfg("sdbDebugFlag", "135")
|
||||||
|
self.cfg("rpcDebugFlag", "135")
|
||||||
|
self.cfg("tmrDebugFlag", "131")
|
||||||
|
self.cfg("cDebugFlag", "135")
|
||||||
|
self.cfg("httpDebugFlag", "135")
|
||||||
|
self.cfg("monitorDebugFlag", "135")
|
||||||
|
self.cfg("udebugFlag", "135")
|
||||||
|
self.cfg("jnidebugFlag", "135")
|
||||||
|
self.cfg("qdebugFlag", "135")
|
||||||
|
self.deployed = 1
|
||||||
|
tdLog.debug(
|
||||||
|
"dnode:%d is deployed and configured by %s" %
|
||||||
|
(self.index, self.cfgPath))
|
||||||
|
|
||||||
|
def getBuildPath(self):
|
||||||
|
selfPath = os.path.dirname(os.path.realpath(__file__))
|
||||||
|
|
||||||
|
if ("community" in selfPath):
|
||||||
|
projPath = selfPath[:selfPath.find("community")]
|
||||||
|
else:
|
||||||
|
projPath = selfPath[:selfPath.find("tests")]
|
||||||
|
|
||||||
|
for root, dirs, files in os.walk(projPath):
|
||||||
|
if ("taosd" in files):
|
||||||
|
rootRealPath = os.path.dirname(os.path.realpath(root))
|
||||||
|
if ("packaging" not in rootRealPath):
|
||||||
|
buildPath = root[:len(root)-len("/build/bin")]
|
||||||
|
break
|
||||||
|
return buildPath
|
||||||
|
|
||||||
|
def start(self):
|
||||||
|
buildPath = self.getBuildPath()
|
||||||
|
|
||||||
|
if (buildPath == ""):
|
||||||
|
tdLog.exit("taosd not found!")
|
||||||
|
else:
|
||||||
|
tdLog.info("taosd found in %s" % buildPath)
|
||||||
|
|
||||||
|
binPath = buildPath + "/build/bin/taosd"
|
||||||
|
|
||||||
|
if self.deployed == 0:
|
||||||
|
tdLog.exit("dnode:%d is not deployed" % (self.index))
|
||||||
|
|
||||||
|
if self.valgrind == 0:
|
||||||
|
cmd = "nohup %s -c %s --alloc-random-fail --random-file-fail-factor 5 > /dev/null 2>&1 & " % (
|
||||||
|
binPath, self.cfgDir)
|
||||||
|
else:
|
||||||
|
valgrindCmdline = "valgrind --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all -v --workaround-gcc296-bugs=yes"
|
||||||
|
|
||||||
|
cmd = "nohup %s %s -c %s 2>&1 & " % (
|
||||||
|
valgrindCmdline, binPath, self.cfgDir)
|
||||||
|
|
||||||
|
print(cmd)
|
||||||
|
|
||||||
|
if os.system(cmd) != 0:
|
||||||
|
tdLog.exit(cmd)
|
||||||
|
self.running = 1
|
||||||
|
tdLog.debug("dnode:%d is running with %s " % (self.index, cmd))
|
||||||
|
|
||||||
|
tdLog.debug("wait 5 seconds for the dnode:%d to start." % (self.index))
|
||||||
|
time.sleep(5)
|
||||||
|
|
||||||
|
def stop(self):
|
||||||
|
if self.valgrind == 0:
|
||||||
|
toBeKilled = "taosd"
|
||||||
|
else:
|
||||||
|
toBeKilled = "valgrind.bin"
|
||||||
|
|
||||||
|
if self.running != 0:
|
||||||
|
psCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}'" % toBeKilled
|
||||||
|
processID = subprocess.check_output(
|
||||||
|
psCmd, shell=True).decode("utf-8")
|
||||||
|
|
||||||
|
while(processID):
|
||||||
|
killCmd = "kill -INT %s > /dev/null 2>&1" % processID
|
||||||
|
os.system(killCmd)
|
||||||
|
time.sleep(1)
|
||||||
|
processID = subprocess.check_output(
|
||||||
|
psCmd, shell=True).decode("utf-8")
|
||||||
|
for port in range(6030, 6041):
|
||||||
|
fuserCmd = "fuser -k -n tcp %d" % port
|
||||||
|
os.system(fuserCmd)
|
||||||
|
if self.valgrind:
|
||||||
|
time.sleep(2)
|
||||||
|
|
||||||
|
self.running = 0
|
||||||
|
tdLog.debug("dnode:%d is stopped by kill -INT" % (self.index))
|
||||||
|
|
||||||
|
def forcestop(self):
|
||||||
|
if self.valgrind == 0:
|
||||||
|
toBeKilled = "taosd"
|
||||||
|
else:
|
||||||
|
toBeKilled = "valgrind.bin"
|
||||||
|
|
||||||
|
if self.running != 0:
|
||||||
|
psCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}'" % toBeKilled
|
||||||
|
processID = subprocess.check_output(
|
||||||
|
psCmd, shell=True).decode("utf-8")
|
||||||
|
|
||||||
|
while(processID):
|
||||||
|
killCmd = "kill -KILL %s > /dev/null 2>&1" % processID
|
||||||
|
os.system(killCmd)
|
||||||
|
time.sleep(1)
|
||||||
|
processID = subprocess.check_output(
|
||||||
|
psCmd, shell=True).decode("utf-8")
|
||||||
|
for port in range(6030, 6041):
|
||||||
|
fuserCmd = "fuser -k -n tcp %d" % port
|
||||||
|
os.system(fuserCmd)
|
||||||
|
if self.valgrind:
|
||||||
|
time.sleep(2)
|
||||||
|
|
||||||
|
self.running = 0
|
||||||
|
tdLog.debug("dnode:%d is stopped by kill -KILL" % (self.index))
|
||||||
|
|
||||||
|
def startIP(self):
|
||||||
|
cmd = "sudo ifconfig lo:%d 192.168.0.%d up" % (self.index, self.index)
|
||||||
|
if os.system(cmd) != 0:
|
||||||
|
tdLog.exit(cmd)
|
||||||
|
|
||||||
|
def stopIP(self):
|
||||||
|
cmd = "sudo ifconfig lo:%d 192.168.0.%d down" % (
|
||||||
|
self.index, self.index)
|
||||||
|
if os.system(cmd) != 0:
|
||||||
|
tdLog.exit(cmd)
|
||||||
|
|
||||||
|
def cfg(self, option, value):
|
||||||
|
cmd = "echo %s %s >> %s" % (option, value, self.cfgPath)
|
||||||
|
if os.system(cmd) != 0:
|
||||||
|
tdLog.exit(cmd)
|
||||||
|
|
||||||
|
def getDnodeRootDir(self, index):
|
||||||
|
dnodeRootDir = os.path.join(self.path,"sim","psim","dnode%d" % index)
|
||||||
|
return dnodeRootDir
|
||||||
|
|
||||||
|
def getDnodesRootDir(self):
|
||||||
|
dnodesRootDir = os.path.join(self.path,"sim","psim")
|
||||||
|
return dnodesRootDir
|
||||||
|
|
||||||
|
|
||||||
|
class TDDnodes:
|
||||||
|
def __init__(self):
|
||||||
|
self.dnodes = []
|
||||||
|
self.dnodes.append(TDDnode(1))
|
||||||
|
self.dnodes.append(TDDnode(2))
|
||||||
|
self.dnodes.append(TDDnode(3))
|
||||||
|
self.dnodes.append(TDDnode(4))
|
||||||
|
self.dnodes.append(TDDnode(5))
|
||||||
|
self.dnodes.append(TDDnode(6))
|
||||||
|
self.dnodes.append(TDDnode(7))
|
||||||
|
self.dnodes.append(TDDnode(8))
|
||||||
|
self.dnodes.append(TDDnode(9))
|
||||||
|
self.dnodes.append(TDDnode(10))
|
||||||
|
self.simDeployed = False
|
||||||
|
|
||||||
|
def init(self, path):
|
||||||
|
psCmd = "ps -ef|grep -w taosd| grep -v grep | awk '{print $2}'"
|
||||||
|
processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
|
||||||
|
while(processID):
|
||||||
|
killCmd = "kill -TERM %s > /dev/null 2>&1" % processID
|
||||||
|
os.system(killCmd)
|
||||||
|
time.sleep(1)
|
||||||
|
processID = subprocess.check_output(
|
||||||
|
psCmd, shell=True).decode("utf-8")
|
||||||
|
|
||||||
|
psCmd = "ps -ef|grep -w valgrind.bin| grep -v grep | awk '{print $2}'"
|
||||||
|
processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
|
||||||
|
while(processID):
|
||||||
|
killCmd = "kill -TERM %s > /dev/null 2>&1" % processID
|
||||||
|
os.system(killCmd)
|
||||||
|
time.sleep(1)
|
||||||
|
processID = subprocess.check_output(
|
||||||
|
psCmd, shell=True).decode("utf-8")
|
||||||
|
|
||||||
|
binPath = os.path.dirname(os.path.realpath(__file__))
|
||||||
|
binPath = binPath + "/../../../debug/"
|
||||||
|
tdLog.debug("binPath %s" % (binPath))
|
||||||
|
binPath = os.path.realpath(binPath)
|
||||||
|
tdLog.debug("binPath real path %s" % (binPath))
|
||||||
|
|
||||||
|
# cmd = "sudo cp %s/build/lib/libtaos.so /usr/local/lib/taos/" % (binPath)
|
||||||
|
# tdLog.debug(cmd)
|
||||||
|
# os.system(cmd)
|
||||||
|
|
||||||
|
# cmd = "sudo cp %s/build/bin/taos /usr/local/bin/taos/" % (binPath)
|
||||||
|
# if os.system(cmd) != 0 :
|
||||||
|
# tdLog.exit(cmd)
|
||||||
|
# tdLog.debug("execute %s" % (cmd))
|
||||||
|
|
||||||
|
# cmd = "sudo cp %s/build/bin/taosd /usr/local/bin/taos/" % (binPath)
|
||||||
|
# if os.system(cmd) != 0 :
|
||||||
|
# tdLog.exit(cmd)
|
||||||
|
# tdLog.debug("execute %s" % (cmd))
|
||||||
|
|
||||||
|
if path == "":
|
||||||
|
# self.path = os.path.expanduser('~')
|
||||||
|
self.path = os.path.abspath(binPath + "../../")
|
||||||
|
else:
|
||||||
|
self.path = os.path.realpath(path)
|
||||||
|
|
||||||
|
for i in range(len(self.dnodes)):
|
||||||
|
self.dnodes[i].init(self.path)
|
||||||
|
|
||||||
|
self.sim = TDSimClient()
|
||||||
|
self.sim.init(self.path)
|
||||||
|
|
||||||
|
def setTestCluster(self, value):
|
||||||
|
self.testCluster = value
|
||||||
|
|
||||||
|
def setValgrind(self, value):
|
||||||
|
self.valgrind = value
|
||||||
|
|
||||||
|
def deploy(self, index):
|
||||||
|
self.sim.setTestCluster(self.testCluster)
|
||||||
|
|
||||||
|
if (self.simDeployed == False):
|
||||||
|
self.sim.deploy()
|
||||||
|
self.simDeployed = True
|
||||||
|
|
||||||
|
self.check(index)
|
||||||
|
self.dnodes[index - 1].setTestCluster(self.testCluster)
|
||||||
|
self.dnodes[index - 1].setValgrind(self.valgrind)
|
||||||
|
self.dnodes[index - 1].deploy()
|
||||||
|
|
||||||
|
def cfg(self, index, option, value):
|
||||||
|
self.check(index)
|
||||||
|
self.dnodes[index - 1].cfg(option, value)
|
||||||
|
|
||||||
|
def start(self, index):
|
||||||
|
self.check(index)
|
||||||
|
self.dnodes[index - 1].start()
|
||||||
|
|
||||||
|
def stop(self, index):
|
||||||
|
self.check(index)
|
||||||
|
self.dnodes[index - 1].stop()
|
||||||
|
|
||||||
|
def getDataSize(self, index):
|
||||||
|
self.check(index)
|
||||||
|
return self.dnodes[index - 1].getDataSize()
|
||||||
|
|
||||||
|
def forcestop(self, index):
|
||||||
|
self.check(index)
|
||||||
|
self.dnodes[index - 1].forcestop()
|
||||||
|
|
||||||
|
def startIP(self, index):
|
||||||
|
self.check(index)
|
||||||
|
|
||||||
|
if self.testCluster:
|
||||||
|
self.dnodes[index - 1].startIP()
|
||||||
|
|
||||||
|
def stopIP(self, index):
|
||||||
|
self.check(index)
|
||||||
|
|
||||||
|
if self.dnodes[index - 1].testCluster:
|
||||||
|
self.dnodes[index - 1].stopIP()
|
||||||
|
|
||||||
|
def check(self, index):
|
||||||
|
if index < 1 or index > 10:
|
||||||
|
tdLog.exit("index:%d should on a scale of [1, 10]" % (index))
|
||||||
|
|
||||||
|
def stopAll(self):
|
||||||
|
tdLog.info("stop all dnodes")
|
||||||
|
for i in range(len(self.dnodes)):
|
||||||
|
self.dnodes[i].stop()
|
||||||
|
|
||||||
|
psCmd = "ps -ef | grep -w taosd | grep 'root' | grep -v grep | awk '{print $2}'"
|
||||||
|
processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
|
||||||
|
if processID:
|
||||||
|
cmd = "sudo systemctl stop taosd"
|
||||||
|
os.system(cmd)
|
||||||
|
# if os.system(cmd) != 0 :
|
||||||
|
# tdLog.exit(cmd)
|
||||||
|
psCmd = "ps -ef|grep -w taosd| grep -v grep | awk '{print $2}'"
|
||||||
|
processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
|
||||||
|
while(processID):
|
||||||
|
killCmd = "kill -TERM %s > /dev/null 2>&1" % processID
|
||||||
|
os.system(killCmd)
|
||||||
|
time.sleep(1)
|
||||||
|
processID = subprocess.check_output(
|
||||||
|
psCmd, shell=True).decode("utf-8")
|
||||||
|
|
||||||
|
psCmd = "ps -ef|grep -w valgrind.bin| grep -v grep | awk '{print $2}'"
|
||||||
|
processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
|
||||||
|
while(processID):
|
||||||
|
killCmd = "kill -TERM %s > /dev/null 2>&1" % processID
|
||||||
|
os.system(killCmd)
|
||||||
|
time.sleep(1)
|
||||||
|
processID = subprocess.check_output(
|
||||||
|
psCmd, shell=True).decode("utf-8")
|
||||||
|
|
||||||
|
# if os.system(cmd) != 0 :
|
||||||
|
# tdLog.exit(cmd)
|
||||||
|
|
||||||
|
def getDnodesRootDir(self):
|
||||||
|
dnodesRootDir = "%s/sim" % (self.path)
|
||||||
|
return dnodesRootDir
|
||||||
|
|
||||||
|
def getSimCfgPath(self):
|
||||||
|
return self.sim.getCfgDir()
|
||||||
|
|
||||||
|
def getSimLogPath(self):
|
||||||
|
return self.sim.getLogDir()
|
||||||
|
|
||||||
|
def addSimExtraCfg(self, option, value):
|
||||||
|
self.sim.addExtraCfg(option, value)
|
||||||
|
|
||||||
|
|
||||||
|
tdDnodes = TDDnodes()
|
|
@ -0,0 +1,890 @@
|
||||||
|
###################################################################
|
||||||
|
# Copyright (c) 2016 by TAOS Technologies, Inc.
|
||||||
|
# All rights reserved.
|
||||||
|
#
|
||||||
|
# This file is proprietary and confidential to TAOS Technologies.
|
||||||
|
# No part of this file may be reproduced, stored, transmitted,
|
||||||
|
# disclosed or used in any form or by any means other than as
|
||||||
|
# expressly provided by the written permission from Jianhui Tao
|
||||||
|
#
|
||||||
|
###################################################################
|
||||||
|
|
||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
|
||||||
|
import sys
|
||||||
|
import os
|
||||||
|
import os.path
|
||||||
|
import platform
|
||||||
|
import distro
|
||||||
|
import subprocess
|
||||||
|
from time import sleep
|
||||||
|
import base64
|
||||||
|
import json
|
||||||
|
import copy
|
||||||
|
from fabric2 import Connection
|
||||||
|
from util.log import *
|
||||||
|
from shutil import which
|
||||||
|
|
||||||
|
|
||||||
|
class TDSimClient:
|
||||||
|
def __init__(self, path):
|
||||||
|
self.testCluster = False
|
||||||
|
self.path = path
|
||||||
|
self.cfgDict = {
|
||||||
|
"fqdn": "localhost",
|
||||||
|
"numOfLogLines": "100000000",
|
||||||
|
"locale": "en_US.UTF-8",
|
||||||
|
"charset": "UTF-8",
|
||||||
|
"asyncLog": "0",
|
||||||
|
"rpcDebugFlag": "135",
|
||||||
|
"tmrDebugFlag": "131",
|
||||||
|
"cDebugFlag": "135",
|
||||||
|
"uDebugFlag": "135",
|
||||||
|
"jniDebugFlag": "135",
|
||||||
|
"qDebugFlag": "135",
|
||||||
|
"supportVnodes": "1024",
|
||||||
|
"enableQueryHb": "1",
|
||||||
|
"telemetryReporting": "0",
|
||||||
|
"tqDebugflag": "135",
|
||||||
|
"wDebugflag":"135",
|
||||||
|
}
|
||||||
|
|
||||||
|
def getLogDir(self):
|
||||||
|
self.logDir = os.path.join(self.path,"sim","psim","log")
|
||||||
|
return self.logDir
|
||||||
|
|
||||||
|
def getCfgDir(self):
|
||||||
|
self.cfgDir = os.path.join(self.path,"sim","psim","cfg")
|
||||||
|
return self.cfgDir
|
||||||
|
|
||||||
|
def setTestCluster(self, value):
|
||||||
|
self.testCluster = value
|
||||||
|
|
||||||
|
def addExtraCfg(self, option, value):
|
||||||
|
self.cfgDict.update({option: value})
|
||||||
|
|
||||||
|
def cfg(self, option, value):
|
||||||
|
cmd = "echo %s %s >> %s" % (option, value, self.cfgPath)
|
||||||
|
if os.system(cmd) != 0:
|
||||||
|
tdLog.exit(cmd)
|
||||||
|
|
||||||
|
def deploy(self, *updatecfgDict):
|
||||||
|
self.logDir = os.path.join(self.path,"sim","psim","log")
|
||||||
|
self.cfgDir = os.path.join(self.path,"sim","psim","cfg")
|
||||||
|
self.cfgPath = os.path.join(self.path,"sim","psim","cfg","taos.cfg")
|
||||||
|
|
||||||
|
cmd = "rm -rf " + self.logDir
|
||||||
|
if os.system(cmd) != 0:
|
||||||
|
tdLog.exit(cmd)
|
||||||
|
|
||||||
|
# cmd = "mkdir -p " + self.logDir
|
||||||
|
# if os.system(cmd) != 0:
|
||||||
|
# tdLog.exit(cmd)
|
||||||
|
os.makedirs(self.logDir)
|
||||||
|
|
||||||
|
cmd = "rm -rf " + self.cfgDir
|
||||||
|
if os.system(cmd) != 0:
|
||||||
|
tdLog.exit(cmd)
|
||||||
|
|
||||||
|
# cmd = "mkdir -p " + self.cfgDir
|
||||||
|
# if os.system(cmd) != 0:
|
||||||
|
# tdLog.exit(cmd)
|
||||||
|
os.makedirs(self.cfgDir)
|
||||||
|
|
||||||
|
cmd = "touch " + self.cfgPath
|
||||||
|
if os.system(cmd) != 0:
|
||||||
|
tdLog.exit(cmd)
|
||||||
|
|
||||||
|
if self.testCluster:
|
||||||
|
self.cfg("masterIp", "192.168.0.1")
|
||||||
|
self.cfg("secondIp", "192.168.0.2")
|
||||||
|
self.cfg("logDir", self.logDir)
|
||||||
|
|
||||||
|
for key, value in self.cfgDict.items():
|
||||||
|
self.cfg(key, value)
|
||||||
|
|
||||||
|
try:
|
||||||
|
if bool(updatecfgDict) and updatecfgDict[0] and updatecfgDict[0][0]:
|
||||||
|
clientCfg = dict (updatecfgDict[0][0].get('clientCfg'))
|
||||||
|
for key, value in clientCfg.items():
|
||||||
|
self.cfg(key, value)
|
||||||
|
except Exception:
|
||||||
|
pass
|
||||||
|
|
||||||
|
tdLog.debug("psim is deployed and configured by %s" % (self.cfgPath))
|
||||||
|
|
||||||
|
|
||||||
|
class TDDnode:
|
||||||
|
def __init__(self, index):
|
||||||
|
self.index = index
|
||||||
|
self.running = 0
|
||||||
|
self.deployed = 0
|
||||||
|
self.testCluster = False
|
||||||
|
self.valgrind = 0
|
||||||
|
self.asan = False
|
||||||
|
self.remoteIP = ""
|
||||||
|
self.cfgDict = {
|
||||||
|
"fqdn": "localhost",
|
||||||
|
"monitor": "0",
|
||||||
|
"maxShellConns": "30000",
|
||||||
|
"locale": "en_US.UTF-8",
|
||||||
|
"charset": "UTF-8",
|
||||||
|
"asyncLog": "0",
|
||||||
|
"mDebugFlag": "143",
|
||||||
|
"dDebugFlag": "143",
|
||||||
|
"vDebugFlag": "143",
|
||||||
|
"tqDebugFlag": "143",
|
||||||
|
"cDebugFlag": "143",
|
||||||
|
"stDebugFlag": "143",
|
||||||
|
"smaDebugFlag": "143",
|
||||||
|
"jniDebugFlag": "143",
|
||||||
|
"qDebugFlag": "143",
|
||||||
|
"rpcDebugFlag": "143",
|
||||||
|
"tmrDebugFlag": "131",
|
||||||
|
"uDebugFlag": "135",
|
||||||
|
"sDebugFlag": "135",
|
||||||
|
"wDebugFlag": "135",
|
||||||
|
"numOfLogLines": "100000000",
|
||||||
|
"statusInterval": "1",
|
||||||
|
"enableQueryHb": "1",
|
||||||
|
"supportVnodes": "1024",
|
||||||
|
"telemetryReporting": "0"
|
||||||
|
}
|
||||||
|
|
||||||
|
def init(self, path, remoteIP = ""):
|
||||||
|
self.path = path
|
||||||
|
self.remoteIP = remoteIP
|
||||||
|
if (not self.remoteIP == ""):
|
||||||
|
try:
|
||||||
|
self.config = eval(self.remoteIP)
|
||||||
|
self.remote_conn = Connection(host=self.config["host"], port=self.config["port"], user=self.config["user"], connect_kwargs={'password':self.config["password"]})
|
||||||
|
except Exception as r:
|
||||||
|
print(r)
|
||||||
|
|
||||||
|
def setTestCluster(self, value):
|
||||||
|
self.testCluster = value
|
||||||
|
|
||||||
|
def setValgrind(self, value):
|
||||||
|
self.valgrind = value
|
||||||
|
|
||||||
|
def setAsan(self, value):
|
||||||
|
self.asan = value
|
||||||
|
if value:
|
||||||
|
selfPath = os.path.dirname(os.path.realpath(__file__))
|
||||||
|
if ("community" in selfPath):
|
||||||
|
self.execPath = os.path.abspath(self.path + "/community/tests/script/sh/exec.sh")
|
||||||
|
else:
|
||||||
|
self.execPath = os.path.abspath(self.path + "/tests/script/sh/exec.sh")
|
||||||
|
|
||||||
|
def getDataSize(self):
|
||||||
|
totalSize = 0
|
||||||
|
|
||||||
|
if (self.deployed == 1):
|
||||||
|
for dirpath, dirnames, filenames in os.walk(self.dataDir):
|
||||||
|
for f in filenames:
|
||||||
|
fp = os.path.join(dirpath, f)
|
||||||
|
|
||||||
|
if not os.path.islink(fp):
|
||||||
|
totalSize = totalSize + os.path.getsize(fp)
|
||||||
|
|
||||||
|
return totalSize
|
||||||
|
|
||||||
|
def addExtraCfg(self, option, value):
|
||||||
|
self.cfgDict.update({option: value})
|
||||||
|
|
||||||
|
def remoteExec(self, updateCfgDict, execCmd):
|
||||||
|
valgrindStr = ''
|
||||||
|
if (self.valgrind==1):
|
||||||
|
valgrindStr = '-g'
|
||||||
|
remoteCfgDict = copy.deepcopy(updateCfgDict)
|
||||||
|
if ("logDir" in remoteCfgDict):
|
||||||
|
del remoteCfgDict["logDir"]
|
||||||
|
if ("dataDir" in remoteCfgDict):
|
||||||
|
del remoteCfgDict["dataDir"]
|
||||||
|
if ("cfgDir" in remoteCfgDict):
|
||||||
|
del remoteCfgDict["cfgDir"]
|
||||||
|
remoteCfgDictStr = base64.b64encode(json.dumps(remoteCfgDict).encode()).decode()
|
||||||
|
execCmdStr = base64.b64encode(execCmd.encode()).decode()
|
||||||
|
with self.remote_conn.cd((self.config["path"]+sys.path[0].replace(self.path, '')).replace('\\','/')):
|
||||||
|
self.remote_conn.run("python3 ./test.py %s -d %s -e %s"%(valgrindStr,remoteCfgDictStr,execCmdStr))
|
||||||
|
|
||||||
|
def deploy(self, *updatecfgDict):
|
||||||
|
self.logDir = os.path.join(self.path,"sim","dnode%d" % self.index, "log")
|
||||||
|
self.dataDir = os.path.join(self.path,"sim","dnode%d" % self.index, "data")
|
||||||
|
self.cfgDir = os.path.join(self.path,"sim","dnode%d" % self.index, "cfg")
|
||||||
|
self.cfgPath = os.path.join(self.path,"sim","dnode%d" % self.index, "cfg","taos.cfg")
|
||||||
|
|
||||||
|
cmd = "rm -rf " + self.dataDir
|
||||||
|
if os.system(cmd) != 0:
|
||||||
|
tdLog.exit(cmd)
|
||||||
|
|
||||||
|
cmd = "rm -rf " + self.logDir
|
||||||
|
if os.system(cmd) != 0:
|
||||||
|
tdLog.exit(cmd)
|
||||||
|
|
||||||
|
cmd = "rm -rf " + self.cfgDir
|
||||||
|
if os.system(cmd) != 0:
|
||||||
|
tdLog.exit(cmd)
|
||||||
|
|
||||||
|
# cmd = "mkdir -p " + self.dataDir
|
||||||
|
# if os.system(cmd) != 0:
|
||||||
|
# tdLog.exit(cmd)
|
||||||
|
os.makedirs(self.dataDir)
|
||||||
|
|
||||||
|
# cmd = "mkdir -p " + self.logDir
|
||||||
|
# if os.system(cmd) != 0:
|
||||||
|
# tdLog.exit(cmd)
|
||||||
|
os.makedirs(self.logDir)
|
||||||
|
|
||||||
|
# cmd = "mkdir -p " + self.cfgDir
|
||||||
|
# if os.system(cmd) != 0:
|
||||||
|
# tdLog.exit(cmd)
|
||||||
|
os.makedirs(self.cfgDir)
|
||||||
|
|
||||||
|
cmd = "touch " + self.cfgPath
|
||||||
|
if os.system(cmd) != 0:
|
||||||
|
tdLog.exit(cmd)
|
||||||
|
|
||||||
|
if self.testCluster:
|
||||||
|
self.startIP()
|
||||||
|
|
||||||
|
if self.testCluster:
|
||||||
|
self.cfg("masterIp", "192.168.0.1")
|
||||||
|
self.cfg("secondIp", "192.168.0.2")
|
||||||
|
self.cfg("publicIp", "192.168.0.%d" % (self.index))
|
||||||
|
self.cfg("internalIp", "192.168.0.%d" % (self.index))
|
||||||
|
self.cfg("privateIp", "192.168.0.%d" % (self.index))
|
||||||
|
self.cfgDict["dataDir"] = self.dataDir
|
||||||
|
self.cfgDict["logDir"] = self.logDir
|
||||||
|
# self.cfg("dataDir",self.dataDir)
|
||||||
|
# self.cfg("logDir",self.logDir)
|
||||||
|
# print(updatecfgDict)
|
||||||
|
isFirstDir = 1
|
||||||
|
if bool(updatecfgDict) and updatecfgDict[0] and updatecfgDict[0][0]:
|
||||||
|
for key, value in updatecfgDict[0][0].items():
|
||||||
|
if key == "clientCfg" and self.remoteIP == "" and not platform.system().lower() == 'windows':
|
||||||
|
continue
|
||||||
|
if value == 'dataDir':
|
||||||
|
if isFirstDir:
|
||||||
|
self.cfgDict.pop('dataDir')
|
||||||
|
self.cfg(value, key)
|
||||||
|
isFirstDir = 0
|
||||||
|
else:
|
||||||
|
self.cfg(value, key)
|
||||||
|
else:
|
||||||
|
self.addExtraCfg(key, value)
|
||||||
|
if (self.remoteIP == ""):
|
||||||
|
for key, value in self.cfgDict.items():
|
||||||
|
self.cfg(key, value)
|
||||||
|
else:
|
||||||
|
self.remoteExec(self.cfgDict, "tdDnodes.deploy(%d,updateCfgDict)"%self.index)
|
||||||
|
|
||||||
|
self.deployed = 1
|
||||||
|
tdLog.debug(
|
||||||
|
"dnode:%d is deployed and configured by %s" %
|
||||||
|
(self.index, self.cfgPath))
|
||||||
|
|
||||||
|
def getPath(self, tool="taosd"):
|
||||||
|
selfPath = os.path.dirname(os.path.realpath(__file__))
|
||||||
|
|
||||||
|
if ("community" in selfPath):
|
||||||
|
projPath = selfPath[:selfPath.find("community")]
|
||||||
|
else:
|
||||||
|
projPath = selfPath[:selfPath.find("tests")]
|
||||||
|
|
||||||
|
paths = []
|
||||||
|
for root, dirs, files in os.walk(projPath):
|
||||||
|
if ((tool) in files or ("%s.exe"%tool) in files):
|
||||||
|
rootRealPath = os.path.dirname(os.path.realpath(root))
|
||||||
|
if ("packaging" not in rootRealPath):
|
||||||
|
paths.append(os.path.join(root, tool))
|
||||||
|
break
|
||||||
|
if (len(paths) == 0):
|
||||||
|
return ""
|
||||||
|
return paths[0]
|
||||||
|
|
||||||
|
def starttaosd(self):
|
||||||
|
binPath = self.getPath()
|
||||||
|
|
||||||
|
if (binPath == ""):
|
||||||
|
tdLog.exit("taosd not found!")
|
||||||
|
else:
|
||||||
|
tdLog.info("taosd found: %s" % binPath)
|
||||||
|
|
||||||
|
if self.deployed == 0:
|
||||||
|
tdLog.exit("dnode:%d is not deployed" % (self.index))
|
||||||
|
|
||||||
|
if self.valgrind == 0:
|
||||||
|
if platform.system().lower() == 'windows':
|
||||||
|
cmd = "mintty -h never %s -c %s" % (
|
||||||
|
binPath, self.cfgDir)
|
||||||
|
else:
|
||||||
|
if self.asan:
|
||||||
|
asanDir = "%s/sim/asan/dnode%d.asan" % (
|
||||||
|
self.path, self.index)
|
||||||
|
cmd = "nohup %s -c %s > /dev/null 2> %s & " % (
|
||||||
|
binPath, self.cfgDir, asanDir)
|
||||||
|
else:
|
||||||
|
cmd = "nohup %s -c %s > /dev/null 2>&1 & " % (
|
||||||
|
binPath, self.cfgDir)
|
||||||
|
else:
|
||||||
|
valgrindCmdline = "valgrind --log-file=\"%s/../log/valgrind.log\" --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all -v --workaround-gcc296-bugs=yes"%self.cfgDir
|
||||||
|
|
||||||
|
if platform.system().lower() == 'windows':
|
||||||
|
cmd = "mintty -h never %s %s -c %s" % (
|
||||||
|
valgrindCmdline, binPath, self.cfgDir)
|
||||||
|
else:
|
||||||
|
cmd = "nohup %s %s -c %s 2>&1 & " % (
|
||||||
|
valgrindCmdline, binPath, self.cfgDir)
|
||||||
|
|
||||||
|
print(cmd)
|
||||||
|
|
||||||
|
if (not self.remoteIP == ""):
|
||||||
|
self.remoteExec(self.cfgDict, "tdDnodes.dnodes[%d].deployed=1\ntdDnodes.dnodes[%d].logDir=\"%%s/sim/dnode%%d/log\"%%(tdDnodes.dnodes[%d].path,%d)\ntdDnodes.dnodes[%d].cfgDir=\"%%s/sim/dnode%%d/cfg\"%%(tdDnodes.dnodes[%d].path,%d)\ntdDnodes.start(%d)"%(self.index-1,self.index-1,self.index-1,self.index,self.index-1,self.index-1,self.index,self.index))
|
||||||
|
self.running = 1
|
||||||
|
else:
|
||||||
|
if os.system(cmd) != 0:
|
||||||
|
tdLog.exit(cmd)
|
||||||
|
self.running = 1
|
||||||
|
tdLog.debug("dnode:%d is running with %s " % (self.index, cmd))
|
||||||
|
if self.valgrind == 0:
|
||||||
|
time.sleep(0.1)
|
||||||
|
key1 = 'from offline to online'
|
||||||
|
bkey1 = bytes(key1, encoding="utf8")
|
||||||
|
key2= 'TDengine initialized successfully'
|
||||||
|
bkey2 = bytes(key2, encoding="utf8")
|
||||||
|
logFile = self.logDir + "/taosdlog.0"
|
||||||
|
i = 0
|
||||||
|
# while not os.path.exists(logFile):
|
||||||
|
# sleep(0.1)
|
||||||
|
# i += 1
|
||||||
|
# if i > 10:
|
||||||
|
# break
|
||||||
|
# tailCmdStr = 'tail -f '
|
||||||
|
# if platform.system().lower() == 'windows':
|
||||||
|
# tailCmdStr = 'tail -n +0 -f '
|
||||||
|
# popen = subprocess.Popen(
|
||||||
|
# tailCmdStr + logFile,
|
||||||
|
# stdout=subprocess.PIPE,
|
||||||
|
# stderr=subprocess.PIPE,
|
||||||
|
# shell=True)
|
||||||
|
# pid = popen.pid
|
||||||
|
# # print('Popen.pid:' + str(pid))
|
||||||
|
# timeout = time.time() + 60 * 2
|
||||||
|
# while True:
|
||||||
|
# line = popen.stdout.readline().strip()
|
||||||
|
# print(line)
|
||||||
|
# if bkey1 in line:
|
||||||
|
# popen.kill()
|
||||||
|
# break
|
||||||
|
# elif bkey2 in line:
|
||||||
|
# popen.kill()
|
||||||
|
# break
|
||||||
|
# if time.time() > timeout:
|
||||||
|
# print(time.time(),timeout)
|
||||||
|
# tdLog.exit('wait too long for taosd start')
|
||||||
|
tdLog.debug("the dnode:%d has been started." % (self.index))
|
||||||
|
else:
|
||||||
|
tdLog.debug(
|
||||||
|
"wait 10 seconds for the dnode:%d to start." %
|
||||||
|
(self.index))
|
||||||
|
time.sleep(10)
|
||||||
|
|
||||||
|
def start(self):
    """Launch this dnode's taosd process and wait until it is online.

    Builds a platform-specific launch command (mintty on Windows, nohup
    elsewhere; optionally under valgrind or with an ASAN log redirect),
    runs it locally or via remoteExec, then tails taosdlog.0 for the
    'from offline to online' marker before returning.

    Exits the test run (tdLog.exit) if taosd is missing, the dnode is
    not deployed, the launch command fails, or the online marker does
    not appear within the timeout.
    """
    binPath = self.getPath()

    if (binPath == ""):
        tdLog.exit("taosd not found!")
    else:
        tdLog.info("taosd found: %s" % binPath)

    if self.deployed == 0:
        tdLog.exit("dnode:%d is not deployed" % (self.index))

    # Compose the launch command: four variants keyed on valgrind mode
    # and platform; ASAN adds a dedicated stderr log file.
    if self.valgrind == 0:
        if platform.system().lower() == 'windows':
            cmd = "mintty -h never %s -c %s" % (
                binPath, self.cfgDir)
        else:
            if self.asan:
                asanDir = "%s/sim/asan/dnode%d.asan" % (
                    self.path, self.index)
                cmd = "nohup %s -c %s > /dev/null 2> %s & " % (
                    binPath, self.cfgDir, asanDir)
            else:
                cmd = "nohup %s -c %s > /dev/null 2>&1 & " % (
                    binPath, self.cfgDir)
    else:
        valgrindCmdline = "valgrind --log-file=\"%s/../log/valgrind.log\" --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all -v --workaround-gcc296-bugs=yes"%self.cfgDir

        if platform.system().lower() == 'windows':
            cmd = "mintty -h never %s %s -c %s" % (
                valgrindCmdline, binPath, self.cfgDir)
        else:
            cmd = "nohup %s %s -c %s 2>&1 & " % (
                valgrindCmdline, binPath, self.cfgDir)

    print(cmd)

    if (not self.remoteIP == ""):
        # Remote mode: replay deployment state on the remote tdDnodes and
        # call start() there; the command string is evaluated remotely.
        self.remoteExec(self.cfgDict, "tdDnodes.dnodes[%d].deployed=1\ntdDnodes.dnodes[%d].logDir=\"%%s/sim/dnode%%d/log\"%%(tdDnodes.dnodes[%d].path,%d)\ntdDnodes.dnodes[%d].cfgDir=\"%%s/sim/dnode%%d/cfg\"%%(tdDnodes.dnodes[%d].path,%d)\ntdDnodes.start(%d)"%(self.index-1,self.index-1,self.index-1,self.index,self.index-1,self.index-1,self.index,self.index))
        self.running = 1
    else:
        # Remove the stale log first so the marker scan below cannot match
        # output from a previous run.
        os.system("rm -rf %s/taosdlog.0"%self.logDir)
        if os.system(cmd) != 0:
            tdLog.exit(cmd)
        self.running = 1
        tdLog.debug("dnode:%d is running with %s " % (self.index, cmd))
        if self.valgrind == 0:
            time.sleep(0.1)
            key = 'from offline to online'
            bkey = bytes(key, encoding="utf8")
            logFile = self.logDir + "/taosdlog.0"
            i = 0
            # Poll up to ~5s (50 * 0.1s) for the log file to appear.
            # NOTE(review): bare sleep() relies on a star-import providing
            # it (time.sleep is used elsewhere) — verify it is in scope.
            while not os.path.exists(logFile):
                sleep(0.1)
                i += 1
                if i > 50:
                    break
            with open(logFile) as f:
                # Scan for the online marker for at most 20 seconds.
                timeout = time.time() + 10 * 2
                while True:
                    line = f.readline().encode('utf-8')
                    if bkey in line:
                        break
                    if time.time() > timeout:
                        tdLog.exit('wait too long for taosd start')
                tdLog.debug("the dnode:%d has been started." % (self.index))
        else:
            # Under valgrind startup is slow and the log marker unreliable;
            # fall back to a fixed wait.
            tdLog.debug(
                "wait 10 seconds for the dnode:%d to start." %
                (self.index))
            time.sleep(10)
|
||||||
|
|
||||||
|
def startWithoutSleep(self):
    """Launch this dnode's taosd process without waiting for readiness.

    Same command construction as start() (platform / valgrind / ASAN
    variants) but returns immediately after issuing the command; no
    log scan and no sleep. Exits the run if taosd is missing, the
    dnode is not deployed, or the local launch command fails.
    """
    binPath = self.getPath()

    if (binPath == ""):
        tdLog.exit("taosd not found!")
    else:
        tdLog.info("taosd found: %s" % binPath)

    if self.deployed == 0:
        tdLog.exit("dnode:%d is not deployed" % (self.index))

    # Compose the launch command (see start() for the variant logic).
    if self.valgrind == 0:
        if platform.system().lower() == 'windows':
            cmd = "mintty -h never %s -c %s" % (binPath, self.cfgDir)
        else:
            if self.asan:
                asanDir = "%s/sim/asan/dnode%d.asan" % (
                    self.path, self.index)
                cmd = "nohup %s -c %s > /dev/null 2> %s & " % (
                    binPath, self.cfgDir, asanDir)
            else:
                cmd = "nohup %s -c %s > /dev/null 2>&1 & " % (
                    binPath, self.cfgDir)
    else:
        valgrindCmdline = "valgrind --log-file=\"%s/../log/valgrind.log\" --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all -v --workaround-gcc296-bugs=yes"%self.cfgDir
        if platform.system().lower() == 'windows':
            cmd = "mintty -h never %s %s -c %s" % (
                valgrindCmdline, binPath, self.cfgDir)
        else:
            cmd = "nohup %s %s -c %s 2>&1 & " % (
                valgrindCmdline, binPath, self.cfgDir)
    print(cmd)

    if (self.remoteIP == ""):
        if os.system(cmd) != 0:
            tdLog.exit(cmd)
    else:
        # Remote mode: replay deployment state remotely, then call
        # startWithoutSleep() there.
        self.remoteExec(self.cfgDict, "tdDnodes.dnodes[%d].deployed=1\ntdDnodes.dnodes[%d].logDir=\"%%s/sim/dnode%%d/log\"%%(tdDnodes.dnodes[%d].path,%d)\ntdDnodes.dnodes[%d].cfgDir=\"%%s/sim/dnode%%d/cfg\"%%(tdDnodes.dnodes[%d].path,%d)\ntdDnodes.startWithoutSleep(%d)"%(self.index-1,self.index-1,self.index-1,self.index,self.index-1,self.index-1,self.index,self.index))

    self.running = 1
    tdLog.debug("dnode:%d is running with %s " % (self.index, cmd))
|
||||||
|
|
||||||
|
def stop(self):
    """Stop this dnode with SIGINT (graceful), then free its TCP ports.

    Dispatch order: ASAN mode delegates to a stop script; remote mode
    delegates via remoteExec; otherwise the matching process (taosd or
    valgrind.bin) is repeatedly sent SIGINT until it disappears, and on
    non-Windows ports 6030-6040 are force-freed with fuser.
    """
    if self.asan:
        stopCmd = "%s -s stop -n dnode%d" % (self.execPath, self.index)
        tdLog.info("execute script: " + stopCmd)
        os.system(stopCmd)
        return

    if (not self.remoteIP == ""):
        # Remote mode: mark the remote dnode running and stop it there.
        self.remoteExec(self.cfgDict, "tdDnodes.dnodes[%d].running=1\ntdDnodes.dnodes[%d].stop()"%(self.index-1,self.index-1))
        tdLog.info("stop dnode%d"%self.index)
        return
    if self.valgrind == 0:
        toBeKilled = "taosd"
    else:
        toBeKilled = "valgrind.bin"

    if self.running != 0:
        psCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}' | xargs" % toBeKilled
        processID = subprocess.check_output(
            psCmd, shell=True).decode("utf-8").strip()

        # On Windows the kill is issued only once; on other platforms it is
        # re-issued every second until the process list is empty.
        onlyKillOnceWindows = 0
        while(processID):
            if not platform.system().lower() == 'windows' or (onlyKillOnceWindows == 0 and platform.system().lower() == 'windows'):
                killCmd = "kill -INT %s > /dev/null 2>&1" % processID
                if platform.system().lower() == 'windows':
                    killCmd = "kill -INT %s > nul 2>&1" % processID
                os.system(killCmd)
                onlyKillOnceWindows = 1
            time.sleep(1)
            processID = subprocess.check_output(
                psCmd, shell=True).decode("utf-8").strip()
        if not platform.system().lower() == 'windows':
            # Free the dnode's TCP port range so a restart can bind cleanly.
            for port in range(6030, 6041):
                fuserCmd = "fuser -k -n tcp %d > /dev/null" % port
                os.system(fuserCmd)
        if self.valgrind:
            # Give valgrind extra time to flush its report.
            time.sleep(2)

        self.running = 0
        tdLog.debug("dnode:%d is stopped by kill -INT" % (self.index))
|
||||||
|
|
||||||
|
|
||||||
|
def stoptaosd(self):
    """Stop only this dnode's taosd process (SIGINT), filtered by dnode id.

    Unlike stop(), the ps lookup is narrowed to processes whose command
    line mentions this dnode's directory, so other dnodes on the same
    host are left running. ASAN and remote modes delegate as in stop().
    """
    tdLog.debug("start to stop taosd on dnode: %d "% (self.index))
    # print(self.asan,self.running,self.remoteIP,self.valgrind)
    if self.asan:
        stopCmd = "%s -s stop -n dnode%d" % (self.execPath, self.index)
        tdLog.info("execute script: " + stopCmd)
        os.system(stopCmd)
        return

    if (not self.remoteIP == ""):
        # NOTE(review): remote path calls stop(), not stoptaosd(), on the
        # remote side — confirm this is intentional.
        self.remoteExec(self.cfgDict, "tdDnodes.dnodes[%d].running=1\ntdDnodes.dnodes[%d].stop()"%(self.index-1,self.index-1))
        tdLog.info("stop dnode%d"%self.index)
        return
    if self.valgrind == 0:
        toBeKilled = "taosd"
    else:
        toBeKilled = "valgrind.bin"

    if self.running != 0:
        # Build a dnode-specific process lookup; the Windows variant uses
        # wmic to match the dnode directory in the command line.
        if platform.system().lower() == 'windows':
            psCmd = "for /f %%a in ('wmic process where \"name='taosd.exe' and CommandLine like '%%dnode%d%%'\" get processId ^| xargs echo ^| awk ^'{print $2}^' ^&^& echo aa') do @(ps | grep %%a | awk '{print $1}' | xargs)" % (self.index)
        else:
            psCmd = "ps -ef|grep -w %s| grep dnode%d|grep -v grep | awk '{print $2}' | xargs" % (toBeKilled,self.index)
        processID = subprocess.check_output(
            psCmd, shell=True).decode("utf-8").strip()

        # Windows only issues the kill once; elsewhere it repeats each second.
        onlyKillOnceWindows = 0
        while(processID):
            if not platform.system().lower() == 'windows' or (onlyKillOnceWindows == 0 and platform.system().lower() == 'windows'):
                killCmd = "kill -INT %s > /dev/null 2>&1" % processID
                if platform.system().lower() == 'windows':
                    killCmd = "kill -INT %s > nul 2>&1" % processID
                os.system(killCmd)
                onlyKillOnceWindows = 1
            time.sleep(1)
            processID = subprocess.check_output(
                psCmd, shell=True).decode("utf-8").strip()
        if self.valgrind:
            # Give valgrind extra time to flush its report.
            time.sleep(2)

        self.running = 0
        tdLog.debug("dnode:%d is stopped by kill -INT" % (self.index))
|
||||||
|
|
||||||
|
def forcestop(self):
    """Force-kill this dnode with SIGKILL and free its TCP ports.

    Dispatch order mirrors stop(): ASAN mode delegates to the stop
    script (with -x SIGKILL), remote mode delegates via remoteExec,
    otherwise the matching process is killed with SIGKILL until gone
    and ports 6030-6040 are freed with fuser.
    """
    if self.asan:
        # BUGFIX: this used '+' between the format string and the tuple,
        # which raises TypeError (str + tuple); '%' applies the format.
        stopCmd = "%s -s stop -n dnode%d -x SIGKILL" % \
            (self.execPath, self.index)
        tdLog.info("execute script: " + stopCmd)
        os.system(stopCmd)
        return

    if (not self.remoteIP == ""):
        # Remote mode: mark the remote dnode running and force-stop it there.
        self.remoteExec(self.cfgDict, "tdDnodes.dnodes[%d].running=1\ntdDnodes.dnodes[%d].forcestop()"%(self.index-1,self.index-1))
        return
    if self.valgrind == 0:
        toBeKilled = "taosd"
    else:
        toBeKilled = "valgrind.bin"

    if self.running != 0:
        psCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}' | xargs" % toBeKilled
        processID = subprocess.check_output(
            psCmd, shell=True).decode("utf-8").strip()

        # Windows only issues the kill once; elsewhere it repeats each second
        # until the process list is empty.
        onlyKillOnceWindows = 0
        while(processID):
            if not platform.system().lower() == 'windows' or (onlyKillOnceWindows == 0 and platform.system().lower() == 'windows'):
                killCmd = "kill -KILL %s > /dev/null 2>&1" % processID
                os.system(killCmd)
                onlyKillOnceWindows = 1
            time.sleep(1)
            processID = subprocess.check_output(
                psCmd, shell=True).decode("utf-8").strip()
        # Free the dnode's TCP port range so a restart can bind cleanly.
        for port in range(6030, 6041):
            fuserCmd = "fuser -k -n tcp %d" % port
            os.system(fuserCmd)
        if self.valgrind:
            # Give valgrind extra time to flush its report.
            time.sleep(2)

        self.running = 0
        tdLog.debug("dnode:%d is stopped by kill -KILL" % (self.index))
|
||||||
|
|
||||||
|
def startIP(self):
    """Bring up the loopback alias lo:<index> with address 192.168.0.<index>.

    Requires sudo; aborts the test run if ifconfig fails.
    """
    idx = self.index
    cmd = "sudo ifconfig lo:%d 192.168.0.%d up" % (idx, idx)
    rc = os.system(cmd)
    if rc != 0:
        tdLog.exit(cmd)
|
||||||
|
|
||||||
|
def stopIP(self):
    """Tear down the loopback alias lo:<index> (192.168.0.<index>).

    Requires sudo; aborts the test run if ifconfig fails.
    """
    idx = self.index
    cmd = "sudo ifconfig lo:%d 192.168.0.%d down" % (
        idx, idx)
    rc = os.system(cmd)
    if rc != 0:
        tdLog.exit(cmd)
|
||||||
|
|
||||||
|
def cfg(self, option, value):
    """Append 'option value' to this dnode's config file via shell echo.

    Aborts the test run if the shell command fails.
    """
    appendCmd = "echo %s %s >> %s" % (option, value, self.cfgPath)
    rc = os.system(appendCmd)
    if rc != 0:
        tdLog.exit(appendCmd)
|
||||||
|
|
||||||
|
def getDnodeRootDir(self, index):
    """Return the per-dnode root directory <path>/sim/psim/dnode<index>."""
    leaf = "dnode%d" % index
    return os.path.join(self.path, "sim", "psim", leaf)
|
||||||
|
|
||||||
|
def getDnodesRootDir(self):
    """Return <path>/sim/psim, the parent directory of all simulated dnodes."""
    return os.path.join(self.path, "sim", "psim")
|
||||||
|
|
||||||
|
|
||||||
|
class TDDnodes:
    """Manager for the fixed pool of 10 simulated TDengine dnodes plus the
    companion sim client.

    Per-dnode operations validate the 1-based ``index`` with check() and
    then delegate to ``self.dnodes[index - 1]``.
    """

    def __init__(self):
        # Fixed pool of 10 dnode wrappers; external index is 1-based.
        self.dnodes = []
        for i in range(1, 11):
            self.dnodes.append(TDDnode(i))
        self.simDeployed = False
        self.testCluster = False
        self.valgrind = 0
        self.asan = False
        self.killValgrind = 0

    def init(self, path, remoteIP = ""):
        """Resolve the project root, initialize every dnode and the sim client.

        path: project root directory; when empty it is derived from the
              location of the taosd binary.
        remoteIP: forwarded to each dnode for remote-execution mode.
        """
        binPath = self.dnodes[0].getPath() + "/../../../"
        # tdLog.debug("binPath %s" % (binPath))
        binPath = os.path.realpath(binPath)
        # tdLog.debug("binPath real path %s" % (binPath))

        if path == "":
            self.path = os.path.abspath(binPath + "../../")
        else:
            self.path = os.path.realpath(path)

        for i in range(len(self.dnodes)):
            self.dnodes[i].init(self.path, remoteIP)
        self.sim = TDSimClient(self.path)

    def setTestCluster(self, value):
        """Enable/disable cluster mode (controls per-dnode virtual IPs)."""
        self.testCluster = value

    def setValgrind(self, value):
        """Set the valgrind flag, propagated to dnodes at deploy time."""
        self.valgrind = value

    def setAsan(self, value):
        """Set AddressSanitizer mode and resolve the stop-script paths."""
        self.asan = value
        if value:
            selfPath = os.path.dirname(os.path.realpath(__file__))
            # Script locations differ between community and enterprise trees.
            if ("community" in selfPath):
                self.stopDnodesPath = os.path.abspath(self.path + "/community/tests/script/sh/stop_dnodes.sh")
                self.stopDnodesSigintPath = os.path.abspath(self.path + "/community/tests/script/sh/sigint_stop_dnodes.sh")
            else:
                self.stopDnodesPath = os.path.abspath(self.path + "/tests/script/sh/stop_dnodes.sh")
                self.stopDnodesSigintPath = os.path.abspath(self.path + "/tests/script/sh/sigint_stop_dnodes.sh")
            tdLog.info("run in address sanitizer mode")

    def setKillValgrind(self, value):
        """When 1, stopAll() also kills leftover valgrind.bin processes."""
        self.killValgrind = value

    def deploy(self, index, *updatecfgDict):
        """Deploy the sim client once, then deploy dnode ``index``."""
        self.sim.setTestCluster(self.testCluster)

        if (self.simDeployed == False):
            self.sim.deploy(updatecfgDict)
            self.simDeployed = True

        self.check(index)
        self.dnodes[index - 1].setTestCluster(self.testCluster)
        self.dnodes[index - 1].setValgrind(self.valgrind)
        self.dnodes[index - 1].setAsan(self.asan)
        self.dnodes[index - 1].deploy(updatecfgDict)

    def cfg(self, index, option, value):
        """Append a config option to dnode ``index``'s config file."""
        self.check(index)
        self.dnodes[index - 1].cfg(option, value)

    def starttaosd(self, index):
        """Start only the taosd process of dnode ``index``."""
        self.check(index)
        self.dnodes[index - 1].starttaosd()

    def stoptaosd(self, index):
        """Stop only the taosd process of dnode ``index``."""
        self.check(index)
        self.dnodes[index - 1].stoptaosd()

    def start(self, index):
        """Start dnode ``index`` and wait for it to come online."""
        self.check(index)
        self.dnodes[index - 1].start()

    def startWithoutSleep(self, index):
        """Start dnode ``index`` without waiting for readiness."""
        self.check(index)
        self.dnodes[index - 1].startWithoutSleep()

    def stop(self, index):
        """Gracefully stop dnode ``index``."""
        self.check(index)
        self.dnodes[index - 1].stop()

    def getDataSize(self, index):
        """Return the data-directory size of dnode ``index``."""
        self.check(index)
        return self.dnodes[index - 1].getDataSize()

    def forcestop(self, index):
        """Force-kill dnode ``index``."""
        self.check(index)
        self.dnodes[index - 1].forcestop()

    def startIP(self, index):
        """Bring up dnode ``index``'s virtual IP (cluster mode only)."""
        self.check(index)

        if self.testCluster:
            self.dnodes[index - 1].startIP()

    def stopIP(self, index):
        """Tear down dnode ``index``'s virtual IP (cluster mode only)."""
        self.check(index)

        if self.dnodes[index - 1].testCluster:
            self.dnodes[index - 1].stopIP()

    def check(self, index):
        """Abort the test run when ``index`` is outside [1, 10]."""
        if index < 1 or index > 10:
            tdLog.exit("index:%d should on a scale of [1, 10]" % (index))

    def StopAllSigint(self):
        """Stop all dnodes via the SIGINT helper script (ASAN mode only)."""
        tdLog.info("stop all dnodes sigint, asan:%d" % self.asan)
        if self.asan:
            tdLog.info("execute script: %s" % self.stopDnodesSigintPath)
            os.system(self.stopDnodesSigintPath)
            tdLog.info("execute finished")
            return

    def killProcesser(self, processerName):
        """Kill every process named ``processerName`` until none remain.

        Uses wmic on Windows, ps/kill -TERM elsewhere; lookup failures
        terminate the loop and are logged at debug level.
        """
        if platform.system().lower() == 'windows':
            killCmd = ("wmic process where name=\"%s.exe\" call terminate > NUL 2>&1" % processerName)
            psCmd = ("wmic process where name=\"%s.exe\" | findstr \"%s.exe\"" % (processerName, processerName))
        else:
            killCmd = (
                "ps -ef|grep -w %s| grep -v grep | awk '{print $2}' | xargs kill -TERM > /dev/null 2>&1"
                % processerName
            )
            psCmd = ("ps -ef|grep -w %s| grep -v grep | awk '{print $2}'" % processerName)

        processID = ""

        try:
            processID = subprocess.check_output(psCmd, shell=True)
            while processID:
                os.system(killCmd)
                time.sleep(1)
                try:
                    processID = subprocess.check_output(psCmd, shell=True)
                except Exception as err:
                    processID = ""
                    # BUGFIX: the 'f' prefix was missing, so the literal
                    # '{err}' was logged instead of the exception text.
                    tdLog.debug(f'**** kill pid warn: {err}')
        except Exception as err:
            processID = ""
            tdLog.debug(f'**** find pid warn: {err}')

    def stopAll(self):
        """Stop every dnode, then clean up stray taosd/valgrind processes."""
        tdLog.info("stop all dnodes, asan:%d" % self.asan)
        if platform.system().lower() != 'windows':
            distro_id = distro.id()
        else:
            distro_id = "not alpine"
        if self.asan and distro_id != "alpine":
            # ASAN mode delegates cleanup to a helper script (except alpine,
            # which lacks the tooling the script relies on).
            tdLog.info("execute script: %s" % self.stopDnodesPath)
            os.system(self.stopDnodesPath)
            tdLog.info("execute finished")
            return

        if (not self.dnodes[0].remoteIP == ""):
            self.dnodes[0].remoteExec(self.dnodes[0].cfgDict, "for i in range(len(tdDnodes.dnodes)):\n tdDnodes.dnodes[i].running=1\ntdDnodes.stopAll()")
            return
        for i in range(len(self.dnodes)):
            self.dnodes[i].stop()

        if (distro_id == "alpine"):
            # Alpine has no systemctl; kill root-owned taosd directly.
            psCmd = "ps -ef | grep -w taosd | grep 'root' | grep -v grep| grep -v defunct | awk '{print $2}' | xargs"
            processID = subprocess.check_output(psCmd, shell=True).decode("utf-8").strip()
            while(processID):
                print(processID)
                killCmd = "kill -9 %s > /dev/null 2>&1" % processID
                os.system(killCmd)
                time.sleep(1)
                processID = subprocess.check_output(
                    psCmd, shell=True).decode("utf-8").strip()
        elif platform.system().lower() == 'windows':
            self.killProcesser("taosd")
            self.killProcesser("tmq_sim")
            self.killProcesser("taosBenchmark")
        else:
            psCmd = "ps -ef | grep -w taosd | grep 'root' | grep -v grep| grep -v defunct | awk '{print $2}' | xargs"
            processID = subprocess.check_output(psCmd, shell=True).decode("utf-8").strip()
            if processID:
                # A root-owned taosd is presumably the system service;
                # stop it via systemctl first.
                cmd = "sudo systemctl stop taosd"
                os.system(cmd)
                # if os.system(cmd) != 0 :
                #   tdLog.exit(cmd)
            psCmd = "ps -ef|grep -w taosd| grep -v grep| grep -v defunct | awk '{print $2}' | xargs"
            processID = subprocess.check_output(psCmd, shell=True).decode("utf-8").strip()
            while(processID):
                killCmd = "kill -9 %s > /dev/null 2>&1" % processID
                os.system(killCmd)
                time.sleep(1)
                processID = subprocess.check_output(
                    psCmd, shell=True).decode("utf-8").strip()
        if self.killValgrind == 1:
            psCmd = "ps -ef|grep -w valgrind.bin| grep -v grep | awk '{print $2}' | xargs"
            processID = subprocess.check_output(psCmd, shell=True).decode("utf-8").strip()
            while(processID):
                if platform.system().lower() == 'windows':
                    killCmd = "kill -TERM %s > nul 2>&1" % processID
                else:
                    killCmd = "kill -TERM %s > /dev/null 2>&1" % processID
                os.system(killCmd)
                time.sleep(1)
                processID = subprocess.check_output(
                    psCmd, shell=True).decode("utf-8").strip()

    def getDnodesRootDir(self):
        """Return the sim root directory <path>/sim."""
        dnodesRootDir = "%s/sim" % (self.path)
        return dnodesRootDir

    def getSimCfgPath(self):
        """Return the sim client's cfg directory."""
        return self.sim.getCfgDir()

    def getSimLogPath(self):
        """Return the sim client's log directory."""
        return self.sim.getLogDir()

    def addSimExtraCfg(self, option, value):
        """Add an extra config option to the sim client."""
        self.sim.addExtraCfg(option, value)

    def getAsan(self):
        """Return whether AddressSanitizer mode is enabled."""
        return self.asan
|
||||||
|
|
||||||
|
# Module-level singleton used throughout the test framework.
tdDnodes = TDDnodes()
|
|
@ -0,0 +1,65 @@
|
||||||
|
###################################################################
|
||||||
|
# Copyright (c) 2016 by TAOS Technologies, Inc.
|
||||||
|
# All rights reserved.
|
||||||
|
#
|
||||||
|
# This file is proprietary and confidential to TAOS Technologies.
|
||||||
|
# No part of this file may be reproduced, stored, transmitted,
|
||||||
|
# disclosed or used in any form or by any means other than as
|
||||||
|
# expressly provided by the written permission from Jianhui Tao
|
||||||
|
#
|
||||||
|
###################################################################
|
||||||
|
|
||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
|
||||||
|
import time
|
||||||
|
from datetime import datetime
|
||||||
|
|
||||||
|
class GetTime:
    """Convert timestamp strings to integer epoch values in ms/us/ns.

    Inputs are of the form 'YYYY-mm-dd[ HH:MM:SS[.fraction]]', optionally
    with up to 9 fractional digits; strings containing '+' are treated as
    ISO-8601 with a timezone offset.
    """

    def get_ms_timestamp(self,ts_str):
        """Return the epoch timestamp of *ts_str* in milliseconds.

        Timezone-aware inputs ('+' present) go through fromisoformat;
        naive inputs go through strptime + time.mktime, i.e. they are
        interpreted in the local timezone.
        """
        _ts_str = ts_str
        if "+" in _ts_str:
            # Timezone-aware path: seconds since the epoch in that zone,
            # truncated to whole seconds, plus milliseconds.
            timestamp = datetime.fromisoformat(_ts_str)
            return int((timestamp-datetime.fromtimestamp(0,timestamp.tzinfo)).total_seconds())*1000+int(timestamp.microsecond / 1000)
        if " " in ts_str:
            p = ts_str.split(" ")[1]
            # More than 15 chars of time ('HH:MM:SS.ffffff' is 15) means
            # 9 fractional digits; drop the last 3 (nanoseconds) so that
            # strptime's %f (max 6 digits) can parse the rest.
            if len(p) > 15 :
                _ts_str = ts_str[:-3]
        if ':' in _ts_str and '.' in _ts_str:
            # Full date-time with a fractional part.
            timestamp = datetime.strptime(_ts_str, "%Y-%m-%d %H:%M:%S.%f")
            date_time = int(int(time.mktime(timestamp.timetuple()))*1000 + timestamp.microsecond/1000)
        elif ':' in _ts_str and '.' not in _ts_str:
            # Date-time without a fractional part.
            timestamp = datetime.strptime(_ts_str, "%Y-%m-%d %H:%M:%S")
            date_time = int(int(time.mktime(timestamp.timetuple()))*1000 + timestamp.microsecond/1000)
        else:
            # Date only.
            timestamp = datetime.strptime(_ts_str, "%Y-%m-%d")
            date_time = int(int(time.mktime(timestamp.timetuple()))*1000 + timestamp.microsecond/1000)
        return date_time

    def get_us_timestamp(self,ts_str):
        """Return the epoch timestamp in microseconds.

        Builds on the ms value; digits 13-15 of the time-of-day part
        (7th-9th fractional digits' predecessors) supply the extra
        microsecond precision when present.
        """
        _ts = self.get_ms_timestamp(ts_str) * 1000
        if " " in ts_str:
            p = ts_str.split(" ")[1]
            if len(p) > 12:
                us_ts = p[12:15]
                _ts += int(us_ts)
        return _ts

    def get_ns_timestamp(self,ts_str):
        """Return the epoch timestamp in nanoseconds.

        Builds on the us value; characters beyond position 15 of the
        time-of-day part supply the nanosecond digits when present.
        """
        _ts = self.get_us_timestamp(ts_str) *1000
        if " " in ts_str:
            p = ts_str.split(" ")[1]
            if len(p) > 15:
                us_ts = p[15:]
                _ts += int(us_ts)
        return _ts

    def time_transform(self,ts_str,precision):
        """Convert a list of timestamp strings at the given precision.

        precision: one of 'ms', 'us', 'ns'; any other value yields [].
        Returns the list of converted integer timestamps.
        """
        date_time = []
        if precision == 'ms':
            for i in ts_str:
                date_time.append(self.get_ms_timestamp(i))
        elif precision == 'us':
            for i in ts_str:
                date_time.append(self.get_us_timestamp(i))
        elif precision == 'ns':
            for i in ts_str:
                date_time.append(self.get_ns_timestamp(i))
        return date_time
|
|
@ -0,0 +1,49 @@
|
||||||
|
###################################################################
|
||||||
|
# Copyright (c) 2016 by TAOS Technologies, Inc.
|
||||||
|
# All rights reserved.
|
||||||
|
#
|
||||||
|
# This file is proprietary and confidential to TAOS Technologies.
|
||||||
|
# No part of this file may be reproduced, stored, transmitted,
|
||||||
|
# disclosed or used in any form or by any means other than as
|
||||||
|
# expressly provided by the written permission from Jianhui Tao
|
||||||
|
#
|
||||||
|
###################################################################
|
||||||
|
|
||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
|
||||||
|
import sys
|
||||||
|
import os
|
||||||
|
import time
|
||||||
|
import datetime
|
||||||
|
from distutils.log import warn as printf
|
||||||
|
|
||||||
|
|
||||||
|
class TDLog:
    """Timestamped, ANSI-colored console logger for the test framework."""

    def __init__(self):
        # Reserved for an optional log path; unused by the print methods.
        self.path = ""

    def info(self, info):
        """Plain informational message (extra trailing newline)."""
        stamp = datetime.datetime.now()
        print("%s %s\n" % (stamp, info))

    def sleep(self, sec):
        """Announce the pause, then sleep for *sec* seconds."""
        stamp = datetime.datetime.now()
        print("%s sleep %d seconds" % (stamp, sec))
        time.sleep(sec)

    def debug(self, err):
        """Cyan debug message."""
        stamp = datetime.datetime.now()
        print("\033[1;36m%s %s\033[0m" % (stamp, err))

    def success(self, info):
        """Green success message (routed through distutils' warn/printf)."""
        stamp = datetime.datetime.now()
        printf("\033[1;32m%s %s\033[0m" % (stamp, info))

    def notice(self, err):
        """Yellow notice message."""
        stamp = datetime.datetime.now()
        print("\033[1;33m%s %s\033[0m" % (stamp, err))

    def exit(self, err):
        """Red fatal message, then terminate the process with status 1."""
        stamp = datetime.datetime.now()
        print("\033[1;31m%s %s\033[0m" % (stamp, err))
        sys.exit(1)

    def printNoPrefix(self, info):
        """Cyan message without a timestamp prefix."""
        print("\033[1;36m%s\033[0m" % (info))
|
||||||
|
|
||||||
|
|
||||||
|
# Module-level singleton logger used throughout the test framework.
tdLog = TDLog()
|
|
@ -0,0 +1,83 @@
|
||||||
|
###################################################################
|
||||||
|
# Copyright (c) 2016 by TAOS Technologies, Inc.
|
||||||
|
# All rights reserved.
|
||||||
|
#
|
||||||
|
# This file is proprietary and confidential to TAOS Technologies.
|
||||||
|
# No part of this file may be reproduced, stored, transmitted,
|
||||||
|
# disclosed or used in any form or by any means other than as
|
||||||
|
# expressly provided by the written permission from Jianhui Tao
|
||||||
|
#
|
||||||
|
###################################################################
|
||||||
|
|
||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
|
||||||
|
|
||||||
|
import os
|
||||||
|
from util.log import *
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
class TDFindPath:
    """This class is for finding path within TDengine."""

    def __init__(self):
        # Anchor file from which searches start; set via init().
        self.file = ""

    def init(self, file):
        """Record the anchor file for subsequent path lookups.

        Args:
            file (str): the file location you want to start the query.
                Generally using __file__
        """
        self.file = file

    def getTaosdemoPath(self):
        """For finding the path of directory containing taosdemo.

        Returns:
            str: the path to directory containing taosdemo
        """
        selfPath = os.path.dirname(os.path.realpath(self.file))

        if ("community" in selfPath):
            projPath = selfPath[:selfPath.find("community")]
        else:
            projPath = selfPath[:selfPath.find("tests")]

        # BUGFIX: buildPath was previously unbound (NameError) when no
        # taosd binary was found anywhere under projPath.
        buildPath = ""
        for root, dirs, files in os.walk(projPath):
            if ("taosd" in files):
                rootRealPath = os.path.dirname(os.path.realpath(root))
                # Skip binaries that live under packaging output.
                if ("packaging" not in rootRealPath):
                    buildPath = root[:len(root)-len("/build/bin")]
                    break
        if (buildPath == ""):
            tdLog.exit("taosd not found!")
        else:
            tdLog.info(f"taosd found in {buildPath}")
        return buildPath + "/build/bin/"

    def getTDenginePath(self):
        """For finding the root path of TDengine.

        Returns:
            str: the root path of TDengine
        """
        selfPath = os.path.dirname(os.path.realpath(self.file))

        if ("community" in selfPath):
            projPath = selfPath[:selfPath.find("community")]
        else:
            projPath = selfPath[:selfPath.find("tests")]
        print(projPath)
        # BUGFIX: rootRealPath was previously unbound (NameError) when no
        # 'sim' directory was found; the last match wins, as before.
        rootRealPath = ""
        for root, dirs, files in os.walk(projPath):
            if ("sim" in dirs):
                print(root)
                rootRealPath = os.path.realpath(root)
        if (rootRealPath == ""):
            tdLog.exit("TDengine not found!")
        else:
            tdLog.info(f"TDengine found in {rootRealPath}")
        return rootRealPath
|
||||||
|
|
||||||
|
# Module-level singleton used by test cases to locate project paths.
tdFindPath = TDFindPath()
|
|
@ -0,0 +1,655 @@
|
||||||
|
###################################################################
|
||||||
|
# Copyright (c) 2016 by TAOS Technologies, Inc.
|
||||||
|
# All rights reserved.
|
||||||
|
#
|
||||||
|
# This file is proprietary and confidential to TAOS Technologies.
|
||||||
|
# No part of this file may be reproduced, stored, transmitted,
|
||||||
|
# disclosed or used in any form or by any means other than as
|
||||||
|
# expressly provided by the written permission from Jianhui Tao
|
||||||
|
#
|
||||||
|
###################################################################
|
||||||
|
|
||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
|
||||||
|
import sys
|
||||||
|
import os
|
||||||
|
import time
|
||||||
|
import datetime
|
||||||
|
import inspect
|
||||||
|
import traceback
|
||||||
|
import psutil
|
||||||
|
import shutil
|
||||||
|
import pandas as pd
|
||||||
|
from util.log import *
|
||||||
|
from util.constant import *
|
||||||
|
|
||||||
|
# from datetime import timezone
|
||||||
|
import time
|
||||||
|
|
||||||
|
def _parse_ns_timestamp(timestr):
|
||||||
|
dt_obj = datetime.datetime.strptime(timestr[:len(timestr)-3], "%Y-%m-%d %H:%M:%S.%f")
|
||||||
|
tz = int(int((dt_obj-datetime.datetime.fromtimestamp(0,dt_obj.tzinfo)).total_seconds())*1e9) + int(dt_obj.microsecond * 1000) + int(timestr[-3:])
|
||||||
|
return tz
|
||||||
|
|
||||||
|
|
||||||
|
def _parse_datetime(timestr):
|
||||||
|
try:
|
||||||
|
return datetime.datetime.strptime(timestr, '%Y-%m-%d %H:%M:%S.%f')
|
||||||
|
except ValueError:
|
||||||
|
pass
|
||||||
|
try:
|
||||||
|
return datetime.datetime.strptime(timestr, '%Y-%m-%d %H:%M:%S')
|
||||||
|
except ValueError:
|
||||||
|
pass
|
||||||
|
|
||||||
|
class TDSql:
|
||||||
|
def __init__(self):
    """Initialize query bookkeeping; the cursor is attached later via init()."""
    # Row count of the most recent query result.
    self.queryRows = 0
    # Column count of the most recent query result.
    self.queryCols = 0
    # Rows affected by the most recent DML statement.
    self.affectedRows = 0
|
||||||
|
|
||||||
|
def init(self, cursor, log=False):
    """Attach a database cursor; optionally enable cursor-side SQL logging.

    cursor: a taos connection cursor used for all subsequent statements.
    log: when True, log executed SQL to '<caller filename>.sql'.
    """
    self.cursor = cursor

    if (log):
        # Name the log file after the calling test script.
        caller = inspect.getframeinfo(inspect.stack()[1][0])
        self.cursor.log(caller.filename + ".sql")
|
||||||
|
|
||||||
|
def close(self):
    """Close the attached database cursor."""
    self.cursor.close()
|
||||||
|
|
||||||
|
def prepare(self, dbname="db", drop=True, **kwargs):
    """(Re)create and select a test database.

    dbname: database to create and use.
    drop: when True, drop the database first if it exists.
    kwargs: appended verbatim as 'create database' options; a default
            'duration 300' is added unless the caller supplies one.
    """
    tdLog.info(f"prepare database:{dbname}")
    s = 'reset query cache'
    try:
        self.cursor.execute(s)
    # NOTE(review): bare except — deliberately best-effort, since older
    # servers do not support 'reset query cache'.
    except:
        tdLog.notice("'reset query cache' is not supported")
    if drop:
        s = f'drop database if exists {dbname}'
        self.cursor.execute(s)
    s = f'create database {dbname}'
    # Each kwarg becomes a 'key value' option on the create statement.
    for k, v in kwargs.items():
        s += f" {k} {v}"
    if "duration" not in kwargs:
        s += " duration 300"
    self.cursor.execute(s)
    s = f'use {dbname}'
    self.cursor.execute(s)
    # Give the server a moment to finish database creation.
    time.sleep(2)
|
||||||
|
|
||||||
|
def error(self, sql, expectedErrno = None, expectErrInfo = None):
    """Execute *sql* expecting it to FAIL; abort the run if it succeeds.

    expectedErrno: when given, the raised error's errno must match.
    expectErrInfo: when given, must equal or be contained in the parsed
                   error message.
    Returns the parsed error message (self.error_info).
    """
    # Capture the calling test's location for failure reports.
    caller = inspect.getframeinfo(inspect.stack()[1][0])
    expectErrNotOccured = True

    try:
        self.cursor.execute(sql)
    except BaseException as e:
        expectErrNotOccured = False
        self.errno = e.errno
        error_info = repr(e)
        # Extract the message between the first '(' and the trailing
        # errno component of repr(e), stripping quotes.
        self.error_info = ','.join(error_info[error_info.index('(')+1:-1].split(",")[:-1]).replace("'","")
        # self.error_info = (','.join(error_info.split(",")[:-1]).split("(",1)[1:][0]).replace("'","")
    if expectErrNotOccured:
        tdLog.exit("%s(%d) failed: sql:%s, expect error not occured" % (caller.filename, caller.lineno, sql))
    else:
        # Error occurred as expected: reset query state.
        self.queryRows = 0
        self.queryCols = 0
        self.queryResult = None

        if expectedErrno != None:
            if expectedErrno == self.errno:
                tdLog.info("sql:%s, expected errno %s occured" % (sql, expectedErrno))
            else:
                tdLog.exit("%s(%d) failed: sql:%s, errno %s occured, but not expected errno %s" % (caller.filename, caller.lineno, sql, self.errno, expectedErrno))
        else:
            tdLog.info("sql:%s, expect error occured" % (sql))

        if expectErrInfo != None:
            # Accept exact match or substring containment.
            if expectErrInfo == self.error_info or expectErrInfo in self.error_info:
                tdLog.info("sql:%s, expected expectErrInfo %s occured" % (sql, expectErrInfo))
            else:
                tdLog.exit("%s(%d) failed: sql:%s, expectErrInfo %s occured, but not expected errno %s" % (caller.filename, caller.lineno, sql, self.error_info, expectErrInfo))
        else:
            tdLog.info("sql:%s, expect error occured" % (sql))

    return self.error_info
|
||||||
|
|
||||||
|
def query(self, sql, row_tag=None, queryTimes=10, count_expected_res=None):
|
||||||
|
self.sql = sql
|
||||||
|
i=1
|
||||||
|
while i <= queryTimes:
|
||||||
|
try:
|
||||||
|
self.cursor.execute(sql)
|
||||||
|
self.queryResult = self.cursor.fetchall()
|
||||||
|
self.queryRows = len(self.queryResult)
|
||||||
|
self.queryCols = len(self.cursor.description)
|
||||||
|
|
||||||
|
if count_expected_res is not None:
|
||||||
|
counter = 0
|
||||||
|
while count_expected_res != self.queryResult[0][0]:
|
||||||
|
self.cursor.execute(sql)
|
||||||
|
self.queryResult = self.cursor.fetchall()
|
||||||
|
if counter < queryTimes:
|
||||||
|
counter += 0.5
|
||||||
|
time.sleep(0.5)
|
||||||
|
else:
|
||||||
|
return False
|
||||||
|
if row_tag:
|
||||||
|
return self.queryResult
|
||||||
|
return self.queryRows
|
||||||
|
except Exception as e:
|
||||||
|
tdLog.notice("Try to query again, query times: %d "%i)
|
||||||
|
if i == queryTimes:
|
||||||
|
caller = inspect.getframeinfo(inspect.stack()[1][0])
|
||||||
|
args = (caller.filename, caller.lineno, sql, repr(e))
|
||||||
|
tdLog.notice("%s(%d) failed: sql:%s, %s" % args)
|
||||||
|
raise Exception(repr(e))
|
||||||
|
i+=1
|
||||||
|
time.sleep(1)
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
def is_err_sql(self, sql):
|
||||||
|
err_flag = True
|
||||||
|
try:
|
||||||
|
self.cursor.execute(sql)
|
||||||
|
except BaseException:
|
||||||
|
err_flag = False
|
||||||
|
|
||||||
|
return False if err_flag else True
|
||||||
|
|
||||||
|
def getVariable(self, search_attr):
|
||||||
|
'''
|
||||||
|
get variable of search_attr access "show variables"
|
||||||
|
'''
|
||||||
|
try:
|
||||||
|
sql = 'show variables'
|
||||||
|
param_list = self.query(sql, row_tag=True)
|
||||||
|
for param in param_list:
|
||||||
|
if param[0] == search_attr:
|
||||||
|
return param[1], param_list
|
||||||
|
except Exception as e:
|
||||||
|
caller = inspect.getframeinfo(inspect.stack()[1][0])
|
||||||
|
args = (caller.filename, caller.lineno, sql, repr(e))
|
||||||
|
tdLog.notice("%s(%d) failed: sql:%s, %s" % args)
|
||||||
|
raise Exception(repr(e))
|
||||||
|
|
||||||
|
def getColNameList(self, sql, col_tag=None):
|
||||||
|
self.sql = sql
|
||||||
|
try:
|
||||||
|
col_name_list = []
|
||||||
|
col_type_list = []
|
||||||
|
self.cursor.execute(sql)
|
||||||
|
for query_col in self.cursor.description:
|
||||||
|
col_name_list.append(query_col[0])
|
||||||
|
col_type_list.append(query_col[1])
|
||||||
|
except Exception as e:
|
||||||
|
caller = inspect.getframeinfo(inspect.stack()[1][0])
|
||||||
|
args = (caller.filename, caller.lineno, sql, repr(e))
|
||||||
|
tdLog.notice("%s(%d) failed: sql:%s, %s" % args)
|
||||||
|
raise Exception(repr(e))
|
||||||
|
if col_tag:
|
||||||
|
return col_name_list, col_type_list
|
||||||
|
return col_name_list
|
||||||
|
|
||||||
|
def waitedQuery(self, sql, expectRows, timeout):
|
||||||
|
tdLog.info("sql: %s, try to retrieve %d rows in %d seconds" % (sql, expectRows, timeout))
|
||||||
|
self.sql = sql
|
||||||
|
try:
|
||||||
|
for i in range(timeout):
|
||||||
|
self.cursor.execute(sql)
|
||||||
|
self.queryResult = self.cursor.fetchall()
|
||||||
|
self.queryRows = len(self.queryResult)
|
||||||
|
self.queryCols = len(self.cursor.description)
|
||||||
|
tdLog.info("sql: %s, try to retrieve %d rows,get %d rows" % (sql, expectRows, self.queryRows))
|
||||||
|
if self.queryRows >= expectRows:
|
||||||
|
return (self.queryRows, i)
|
||||||
|
time.sleep(1)
|
||||||
|
except Exception as e:
|
||||||
|
caller = inspect.getframeinfo(inspect.stack()[1][0])
|
||||||
|
args = (caller.filename, caller.lineno, sql, repr(e))
|
||||||
|
tdLog.notice("%s(%d) failed: sql:%s, %s" % args)
|
||||||
|
raise Exception(repr(e))
|
||||||
|
return (self.queryRows, timeout)
|
||||||
|
|
||||||
|
def getRows(self):
|
||||||
|
return self.queryRows
|
||||||
|
|
||||||
|
def checkRows(self, expectRows):
|
||||||
|
if self.queryRows == expectRows:
|
||||||
|
tdLog.info("sql:%s, queryRows:%d == expect:%d" % (self.sql, self.queryRows, expectRows))
|
||||||
|
return True
|
||||||
|
else:
|
||||||
|
caller = inspect.getframeinfo(inspect.stack()[1][0])
|
||||||
|
args = (caller.filename, caller.lineno, self.sql, self.queryRows, expectRows)
|
||||||
|
tdLog.exit("%s(%d) failed: sql:%s, queryRows:%d != expect:%d" % args)
|
||||||
|
|
||||||
|
def checkRows_range(self, excepte_row_list):
|
||||||
|
if self.queryRows in excepte_row_list:
|
||||||
|
tdLog.info(f"sql:{self.sql}, queryRows:{self.queryRows} in expect:{excepte_row_list}")
|
||||||
|
return True
|
||||||
|
else:
|
||||||
|
caller = inspect.getframeinfo(inspect.stack()[1][0])
|
||||||
|
tdLog.exit(f"{caller.filename}({caller.lineno}) failed: sql:{self.sql}, queryRows:{self.queryRows} not in expect:{excepte_row_list}")
|
||||||
|
|
||||||
|
def checkCols(self, expectCols):
|
||||||
|
if self.queryCols == expectCols:
|
||||||
|
tdLog.info("sql:%s, queryCols:%d == expect:%d" % (self.sql, self.queryCols, expectCols))
|
||||||
|
else:
|
||||||
|
caller = inspect.getframeinfo(inspect.stack()[1][0])
|
||||||
|
args = (caller.filename, caller.lineno, self.sql, self.queryCols, expectCols)
|
||||||
|
tdLog.exit("%s(%d) failed: sql:%s, queryCols:%d != expect:%d" % args)
|
||||||
|
|
||||||
|
def checkRowCol(self, row, col):
|
||||||
|
caller = inspect.getframeinfo(inspect.stack()[2][0])
|
||||||
|
if row < 0:
|
||||||
|
args = (caller.filename, caller.lineno, self.sql, row)
|
||||||
|
tdLog.exit("%s(%d) failed: sql:%s, row:%d is smaller than zero" % args)
|
||||||
|
if col < 0:
|
||||||
|
args = (caller.filename, caller.lineno, self.sql, row)
|
||||||
|
tdLog.exit("%s(%d) failed: sql:%s, col:%d is smaller than zero" % args)
|
||||||
|
if row > self.queryRows:
|
||||||
|
args = (caller.filename, caller.lineno, self.sql, row, self.queryRows)
|
||||||
|
tdLog.exit("%s(%d) failed: sql:%s, row:%d is larger than queryRows:%d" % args)
|
||||||
|
if col > self.queryCols:
|
||||||
|
args = (caller.filename, caller.lineno, self.sql, col, self.queryCols)
|
||||||
|
tdLog.exit("%s(%d) failed: sql:%s, col:%d is larger than queryCols:%d" % args)
|
||||||
|
|
||||||
|
def checkDataType(self, row, col, dataType):
|
||||||
|
self.checkRowCol(row, col)
|
||||||
|
return self.cursor.istype(col, dataType)
|
||||||
|
|
||||||
|
|
||||||
|
    def checkData(self, row, col, data, show = False):
        """Assert cell (row, col) of the cached result equals *data*.

        Exits the test run on mismatch. TIMESTAMP columns get special
        handling: *data* may be a string (nanosecond string when len >= 28,
        otherwise a parseable datetime), an integer epoch whose precision is
        guessed from its digit count (13=ms, 16=us, 19=ns), or a
        datetime.datetime. All other values are compared via str().

        Args:
            row, col: zero-based result coordinates.
            data: expected value.
            show: when truthy, log "check successfully" on success.
        """
        if row >= self.queryRows:
            caller = inspect.getframeinfo(inspect.stack()[1][0])
            args = (caller.filename, caller.lineno, self.sql, row+1, self.queryRows)
            tdLog.exit("%s(%d) failed: sql:%s, row:%d is larger than queryRows:%d" % args)
        if col >= self.queryCols:
            caller = inspect.getframeinfo(inspect.stack()[1][0])
            args = (caller.filename, caller.lineno, self.sql, col+1, self.queryCols)
            tdLog.exit("%s(%d) failed: sql:%s, col:%d is larger than queryCols:%d" % args)

        self.checkRowCol(row, col)

        if self.queryResult[row][col] != data:
            if self.cursor.istype(col, "TIMESTAMP"):
                # suppose user want to check nanosecond timestamp if a longer data passed``
                if isinstance(data,str) :
                    if (len(data) >= 28):
                        # 28+ chars: treated as a nanosecond-precision timestamp string
                        if self.queryResult[row][col] == _parse_ns_timestamp(data):
                            if(show):
                                tdLog.info("check successfully")
                        else:
                            caller = inspect.getframeinfo(inspect.stack()[1][0])
                            args = (caller.filename, caller.lineno, self.sql, row, col, self.queryResult[row][col], data)
                            tdLog.exit("%s(%d) failed: sql:%s row:%d col:%d data:%s != expect:%s" % args)
                    else:
                        # normalize both sides to UTC before comparing
                        if self.queryResult[row][col].astimezone(datetime.timezone.utc) == _parse_datetime(data).astimezone(datetime.timezone.utc):
                            # tdLog.info(f"sql:{self.sql}, row:{row} col:{col} data:{self.queryResult[row][col]} == expect:{data}")
                            if(show):
                                tdLog.info("check successfully")
                        else:
                            caller = inspect.getframeinfo(inspect.stack()[1][0])
                            args = (caller.filename, caller.lineno, self.sql, row, col, self.queryResult[row][col], data)
                            tdLog.exit("%s(%d) failed: sql:%s row:%d col:%d data:%s != expect:%s" % args)
                    return
                elif isinstance(data,int):
                    # guess the epoch precision from the digit count
                    if len(str(data)) == 16:
                        precision = 'us'
                    elif len(str(data)) == 13:
                        precision = 'ms'
                    elif len(str(data)) == 19:
                        precision = 'ns'
                    else:
                        caller = inspect.getframeinfo(inspect.stack()[1][0])
                        args = (caller.filename, caller.lineno, self.sql, row, col, self.queryResult[row][col], data)
                        tdLog.exit("%s(%d) failed: sql:%s row:%d col:%d data:%s != expect:%s" % args)
                        return
                    success = False
                    if precision == 'ms':
                        # convert the returned datetime to epoch milliseconds
                        dt_obj = self.queryResult[row][col]
                        ts = int(int((dt_obj-datetime.datetime.fromtimestamp(0,dt_obj.tzinfo)).total_seconds())*1000) + int(dt_obj.microsecond/1000)
                        if ts == data:
                            success = True
                    elif precision == 'us':
                        # convert the returned datetime to epoch microseconds
                        dt_obj = self.queryResult[row][col]
                        ts = int(int((dt_obj-datetime.datetime.fromtimestamp(0,dt_obj.tzinfo)).total_seconds())*1e6) + int(dt_obj.microsecond)
                        if ts == data:
                            success = True
                    elif precision == 'ns':
                        # ns results are assumed to already be raw integers — TODO confirm
                        if data == self.queryResult[row][col]:
                            success = True
                    if success:
                        if(show):
                            tdLog.info("check successfully")
                    else:
                        caller = inspect.getframeinfo(inspect.stack()[1][0])
                        args = (caller.filename, caller.lineno, self.sql, row, col, self.queryResult[row][col], data)
                        tdLog.exit("%s(%d) failed: sql:%s row:%d col:%d data:%s != expect:%s" % args)
                    return
                elif isinstance(data,datetime.datetime):
                    # compare both sides as offsets from their own epoch so that
                    # naive and aware datetimes can be matched
                    dt_obj = self.queryResult[row][col]
                    delt_data = data-datetime.datetime.fromtimestamp(0,data.tzinfo)
                    delt_result = self.queryResult[row][col] - datetime.datetime.fromtimestamp(0,self.queryResult[row][col].tzinfo)
                    if delt_data == delt_result:
                        if(show):
                            tdLog.info("check successfully")
                    else:
                        caller = inspect.getframeinfo(inspect.stack()[1][0])
                        args = (caller.filename, caller.lineno, self.sql, row, col, self.queryResult[row][col], data)
                        tdLog.exit("%s(%d) failed: sql:%s row:%d col:%d data:%s != expect:%s" % args)
                    return
                else:
                    caller = inspect.getframeinfo(inspect.stack()[1][0])
                    args = (caller.filename, caller.lineno, self.sql, row, col, self.queryResult[row][col], data)
                    tdLog.exit("%s(%d) failed: sql:%s row:%d col:%d data:%s != expect:%s" % args)

            # non-timestamp mismatch: fall back to string comparison first
            if str(self.queryResult[row][col]) == str(data):
                # tdLog.info(f"sql:{self.sql}, row:{row} col:{col} data:{self.queryResult[row][col]} == expect:{data}")
                if(show):
                    tdLog.info("check successfully")
                return

            elif isinstance(data, float):
                # relative tolerance for |data| >= 1, absolute tolerance otherwise
                if abs(data) >= 1 and abs((self.queryResult[row][col] - data) / data) <= 0.000001:
                    # tdLog.info(f"sql:{self.sql}, row:{row} col:{col} data:{self.queryResult[row][col]} == expect:{data}")
                    if(show):
                        tdLog.info("check successfully")
                elif abs(data) < 1 and abs(self.queryResult[row][col] - data) <= 0.000001:
                    # tdLog.info(f"sql:{self.sql}, row:{row} col:{col} data:{self.queryResult[row][col]} == expect:{data}")
                    if(show):
                        tdLog.info("check successfully")

                else:
                    caller = inspect.getframeinfo(inspect.stack()[1][0])
                    args = (caller.filename, caller.lineno, self.sql, row, col, self.queryResult[row][col], data)
                    tdLog.exit("%s(%d) failed: sql:%s row:%d col:%d data:%s != expect:%s" % args)
                return
            else:
                caller = inspect.getframeinfo(inspect.stack()[1][0])
                args = (caller.filename, caller.lineno, self.sql, row, col, self.queryResult[row][col], data)
                tdLog.exit("%s(%d) failed: sql:%s row:%d col:%d data:%s != expect:%s" % args)
        # values compared equal directly
        if(show):
            tdLog.info("check successfully")
|
||||||
|
|
||||||
|
# return true or false replace exit, no print out
|
||||||
|
def checkRowColNoExit(self, row, col):
|
||||||
|
caller = inspect.getframeinfo(inspect.stack()[2][0])
|
||||||
|
if row < 0:
|
||||||
|
args = (caller.filename, caller.lineno, self.sql, row)
|
||||||
|
return False
|
||||||
|
if col < 0:
|
||||||
|
args = (caller.filename, caller.lineno, self.sql, row)
|
||||||
|
return False
|
||||||
|
if row > self.queryRows:
|
||||||
|
args = (caller.filename, caller.lineno, self.sql, row, self.queryRows)
|
||||||
|
return False
|
||||||
|
if col > self.queryCols:
|
||||||
|
args = (caller.filename, caller.lineno, self.sql, col, self.queryCols)
|
||||||
|
return False
|
||||||
|
|
||||||
|
return True
|
||||||
|
|
||||||
|
|
||||||
|
# return true or false replace exit, no print out
|
||||||
|
    def checkDataNoExit(self, row, col, data):
        """Non-exiting variant of checkData(): returns True/False, no logging.

        NOTE(review): the TIMESTAMP branch calls len(data) unconditionally,
        so passing an int or datetime for a TIMESTAMP column raises
        TypeError here (checkData handles those types) — confirm callers
        only pass strings for timestamps.
        """
        if self.checkRowColNoExit(row, col) == False:
            return False
        if self.queryResult[row][col] != data:
            if self.cursor.istype(col, "TIMESTAMP"):
                # suppose user want to check nanosecond timestamp if a longer data passed
                if (len(data) >= 28):
                    if pd.to_datetime(self.queryResult[row][col]) == pd.to_datetime(data):
                        return True
                else:
                    if self.queryResult[row][col] == _parse_datetime(data):
                        return True
                return False

            if str(self.queryResult[row][col]) == str(data):
                return True
            elif isinstance(data, float):
                # relative tolerance for |data| >= 1, absolute tolerance otherwise
                if abs(data) >= 1 and abs((self.queryResult[row][col] - data) / data) <= 0.000001:
                    return True
                elif abs(data) < 1 and abs(self.queryResult[row][col] - data) <= 0.000001:
                    return True
                else:
                    return False
            else:
                return False

        # values compared equal directly
        return True
|
||||||
|
|
||||||
|
|
||||||
|
# loop execute sql then sleep(waitTime) , if checkData ok break loop
|
||||||
|
def checkDataLoop(self, row, col, data, sql, loopCount, waitTime):
|
||||||
|
# loop check util checkData return true
|
||||||
|
for i in range(loopCount):
|
||||||
|
self.query(sql)
|
||||||
|
if self.checkDataNoExit(row, col, data) :
|
||||||
|
self.checkData(row, col, data)
|
||||||
|
return
|
||||||
|
time.sleep(waitTime)
|
||||||
|
|
||||||
|
# last check
|
||||||
|
self.query(sql)
|
||||||
|
self.checkData(row, col, data)
|
||||||
|
|
||||||
|
|
||||||
|
def getData(self, row, col):
|
||||||
|
self.checkRowCol(row, col)
|
||||||
|
return self.queryResult[row][col]
|
||||||
|
|
||||||
|
def getResult(self, sql):
|
||||||
|
self.sql = sql
|
||||||
|
try:
|
||||||
|
self.cursor.execute(sql)
|
||||||
|
self.queryResult = self.cursor.fetchall()
|
||||||
|
except Exception as e:
|
||||||
|
caller = inspect.getframeinfo(inspect.stack()[1][0])
|
||||||
|
args = (caller.filename, caller.lineno, sql, repr(e))
|
||||||
|
tdLog.notice("%s(%d) failed: sql:%s, %s" % args)
|
||||||
|
raise Exception(repr(e))
|
||||||
|
return self.queryResult
|
||||||
|
|
||||||
|
def executeTimes(self, sql, times):
|
||||||
|
for i in range(times):
|
||||||
|
try:
|
||||||
|
return self.cursor.execute(sql)
|
||||||
|
except BaseException:
|
||||||
|
time.sleep(1)
|
||||||
|
continue
|
||||||
|
|
||||||
|
def execute(self, sql, queryTimes=30, show=False):
|
||||||
|
self.sql = sql
|
||||||
|
if show:
|
||||||
|
tdLog.info(sql)
|
||||||
|
i=1
|
||||||
|
while i <= queryTimes:
|
||||||
|
try:
|
||||||
|
self.affectedRows = self.cursor.execute(sql)
|
||||||
|
return self.affectedRows
|
||||||
|
except Exception as e:
|
||||||
|
tdLog.notice("Try to execute sql again, query times: %d "%i)
|
||||||
|
if i == queryTimes:
|
||||||
|
caller = inspect.getframeinfo(inspect.stack()[1][0])
|
||||||
|
args = (caller.filename, caller.lineno, sql, repr(e))
|
||||||
|
tdLog.notice("%s(%d) failed: sql:%s, %s" % args)
|
||||||
|
raise Exception(repr(e))
|
||||||
|
i+=1
|
||||||
|
time.sleep(1)
|
||||||
|
pass
|
||||||
|
|
||||||
|
def checkAffectedRows(self, expectAffectedRows):
|
||||||
|
if self.affectedRows != expectAffectedRows:
|
||||||
|
caller = inspect.getframeinfo(inspect.stack()[1][0])
|
||||||
|
args = (caller.filename, caller.lineno, self.sql, self.affectedRows, expectAffectedRows)
|
||||||
|
tdLog.exit("%s(%d) failed: sql:%s, affectedRows:%d != expect:%d" % args)
|
||||||
|
|
||||||
|
tdLog.info("sql:%s, affectedRows:%d == expect:%d" % (self.sql, self.affectedRows, expectAffectedRows))
|
||||||
|
|
||||||
|
def checkColNameList(self, col_name_list, expect_col_name_list):
|
||||||
|
if col_name_list == expect_col_name_list:
|
||||||
|
tdLog.info("sql:%s, col_name_list:%s == expect_col_name_list:%s" % (self.sql, col_name_list, expect_col_name_list))
|
||||||
|
else:
|
||||||
|
caller = inspect.getframeinfo(inspect.stack()[1][0])
|
||||||
|
args = (caller.filename, caller.lineno, self.sql, col_name_list, expect_col_name_list)
|
||||||
|
tdLog.exit("%s(%d) failed: sql:%s, col_name_list:%s != expect_col_name_list:%s" % args)
|
||||||
|
|
||||||
|
def __check_equal(self, elm, expect_elm):
|
||||||
|
if elm == expect_elm:
|
||||||
|
return True
|
||||||
|
if type(elm) in(list, tuple) and type(expect_elm) in(list, tuple):
|
||||||
|
if len(elm) != len(expect_elm):
|
||||||
|
return False
|
||||||
|
if len(elm) == 0:
|
||||||
|
return True
|
||||||
|
for i in range(len(elm)):
|
||||||
|
flag = self.__check_equal(elm[i], expect_elm[i])
|
||||||
|
if not flag:
|
||||||
|
return False
|
||||||
|
return True
|
||||||
|
return False
|
||||||
|
|
||||||
|
def checkEqual(self, elm, expect_elm):
|
||||||
|
if elm == expect_elm:
|
||||||
|
tdLog.info("sql:%s, elm:%s == expect_elm:%s" % (self.sql, elm, expect_elm))
|
||||||
|
return
|
||||||
|
if self.__check_equal(elm, expect_elm):
|
||||||
|
tdLog.info("sql:%s, elm:%s == expect_elm:%s" % (self.sql, elm, expect_elm))
|
||||||
|
return
|
||||||
|
|
||||||
|
caller = inspect.getframeinfo(inspect.stack()[1][0])
|
||||||
|
args = (caller.filename, caller.lineno, self.sql, elm, expect_elm)
|
||||||
|
# tdLog.info("%s(%d) failed: sql:%s, elm:%s != expect_elm:%s" % args)
|
||||||
|
raise Exception("%s(%d) failed: sql:%s, elm:%s != expect_elm:%s" % args)
|
||||||
|
|
||||||
|
def checkNotEqual(self, elm, expect_elm):
|
||||||
|
if elm != expect_elm:
|
||||||
|
tdLog.info("sql:%s, elm:%s != expect_elm:%s" % (self.sql, elm, expect_elm))
|
||||||
|
else:
|
||||||
|
caller = inspect.getframeinfo(inspect.stack()[1][0])
|
||||||
|
args = (caller.filename, caller.lineno, self.sql, elm, expect_elm)
|
||||||
|
tdLog.info("%s(%d) failed: sql:%s, elm:%s == expect_elm:%s" % args)
|
||||||
|
raise Exception
|
||||||
|
|
||||||
|
def get_times(self, time_str, precision="ms"):
|
||||||
|
caller = inspect.getframeinfo(inspect.stack()[1][0])
|
||||||
|
if time_str[-1] not in TAOS_TIME_INIT:
|
||||||
|
tdLog.exit(f"{caller.filename}({caller.lineno}) failed: {time_str} not a standard taos time init")
|
||||||
|
if precision not in TAOS_PRECISION:
|
||||||
|
tdLog.exit(f"{caller.filename}({caller.lineno}) failed: {precision} not a standard taos time precision")
|
||||||
|
|
||||||
|
if time_str[-1] == TAOS_TIME_INIT[0]:
|
||||||
|
times = int(time_str[:-1]) * TIME_NS
|
||||||
|
if time_str[-1] == TAOS_TIME_INIT[1]:
|
||||||
|
times = int(time_str[:-1]) * TIME_US
|
||||||
|
if time_str[-1] == TAOS_TIME_INIT[2]:
|
||||||
|
times = int(time_str[:-1]) * TIME_MS
|
||||||
|
if time_str[-1] == TAOS_TIME_INIT[3]:
|
||||||
|
times = int(time_str[:-1]) * TIME_S
|
||||||
|
if time_str[-1] == TAOS_TIME_INIT[4]:
|
||||||
|
times = int(time_str[:-1]) * TIME_M
|
||||||
|
if time_str[-1] == TAOS_TIME_INIT[5]:
|
||||||
|
times = int(time_str[:-1]) * TIME_H
|
||||||
|
if time_str[-1] == TAOS_TIME_INIT[6]:
|
||||||
|
times = int(time_str[:-1]) * TIME_D
|
||||||
|
if time_str[-1] == TAOS_TIME_INIT[7]:
|
||||||
|
times = int(time_str[:-1]) * TIME_W
|
||||||
|
if time_str[-1] == TAOS_TIME_INIT[8]:
|
||||||
|
times = int(time_str[:-1]) * TIME_N
|
||||||
|
if time_str[-1] == TAOS_TIME_INIT[9]:
|
||||||
|
times = int(time_str[:-1]) * TIME_Y
|
||||||
|
|
||||||
|
if precision == "ms":
|
||||||
|
return int(times)
|
||||||
|
elif precision == "us":
|
||||||
|
return int(times*1000)
|
||||||
|
elif precision == "ns":
|
||||||
|
return int(times*1000*1000)
|
||||||
|
|
||||||
|
def get_type(self, col):
|
||||||
|
if self.cursor.istype(col, "BOOL"):
|
||||||
|
return "BOOL"
|
||||||
|
if self.cursor.istype(col, "INT"):
|
||||||
|
return "INT"
|
||||||
|
if self.cursor.istype(col, "BIGINT"):
|
||||||
|
return "BIGINT"
|
||||||
|
if self.cursor.istype(col, "TINYINT"):
|
||||||
|
return "TINYINT"
|
||||||
|
if self.cursor.istype(col, "SMALLINT"):
|
||||||
|
return "SMALLINT"
|
||||||
|
if self.cursor.istype(col, "FLOAT"):
|
||||||
|
return "FLOAT"
|
||||||
|
if self.cursor.istype(col, "DOUBLE"):
|
||||||
|
return "DOUBLE"
|
||||||
|
if self.cursor.istype(col, "BINARY"):
|
||||||
|
return "BINARY"
|
||||||
|
if self.cursor.istype(col, "NCHAR"):
|
||||||
|
return "NCHAR"
|
||||||
|
if self.cursor.istype(col, "TIMESTAMP"):
|
||||||
|
return "TIMESTAMP"
|
||||||
|
if self.cursor.istype(col, "JSON"):
|
||||||
|
return "JSON"
|
||||||
|
if self.cursor.istype(col, "TINYINT UNSIGNED"):
|
||||||
|
return "TINYINT UNSIGNED"
|
||||||
|
if self.cursor.istype(col, "SMALLINT UNSIGNED"):
|
||||||
|
return "SMALLINT UNSIGNED"
|
||||||
|
if self.cursor.istype(col, "INT UNSIGNED"):
|
||||||
|
return "INT UNSIGNED"
|
||||||
|
if self.cursor.istype(col, "BIGINT UNSIGNED"):
|
||||||
|
return "BIGINT UNSIGNED"
|
||||||
|
|
||||||
|
    def taosdStatus(self, state):
        """Poll (up to 30 times) whether a 'taosd' process is running and
        compare against the expected *state* (1 = running, 0 = not running).

        Exits the test when the observed state never matches.
        """
        tdLog.sleep(5)
        pstate = 0
        for i in range(30):
            # rescan the full process list on every iteration
            pstate = 0
            pl = psutil.pids()
            for pid in pl:
                try:
                    if psutil.Process(pid).name() == 'taosd':
                        print('have already started')
                        pstate = 1
                        break
                # the process may have exited between pids() and Process()
                except psutil.NoSuchProcess:
                    pass
            if pstate == state :break
            # keep polling while either side says "running"; otherwise give up
            if state or pstate:
                tdLog.sleep(1)
                continue
            pstate = 0
            break

        args=(pstate,state)
        if pstate == state:
            tdLog.info("taosd state is %d == expect:%d" %args)
        else:
            tdLog.exit("taosd state is %d != expect:%d" %args)
        pass
|
||||||
|
|
||||||
|
def haveFile(self, dir, state):
|
||||||
|
if os.path.exists(dir) and os.path.isdir(dir):
|
||||||
|
if not os.listdir(dir):
|
||||||
|
if state :
|
||||||
|
tdLog.exit("dir: %s is empty, expect: not empty" %dir)
|
||||||
|
else:
|
||||||
|
tdLog.info("dir: %s is empty, expect: empty" %dir)
|
||||||
|
else:
|
||||||
|
if state :
|
||||||
|
tdLog.info("dir: %s is not empty, expect: not empty" %dir)
|
||||||
|
else:
|
||||||
|
tdLog.exit("dir: %s is not empty, expect: empty" %dir)
|
||||||
|
else:
|
||||||
|
tdLog.exit("dir: %s doesn't exist" %dir)
|
||||||
|
def createDir(self, dir):
|
||||||
|
if os.path.exists(dir):
|
||||||
|
shutil.rmtree(dir)
|
||||||
|
tdLog.info("dir: %s is removed" %dir)
|
||||||
|
os.makedirs( dir, 755 )
|
||||||
|
tdLog.info("dir: %s is created" %dir)
|
||||||
|
pass
|
||||||
|
|
||||||
|
tdSql = TDSql()
|
|
@ -0,0 +1,70 @@
|
||||||
|
###################################################################
|
||||||
|
# Copyright (c) 2016 by TAOS Technologies, Inc.
|
||||||
|
# All rights reserved.
|
||||||
|
#
|
||||||
|
# This file is proprietary and confidential to TAOS Technologies.
|
||||||
|
# No part of this file may be reproduced, stored, transmitted,
|
||||||
|
# disclosed or used in any form or by any means other than as
|
||||||
|
# expressly provided by the written permission from Jianhui Tao
|
||||||
|
#
|
||||||
|
###################################################################
|
||||||
|
|
||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
|
||||||
|
from util.sql import tdSql
|
||||||
|
|
||||||
|
class TDSetSql:
    """Builds the CREATE TABLE / INSERT SQL strings used by test cases."""

    # Shared default schemas. Kept as class-level constants so the method
    # signatures avoid Python's mutable-default-argument pitfall (the previous
    # code used dict literals directly as defaults). They are never mutated.
    _DEFAULT_COLUMNS = {'ts':'timestamp','col1':'tinyint','col2':'smallint','col3':'int','col4':'bigint','col5': 'unsigned int','col6': 'unsigned tinyint','col7': 'unsigned smallint',
                        'col8': 'unsigned int','col9': 'unsigned bigint','col10': 'float','col11': 'double','col12': 'bool','col13': 'binary(20)','col14': 'nchar(20)'}
    _DEFAULT_TAGS = {'ts_tag':'timestamp','t1':'tinyint','t2':'smallint','t3':'int','t4':'bigint','t5': 'unsigned int','t6': 'unsigned tinyint','t7': 'unsigned smallint',
                     't8': 'unsigned int','t9': 'unsigned bigint','t10': 'float','t11': 'double','t12': 'bool','t13': 'binary(20)','t14': 'nchar(20)'}

    def init(self, conn, logSql):
        # conn/logSql are accepted for framework-interface compatibility
        # but are unused here.
        self.stbname = 'stb'

    def set_create_normaltable_sql(self, ntbname='ntb', column_dict=None):
        """Return a CREATE TABLE statement for a normal (non-super) table.

        column_dict maps column name -> SQL type; defaults to the shared
        14-column schema.
        """
        if column_dict is None:
            column_dict = self._DEFAULT_COLUMNS
        column_sql = ','.join(f"{k} {v}" for k, v in column_dict.items())
        return f'create table {ntbname} ({column_sql})'

    def set_create_stable_sql(self, stbname='stb', column_dict=None, tag_dict=None):
        """Return a CREATE TABLE statement for a super table with tags."""
        if column_dict is None:
            column_dict = self._DEFAULT_COLUMNS
        if tag_dict is None:
            tag_dict = self._DEFAULT_TAGS
        column_sql = ','.join(f"{k} {v}" for k, v in column_dict.items())
        tag_sql = ','.join(f"{k} {v}" for k, v in tag_dict.items())
        return f'create table {stbname} ({column_sql}) tags({tag_sql})'

    def set_insertsql(self, column_dict, tbname, binary_str=None, nchar_str=None):
        """Return an INSERT statement template with %-style placeholders,
        one per column in *column_dict* (unknown types are skipped)."""
        sql = ''
        for k, v in column_dict.items():
            if v.lower() in ('timestamp', 'tinyint', 'smallint', 'int', 'bigint',
                             'tinyint unsigned', 'smallint unsigned',
                             'int unsigned', 'bigint unsigned', 'bool'):
                sql += '%d,'
            elif v.lower() in ('float', 'double'):
                sql += '%f,'
            elif 'binary' in v.lower():
                sql += f'"{binary_str}%d",'
            elif 'nchar' in v.lower():
                sql += f'"{nchar_str}%d",'
        return (f'insert into {tbname} values({sql[:-1]})')

    def insert_values(self, column_dict, i, insert_sql, insert_list, ts):
        """Append the i-th generated row's values to *insert_list* (mutated
        in place — callers pass a fresh list per row) and execute the
        filled-in *insert_sql* template."""
        for k, v in column_dict.items():
            if v.lower() in ['tinyint', 'smallint', 'int', 'bigint',
                             'tinyint unsigned', 'smallint unsigned',
                             'int unsigned', 'bigint unsigned'] or \
                    'binary' in v.lower() or 'nchar' in v.lower():
                insert_list.append(0 + i)
            elif v.lower() in ('float', 'double'):
                insert_list.append(0.1 + i)
            elif v.lower() == 'bool':
                insert_list.append(i % 2)
            elif v.lower() == 'timestamp':
                insert_list.append(ts + i)
        tdSql.execute(insert_sql%(tuple(insert_list)))
|
||||||
|
|
|
@ -0,0 +1,44 @@
|
||||||
|
###################################################################
|
||||||
|
# Copyright (c) 2020 by TAOS Technologies, Inc.
|
||||||
|
# All rights reserved.
|
||||||
|
#
|
||||||
|
# This file is proprietary and confidential to TAOS Technologies.
|
||||||
|
# No part of this file may be reproduced, stored, transmitted,
|
||||||
|
# disclosed or used in any form or by any means other than as
|
||||||
|
# expressly provided by the written permission from Jianhui Tao
|
||||||
|
#
|
||||||
|
###################################################################
|
||||||
|
|
||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
|
||||||
|
import sys
|
||||||
|
import os
|
||||||
|
import time
|
||||||
|
import datetime
|
||||||
|
from util.log import *
|
||||||
|
|
||||||
|
class TDSub:
    """Thin wrapper around a TDengine subscription handle."""

    def __init__(self):
        # rows/cols observed by the most recent consume()
        self.consumedRows = 0
        self.consumedCols = 0

    def init(self, sub):
        # attach the underlying subscription object
        self.sub = sub

    def close(self, keepProgress):
        # delegate; keepProgress controls whether the consume offset persists
        self.sub.close(keepProgress)

    def consume(self):
        """Fetch everything currently available on the subscription and
        return the number of rows consumed."""
        self.result = self.sub.consume()
        self.result.fetch_all()
        self.consumedRows = self.result.row_count
        self.consumedCols = self.result.field_count
        return self.consumedRows

    def checkRows(self, expectRows):
        """Exit the test when the consumed row count differs from *expectRows*."""
        if self.consumedRows != expectRows:
            tdLog.exit("consumed rows:%d != expect:%d" % (self.consumedRows, expectRows))
        tdLog.info("consumed rows:%d == expect:%d" % (self.consumedRows, expectRows))


tdSub = TDSub()
|
|
@ -0,0 +1,261 @@
|
||||||
|
import requests
|
||||||
|
from fabric2 import Connection
|
||||||
|
from util.log import *
|
||||||
|
from util.common import *
|
||||||
|
|
||||||
|
|
||||||
|
class TAdapter:
|
||||||
|
    def __init__(self):
        """Set up default state and the default taosadapter configuration.

        The dict below is dumped to taosadapter.toml by deploy(); its key
        order is preserved, so do not reorder entries casually.
        """
        self.running = 0     # 1 while the adapter process is (believed) running
        self.deployed = 0    # 1 once a config file has been written
        self.remoteIP = ""   # non-empty -> operate on a remote host via fabric
        self.taosadapter_cfg_dict = {
            "debug"                 : True,
            "taosConfigDir"         : "",
            "port"                  : 6041,
            "logLevel"              : "error",
            "cors"                  : {
                "allowAllOrigins"   : True,
            },
            # connection pool towards taosd
            "pool"                  : {
                "maxConnect"        : 4000,
                "maxIdle"           : 4000,
                "idleTimeout"       : "1h"
            },
            "ssl"                   : {
                "enable"            : False,
                "certFile"          : "",
                "keyFile"           : "",
            },
            # "path" is filled in by deploy() with the dnode's log dir
            "log"                   : {
                "path"              : "",
                "rotationCount"     : 30,
                "rotationTime"      : "24h",
                "rotationSize"      : "1GB",
                "enableRecordHttpSql" : True,
                "sqlRotationCount"  : 2,
                "sqlRotationTime"   : "24h",
                "sqlRotationSize"   : "1GB",
            },
            "monitor"               : {
                "collectDuration"   : "3s",
                "incgroup"          : False,
                "pauseQueryMemoryThreshold" : 70,
                "pauseAllMemoryThreshold"   : 80,
                "identity"          : "",
                "writeToTD"         : True,
                "user"              : "root",
                "password"          : "taosdata",
                "writeInterval"     : "30s"
            },
            # every ingestion protocol is enabled by default for the tests
            "opentsdb"              : {
                "enable"            : True
            },
            "influxdb"              : {
                "enable"            : True
            },
            "statsd"                : {
                "enable"            : True
            },
            "collectd"              : {
                "enable"            : True
            },
            "opentsdb_telnet"       : {
                "enable"            : True
            },
            "node_exporter"         : {
                "enable"            : True
            },
            "prometheus"            : {
                "enable"            : True
            },
        }
|
||||||
|
# TODO: add taosadapter env:
|
||||||
|
# 1. init cfg.toml.dict :OK
|
||||||
|
# 2. dump dict to toml : OK
|
||||||
|
# 3. update cfg.toml.dict :OK
|
||||||
|
# 4. check adapter exists : OK
|
||||||
|
# 5. deploy adapter cfg : OK
|
||||||
|
# 6. adapter start : OK
|
||||||
|
# 7. adapter stop
|
||||||
|
|
||||||
|
    def init(self, path, remoteIP=""):
        """Resolve the working directory and, for remote runs, open the
        fabric connection.

        Args:
            path: project root; "" means derive it from the binary location.
            remoteIP: empty for local runs, otherwise a Python-dict literal
                string with host/port/user/password/path keys.
        """
        self.path = path
        self.remoteIP = remoteIP
        binPath = get_path() + "/../../../"
        binPath = os.path.realpath(binPath)

        if path == "":
            # derive the repo root relative to the located binary
            self.path = os.path.abspath(binPath + "../../")
        else:
            self.path = os.path.realpath(path)

        if self.remoteIP:
            try:
                # NOTE(review): eval() on the remoteIP string executes arbitrary
                # code — acceptable only because it comes from the test harness
                # command line, never from untrusted input.
                self.config = eval(remoteIP)
                self.remote_conn = Connection(host=self.config["host"], port=self.config["port"], user=self.config["user"], connect_kwargs={'password':self.config["password"]})
            except Exception as e:
                # connection problems are logged but not fatal here
                tdLog.notice(e)
|
||||||
|
|
||||||
|
def update_cfg(self, update_dict :dict):
|
||||||
|
if not isinstance(update_dict, dict):
|
||||||
|
return
|
||||||
|
if "log" in update_dict and "path" in update_dict["log"]:
|
||||||
|
del update_dict["log"]["path"]
|
||||||
|
for key, value in update_dict.items():
|
||||||
|
if key in ["cors", "pool", "ssl", "log", "monitor", "opentsdb", "influxdb", "statsd", "collectd", "opentsdb_telnet", "node_exporter", "prometheus"]:
|
||||||
|
if isinstance(value, dict):
|
||||||
|
for k, v in value.items():
|
||||||
|
self.taosadapter_cfg_dict[key][k] = v
|
||||||
|
else:
|
||||||
|
self.taosadapter_cfg_dict[key] = value
|
||||||
|
|
||||||
|
def check_adapter(self):
|
||||||
|
if get_path(tool="taosadapter"):
|
||||||
|
return False
|
||||||
|
else:
|
||||||
|
return True
|
||||||
|
|
||||||
|
def remote_exec(self, updateCfgDict, execCmd):
|
||||||
|
remoteCfgDict = copy.deepcopy(updateCfgDict)
|
||||||
|
if "log" in remoteCfgDict and "path" in remoteCfgDict["log"]:
|
||||||
|
del remoteCfgDict["log"]["path"]
|
||||||
|
|
||||||
|
remoteCfgDictStr = base64.b64encode(toml.dumps(remoteCfgDict).encode()).decode()
|
||||||
|
execCmdStr = base64.b64encode(execCmd.encode()).decode()
|
||||||
|
with self.remote_conn.cd((self.config["path"]+sys.path[0].replace(self.path, '')).replace('\\','/')):
|
||||||
|
self.remote_conn.run(f"python3 ./test.py -D {remoteCfgDictStr} -e {execCmdStr}" )
|
||||||
|
|
||||||
|
def cfg(self, option, value):
|
||||||
|
cmd = f"echo {option} = {value} >> {self.cfg_path}"
|
||||||
|
if os.system(cmd) != 0:
|
||||||
|
tdLog.exit(cmd)
|
||||||
|
|
||||||
|
def deploy(self, *update_cfg_dict):
|
||||||
|
self.log_dir = os.path.join(self.path,"sim","dnode1","log")
|
||||||
|
self.cfg_dir = os.path.join(self.path,"sim","dnode1","cfg")
|
||||||
|
self.cfg_path = os.path.join(self.cfg_dir,"taosadapter.toml")
|
||||||
|
|
||||||
|
cmd = f"touch {self.cfg_path}"
|
||||||
|
if os.system(cmd) != 0:
|
||||||
|
tdLog.exit(cmd)
|
||||||
|
|
||||||
|
self.taosadapter_cfg_dict["log"]["path"] = self.log_dir
|
||||||
|
if bool(update_cfg_dict):
|
||||||
|
self.update_cfg(update_dict=update_cfg_dict)
|
||||||
|
|
||||||
|
if (self.remoteIP == ""):
|
||||||
|
dict2toml(self.taosadapter_cfg_dict, self.cfg_path)
|
||||||
|
else:
|
||||||
|
self.remote_exec(self.taosadapter_cfg_dict, "tAdapter.deploy(update_cfg_dict)")
|
||||||
|
|
||||||
|
self.deployed = 1
|
||||||
|
|
||||||
|
tdLog.debug(f"taosadapter is deployed and configured by {self.cfg_path}")
|
||||||
|
|
||||||
|
def start(self):
|
||||||
|
bin_path = get_path(tool="taosadapter")
|
||||||
|
|
||||||
|
if (bin_path == ""):
|
||||||
|
tdLog.exit("taosadapter not found!")
|
||||||
|
else:
|
||||||
|
tdLog.info(f"taosadapter found: {bin_path}")
|
||||||
|
|
||||||
|
if platform.system().lower() == 'windows':
|
||||||
|
cmd = f"mintty -h never {bin_path} -c {self.cfg_path}"
|
||||||
|
else:
|
||||||
|
cmd = f"nohup {bin_path} -c {self.cfg_path} > /dev/null & "
|
||||||
|
|
||||||
|
if self.remoteIP:
|
||||||
|
self.remote_exec(self.taosadapter_cfg_dict, f"tAdapter.deployed=1\ntAdapter.log_dir={self.log_dir}\ntAdapter.cfg_dir={self.cfg_dir}\ntAdapter.start()")
|
||||||
|
self.running = 1
|
||||||
|
else:
|
||||||
|
os.system(f"rm -rf {self.log_dir}{os.sep}taosadapter*")
|
||||||
|
if os.system(cmd) != 0:
|
||||||
|
tdLog.exit(cmd)
|
||||||
|
self.running = 1
|
||||||
|
tdLog.debug(f"taosadapter is running with {cmd} " )
|
||||||
|
|
||||||
|
time.sleep(0.1)
|
||||||
|
|
||||||
|
taosadapter_port = self.taosadapter_cfg_dict["port"]
|
||||||
|
for i in range(5):
|
||||||
|
ip = 'localhost'
|
||||||
|
if self.remoteIP != "":
|
||||||
|
ip = self.remoteIP
|
||||||
|
url = f'http://{ip}:{taosadapter_port}/-/ping'
|
||||||
|
try:
|
||||||
|
r = requests.get(url)
|
||||||
|
if r.status_code == 200:
|
||||||
|
tdLog.info(f"the taosadapter has been started, using port:{taosadapter_port}")
|
||||||
|
break
|
||||||
|
except Exception:
|
||||||
|
tdLog.info(f"the taosadapter do not started!!!")
|
||||||
|
time.sleep(1)
|
||||||
|
|
||||||
|
def start_taosadapter(self):
|
||||||
|
"""
|
||||||
|
use this method, must deploy taosadapter
|
||||||
|
"""
|
||||||
|
bin_path = get_path(tool="taosadapter")
|
||||||
|
|
||||||
|
if (bin_path == ""):
|
||||||
|
tdLog.exit("taosadapter not found!")
|
||||||
|
else:
|
||||||
|
tdLog.info(f"taosadapter found: {bin_path}")
|
||||||
|
|
||||||
|
if self.deployed == 0:
|
||||||
|
tdLog.exit("taosadapter is not deployed")
|
||||||
|
|
||||||
|
if platform.system().lower() == 'windows':
|
||||||
|
cmd = f"mintty -h never {bin_path} -c {self.cfg_dir}"
|
||||||
|
else:
|
||||||
|
cmd = f"nohup {bin_path} -c {self.cfg_path} > /dev/null & "
|
||||||
|
|
||||||
|
if self.remoteIP:
|
||||||
|
self.remote_exec(self.taosadapter_cfg_dict, f"tAdapter.deployed=1\ntAdapter.log_dir={self.log_dir}\ntAdapter.cfg_dir={self.cfg_dir}\ntAdapter.start()")
|
||||||
|
self.running = 1
|
||||||
|
else:
|
||||||
|
if os.system(cmd) != 0:
|
||||||
|
tdLog.exit(cmd)
|
||||||
|
self.running = 1
|
||||||
|
tdLog.debug(f"taosadapter is running with {cmd} " )
|
||||||
|
|
||||||
|
time.sleep(0.1)
|
||||||
|
|
||||||
|
def stop(self, force_kill=False):
|
||||||
|
signal = "-9" if force_kill else "-15"
|
||||||
|
if self.remoteIP:
|
||||||
|
self.remote_exec(self.taosadapter_cfg_dict, "tAdapter.running=1\ntAdapter.stop()")
|
||||||
|
tdLog.info("stop taosadapter")
|
||||||
|
return
|
||||||
|
toBeKilled = "taosadapter"
|
||||||
|
if platform.system().lower() == 'windows':
|
||||||
|
psCmd = f"ps -ef|grep -w {toBeKilled}| grep -v grep | awk '{{print $2}}'"
|
||||||
|
processID = subprocess.check_output(psCmd, shell=True).decode("utf-8").strip()
|
||||||
|
while(processID):
|
||||||
|
killCmd = "kill %s %s > nul 2>&1" % (signal, processID)
|
||||||
|
os.system(killCmd)
|
||||||
|
time.sleep(1)
|
||||||
|
processID = subprocess.check_output(psCmd, shell=True).decode("utf-8").strip()
|
||||||
|
self.running = 0
|
||||||
|
tdLog.debug(f"taosadapter is stopped by kill {signal}")
|
||||||
|
|
||||||
|
else:
|
||||||
|
if self.running != 0:
|
||||||
|
psCmd = f"ps -ef|grep -w {toBeKilled}| grep -v grep | awk '{{print $2}}'"
|
||||||
|
processID = subprocess.check_output(psCmd, shell=True).decode("utf-8").strip()
|
||||||
|
while(processID):
|
||||||
|
killCmd = "kill %s %s > /dev/null 2>&1" % (signal, processID)
|
||||||
|
os.system(killCmd)
|
||||||
|
time.sleep(1)
|
||||||
|
processID = subprocess.check_output(psCmd, shell=True).decode("utf-8").strip()
|
||||||
|
port = 6041
|
||||||
|
fuserCmd = f"fuser -k -n tcp {port} > /dev/null"
|
||||||
|
os.system(fuserCmd)
|
||||||
|
self.running = 0
|
||||||
|
tdLog.debug(f"taosadapter is stopped by kill {signal}")
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
tAdapter = TAdapter()
|
|
@ -0,0 +1,465 @@
|
||||||
|
###################################################################
|
||||||
|
# Copyright (c) 2016 by TAOS Technologies, Inc.
|
||||||
|
# All rights reserved.
|
||||||
|
#
|
||||||
|
# This file is proprietary and confidential to TAOS Technologies.
|
||||||
|
# No part of this file may be reproduced, stored, transmitted,
|
||||||
|
# disclosed or used in any form or by any means other than as
|
||||||
|
# expressly provided by the written permission from Jianhui Tao
|
||||||
|
#
|
||||||
|
###################################################################
|
||||||
|
|
||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
|
||||||
|
import sys
|
||||||
|
import os
|
||||||
|
import time
|
||||||
|
import datetime
|
||||||
|
import inspect
|
||||||
|
import psutil
|
||||||
|
import shutil
|
||||||
|
import json
|
||||||
|
from util.log import *
|
||||||
|
from multiprocessing import cpu_count
|
||||||
|
|
||||||
|
|
||||||
|
# TODO: fully test the function. Handle exceptions.
|
||||||
|
# Handle json format not accepted by taosdemo
|
||||||
|
|
||||||
|
### How to use TaosdemoCfg:
|
||||||
|
# Before you start:
|
||||||
|
# Make sure you understand how is taosdemo's JSON file structured. Because the python used does
|
||||||
|
# not support directory in directory for self objects, the config is being tear to different parts.
|
||||||
|
# Please make sure you understand which directory represent which part of which type of the file
|
||||||
|
# This module will reassemble the parts when creating the JSON file.
|
||||||
|
#
|
||||||
|
# Basic use example
|
||||||
|
# step 1:use self.append_sql_stb() to append the insert/query/subscribe directory into the module
|
||||||
|
# you can append many insert/query/subscribe directory, but pay attention about taosdemo's limit
|
||||||
|
# step 2:use alter function to alter the specific config
|
||||||
|
# step 3:use the generation function to generate the files
|
||||||
|
#
|
||||||
|
# step 1 and step 2 can be replaced with using import functions
|
||||||
|
class TDTaosdemoCfg:
|
||||||
|
def __init__(self):
|
||||||
|
self.insert_cfg = {
|
||||||
|
"filetype": "insert",
|
||||||
|
"cfgdir": "/etc/taos",
|
||||||
|
"host": "127.0.0.1",
|
||||||
|
"port": 6030,
|
||||||
|
"user": "root",
|
||||||
|
"password": "taosdata",
|
||||||
|
"thread_count": cpu_count(),
|
||||||
|
"create_table_thread_count": cpu_count(),
|
||||||
|
"result_file": "./insert_res.txt",
|
||||||
|
"confirm_parameter_prompt": "no",
|
||||||
|
"insert_interval": 0,
|
||||||
|
"num_of_records_per_req": 32766,
|
||||||
|
"max_sql_len": 32766,
|
||||||
|
"databases": None
|
||||||
|
}
|
||||||
|
|
||||||
|
self.db = {
|
||||||
|
"name": 'db',
|
||||||
|
"drop": 'yes',
|
||||||
|
"replica": 1,
|
||||||
|
"duration": 10,
|
||||||
|
"cache": 16,
|
||||||
|
"blocks": 6,
|
||||||
|
"precision": "ms",
|
||||||
|
"keep": 3650,
|
||||||
|
"minRows": 100,
|
||||||
|
"maxRows": 4096,
|
||||||
|
"comp": 2,
|
||||||
|
"walLevel": 1,
|
||||||
|
"cachelast": 0,
|
||||||
|
"quorum": 1,
|
||||||
|
"fsync": 3000,
|
||||||
|
"update": 0
|
||||||
|
}
|
||||||
|
|
||||||
|
self.query_cfg = {
|
||||||
|
"filetype": "query",
|
||||||
|
"cfgdir": "/etc/taos",
|
||||||
|
"host": "127.0.0.1",
|
||||||
|
"port": 6030,
|
||||||
|
"user": "root",
|
||||||
|
"password": "taosdata",
|
||||||
|
"confirm_parameter_prompt": "no",
|
||||||
|
"databases": "db",
|
||||||
|
"query_times": 2,
|
||||||
|
"query_mode": "taosc",
|
||||||
|
"specified_table_query": None,
|
||||||
|
"super_table_query": None
|
||||||
|
}
|
||||||
|
|
||||||
|
self.table_query = {
|
||||||
|
"query_interval": 1,
|
||||||
|
"concurrent": 3,
|
||||||
|
"sqls": None
|
||||||
|
}
|
||||||
|
|
||||||
|
self.stable_query = {
|
||||||
|
"stblname": "stb",
|
||||||
|
"query_interval": 1,
|
||||||
|
"threads": 3,
|
||||||
|
"sqls": None
|
||||||
|
}
|
||||||
|
|
||||||
|
self.sub_cfg = {
|
||||||
|
"filetype": "subscribe",
|
||||||
|
"cfgdir": "/etc/taos",
|
||||||
|
"host": "127.0.0.1",
|
||||||
|
"port": 6030,
|
||||||
|
"user": "root",
|
||||||
|
"password": "taosdata",
|
||||||
|
"databases": "db",
|
||||||
|
"confirm_parameter_prompt": "no",
|
||||||
|
"specified_table_query": None,
|
||||||
|
"super_table_query": None
|
||||||
|
}
|
||||||
|
|
||||||
|
self.table_sub = {
|
||||||
|
"concurrent": 1,
|
||||||
|
"mode": "sync",
|
||||||
|
"interval": 10000,
|
||||||
|
"restart": "yes",
|
||||||
|
"keepProgress": "yes",
|
||||||
|
"sqls": None
|
||||||
|
}
|
||||||
|
|
||||||
|
self.stable_sub = {
|
||||||
|
"stblname": "stb",
|
||||||
|
"threads": 1,
|
||||||
|
"mode": "sync",
|
||||||
|
"interval": 10000,
|
||||||
|
"restart": "yes",
|
||||||
|
"keepProgress": "yes",
|
||||||
|
"sqls": None
|
||||||
|
}
|
||||||
|
|
||||||
|
self.stbs = []
|
||||||
|
self.stb_template = {
|
||||||
|
"name": "stb",
|
||||||
|
"child_table_exists": "no",
|
||||||
|
"childtable_count": 100,
|
||||||
|
"childtable_prefix": "stb_",
|
||||||
|
"auto_create_table": "no",
|
||||||
|
"batch_create_tbl_num": 5,
|
||||||
|
"data_source": "rand",
|
||||||
|
"insert_mode": "taosc",
|
||||||
|
"insert_rows": 100,
|
||||||
|
"childtable_limit": 10,
|
||||||
|
"childtable_offset": 0,
|
||||||
|
"interlace_rows": 0,
|
||||||
|
"insert_interval": 0,
|
||||||
|
"max_sql_len": 32766,
|
||||||
|
"disorder_ratio": 0,
|
||||||
|
"disorder_range": 1000,
|
||||||
|
"timestamp_step": 1,
|
||||||
|
"start_timestamp": "2020-10-01 00:00:00.000",
|
||||||
|
"sample_format": "csv",
|
||||||
|
"sample_file": "./sample.csv",
|
||||||
|
"tags_file": "",
|
||||||
|
"columns": [{"type": "INT", "count": 1}],
|
||||||
|
"tags": [{"type": "BIGINT", "count": 1}]
|
||||||
|
}
|
||||||
|
|
||||||
|
self.tb_query_sql = []
|
||||||
|
self.tb_query_sql_template = {
|
||||||
|
"sql": "select last_row(*) from stb_0 ",
|
||||||
|
"result": "temp/query_res0.txt"
|
||||||
|
}
|
||||||
|
|
||||||
|
self.stb_query_sql = []
|
||||||
|
self.stb_query_sql_template = {
|
||||||
|
"sql": "select last_row(ts) from xxxx",
|
||||||
|
"result": "temp/query_res2.txt"
|
||||||
|
}
|
||||||
|
|
||||||
|
self.tb_sub_sql = []
|
||||||
|
self.tb_sub_sql_template = {
|
||||||
|
"sql": "select * from stb_0 ;",
|
||||||
|
"result": "temp/subscribe_res0.txt"
|
||||||
|
}
|
||||||
|
|
||||||
|
self.stb_sub_sql = []
|
||||||
|
self.stb_sub_sql_template = {
|
||||||
|
"sql": "select * from xxxx where ts > '2021-02-25 11:35:00.000' ;",
|
||||||
|
"result": "temp/subscribe_res1.txt"
|
||||||
|
}
|
||||||
|
|
||||||
|
# The following functions are import functions for different dicts and lists
|
||||||
|
# except import_sql, all other import functions will a dict and overwrite the origional dict
|
||||||
|
# dict_in: the dict used to overwrite the target
|
||||||
|
def import_insert_cfg(self, dict_in):
|
||||||
|
self.insert_cfg = dict_in
|
||||||
|
|
||||||
|
def import_db(self, dict_in):
|
||||||
|
self.db = dict_in
|
||||||
|
|
||||||
|
def import_stbs(self, dict_in):
|
||||||
|
self.stbs = dict_in
|
||||||
|
|
||||||
|
def import_query_cfg(self, dict_in):
|
||||||
|
self.query_cfg = dict_in
|
||||||
|
|
||||||
|
def import_table_query(self, dict_in):
|
||||||
|
self.table_query = dict_in
|
||||||
|
|
||||||
|
def import_stable_query(self, dict_in):
|
||||||
|
self.stable_query = dict_in
|
||||||
|
|
||||||
|
def import_sub_cfg(self, dict_in):
|
||||||
|
self.sub_cfg = dict_in
|
||||||
|
|
||||||
|
def import_table_sub(self, dict_in):
|
||||||
|
self.table_sub = dict_in
|
||||||
|
|
||||||
|
def import_stable_sub(self, dict_in):
|
||||||
|
self.stable_sub = dict_in
|
||||||
|
|
||||||
|
def import_sql(self, Sql_in, mode):
|
||||||
|
"""used for importing the sql later used
|
||||||
|
|
||||||
|
Args:
|
||||||
|
Sql_in (dict): the imported sql dict
|
||||||
|
mode (str): the sql storing location within TDTaosdemoCfg
|
||||||
|
format: 'fileType_tableType'
|
||||||
|
fileType: query, sub
|
||||||
|
tableType: table, stable
|
||||||
|
"""
|
||||||
|
if mode == 'query_table':
|
||||||
|
self.tb_query_sql = Sql_in
|
||||||
|
elif mode == 'query_stable':
|
||||||
|
self.stb_query_sql = Sql_in
|
||||||
|
elif mode == 'sub_table':
|
||||||
|
self.tb_sub_sql = Sql_in
|
||||||
|
elif mode == 'sub_stable':
|
||||||
|
self.stb_sub_sql = Sql_in
|
||||||
|
# import functions end
|
||||||
|
|
||||||
|
# The following functions are alter functions for different dicts
|
||||||
|
# Args:
|
||||||
|
# key: the key that is going to be modified
|
||||||
|
# value: the value of the key that is going to be modified
|
||||||
|
# if key = 'databases' | "specified_table_query" | "super_table_query"|"sqls"
|
||||||
|
# value will not be used
|
||||||
|
|
||||||
|
def alter_insert_cfg(self, key, value):
|
||||||
|
|
||||||
|
if key == 'databases':
|
||||||
|
self.insert_cfg[key] = [
|
||||||
|
{
|
||||||
|
'dbinfo': self.db,
|
||||||
|
'super_tables': self.stbs
|
||||||
|
}
|
||||||
|
]
|
||||||
|
else:
|
||||||
|
self.insert_cfg[key] = value
|
||||||
|
|
||||||
|
def alter_db(self, key, value):
|
||||||
|
self.db[key] = value
|
||||||
|
|
||||||
|
def alter_query_tb(self, key, value):
|
||||||
|
if key == "sqls":
|
||||||
|
self.table_query[key] = self.tb_query_sql
|
||||||
|
else:
|
||||||
|
self.table_query[key] = value
|
||||||
|
|
||||||
|
def alter_query_stb(self, key, value):
|
||||||
|
if key == "sqls":
|
||||||
|
self.stable_query[key] = self.stb_query_sql
|
||||||
|
else:
|
||||||
|
self.stable_query[key] = value
|
||||||
|
|
||||||
|
def alter_query_cfg(self, key, value):
|
||||||
|
if key == "specified_table_query":
|
||||||
|
self.query_cfg["specified_table_query"] = self.table_query
|
||||||
|
elif key == "super_table_query":
|
||||||
|
self.query_cfg["super_table_query"] = self.stable_query
|
||||||
|
else:
|
||||||
|
self.query_cfg[key] = value
|
||||||
|
|
||||||
|
def alter_sub_cfg(self, key, value):
|
||||||
|
if key == "specified_table_query":
|
||||||
|
self.sub_cfg["specified_table_query"] = self.table_sub
|
||||||
|
elif key == "super_table_query":
|
||||||
|
self.sub_cfg["super_table_query"] = self.stable_sub
|
||||||
|
else:
|
||||||
|
self.sub_cfg[key] = value
|
||||||
|
|
||||||
|
def alter_sub_stb(self, key, value):
|
||||||
|
if key == "sqls":
|
||||||
|
self.stable_sub[key] = self.stb_sub_sql
|
||||||
|
else:
|
||||||
|
self.stable_sub[key] = value
|
||||||
|
|
||||||
|
def alter_sub_tb(self, key, value):
|
||||||
|
if key == "sqls":
|
||||||
|
self.table_sub[key] = self.tb_sub_sql
|
||||||
|
else:
|
||||||
|
self.table_sub[key] = value
|
||||||
|
# alter function ends
|
||||||
|
|
||||||
|
# the following functions are for handling the sql lists
|
||||||
|
def append_sql_stb(self, target, value):
|
||||||
|
"""for appending sql dict into specific sql list
|
||||||
|
|
||||||
|
Args:
|
||||||
|
target (str): the target append list
|
||||||
|
format: 'fileType_tableType'
|
||||||
|
fileType: query, sub
|
||||||
|
tableType: table, stable
|
||||||
|
unique: 'insert_stbs'
|
||||||
|
value (dict): the sql dict going to be appended
|
||||||
|
"""
|
||||||
|
if target == 'insert_stbs':
|
||||||
|
self.stbs.append(value)
|
||||||
|
elif target == 'query_table':
|
||||||
|
self.tb_query_sql.append(value)
|
||||||
|
elif target == 'query_stable':
|
||||||
|
self.stb_query_sql.append(value)
|
||||||
|
elif target == 'sub_table':
|
||||||
|
self.tb_sub_sql.append(value)
|
||||||
|
elif target == 'sub_stable':
|
||||||
|
self.stb_sub_sql.append(value)
|
||||||
|
|
||||||
|
def pop_sql_stb(self, target, index):
|
||||||
|
"""for poping a sql dict from specific sql list
|
||||||
|
|
||||||
|
Args:
|
||||||
|
target (str): the target append list
|
||||||
|
format: 'fileType_tableType'
|
||||||
|
fileType: query, sub
|
||||||
|
tableType: table, stable
|
||||||
|
unique: 'insert_stbs'
|
||||||
|
index (int): the sql dict that is going to be popped
|
||||||
|
"""
|
||||||
|
if target == 'insert_stbs':
|
||||||
|
self.stbs.pop(index)
|
||||||
|
elif target == 'query_table':
|
||||||
|
self.tb_query_sql.pop(index)
|
||||||
|
elif target == 'query_stable':
|
||||||
|
self.stb_query_sql.pop(index)
|
||||||
|
elif target == 'sub_table':
|
||||||
|
self.tb_sub_sql.pop(index)
|
||||||
|
elif target == 'sub_stable':
|
||||||
|
self.stb_sub_sql.pop(index)
|
||||||
|
# sql list modification function end
|
||||||
|
|
||||||
|
# The following functions are get functions for different dicts
|
||||||
|
def get_db(self):
|
||||||
|
return self.db
|
||||||
|
|
||||||
|
def get_stb(self):
|
||||||
|
return self.stbs
|
||||||
|
|
||||||
|
def get_insert_cfg(self):
|
||||||
|
return self.insert_cfg
|
||||||
|
|
||||||
|
def get_query_cfg(self):
|
||||||
|
return self.query_cfg
|
||||||
|
|
||||||
|
def get_tb_query(self):
|
||||||
|
return self.table_query
|
||||||
|
|
||||||
|
def get_stb_query(self):
|
||||||
|
return self.stable_query
|
||||||
|
|
||||||
|
def get_sub_cfg(self):
|
||||||
|
return self.sub_cfg
|
||||||
|
|
||||||
|
def get_tb_sub(self):
|
||||||
|
return self.table_sub
|
||||||
|
|
||||||
|
def get_stb_sub(self):
|
||||||
|
return self.stable_sub
|
||||||
|
|
||||||
|
def get_sql(self, target):
|
||||||
|
"""general get function for all sql lists
|
||||||
|
|
||||||
|
Args:
|
||||||
|
target (str): the sql list want to get
|
||||||
|
format: 'fileType_tableType'
|
||||||
|
fileType: query, sub
|
||||||
|
tableType: table, stable
|
||||||
|
unique: 'insert_stbs'
|
||||||
|
"""
|
||||||
|
if target == 'query_table':
|
||||||
|
return self.tb_query_sql
|
||||||
|
elif target == 'query_stable':
|
||||||
|
return self.stb_query_sql
|
||||||
|
elif target == 'sub_table':
|
||||||
|
return self.tb_sub_sql
|
||||||
|
elif target == 'sub_stable':
|
||||||
|
return self.stb_sub_sql
|
||||||
|
|
||||||
|
def get_template(self, target):
|
||||||
|
"""general get function for the default sql template
|
||||||
|
|
||||||
|
Args:
|
||||||
|
target (str): the sql list want to get
|
||||||
|
format: 'fileType_tableType'
|
||||||
|
fileType: query, sub
|
||||||
|
tableType: table, stable
|
||||||
|
unique: 'insert_stbs'
|
||||||
|
"""
|
||||||
|
if target == 'insert_stbs':
|
||||||
|
return self.stb_template
|
||||||
|
elif target == 'query_table':
|
||||||
|
return self.tb_query_sql_template
|
||||||
|
elif target == 'query_stable':
|
||||||
|
return self.stb_query_sql_template
|
||||||
|
elif target == 'sub_table':
|
||||||
|
return self.tb_sub_sql_template
|
||||||
|
elif target == 'sub_stable':
|
||||||
|
return self.stb_sub_sql_template
|
||||||
|
else:
|
||||||
|
print(f'did not find {target}')
|
||||||
|
|
||||||
|
# the folloing are the file generation functions
|
||||||
|
"""defalut document:
|
||||||
|
generator functio for generating taosdemo json file
|
||||||
|
will assemble the dicts and dump the final json
|
||||||
|
|
||||||
|
Args:
|
||||||
|
pathName (str): the directory wanting the json file to be
|
||||||
|
fileName (str): the name suffix of the json file
|
||||||
|
Returns:
|
||||||
|
str: [pathName]/[filetype]_[filName].json
|
||||||
|
"""
|
||||||
|
|
||||||
|
def generate_insert_cfg(self, pathName, fileName):
|
||||||
|
cfgFileName = f'{pathName}/insert_{fileName}.json'
|
||||||
|
self.alter_insert_cfg('databases', None)
|
||||||
|
with open(cfgFileName, 'w') as file:
|
||||||
|
json.dump(self.insert_cfg, file)
|
||||||
|
return cfgFileName
|
||||||
|
|
||||||
|
def generate_query_cfg(self, pathName, fileName):
|
||||||
|
cfgFileName = f'{pathName}/query_{fileName}.json'
|
||||||
|
self.alter_query_tb('sqls', None)
|
||||||
|
self.alter_query_stb('sqls', None)
|
||||||
|
self.alter_query_cfg('specified_table_query', None)
|
||||||
|
self.alter_query_cfg('super_table_query', None)
|
||||||
|
with open(cfgFileName, 'w') as file:
|
||||||
|
json.dump(self.query_cfg, file)
|
||||||
|
return cfgFileName
|
||||||
|
|
||||||
|
def generate_subscribe_cfg(self, pathName, fileName):
|
||||||
|
cfgFileName = f'{pathName}/subscribe_{fileName}.json'
|
||||||
|
self.alter_sub_tb('sqls', None)
|
||||||
|
self.alter_sub_stb('sqls', None)
|
||||||
|
self.alter_sub_cfg('specified_table_query', None)
|
||||||
|
self.alter_sub_cfg('super_table_query', None)
|
||||||
|
with open(cfgFileName, 'w') as file:
|
||||||
|
json.dump(self.sub_cfg, file)
|
||||||
|
return cfgFileName
|
||||||
|
# file generation functions ends
|
||||||
|
|
||||||
|
def drop_cfg_file(self, fileName):
|
||||||
|
os.remove(f'{fileName}')
|
||||||
|
|
||||||
|
|
||||||
|
taosdemoCfg = TDTaosdemoCfg()
|
|
@ -0,0 +1,38 @@
|
||||||
|
###################################################################
|
||||||
|
# Copyright (c) 2016 by TAOS Technologies, Inc.
|
||||||
|
# All rights reserved.
|
||||||
|
#
|
||||||
|
# This file is proprietary and confidential to TAOS Technologies.
|
||||||
|
# No part of this file may be reproduced, stored, transmitted,
|
||||||
|
# disclosed or used in any form or by any means other than as
|
||||||
|
# expressly provided by the written permission from Jianhui Tao
|
||||||
|
#
|
||||||
|
###################################################################
|
||||||
|
|
||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
|
||||||
|
from enum import Enum
|
||||||
|
|
||||||
|
class TDSmlProtocolType(Enum):
|
||||||
|
'''
|
||||||
|
Schemaless Protocol types
|
||||||
|
0 - unknown
|
||||||
|
1 - InfluxDB Line Protocol
|
||||||
|
2 - OpenTSDB Telnet Protocl
|
||||||
|
3 - OpenTSDB JSON Protocol
|
||||||
|
'''
|
||||||
|
UNKNOWN = 0
|
||||||
|
LINE = 1
|
||||||
|
TELNET = 2
|
||||||
|
JSON = 3
|
||||||
|
|
||||||
|
class TDSmlTimestampType(Enum):
|
||||||
|
NOT_CONFIGURED = 0
|
||||||
|
HOUR = 1
|
||||||
|
MINUTE = 2
|
||||||
|
SECOND = 3
|
||||||
|
MILLI_SECOND = 4
|
||||||
|
MICRO_SECOND = 5
|
||||||
|
NANO_SECOND = 6
|
||||||
|
|
||||||
|
|
Loading…
Reference in New Issue