From 5ffca8772fbe7cf4c56ea744a7578a5ba80c2a49 Mon Sep 17 00:00:00 2001
From: Alex Duan <417921451@qq.com>
Date: Fri, 8 Dec 2023 14:43:35 +0800
Subject: [PATCH] feat: army folder create

---
 tests/army/frame/__init__.py              |    0
 tests/army/frame/autogen.py               |  163 ++
 tests/army/frame/boundary.py              |   44 +
 tests/army/frame/cases.py                 |  150 ++
 tests/army/frame/cluster.py               |  108 ++
 tests/army/frame/common.py                | 1867 +++++++++++++++++++++
 tests/army/frame/constant.py              |  197 +++
 tests/army/frame/dnodes-default.py        |  502 ++++++
 tests/army/frame/dnodes-no-random-fail.py |  500 ++++++
 tests/army/frame/dnodes-random-fail.py    |  497 ++++++
 tests/army/frame/dnodes.py                |  890 ++++++++++
 tests/army/frame/gettime.py               |   65 +
 tests/army/frame/log.py                   |   49 +
 tests/army/frame/pathFinding.py           |   83 +
 tests/army/frame/sql.py                   |  655 ++++++++
 tests/army/frame/sqlset.py                |   70 +
 tests/army/frame/sub.py                   |   44 +
 tests/army/frame/taosadapter.py           |  261 +++
 tests/army/frame/taosdemoCfg.py           |  465 +++++
 tests/army/frame/types.py                 |   38 +
 20 files changed, 6648 insertions(+)
 create mode 100644 tests/army/frame/__init__.py
 create mode 100644 tests/army/frame/autogen.py
 create mode 100644 tests/army/frame/boundary.py
 create mode 100644 tests/army/frame/cases.py
 create mode 100644 tests/army/frame/cluster.py
 create mode 100644 tests/army/frame/common.py
 create mode 100644 tests/army/frame/constant.py
 create mode 100644 tests/army/frame/dnodes-default.py
 create mode 100644 tests/army/frame/dnodes-no-random-fail.py
 create mode 100644 tests/army/frame/dnodes-random-fail.py
 create mode 100644 tests/army/frame/dnodes.py
 create mode 100644 tests/army/frame/gettime.py
 create mode 100644 tests/army/frame/log.py
 create mode 100644 tests/army/frame/pathFinding.py
 create mode 100644 tests/army/frame/sql.py
 create mode 100644 tests/army/frame/sqlset.py
 create mode 100644 tests/army/frame/sub.py
 create mode 100644 tests/army/frame/taosadapter.py
 create mode 100644 tests/army/frame/taosdemoCfg.py
 create mode 100644 tests/army/frame/types.py

diff --git a/tests/army/frame/__init__.py b/tests/army/frame/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/tests/army/frame/autogen.py b/tests/army/frame/autogen.py
new file mode 100644
index 0000000000..e1227a680f
--- /dev/null
+++ b/tests/army/frame/autogen.py
@@ -0,0 +1,163 @@
+# -*- coding: utf-8 -*-
+
+import sys
+from util.log import *
+from util.cases import *
+from util.sql import *
+import threading
+import random
+import string
+import time
+
+
+#
+# Auto Gen class
+#
+class AutoGen:
+    def __init__(self):
+        self.ts = 1600000000000
+        self.batch_size = 100
+        seed = time.time() % 10000
+        random.seed(seed)
+
+    # set start ts
+    def set_start_ts(self, ts):
+        self.ts = ts
+
+    # set batch size
+    def set_batch_size(self, batch_size):
+        self.batch_size = batch_size
+
+    # generate the column definition sql fragment like "c0 int,c1 bigint,..."
+    # and the matching list of type indexes (metas)
+    def gen_columns_sql(self, pre, cnt, binary_len, nchar_len):
+        types = [
+            'timestamp',
+            'tinyint',
+            'smallint',
+            'tinyint unsigned',
+            'smallint unsigned',
+            'int',
+            'bigint',
+            'int unsigned',
+            'bigint unsigned',
+            'float',
+            'double',
+            'bool',
+            f'varchar({binary_len})',
+            f'nchar({nchar_len})'
+        ]
+
+        sqls = ""
+        metas = []
+        for i in range(cnt):
+            colname = f"{pre}{i}"
+            sel = i % len(types)
+            coltype = types[sel]
+            sql = f"{colname} {coltype}"
+            if sqls != "":
+                sqls += ","
+            sqls += sql
+            metas.append(sel)
+
+        return metas, sqls
+
+    # generate one row of data (column or tag values) according to the metas
+    def gen_data(self, i, marr):
+        datas = ""
+        for c in marr:
+            data = ""
+            if c == 0 :   # timestamp
+                data = "%d" % (self.ts + i)
"%d"%(i%128) + elif c <= 8 : # int + data = f"{i}" + elif c <= 10 : # float + data = "%f"%(i+i/1000) + elif c <= 11 : # bool + data = "%d"%(i%2) + elif c == 12 : # binary + data = '"' + self.random_string(self.bin_len) + '"' + elif c == 13 : # binary + data = '"' + self.random_string(self.nch_len) + '"' + + if datas != "": + datas += "," + datas += data + + return datas + + # generate specail wide random string + def random_string(self, count): + letters = string.ascii_letters + return ''.join(random.choice(letters) for i in range(count)) + + # create db + def create_db(self, dbname, vgroups = 2, replica = 1): + self.dbname = dbname + tdSql.execute(f'create database {dbname} vgroups {vgroups} replica {replica}') + tdSql.execute(f'use {dbname}') + + # create table or stable + def create_stable(self, stbname, tag_cnt, column_cnt, binary_len, nchar_len): + self.bin_len = binary_len + self.nch_len = nchar_len + self.stbname = stbname + self.mtags, tags = self.gen_columns_sql("t", tag_cnt, binary_len, nchar_len) + self.mcols, cols = self.gen_columns_sql("c", column_cnt - 1, binary_len, nchar_len) + + sql = f"create table {stbname} (ts timestamp, {cols}) tags({tags})" + tdSql.execute(sql) + + # create child table + def create_child(self, stbname, prename, cnt): + self.child_cnt = cnt + self.child_name = prename + for i in range(cnt): + tags_data = self.gen_data(i, self.mtags) + sql = f"create table {prename}{i} using {stbname} tags({tags_data})" + tdSql.execute(sql) + + tdLog.info(f"create child tables {cnt} ok") + + def insert_data_child(self, child_name, cnt, batch_size, step): + values = "" + print("insert child data") + ts = self.ts + + # loop do + for i in range(cnt): + value = self.gen_data(i, self.mcols) + ts += step + values += f"({ts},{value}) " + if batch_size == 1 or (i > 0 and i % batch_size == 0) : + sql = f"insert into {child_name} values {values}" + tdSql.execute(sql) + values = "" + + # end batch + if values != "": + sql = f"insert into {child_name} values {values}" + tdSql.execute(sql) + tdLog.info(f" insert data i={i}") + values = "" + + tdLog.info(f" insert child data {child_name} finished, insert rows={cnt}") + + # insert data + def insert_data(self, cnt): + for i in range(self.child_cnt): + name = f"{self.child_name}{i}" + self.insert_data_child(name, cnt, self.batch_size, 1) + + tdLog.info(f" insert data ok, child table={self.child_cnt} insert rows={cnt}") + + # insert same timestamp to all childs + def insert_samets(self, cnt): + for i in range(self.child_cnt): + name = f"{self.child_name}{i}" + self.insert_data_child(name, cnt, self.batch_size, 0) + + tdLog.info(f" insert same timestamp ok, child table={self.child_cnt} insert rows={cnt}") + + diff --git a/tests/army/frame/boundary.py b/tests/army/frame/boundary.py new file mode 100644 index 0000000000..086821e7cf --- /dev/null +++ b/tests/army/frame/boundary.py @@ -0,0 +1,44 @@ +class DataBoundary: + def __init__(self): + self.TINYINT_BOUNDARY = [-128, 127] + self.SMALLINT_BOUNDARY = [-32768, 32767] + self.INT_BOUNDARY = [-2147483648, 2147483647] + self.BIGINT_BOUNDARY = [-9223372036854775808, 9223372036854775807] + self.UTINYINT_BOUNDARY = [0, 255] + self.USMALLINT_BOUNDARY = [0, 65535] + self.UINT_BOUNDARY = [0, 4294967295] + self.UBIGINT_BOUNDARY = [0, 18446744073709551615] + self.FLOAT_BOUNDARY = [-3.40E+38, 3.40E+38] + self.DOUBLE_BOUNDARY = [-1.7e+308, 1.7e+308] + self.BOOL_BOUNDARY = [True, False] + self.BINARY_MAX_LENGTH = 16374 + self.NCHAR_MAX_LENGTH = 4093 + self.DBNAME_MAX_LENGTH = 64 + self.STBNAME_MAX_LENGTH 
= 192 + self.TBNAME_MAX_LENGTH = 192 + self.CHILD_TBNAME_MAX_LENGTH = 192 + self.TAG_KEY_MAX_LENGTH = 64 + self.COL_KEY_MAX_LENGTH = 64 + self.MAX_TAG_COUNT = 128 + self.MAX_TAG_COL_COUNT = 4096 + self.mnodeShmSize = [6292480, 2147483647] + self.mnodeShmSize_default = 6292480 + self.vnodeShmSize = [6292480, 2147483647] + self.vnodeShmSize_default = 31458304 + self.DB_PARAM_BUFFER_CONFIG = {"create_name": "buffer", "query_name": "buffer", "vnode_json_key": "szBuf", "boundary": [3, 16384], "default": 96} + self.DB_PARAM_CACHELAST_CONFIG = {"create_name": "cachelast", "query_name": "cache_model", "vnode_json_key": "", "boundary": [0, 1, 2, 3], "default": 0} + self.DB_PARAM_COMP_CONFIG = {"create_name": "comp", "query_name": "compression", "vnode_json_key": "", "boundary": [0, 1, 2], "default": 2} + self.DB_PARAM_DURATION_CONFIG = {"create_name": "duration", "query_name": "duration", "vnode_json_key": "daysPerFile", "boundary": [1, 3650, '60m', '5256000m', '1h', '87600h', '1d', '3650d'], "default": "14400m"} + self.DB_PARAM_FSYNC_CONFIG = {"create_name": "fsync", "query_name": "fsync", "vnode_json_key": "", "boundary": [0, 180000], "default": 3000} + self.DB_PARAM_KEEP_CONFIG = {"create_name": "keep", "query_name": "fsync", "vnode_json_key": "", "boundary": [1, 365000,'1440m','525600000m','24h','8760000h','1d','365000d'], "default": "5256000m,5256000m,5256000m"} + self.DB_PARAM_MAXROWS_CONFIG = {"create_name": "maxrows", "query_name": "maxrows", "vnode_json_key": "maxRows", "boundary": [200, 10000], "default": 4096} + self.DB_PARAM_MINROWS_CONFIG = {"create_name": "minrows", "query_name": "minrows", "vnode_json_key": "minRows", "boundary": [10, 1000], "default": 100} + self.DB_PARAM_NTABLES_CONFIG = {"create_name": "ntables", "query_name": "ntables", "vnode_json_key": "", "boundary": 0, "default": 0} + self.DB_PARAM_PAGES_CONFIG = {"create_name": "pages", "query_name": "pages", "vnode_json_key": "szCache", "boundary": [64], "default": 256} + self.DB_PARAM_PAGESIZE_CONFIG = {"create_name": "pagesize", "query_name": "pagesize", "vnode_json_key": "szPage", "boundary": [1, 16384], "default": 4} + self.DB_PARAM_PRECISION_CONFIG = {"create_name": "precision", "query_name": "precision", "vnode_json_key": "", "boundary": ['ms', 'us', 'ns'], "default": "ms"} + self.DB_PARAM_REPLICA_CONFIG = {"create_name": "replica", "query_name": "replica", "vnode_json_key": "", "boundary": [1], "default": 1} + self.DB_PARAM_SINGLE_STABLE_CONFIG = {"create_name": "single_stable", "query_name": "single_stable_model", "vnode_json_key": "", "boundary": [0, 1], "default": 0} + self.DB_PARAM_STRICT_CONFIG = {"create_name": "strict", "query_name": "strict", "vnode_json_key": "", "boundary": {"off": 0, "strict": 1}, "default": "off"} + self.DB_PARAM_VGROUPS_CONFIG = {"create_name": "vgroups", "query_name": "vgroups", "vnode_json_key": "", "boundary": [1, 32], "default": 2} + self.DB_PARAM_WAL_CONFIG = {"create_name": "wal", "query_name": "wal", "vnode_json_key": "", "boundary": [1, 2], "default": 1} \ No newline at end of file diff --git a/tests/army/frame/cases.py b/tests/army/frame/cases.py new file mode 100644 index 0000000000..536b8f30d3 --- /dev/null +++ b/tests/army/frame/cases.py @@ -0,0 +1,150 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import os
+import time
+import datetime
+import inspect
+import importlib
+import traceback
+from util.log import *
+
+
+class TDCase:
+    def __init__(self, name, case):
+        self.name = name
+        self.case = case
+        self._logSql = True
+
+
+class TDCases:
+    def __init__(self):
+        self.linuxCases = []
+        self.windowsCases = []
+        self.clusterCases = []
+
+    def __dynamicLoadModule(self, fileName):
+        moduleName = fileName.replace(".py", "").replace(os.sep, ".")
+        return importlib.import_module(moduleName, package='..')
+
+    def logSql(self, logSql):
+        self._logSql = logSql
+
+    def addWindows(self, name, case):
+        self.windowsCases.append(TDCase(name, case))
+
+    def addLinux(self, name, case):
+        self.linuxCases.append(TDCase(name, case))
+
+    def addCluster(self, name, case):
+        self.clusterCases.append(TDCase(name, case))
+
+    def runAllLinux(self, conn, fileName):
+        # load the case module once; the original body referenced fileName and
+        # testModule without defining them, which raised NameError when called
+        testModule = self.__dynamicLoadModule(fileName)
+        runNum = 0
+        for tmp in self.linuxCases:
+            if tmp.name.find(fileName) != -1:
+                case = testModule.TDTestCase()
+                case.init(conn)
+                case.run()
+                case.stop()
+                runNum += 1
+                continue
+
+        tdLog.info("total %d Linux test case(s) executed" % (runNum))
+
+    def runOneLinux(self, conn, fileName, replicaVar=1):
+        testModule = self.__dynamicLoadModule(fileName)
+
+        runNum = 0
+        for tmp in self.linuxCases:
+            if tmp.name.find(fileName) != -1:
+                case = testModule.TDTestCase()
+                case.init(conn, self._logSql, replicaVar)
+                try:
+                    case.run()
+                except Exception as e:
+                    tdLog.notice(repr(e))
+                    traceback.print_exc()
+                    tdLog.exit("%s failed" % (fileName))
+                case.stop()
+                runNum += 1
+                continue
+
+    def runAllWindows(self, conn, fileName):
+        # same fix as runAllLinux: fileName/testModule were previously undefined
+        testModule = self.__dynamicLoadModule(fileName)
+        runNum = 0
+        for tmp in self.windowsCases:
+            if tmp.name.find(fileName) != -1:
+                case = testModule.TDTestCase()
+                case.init(conn)
+                case.run()
+                case.stop()
+                runNum += 1
+                continue
+
+        tdLog.notice("total %d Windows test case(s) executed" % (runNum))
+
+    def runOneWindows(self, conn, fileName, replicaVar=1):
+        testModule = self.__dynamicLoadModule(fileName)
+
+        runNum = 0
+        for tmp in self.windowsCases:
+            if tmp.name.find(fileName) != -1:
+                case = testModule.TDTestCase()
+                case.init(conn, self._logSql, replicaVar)
+                try:
+                    case.run()
+                except Exception as e:
+                    tdLog.notice(repr(e))
+                    tdLog.exit("%s failed" % (fileName))
+                case.stop()
+                runNum += 1
+                continue
+        tdLog.notice("total %d Windows case(s) executed" % (runNum))
+
+    def runAllCluster(self, fileName):
+        # same fix as runAllLinux: fileName/testModule were previously undefined
+        testModule = self.__dynamicLoadModule(fileName)
+
+        runNum = 0
+        for tmp in self.clusterCases:
+            if tmp.name.find(fileName) != -1:
+                tdLog.notice("run cases like %s" % (fileName))
+                case = testModule.TDTestCase()
+                case.init()
+                case.run()
+                case.stop()
+                runNum += 1
+                continue
+
+        tdLog.notice("total %d Cluster test case(s) executed" % (runNum))
+
+    def runOneCluster(self, fileName):
+        testModule = self.__dynamicLoadModule(fileName)
+
+        runNum = 0
+        for tmp in self.clusterCases:
+            if tmp.name.find(fileName) != -1:
+                tdLog.notice("run cases like %s" % (fileName))
+                case = testModule.TDTestCase()
+                case.init()
+                case.run()
+                case.stop()
+                runNum += 1
+                continue
+
+        tdLog.notice("total %d Cluster test case(s) executed" % (runNum))
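+
+
+# Usage sketch (illustrative only; "mycase.py" stands for a hypothetical case
+# file that defines a TDTestCase class and was registered via addLinux):
+#   conn = taos.connect()
+#   tdCases.runOneLinux(conn, "mycase.py", replicaVar=1)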
+tdCases = TDCases()

diff --git a/tests/army/frame/cluster.py b/tests/army/frame/cluster.py
new file mode 100644
index 0000000000..30b70b01fc
--- /dev/null
+++ b/tests/army/frame/cluster.py
@@ -0,0 +1,108 @@
+import taos
+import sys
+import time
+import os
+import socket
+
+from util.log import *
+from util.sql import *
+from util.cases import *
+from util.dnodes import *
+from util.common import *
+
+class ClusterDnodes(TDDnodes):
+    """TDDnodes subclass that manages a caller-supplied list of dnodes"""
+    def __init__(self, dnodes_lists):
+
+        super(ClusterDnodes, self).__init__()
+        self.dnodes = dnodes_lists  # every element must be a TDDnode instance
+        self.simDeployed = False
+        self.testCluster = False
+        self.valgrind = 0
+        self.killValgrind = 1
+
+
+class ConfigureyCluster:
+    """Create the configured number of dnodes, form a cluster,
+    and return the list of TDDnode instances."""
+    hostname = socket.gethostname()
+
+    def __init__(self):
+        self.dnodes = []
+        self.dnodeNums = 5
+        self.independent = True
+        self.startPort = 6030
+        self.portStep = 100
+        self.mnodeNums = 0
+
+    def configure_cluster(self, dnodeNums=5, mnodeNums=0, independentMnode=True, startPort=6030, portStep=100, hostname="%s"%hostname):
+        self.startPort=int(startPort)
+        self.portStep=int(portStep)
+        self.hostname=hostname
+        self.dnodeNums = int(dnodeNums)
+        self.mnodeNums = int(mnodeNums)
+        self.dnodes = []
+        startPort_sec = int(startPort+portStep)
+        for num in range(1, (self.dnodeNums+1)):
+            dnode = TDDnode(num)
+            dnode.addExtraCfg("firstEp", f"{hostname}:{self.startPort}")
+            dnode.addExtraCfg("fqdn", f"{hostname}")
+            dnode.addExtraCfg("serverPort", f"{self.startPort + (num-1)*self.portStep}")
+            dnode.addExtraCfg("secondEp", f"{hostname}:{startPort_sec}")
+
+            # configure dnodes that host independent mnodes: they carry no vnodes
+            if num <= self.mnodeNums and self.mnodeNums != 0 and independentMnode == True :
+                tdLog.info(f"set mnode:{num} supportVnodes 0")
+                dnode.addExtraCfg("supportVnodes", 0)
+            # print(dnode)
+            self.dnodes.append(dnode)
+        return self.dnodes
+
+    def create_dnode(self, conn, dnodeNum):
+        tdSql.init(conn.cursor())
+        dnodeNum=int(dnodeNum)
+        for dnode in self.dnodes[1:dnodeNum]:
+            # print(dnode.cfgDict)
+            dnode_id = dnode.cfgDict["fqdn"] + ":" + dnode.cfgDict["serverPort"]
+            tdSql.execute(" create dnode '%s';"%dnode_id)
+
+    def create_mnode(self, conn, mnodeNums):
+        tdSql.init(conn.cursor())
+        mnodeNums=int(mnodeNums)
+        for i in range(2, mnodeNums+1):
+            tdLog.info("create mnode on dnode %d"%i)
+            tdSql.execute(" create mnode on dnode %d;"%i)
+
+    def check_dnode(self, conn):
+        tdSql.init(conn.cursor())
+        count=0
+        while count < 5:
+            tdSql.query("select * from information_schema.ins_dnodes")
+            # tdLog.debug(tdSql.queryResult)
+            status=0
+            for i in range(self.dnodeNums):
+                if tdSql.queryResult[i][4] == "ready":
+                    status+=1
+            # tdLog.debug(status)
+
+            if status == self.dnodeNums:
+                tdLog.debug("create cluster with %d dnodes: all dnodes ready within 5s" % self.dnodeNums)
+                break
+            count+=1
+            time.sleep(1)
+        else:
+            tdLog.exit("create cluster with %d dnodes but not all dnodes are ready within 5s ! 
"%self.dnodeNums) + + def checkConnectStatus(self,dnodeNo,hostname=hostname): + dnodeNo = int(dnodeNo) + tdLog.info("check dnode-%d connection"%(dnodeNo+1)) + hostname = socket.gethostname() + port = 6030 + dnodeNo*100 + connectToDnode = tdCom.newcon(host=hostname,port=port) + return connectToDnode + +cluster = ConfigureyCluster() \ No newline at end of file diff --git a/tests/army/frame/common.py b/tests/army/frame/common.py new file mode 100644 index 0000000000..9c45c09715 --- /dev/null +++ b/tests/army/frame/common.py @@ -0,0 +1,1867 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import random +import string +import requests +import time +import socket +import json +import toml +from util.boundary import DataBoundary +import taos +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * +from util.common import * +from util.constant import * +from dataclasses import dataclass,field +from typing import List +from datetime import datetime +import re +@dataclass +class DataSet: + ts_data : List[int] = field(default_factory=list) + int_data : List[int] = field(default_factory=list) + bint_data : List[int] = field(default_factory=list) + sint_data : List[int] = field(default_factory=list) + tint_data : List[int] = field(default_factory=list) + uint_data : List[int] = field(default_factory=list) + ubint_data : List[int] = field(default_factory=list) + usint_data : List[int] = field(default_factory=list) + utint_data : List[int] = field(default_factory=list) + float_data : List[float] = field(default_factory=list) + double_data : List[float] = field(default_factory=list) + bool_data : List[int] = field(default_factory=list) + vchar_data : List[str] = field(default_factory=list) + nchar_data : List[str] = field(default_factory=list) + + def get_order_set(self, + rows, + int_step :int = 1, + bint_step :int = 1, + sint_step :int = 1, + tint_step :int = 1, + uint_step :int = 1, + ubint_step :int = 1, + usint_step :int = 1, + utint_step :int = 1, + float_step :float = 1, + double_step :float = 1, + bool_start :int = 1, + vchar_prefix:str = "vachar_", + vchar_step :int = 1, + nchar_prefix:str = "nchar_测试_", + nchar_step :int = 1, + ts_step :int = 1 + ): + for i in range(rows): + self.int_data.append( int(i * int_step % INT_MAX )) + self.bint_data.append( int(i * bint_step % BIGINT_MAX )) + self.sint_data.append( int(i * sint_step % SMALLINT_MAX )) + self.tint_data.append( int(i * tint_step % TINYINT_MAX )) + self.uint_data.append( int(i * uint_step % INT_UN_MAX )) + self.ubint_data.append( int(i * ubint_step % BIGINT_UN_MAX )) + self.usint_data.append( int(i * usint_step % SMALLINT_UN_MAX )) + self.utint_data.append( int(i * utint_step % TINYINT_UN_MAX )) + self.float_data.append( float(i * float_step % FLOAT_MAX )) + self.double_data.append( float(i * double_step % DOUBLE_MAX )) + self.bool_data.append( bool((i + bool_start) % 2 )) + self.vchar_data.append( f"{vchar_prefix}{i * vchar_step}" ) + self.nchar_data.append( f"{nchar_prefix}{i * nchar_step}") + self.ts_data.append( 
int(datetime.timestamp(datetime.now()) * 1000 - i * ts_step)) + + def get_disorder_set(self, rows, **kwargs): + for k, v in kwargs.items(): + int_low = v if k == "int_low" else INT_MIN + int_up = v if k == "int_up" else INT_MAX + bint_low = v if k == "bint_low" else BIGINT_MIN + bint_up = v if k == "bint_up" else BIGINT_MAX + sint_low = v if k == "sint_low" else SMALLINT_MIN + sint_up = v if k == "sint_up" else SMALLINT_MAX + tint_low = v if k == "tint_low" else TINYINT_MIN + tint_up = v if k == "tint_up" else TINYINT_MAX + pass + + +class TDCom: + def __init__(self): + self.sml_type = None + self.env_setting = None + self.smlChildTableName_value = None + self.defaultJSONStrType_value = None + self.smlTagNullName_value = None + self.default_varchar_length = 6 + self.default_nchar_length = 6 + self.default_varchar_datatype = "letters" + self.default_nchar_datatype = "letters" + self.default_tagname_prefix = "t" + self.default_colname_prefix = "c" + self.default_stbname_prefix = "stb" + self.default_ctbname_prefix = "ctb" + self.default_tbname_prefix = "tb" + self.default_tag_index_start_num = 1 + self.default_column_index_start_num = 1 + self.default_stbname_index_start_num = 1 + self.default_ctbname_index_start_num = 1 + self.default_tbname_index_start_num = 1 + self.default_tagts_name = "ts" + self.default_colts_name = "ts" + self.dbname = "test" + self.stb_name = "stb" + self.ctb_name = "ctb" + self.tb_name = "tb" + self.tbname = str() + self.need_tagts = False + self.tag_type_str = "" + self.column_type_str = "" + self.columns_str = None + self.ts_value = None + self.tag_value_list = list() + self.column_value_list = list() + self.full_type_list = ["tinyint", "smallint", "int", "bigint", "tinyint unsigned", "smallint unsigned", "int unsigned", "bigint unsigned", "float", "double", "binary", "nchar", "bool"] + self.white_list = ["statsd", "node_exporter", "collectd", "icinga2", "tcollector", "information_schema", "performance_schema"] + self.Boundary = DataBoundary() + self.white_list = ["statsd", "node_exporter", "collectd", "icinga2", "tcollector", "information_schema", "performance_schema"] + self.case_name = str() + self.des_table_suffix = "_output" + self.stream_suffix = "_stream" + self.range_count = 5 + self.default_interval = 5 + self.stream_timeout = 12 + self.create_stream_sleep = 0.5 + self.record_history_ts = str() + self.precision = "ms" + self.date_time = self.genTs(precision=self.precision)[0] + self.subtable = True + self.partition_tbname_alias = "ptn_alias" if self.subtable else "" + self.partition_col_alias = "pcol_alias" if self.subtable else "" + self.partition_tag_alias = "ptag_alias" if self.subtable else "" + self.partition_expression_alias = "pexp_alias" if self.subtable else "" + self.des_table_suffix = "_output" + self.stream_suffix = "_stream" + self.subtable_prefix = "prefix_" if self.subtable else "" + self.subtable_suffix = "_suffix" if self.subtable else "" + self.downsampling_function_list = ["min(c1)", "max(c2)", "sum(c3)", "first(c4)", "last(c5)", "apercentile(c6, 50)", "avg(c7)", "count(c8)", "spread(c1)", + "stddev(c2)", "hyperloglog(c11)", "timediff(1, 0, 1h)", "timezone()", "to_iso8601(1)", 'to_unixtimestamp("1970-01-01T08:00:00+08:00")', "min(t1)", "max(t2)", "sum(t3)", + "first(t4)", "last(t5)", "apercentile(t6, 50)", "avg(t7)", "count(t8)", "spread(t1)", "stddev(t2)", "hyperloglog(t11)"] + self.stb_output_select_str = ','.join(list(map(lambda x:f'`{x}`', self.downsampling_function_list))) + self.tb_output_select_str = ','.join(list(map(lambda 
x:f'`{x}`', self.downsampling_function_list[0:15]))) + self.stb_source_select_str = ','.join(self.downsampling_function_list) + self.tb_source_select_str = ','.join(self.downsampling_function_list[0:15]) + self.fill_function_list = ["min(c1)", "max(c2)", "sum(c3)", "apercentile(c6, 50)", "avg(c7)", "count(c8)", "spread(c1)", + "stddev(c2)", "hyperloglog(c11)", "timediff(1, 0, 1h)", "timezone()", "to_iso8601(1)", 'to_unixtimestamp("1970-01-01T08:00:00+08:00")', "min(t1)", "max(t2)", "sum(t3)", + "first(t4)", "last(t5)", "apercentile(t6, 50)", "avg(t7)", "count(t8)", "spread(t1)", "stddev(t2)", "hyperloglog(t11)"] + self.fill_stb_output_select_str = ','.join(list(map(lambda x:f'`{x}`', self.fill_function_list))) + self.fill_stb_source_select_str = ','.join(self.fill_function_list) + self.fill_tb_output_select_str = ','.join(list(map(lambda x:f'`{x}`', self.fill_function_list[0:13]))) + self.fill_tb_source_select_str = ','.join(self.fill_function_list[0:13]) + self.ext_tb_source_select_str = ','.join(self.downsampling_function_list[0:13]) + self.stream_case_when_tbname = "tbname" + + self.update = True + self.disorder = True + if self.disorder: + self.update = False + self.partition_by_downsampling_function_list = ["min(c1)", "max(c2)", "sum(c3)", "first(c4)", "last(c5)", "count(c8)", "spread(c1)", + "stddev(c2)", "hyperloglog(c11)", "min(t1)", "max(t2)", "sum(t3)", "first(t4)", "last(t5)", "count(t8)", "spread(t1)", "stddev(t2)"] + + self.stb_data_filter_sql = f'ts >= {self.date_time}+1s and c1 = 1 or c2 > 1 and c3 != 4 or c4 <= 3 and c9 <> 0 or c10 is not Null or c11 is Null or \ + c12 between "na" and "nchar4" and c11 not between "bi" and "binary" and c12 match "nchar[19]" and c12 nmatch "nchar[25]" or c13 = True or \ + c5 in (1, 2, 3) or c6 not in (6, 7) and c12 like "nch%" and c11 not like "bina_" and c6 < 10 or c12 is Null or c8 >= 4 and t1 = 1 or t2 > 1 \ + and t3 != 4 or c4 <= 3 and t9 <> 0 or t10 is not Null or t11 is Null or t12 between "na" and "nchar4" and t11 not between "bi" and "binary" \ + or t12 match "nchar[19]" or t12 nmatch "nchar[25]" or t13 = True or t5 in (1, 2, 3) or t6 not in (6, 7) and t12 like "nch%" \ + and t11 not like "bina_" and t6 <= 10 or t12 is Null or t8 >= 4' + self.tb_data_filter_sql = self.stb_data_filter_sql.partition(" and t1")[0] + + self.filter_source_select_elm = "*" + self.stb_filter_des_select_elm = "ts, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, c13, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11, t12, t13" + self.partitial_stb_filter_des_select_elm = ",".join(self.stb_filter_des_select_elm.split(",")[:3]) + self.exchange_stb_filter_des_select_elm = ",".join([self.stb_filter_des_select_elm.split(",")[0], self.stb_filter_des_select_elm.split(",")[2], self.stb_filter_des_select_elm.split(",")[1]]) + self.partitial_ext_tb_source_select_str = ','.join(self.downsampling_function_list[0:2]) + self.tb_filter_des_select_elm = self.stb_filter_des_select_elm.partition(", t1")[0] + self.tag_filter_des_select_elm = self.stb_filter_des_select_elm.partition("c13, ")[2] + self.partition_by_stb_output_select_str = ','.join(list(map(lambda x:f'`{x}`', self.partition_by_downsampling_function_list))) + self.partition_by_stb_source_select_str = ','.join(self.partition_by_downsampling_function_list) + self.exchange_tag_filter_des_select_elm = ",".join([self.stb_filter_des_select_elm.partition("c13, ")[2].split(",")[0], self.stb_filter_des_select_elm.partition("c13, ")[2].split(",")[2], self.stb_filter_des_select_elm.partition("c13, ")[2].split(",")[1]]) + 
self.partitial_tag_filter_des_select_elm = ",".join(self.stb_filter_des_select_elm.partition("c13, ")[2].split(",")[:3]) + self.partitial_tag_stb_filter_des_select_elm = "ts, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, c13, t1, t3, t2, t4, t5, t6, t7, t8, t9, t10, t11, t12, t13" + self.cast_tag_filter_des_select_elm = "t5,t11,t13" + self.cast_tag_stb_filter_des_select_elm = "ts, t1, t2, t3, t4, cast(t1 as TINYINT UNSIGNED), t6, t7, t8, t9, t10, cast(t2 as varchar(256)), t12, cast(t3 as bool)" + self.tag_count = len(self.tag_filter_des_select_elm.split(",")) + self.state_window_range = list() + # def init(self, conn, logSql): + # # tdSql.init(conn.cursor(), logSql) + + def preDefine(self): + header = {'Authorization': 'Basic cm9vdDp0YW9zZGF0YQ=='} + sql_url = "http://127.0.0.1:6041/rest/sql" + sqlt_url = "http://127.0.0.1:6041/rest/sqlt" + sqlutc_url = "http://127.0.0.1:6041/rest/sqlutc" + influx_url = "http://127.0.0.1:6041/influxdb/v1/write" + telnet_url = "http://127.0.0.1:6041/opentsdb/v1/put/telnet" + return header, sql_url, sqlt_url, sqlutc_url, influx_url, telnet_url + + def genTcpParam(self): + MaxBytes = 1024*1024 + host ='127.0.0.1' + port = 6046 + return MaxBytes, host, port + + def tcpClient(self, input): + MaxBytes = tdCom.genTcpParam()[0] + host = tdCom.genTcpParam()[1] + port = tdCom.genTcpParam()[2] + sock = socket.socket(socket.AF_INET,socket.SOCK_STREAM) + sock.connect((host, port)) + sock.send(input.encode()) + sock.close() + + def restApiPost(self, sql): + requests.post(self.preDefine()[1], sql.encode("utf-8"), headers = self.preDefine()[0]) + + def createDb(self, dbname="test", db_update_tag=0, api_type="taosc"): + if api_type == "taosc": + if db_update_tag == 0: + tdSql.execute(f"drop database if exists {dbname}") + tdSql.execute(f"create database if not exists {dbname} precision 'us'") + else: + tdSql.execute(f"drop database if exists {dbname}") + tdSql.execute(f"create database if not exists {dbname} precision 'us' update 1") + elif api_type == "restful": + if db_update_tag == 0: + self.restApiPost(f"drop database if exists {dbname}") + self.restApiPost(f"create database if not exists {dbname} precision 'us'") + else: + self.restApiPost(f"drop database if exists {dbname}") + self.restApiPost(f"create database if not exists {dbname} precision 'us' update 1") + tdSql.execute(f'use {dbname}') + + def genUrl(self, url_type, dbname, precision): + if url_type == "influxdb": + if precision is None: + url = self.preDefine()[4] + "?" + "db=" + dbname + else: + url = self.preDefine()[4] + "?" 
+ "db=" + dbname + "&precision=" + precision + elif url_type == "telnet": + url = self.preDefine()[5] + "/" + dbname + else: + url = self.preDefine()[1] + return url + + def schemalessApiPost(self, sql, url_type="influxdb", dbname="test", precision=None): + if url_type == "influxdb": + url = self.genUrl(url_type, dbname, precision) + elif url_type == "telnet": + url = self.genUrl(url_type, dbname, precision) + res = requests.post(url, sql.encode("utf-8"), headers = self.preDefine()[0]) + return res + + def cleanTb(self, type="taosc", dbname="db"): + ''' + type is taosc or restful + ''' + query_sql = f"show {dbname}.stables" + res_row_list = tdSql.query(query_sql, True) + stb_list = map(lambda x: x[0], res_row_list) + for stb in stb_list: + if type == "taosc": + tdSql.execute(f'drop table if exists {dbname}.`{stb}`') + if not stb[0].isdigit(): + tdSql.execute(f'drop table if exists {dbname}.{stb}') + elif type == "restful": + self.restApiPost(f"drop table if exists {dbname}.`{stb}`") + if not stb[0].isdigit(): + self.restApiPost(f"drop table if exists {dbname}.{stb}") + + def dateToTs(self, datetime_input): + return int(time.mktime(time.strptime(datetime_input, "%Y-%m-%d %H:%M:%S.%f"))) + + def genTs(self, precision="ms", ts="", protype="taosc", ns_tag=None): + """ + protype = "taosc" or "restful" + gen ts and datetime + """ + if precision == "ns": + if ts == "" or ts is None: + ts = time.time_ns() + else: + ts = ts + if ns_tag is None: + dt = ts + else: + dt = datetime.fromtimestamp(ts // 1000000000) + dt = dt.strftime('%Y-%m-%d %H:%M:%S') + '.' + str(int(ts % 1000000000)).zfill(9) + if protype == "restful": + dt = datetime.fromtimestamp(ts // 1000000000) + dt = dt.strftime('%Y-%m-%d %H:%M:%S') + '.' + str(int(ts % 1000000000)).zfill(9) + else: + if ts == "" or ts is None: + ts = time.time() + else: + ts = ts + if precision == "ms" or precision is None: + ts = int(round(ts * 1000)) + dt = datetime.fromtimestamp(ts // 1000) + if protype == "taosc": + dt = dt.strftime('%Y-%m-%d %H:%M:%S') + '.' + str(int(ts % 1000)).zfill(3) + '000' + elif protype == "restful": + dt = dt.strftime('%Y-%m-%d %H:%M:%S') + '.' + str(int(ts % 1000)).zfill(3) + else: + pass + elif precision == "us": + ts = int(round(ts * 1000000)) + dt = datetime.fromtimestamp(ts // 1000000) + dt = dt.strftime('%Y-%m-%d %H:%M:%S') + '.' 
+ str(int(ts % 1000000)).zfill(6) + return ts, dt + + def get_long_name(self, length=10, mode="letters"): + """ + generate long name + mode could be numbers/letters/letters_mixed/mixed + """ + if mode == "numbers": + population = string.digits + elif mode == "letters": + population = string.ascii_letters.lower() + elif mode == "letters_mixed": + population = string.ascii_letters.upper() + string.ascii_letters.lower() + else: + population = string.ascii_letters.lower() + string.digits + return "".join(random.choices(population, k=length)) + + def getLongName(self, len, mode = "mixed"): + """ + generate long name + mode could be numbers/letters/letters_mixed/mixed + """ + if mode == "numbers": + chars = ''.join(random.choice(string.digits) for i in range(len)) + elif mode == "letters": + chars = ''.join(random.choice(string.ascii_letters.lower()) for i in range(len)) + elif mode == "letters_mixed": + chars = ''.join(random.choice(string.ascii_letters.upper() + string.ascii_letters.lower()) for i in range(len)) + else: + chars = ''.join(random.choice(string.ascii_letters.lower() + string.digits) for i in range(len)) + return chars + + def restartTaosd(self, index=1, db_name="db"): + tdDnodes.stop(index) + tdDnodes.startWithoutSleep(index) + tdSql.execute(f"use {db_name}") + + def typeof(self, variate): + v_type=None + if type(variate) is int: + v_type = "int" + elif type(variate) is str: + v_type = "str" + elif type(variate) is float: + v_type = "float" + elif type(variate) is bool: + v_type = "bool" + elif type(variate) is list: + v_type = "list" + elif type(variate) is tuple: + v_type = "tuple" + elif type(variate) is dict: + v_type = "dict" + elif type(variate) is set: + v_type = "set" + return v_type + + def splitNumLetter(self, input_mix_str): + nums, letters = "", "" + for i in input_mix_str: + if i.isdigit(): + nums += i + elif i.isspace(): + pass + else: + letters += i + return nums, letters + + def smlPass(self, func): + smlChildTableName = "no" + def wrapper(*args): + # if tdSql.getVariable("smlChildTableName")[0].upper() == "ID": + if smlChildTableName.upper() == "ID": + return func(*args) + else: + pass + return wrapper + + def close(self): + self.cursor.close() + + ######################################################################################################################################## + # new common API + ######################################################################################################################################## + def create_database(self,tsql, dbName='test',dropFlag=1,**kwargs): + if dropFlag == 1: + tsql.execute("drop database if exists %s"%(dbName)) + ''' + vgroups replica precision strict wal fsync comp cachelast single_stable buffer pagesize pages minrows maxrows duration keep retentions + ''' + sqlString = f'create database if not exists {dbName} ' + + dbParams = "" + if len(kwargs) > 0: + for param, value in kwargs.items(): + if param == "precision": + dbParams += f'{param} "{value}" ' + else: + dbParams += f'{param} {value} ' + sqlString += f'{dbParams}' + + tdLog.debug("create db sql: %s"%sqlString) + tsql.execute(sqlString) + tdLog.debug("complete to create database %s"%(dbName)) + return + + # def create_stable(self,tsql, dbName,stbName,column_elm_list=None, tag_elm_list=None): + # colSchema = '' + # for i in range(columnDict['int']): + # colSchema += ', c%d int'%i + # tagSchema = '' + # for i in range(tagDict['int']): + # if i > 0: + # tagSchema += ',' + # tagSchema += 't%d int'%i + + # tsql.execute("create table if not 
exists %s.%s (ts timestamp %s) tags(%s)"%(dbName, stbName, colSchema, tagSchema)) + # tdLog.debug("complete to create %s.%s" %(dbName, stbName)) + # return + + # def create_ctables(self,tsql, dbName,stbName,ctbNum,tagDict): + # tsql.execute("use %s" %dbName) + # tagsValues = '' + # for i in range(tagDict['int']): + # if i > 0: + # tagsValues += ',' + # tagsValues += '%d'%i + + # pre_create = "create table" + # sql = pre_create + # #tdLog.debug("doing create one stable %s and %d child table in %s ..." %(stbname, count ,dbname)) + # for i in range(ctbNum): + # sql += " %s_%d using %s tags(%s)"%(stbName,i,stbName,tagsValues) + # if (i > 0) and (i%100 == 0): + # tsql.execute(sql) + # sql = pre_create + # if sql != pre_create: + # tsql.execute(sql) + + # tdLog.debug("complete to create %d child tables in %s.%s" %(ctbNum, dbName, stbName)) + # return + + # def insert_data(self,tsql,dbName,stbName,ctbNum,rowsPerTbl,batchNum,startTs=0): + # tdLog.debug("start to insert data ............") + # tsql.execute("use %s" %dbName) + # pre_insert = "insert into " + # sql = pre_insert + # if startTs == 0: + # t = time.time() + # startTs = int(round(t * 1000)) + # #tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows)) + # for i in range(ctbNum): + # sql += " %s_%d values "%(stbName,i) + # for j in range(rowsPerTbl): + # sql += "(%d, %d, %d)"%(startTs + j, j, j) + # if (j > 0) and ((j%batchNum == 0) or (j == rowsPerTbl - 1)): + # tsql.execute(sql) + # if j < rowsPerTbl - 1: + # sql = "insert into %s_%d values " %(stbName,i) + # else: + # sql = "insert into " + # #end sql + # if sql != pre_insert: + # #print("insert sql:%s"%sql) + # tsql.execute(sql) + # tdLog.debug("insert data ............ [OK]") + # return + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files or "taosd.exe" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root) - len("/build/bin")] + break + return buildPath + + def getClientCfgPath(self): + buildPath = self.getBuildPath() + + if (buildPath == ""): + tdLog.exit("taosd not found!") + else: + tdLog.info("taosd found in %s" % buildPath) + cfgPath = buildPath + "/../sim/psim/cfg" + tdLog.info("cfgPath: %s" % cfgPath) + return cfgPath + + def newcon(self,host='localhost',port=6030,user='root',password='taosdata'): + con=taos.connect(host=host, user=user, password=password, port=port) + # print(con) + return con + + def newcur(self,host='localhost',port=6030,user='root',password='taosdata'): + cfgPath = self.getClientCfgPath() + con=taos.connect(host=host, user=user, password=password, config=cfgPath, port=port) + cur=con.cursor() + # print(cur) + return cur + + def newTdSql(self, host='localhost',port=6030,user='root',password='taosdata'): + newTdSql = TDSql() + cur = self.newcur(host=host,port=port,user=user,password=password) + newTdSql.init(cur, False) + return newTdSql + + ################################################################################################################ + # port from the common.py of new test frame + ################################################################################################################ + def gen_default_tag_str(self): + default_tag_str = "" + for tag_type in self.full_type_list: + if 
tag_type.lower() not in ["varchar", "binary", "nchar"]: + default_tag_str += f" {self.default_tagname_prefix}{self.default_tag_index_start_num} {tag_type}," + else: + if tag_type.lower() in ["varchar", "binary"]: + default_tag_str += f" {self.default_tagname_prefix}{self.default_tag_index_start_num} {tag_type}({self.default_varchar_length})," + else: + default_tag_str += f" {self.default_tagname_prefix}{self.default_tag_index_start_num} {tag_type}({self.default_nchar_length})," + self.default_tag_index_start_num += 1 + if self.need_tagts: + default_tag_str = self.default_tagts_name + " timestamp," + default_tag_str + return default_tag_str[:-1].lstrip() + + def gen_default_column_str(self): + self.default_column_index_start_num = 1 + default_column_str = "" + for col_type in self.full_type_list: + if col_type.lower() not in ["varchar", "binary", "nchar"]: + default_column_str += f" {self.default_colname_prefix}{self.default_column_index_start_num} {col_type}," + else: + if col_type.lower() in ["varchar", "binary"]: + default_column_str += f" {self.default_colname_prefix}{self.default_column_index_start_num} {col_type}({self.default_varchar_length})," + else: + default_column_str += f" {self.default_colname_prefix}{self.default_column_index_start_num} {col_type}({self.default_nchar_length})," + self.default_column_index_start_num += 1 + default_column_str = self.default_colts_name + " timestamp," + default_column_str + return default_column_str[:-1].lstrip() + + def gen_tag_type_str(self, tagname_prefix, tag_elm_list): + tag_index_start_num = 1 + tag_type_str = "" + if tag_elm_list is None: + tag_type_str = self.gen_default_tag_str() + else: + for tag_elm in tag_elm_list: + if "count" in tag_elm: + total_count = int(tag_elm["count"]) + else: + total_count = 1 + if total_count > 0: + for _ in range(total_count): + tag_type_str += f'{tagname_prefix}{tag_index_start_num} {tag_elm["type"]}, ' + if tag_elm["type"] in ["varchar", "binary", "nchar"]: + tag_type_str = tag_type_str.rstrip()[:-1] + f'({tag_elm["len"]}), ' + tag_index_start_num += 1 + else: + continue + tag_type_str = tag_type_str.rstrip()[:-1] + + return tag_type_str + + def gen_column_type_str(self, colname_prefix, column_elm_list): + column_index_start_num = 1 + column_type_str = "" + if column_elm_list is None: + column_type_str = self.gen_default_column_str() + else: + for column_elm in column_elm_list: + if "count" in column_elm: + total_count = int(column_elm["count"]) + else: + total_count = 1 + if total_count > 0: + for _ in range(total_count): + column_type_str += f'{colname_prefix}{column_index_start_num} {column_elm["type"]}, ' + if column_elm["type"] in ["varchar", "binary", "nchar"]: + column_type_str = column_type_str.rstrip()[:-1] + f'({column_elm["len"]}), ' + column_index_start_num += 1 + else: + continue + column_type_str = self.default_colts_name + " timestamp, " + column_type_str.rstrip()[:-1] + return column_type_str + + def gen_random_type_value(self, type_name, binary_length, binary_type, nchar_length, nchar_type): + if type_name.lower() == "tinyint": + return random.randint(self.Boundary.TINYINT_BOUNDARY[0], self.Boundary.TINYINT_BOUNDARY[1]) + elif type_name.lower() == "smallint": + return random.randint(self.Boundary.SMALLINT_BOUNDARY[0], self.Boundary.SMALLINT_BOUNDARY[1]) + elif type_name.lower() == "int": + return random.randint(self.Boundary.INT_BOUNDARY[0], self.Boundary.INT_BOUNDARY[1]) + elif type_name.lower() == "bigint": + return random.randint(self.Boundary.BIGINT_BOUNDARY[0], 
self.Boundary.BIGINT_BOUNDARY[1])
+        elif type_name.lower() == "tinyint unsigned":
+            return random.randint(self.Boundary.UTINYINT_BOUNDARY[0], self.Boundary.UTINYINT_BOUNDARY[1])
+        elif type_name.lower() == "smallint unsigned":
+            return random.randint(self.Boundary.USMALLINT_BOUNDARY[0], self.Boundary.USMALLINT_BOUNDARY[1])
+        elif type_name.lower() == "int unsigned":
+            return random.randint(self.Boundary.UINT_BOUNDARY[0], self.Boundary.UINT_BOUNDARY[1])
+        elif type_name.lower() == "bigint unsigned":
+            return random.randint(self.Boundary.UBIGINT_BOUNDARY[0], self.Boundary.UBIGINT_BOUNDARY[1])
+        elif type_name.lower() == "float":
+            return random.uniform(self.Boundary.FLOAT_BOUNDARY[0], self.Boundary.FLOAT_BOUNDARY[1])
+        elif type_name.lower() == "double":
+            # use the double boundary; the original reused FLOAT_BOUNDARY here
+            return random.uniform(self.Boundary.DOUBLE_BOUNDARY[0], self.Boundary.DOUBLE_BOUNDARY[1])
+        elif type_name.lower() == "binary":
+            return f'{self.get_long_name(binary_length, binary_type)}'
+        elif type_name.lower() == "varchar":
+            return self.get_long_name(binary_length, binary_type)
+        elif type_name.lower() == "nchar":
+            return self.get_long_name(nchar_length, nchar_type)
+        elif type_name.lower() == "bool":
+            return random.choice(self.Boundary.BOOL_BOUNDARY)
+        elif type_name.lower() == "timestamp":
+            return self.genTs()[0]
+        else:
+            pass
+
+    def gen_tag_value_list(self, tag_elm_list):
+        tag_value_list = list()
+        if tag_elm_list is None:
+            tag_value_list = list(map(lambda i: self.gen_random_type_value(i, self.default_varchar_length, self.default_varchar_datatype, self.default_nchar_length, self.default_nchar_datatype), self.full_type_list))
+        else:
+            for tag_elm in tag_elm_list:
+                if "count" in tag_elm:
+                    total_count = int(tag_elm["count"])
+                else:
+                    total_count = 1
+                if total_count > 0:
+                    for _ in range(total_count):
+                        if tag_elm["type"] in ["varchar", "binary", "nchar"]:
+                            tag_value_list.append(self.gen_random_type_value(tag_elm["type"], tag_elm["len"], self.default_varchar_datatype, tag_elm["len"], self.default_nchar_datatype))
+                        else:
+                            tag_value_list.append(self.gen_random_type_value(tag_elm["type"], "", "", "", ""))
+                else:
+                    continue
+        return tag_value_list
+
+    def gen_column_value_list(self, column_elm_list, ts_value=None):
+        if ts_value is None:
+            ts_value = self.genTs()[0]
+
+        column_value_list = list()
+        column_value_list.append(ts_value)
+        if column_elm_list is None:
+            # extend the list so the leading timestamp is kept; the original
+            # reassigned the list here and silently dropped the timestamp
+            column_value_list += list(map(lambda i: self.gen_random_type_value(i, self.default_varchar_length, self.default_varchar_datatype, self.default_nchar_length, self.default_nchar_datatype), self.full_type_list))
+        else:
+            for column_elm in column_elm_list:
+                if "count" in column_elm:
+                    total_count = int(column_elm["count"])
+                else:
+                    total_count = 1
+                if total_count > 0:
+                    for _ in range(total_count):
+                        if column_elm["type"] in ["varchar", "binary", "nchar"]:
+                            column_value_list.append(self.gen_random_type_value(column_elm["type"], column_elm["len"], self.default_varchar_datatype, column_elm["len"], self.default_nchar_datatype))
+                        else:
+                            column_value_list.append(self.gen_random_type_value(column_elm["type"], "", "", "", ""))
+                else:
+                    continue
+        # column_value_list = [self.ts_value] + self.column_value_list
+        return column_value_list
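+
+    # Usage sketch (illustrative only, not called by the framework itself):
+    #   tdCom.gen_random_type_value("int", "", "", "", "")    # random int within INT_BOUNDARY
+    #   tdCom.gen_column_value_list([{"type": "int", "count": 2},
+    #                                {"type": "binary", "len": 8, "count": 1}])
+    #   # -> [<ts>, <int>, <int>, '<8-letter string>']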
"{value}" ' + column_type_str = self.gen_column_type_str(colname_prefix, column_elm_list) + tag_type_str = self.gen_tag_type_str(tagname_prefix, tag_elm_list) + + if int(count) <= 1: + create_stable_sql = f'create table {dbname}.{stbname} ({column_type_str}) tags ({tag_type_str}) {stb_params};' + tdLog.info("create stb sql: %s"%create_stable_sql) + tsql.execute(create_stable_sql) + else: + for _ in range(count): + create_stable_sql = f'create table {dbname}.{default_stbname_prefix}{stbname_index_start_num} ({column_type_str}) tags ({tag_type_str}) {stb_params};' + stbname_index_start_num += 1 + tsql.execute(create_stable_sql) + + def create_ctable(self, tsql, dbname=None, stbname=None, tag_elm_list=None, count=1, default_ctbname_prefix="ctb", **kwargs): + ctbname_index_start_num = 0 + ctb_params = "" + if len(kwargs) > 0: + for param, value in kwargs.items(): + ctb_params += f'{param} "{value}" ' + tag_value_list = self.gen_tag_value_list(tag_elm_list) + tag_value_str = "" + # tag_value_str = ", ".join(str(v) for v in self.tag_value_list) + for tag_value in tag_value_list: + if isinstance(tag_value, str): + tag_value_str += f'"{tag_value}", ' + else: + tag_value_str += f'{tag_value}, ' + tag_value_str = tag_value_str.rstrip()[:-1] + + if int(count) <= 1: + create_ctable_sql = f'create table {dbname}.{default_ctbname_prefix}{ctbname_index_start_num} using {dbname}.{stbname} tags ({tag_value_str}) {ctb_params};' + tsql.execute(create_ctable_sql) + else: + for _ in range(count): + create_ctable_sql = f'create table {dbname}.{default_ctbname_prefix}{ctbname_index_start_num} using {dbname}.{stbname} tags ({tag_value_str}) {ctb_params};' + ctbname_index_start_num += 1 + tdLog.info("create ctb sql: %s"%create_ctable_sql) + tsql.execute(create_ctable_sql) + + def create_table(self, tsql, dbname=None, tbname="ntb", column_elm_list=None, count=1, **kwargs): + tbname_index_start_num = 1 + tbname_prefix="ntb" + + tb_params = "" + if len(kwargs) > 0: + for param, value in kwargs.items(): + tb_params += f'{param} "{value}" ' + column_type_str = self.gen_column_type_str(tbname_prefix, column_elm_list) + + if int(count) <= 1: + create_table_sql = f'create table {dbname}.{tbname} ({column_type_str}) {tb_params};' + tsql.execute(create_table_sql) + else: + for _ in range(count): + create_table_sql = f'create table {dbname}.{tbname_prefix}{tbname_index_start_num} ({column_type_str}) {tb_params};' + tbname_index_start_num += 1 + tsql.execute(create_table_sql) + + def insert_rows(self, tsql, dbname=None, tbname=None, column_ele_list=None, start_ts_value=None, count=1): + if start_ts_value is None: + start_ts_value = self.genTs()[0] + + column_value_list = self.gen_column_value_list(column_ele_list, start_ts_value) + # column_value_str = ", ".join(str(v) for v in self.column_value_list) + column_value_str = "" + for column_value in column_value_list: + if isinstance(column_value, str): + column_value_str += f'"{column_value}", ' + else: + column_value_str += f'{column_value}, ' + column_value_str = column_value_str.rstrip()[:-1] + if int(count) <= 1: + insert_sql = f'insert into {self.tb_name} values ({column_value_str});' + tsql.execute(insert_sql) + else: + for num in range(count): + column_value_list = self.gen_column_value_list(column_ele_list, f'{start_ts_value}+{num}s') + # column_value_str = ", ".join(str(v) for v in column_value_list) + column_value_str = '' + idx = 0 + for column_value in column_value_list: + if isinstance(column_value, str) and idx != 0: + column_value_str += f'"{column_value}", ' + 
+
+    def getOneRow(self, location, containElm):
+        res_list = list()
+        if 0 <= location < tdSql.queryRows:
+            for row in tdSql.queryResult:
+                if row[location] == containElm:
+                    res_list.append(row)
+            return res_list
+        else:
+            tdLog.exit(f"getOneRow out of range: row_index={location} row_count={tdSql.queryRows}")
+
+    def killProcessor(self, processorName):
+        import platform  # local import in case platform is not pulled in via the star imports above
+        if (platform.system().lower() == 'windows'):
+            os.system("TASKKILL /F /IM %s.exe"%processorName)
+        else:
+            os.system("unset LD_PRELOAD; pkill %s " % processorName)
+
+    def gen_tag_col_str(self, gen_type, data_type, count):
+        """
+        gen multi tags or cols by gen_type
+        """
+        return ','.join(map(lambda i: f'{gen_type}{i} {data_type}', range(count)))
+
+    # stream
+    def create_stream(self, stream_name, des_table, source_sql, trigger_mode=None, watermark=None, max_delay=None, ignore_expired=None, ignore_update=None, subtable_value=None, fill_value=None, fill_history_value=None, stb_field_name_value=None, tag_value=None, use_exist_stb=False, use_except=False):
+        """create_stream
+
+        Args:
+            stream_name (str): stream_name
+            des_table (str): target stable
+            source_sql (str): stream sql
+            trigger_mode (str, optional): at_once/window_close/max_delay. Defaults to None.
+            watermark (str, optional): watermark time. Defaults to None.
+            max_delay (str, optional): max_delay time. Defaults to None.
+            ignore_expired (int, optional): ignore expired data. Defaults to None.
+            ignore_update (int, optional): ignore update data. Defaults to None.
+            subtable_value (str, optional): subtable. Defaults to None.
+            fill_value (str, optional): fill. Defaults to None.
+            fill_history_value (int, optional): 0/1. Defaults to None.
+            stb_field_name_value (str, optional): field list when writing into an existing stb. Defaults to None.
+            tag_value (str, optional): custom tag. Defaults to None.
+            use_exist_stb (bool, optional): write into an existing stable. Defaults to False.
+            use_except (bool, optional): if True, return the create sql instead of executing it. Defaults to False.
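+
+        Example (illustrative sketch; stream/table names are hypothetical)::
+
+            self.create_stream("s1", "test.des_stb",
+                               "select _wstart, count(*) from test.stb interval(10s)",
+                               trigger_mode="at_once", fill_history_value=1)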
+ + Returns: + str: stream + """ + if subtable_value is None: + subtable = "" + else: + subtable = f'subtable({subtable_value})' + + if fill_value is None: + fill = "" + else: + fill = f'fill({fill_value})' + + if fill_history_value is None: + fill_history = "" + else: + fill_history = f'fill_history {fill_history_value}' + + if use_exist_stb: + if stb_field_name_value is None: + stb_field_name = "" + else: + stb_field_name = f'({stb_field_name_value})' + + if tag_value is None: + tags = "" + else: + tags = f'tags({tag_value})' + else: + stb_field_name = "" + tags = "" + + + if trigger_mode is None: + stream_options = "" + if watermark is not None: + stream_options = f'watermark {watermark}' + if ignore_expired: + stream_options += f" ignore expired {ignore_expired}" + else: + stream_options += f" ignore expired 0" + if ignore_update: + stream_options += f" ignore update {ignore_update}" + else: + stream_options += f" ignore update 0" + if not use_except: + tdSql.execute(f'create stream if not exists {stream_name} trigger at_once {stream_options} {fill_history} into {des_table} {subtable} as {source_sql} {fill};') + time.sleep(self.create_stream_sleep) + return None + else: + return f'create stream if not exists {stream_name} {stream_options} {fill_history} into {des_table} {subtable} as {source_sql} {fill};' + else: + if watermark is None: + if trigger_mode == "max_delay": + stream_options = f'trigger {trigger_mode} {max_delay}' + else: + stream_options = f'trigger {trigger_mode}' + else: + if trigger_mode == "max_delay": + stream_options = f'trigger {trigger_mode} {max_delay} watermark {watermark}' + else: + stream_options = f'trigger {trigger_mode} watermark {watermark}' + if ignore_expired: + stream_options += f" ignore expired {ignore_expired}" + else: + stream_options += f" ignore expired 0" + + if ignore_update: + stream_options += f" ignore update {ignore_update}" + else: + stream_options += f" ignore update 0" + if not use_except: + tdSql.execute(f'create stream if not exists {stream_name} {stream_options} {fill_history} into {des_table}{stb_field_name} {tags} {subtable} as {source_sql} {fill};') + time.sleep(self.create_stream_sleep) + return None + else: + return f'create stream if not exists {stream_name} {stream_options} {fill_history} into {des_table}{stb_field_name} {tags} {subtable} as {source_sql} {fill};' + + def pause_stream(self, stream_name, if_exist=True, if_not_exist=False): + """pause_stream + + Args: + stream_name (str): stream_name + if_exist (bool, optional): Defaults to True. + if_not_exist (bool, optional): Defaults to False. + """ + if_exist_value = "if exists" if if_exist else "" + if_not_exist_value = "if not exists" if if_not_exist else "" + tdSql.execute(f'pause stream {if_exist_value} {if_not_exist_value} {stream_name}') + + def resume_stream(self, stream_name, if_exist=True, if_not_exist=False, ignore_untreated=False): + """resume_stream + + Args: + stream_name (str): stream_name + if_exist (bool, optional): Defaults to True. + if_not_exist (bool, optional): Defaults to False. + ignore_untreated (bool, optional): Defaults to False. 
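+
+        Example (illustrative sketch; "s1" is a hypothetical stream name)::
+
+            self.resume_stream("s1")                          # resume, keep untreated data
+            self.resume_stream("s1", ignore_untreated=True)   # resume, discard untreated data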
+ """ + if_exist_value = "if exists" if if_exist else "" + if_not_exist_value = "if not exists" if if_not_exist else "" + ignore_untreated_value = "ignore untreated" if ignore_untreated else "" + tdSql.execute(f'resume stream {if_exist_value} {if_not_exist_value} {ignore_untreated_value} {stream_name}') + + def drop_all_streams(self): + """drop all streams + """ + tdSql.query("show streams") + stream_name_list = list(map(lambda x: x[0], tdSql.queryResult)) + for stream_name in stream_name_list: + tdSql.execute(f'drop stream if exists {stream_name};') + + def drop_db(self, dbname="test"): + """drop a db + + Args: + dbname (str, optional): Defaults to "test". + """ + if dbname[0].isdigit(): + tdSql.execute(f'drop database if exists `{dbname}`') + else: + tdSql.execute(f'drop database if exists {dbname}') + + def drop_all_db(self): + """drop all databases + """ + tdSql.query("show databases;") + db_list = list(map(lambda x: x[0], tdSql.queryResult)) + for dbname in db_list: + if dbname not in self.white_list and "telegraf" not in dbname: + tdSql.execute(f'drop database if exists `{dbname}`') + + def time_cast(self, time_value, split_symbol="+"): + """cast bigint to timestamp + + Args: + time_value (bigint): ts + split_symbol (str, optional): split sympol. Defaults to "+". + + Returns: + _type_: timestamp + """ + ts_value = str(time_value).split(split_symbol)[0] + if split_symbol in str(time_value): + ts_value_offset = str(time_value).split(split_symbol)[1] + else: + ts_value_offset = "0s" + return f'cast({ts_value} as timestamp){split_symbol}{ts_value_offset}' + + def clean_env(self): + """drop all streams and databases + """ + self.drop_all_streams() + self.drop_all_db() + + def set_precision_offset(self, precision): + if precision == "ms": + self.offset = 1000 + elif precision == "us": + self.offset = 1000000 + elif precision == "ns": + self.offset = 1000000000 + else: + pass + + def genTs(self, precision="ms", ts="", protype="taosc", ns_tag=None): + """generate ts + + Args: + precision (str, optional): db precision. Defaults to "ms". + ts (str, optional): input ts. Defaults to "". + protype (str, optional): "taosc" or "restful". Defaults to "taosc". + ns_tag (_type_, optional): use ns. Defaults to None. + + Returns: + timestamp, datetime: timestamp and datetime + """ + if precision == "ns": + if ts == "" or ts is None: + ts = time.time_ns() + else: + ts = ts + if ns_tag is None: + dt = ts + else: + dt = datetime.fromtimestamp(ts // 1000000000) + dt = dt.strftime('%Y-%m-%d %H:%M:%S') + '.' + str(int(ts % 1000000000)).zfill(9) + if protype == "restful": + dt = datetime.fromtimestamp(ts // 1000000000) + dt = dt.strftime('%Y-%m-%d %H:%M:%S') + '.' + str(int(ts % 1000000000)).zfill(9) + else: + if ts == "" or ts is None: + ts = time.time() + else: + ts = ts + if precision == "ms" or precision is None: + ts = int(round(ts * 1000)) + dt = datetime.fromtimestamp(ts // 1000) + if protype == "taosc": + dt = dt.strftime('%Y-%m-%d %H:%M:%S') + '.' + str(int(ts % 1000)).zfill(3) + '000' + elif protype == "restful": + dt = dt.strftime('%Y-%m-%d %H:%M:%S') + '.' + str(int(ts % 1000)).zfill(3) + else: + pass + elif precision == "us": + ts = int(round(ts * 1000000)) + dt = datetime.fromtimestamp(ts // 1000000) + dt = dt.strftime('%Y-%m-%d %H:%M:%S') + '.' 
+ str(int(ts % 1000000)).zfill(6) + return ts, dt + + def sgen_column_type_str(self, column_elm_list): + """generage column type str + + Args: + column_elm_list (list): column_elm_list + """ + self.column_type_str = "" + if column_elm_list is None: + self.column_type_str = self.gen_default_column_str() + else: + for column_elm in column_elm_list: + if "count" in column_elm: + total_count = int(column_elm["count"]) + else: + total_count = 1 + if total_count > 0: + for _ in range(total_count): + self.column_type_str += f'{self.default_colname_prefix}{self.default_column_index_start_num} {column_elm["type"]}, ' + if column_elm["type"] in ["varchar", "binary", "nchar"]: + self.column_type_str = self.column_type_str.rstrip()[:-1] + f'({column_elm["len"]}), ' + self.default_column_index_start_num += 1 + else: + continue + self.column_type_str = self.default_colts_name + " timestamp, " + self.column_type_str.rstrip()[:-1] + + def sgen_tag_type_str(self, tag_elm_list): + """generage tag type str + + Args: + tag_elm_list (list): tag_elm_list + """ + self.tag_type_str = "" + if tag_elm_list is None: + self.tag_type_str = self.gen_default_tag_str() + else: + for tag_elm in tag_elm_list: + if "count" in tag_elm: + total_count = int(tag_elm["count"]) + else: + total_count = 1 + if total_count > 0: + for _ in range(total_count): + self.tag_type_str += f'{self.default_tagname_prefix}{self.default_tag_index_start_num} {tag_elm["type"]}, ' + if tag_elm["type"] in ["varchar", "binary", "nchar"]: + self.tag_type_str = self.tag_type_str.rstrip()[:-1] + f'({tag_elm["len"]}), ' + self.default_tag_index_start_num += 1 + else: + continue + self.tag_type_str = self.tag_type_str.rstrip()[:-1] + if self.need_tagts: + self.tag_type_str = self.default_tagts_name + " timestamp, " + self.tag_type_str + + def sgen_tag_value_list(self, tag_elm_list, ts_value=None): + """generage tag value str + + Args: + tag_elm_list (list): _description_ + ts_value (timestamp, optional): Defaults to None. + """ + if self.need_tagts: + self.ts_value = self.genTs()[0] + if ts_value is not None: + self.ts_value = ts_value + + if tag_elm_list is None: + self.tag_value_list = list(map(lambda i: self.gen_random_type_value(i, self.default_varchar_length, self.default_varchar_datatype, self.default_nchar_length, self.default_nchar_datatype), self.full_type_list)) + else: + for tag_elm in tag_elm_list: + if "count" in tag_elm: + total_count = int(tag_elm["count"]) + else: + total_count = 1 + if total_count > 0: + for _ in range(total_count): + if tag_elm["type"] in ["varchar", "binary", "nchar"]: + self.tag_value_list.append(self.gen_random_type_value(tag_elm["type"], tag_elm["len"], self.default_varchar_datatype, tag_elm["len"], self.default_nchar_datatype)) + else: + self.tag_value_list.append(self.gen_random_type_value(tag_elm["type"], "", "", "", "")) + else: + continue + # if self.need_tagts and self.ts_value is not None and len(str(self.ts_value)) > 0: + if self.need_tagts: + self.tag_value_list = [self.ts_value] + self.tag_value_list + + def screateDb(self, dbname="test", drop_db=True, **kwargs): + """create database + + Args: + dbname (str, optional): Defaults to "test". + drop_db (bool, optional): Defaults to True. 
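+
+        Example (hypothetical values; each kwarg becomes a database option,
+        and precision is quoted by this method; the existing db is dropped
+        first because drop_db defaults to True):
+            self.screateDb(dbname="test", precision="ms", vgroups=2)
+            executes: create database if not exists test precision "ms" vgroups 2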
+ """ + tdLog.info("creating db ...") + db_params = "" + if len(kwargs) > 0: + for param, value in kwargs.items(): + if param == "precision": + db_params += f'{param} "{value}" ' + else: + db_params += f'{param} {value} ' + if drop_db: + self.drop_db(dbname) + tdSql.execute(f'create database if not exists {dbname} {db_params}') + tdSql.execute(f'use {dbname}') + + def screate_stable(self, dbname=None, stbname="stb", use_name="table", column_elm_list=None, tag_elm_list=None, + need_tagts=False, count=1, default_stbname_prefix="stb", default_stbname_index_start_num=1, + default_column_index_start_num=1, default_tag_index_start_num=1, **kwargs): + """_summary_ + + Args: + dbname (str, optional): Defaults to None. + stbname (str, optional): Defaults to "stb". + use_name (str, optional): stable/table, Defaults to "table". + column_elm_list (list, optional): use for sgen_column_type_str(), Defaults to None. + tag_elm_list (list, optional): use for sgen_tag_type_str(), Defaults to None. + need_tagts (bool, optional): tag use timestamp, Defaults to False. + count (int, optional): stable count, Defaults to 1. + default_stbname_prefix (str, optional): Defaults to "stb". + default_stbname_index_start_num (int, optional): Defaults to 1. + default_column_index_start_num (int, optional): Defaults to 1. + default_tag_index_start_num (int, optional): Defaults to 1. + """ + tdLog.info("creating stable ...") + if dbname is not None: + self.dbname = dbname + self.need_tagts = need_tagts + self.default_stbname_prefix = default_stbname_prefix + self.default_stbname_index_start_num = default_stbname_index_start_num + self.default_column_index_start_num = default_column_index_start_num + self.default_tag_index_start_num = default_tag_index_start_num + stb_params = "" + if len(kwargs) > 0: + for param, value in kwargs.items(): + stb_params += f'{param} "{value}" ' + self.sgen_column_type_str(column_elm_list) + self.sgen_tag_type_str(tag_elm_list) + if self.dbname is not None: + stb_name = f'{self.dbname}.{stbname}' + else: + stb_name = stbname + if int(count) <= 1: + create_stable_sql = f'create {use_name} {stb_name} ({self.column_type_str}) tags ({self.tag_type_str}) {stb_params};' + tdSql.execute(create_stable_sql) + else: + for _ in range(count): + create_stable_sql = f'create {use_name} {self.dbname}.{default_stbname_prefix}{default_stbname_index_start_num} ({self.column_type_str}) tags ({self.tag_type_str}) {stb_params};' + default_stbname_index_start_num += 1 + tdSql.execute(create_stable_sql) + + def screate_ctable(self, dbname=None, stbname=None, ctbname="ctb", use_name="table", tag_elm_list=None, ts_value=None, count=1, default_varchar_datatype="letters", default_nchar_datatype="letters", default_ctbname_prefix="ctb", default_ctbname_index_start_num=1, **kwargs): + """_summary_ + + Args: + dbname (str, optional): Defaults to None. + stbname (str, optional): Defaults to None. + ctbname (str, optional): Defaults to "ctb". + use_name (str, optional): Defaults to "table". + tag_elm_list (list, optional): use for sgen_tag_type_str(), Defaults to None. + ts_value (timestamp, optional): Defaults to None. + count (int, optional): ctb count, Defaults to 1. + default_varchar_datatype (str, optional): Defaults to "letters". + default_nchar_datatype (str, optional): Defaults to "letters". + default_ctbname_prefix (str, optional): Defaults to "ctb". + default_ctbname_index_start_num (int, optional): Defaults to 1. 
+ """ + tdLog.info("creating childtable ...") + self.default_varchar_datatype = default_varchar_datatype + self.default_nchar_datatype = default_nchar_datatype + self.default_ctbname_prefix = default_ctbname_prefix + self.default_ctbname_index_start_num = default_ctbname_index_start_num + ctb_params = "" + if len(kwargs) > 0: + for param, value in kwargs.items(): + ctb_params += f'{param} "{value}" ' + self.sgen_tag_value_list(tag_elm_list, ts_value) + tag_value_str = "" + # tag_value_str = ", ".join(str(v) for v in self.tag_value_list) + for tag_value in self.tag_value_list: + if isinstance(tag_value, str): + tag_value_str += f'"{tag_value}", ' + else: + tag_value_str += f'{tag_value}, ' + tag_value_str = tag_value_str.rstrip()[:-1] + if dbname is not None: + self.dbname = dbname + ctb_name = f'{self.dbname}.{ctbname}' + else: + ctb_name = ctbname + if stbname is not None: + stb_name = stbname + if int(count) <= 1: + create_ctable_sql = f'create {use_name} {ctb_name} using {stb_name} tags ({tag_value_str}) {ctb_params};' + tdSql.execute(create_ctable_sql) + else: + for _ in range(count): + create_stable_sql = f'create {use_name} {self.dbname}.{default_ctbname_prefix}{default_ctbname_index_start_num} using {self.stb_name} tags ({tag_value_str}) {ctb_params};' + default_ctbname_index_start_num += 1 + tdSql.execute(create_stable_sql) + + def sgen_column_value_list(self, column_elm_list, need_null, ts_value=None): + """_summary_ + + Args: + column_elm_list (list): gen_random_type_value() + need_null (bool): if insert null + ts_value (timestamp, optional): Defaults to None. + """ + self.column_value_list = list() + self.ts_value = self.genTs()[0] + if ts_value is not None: + self.ts_value = ts_value + + if column_elm_list is None: + self.column_value_list = list(map(lambda i: self.gen_random_type_value(i, self.default_varchar_length, self.default_varchar_datatype, self.default_nchar_length, self.default_nchar_datatype), self.full_type_list)) + else: + for column_elm in column_elm_list: + if "count" in column_elm: + total_count = int(column_elm["count"]) + else: + total_count = 1 + if total_count > 0: + for _ in range(total_count): + if column_elm["type"] in ["varchar", "binary", "nchar"]: + self.column_value_list.append(self.gen_random_type_value(column_elm["type"], column_elm["len"], self.default_varchar_datatype, column_elm["len"], self.default_nchar_datatype)) + else: + self.column_value_list.append(self.gen_random_type_value(column_elm["type"], "", "", "", "")) + else: + continue + if need_null: + for i in range(int(len(self.column_value_list)/2)): + index_num = random.randint(0, len(self.column_value_list)-1) + self.column_value_list[index_num] = None + self.column_value_list = [self.ts_value] + self.column_value_list + + def screate_table(self, dbname=None, tbname="tb", use_name="table", column_elm_list=None, + count=1, default_tbname_prefix="tb", default_tbname_index_start_num=1, + default_column_index_start_num=1, **kwargs): + """create ctable + + Args: + dbname (str, optional): Defaults to None. + tbname (str, optional): Defaults to "tb". + use_name (str, optional): Defaults to "table". + column_elm_list (list, optional): Defaults to None. + count (int, optional): Defaults to 1. + default_tbname_prefix (str, optional): Defaults to "tb". + default_tbname_index_start_num (int, optional): Defaults to 1. + default_column_index_start_num (int, optional): Defaults to 1. 
+ """ + tdLog.info("creating table ...") + if dbname is not None: + self.dbname = dbname + self.default_tbname_prefix = default_tbname_prefix + self.default_tbname_index_start_num = default_tbname_index_start_num + self.default_column_index_start_num = default_column_index_start_num + tb_params = "" + if len(kwargs) > 0: + for param, value in kwargs.items(): + tb_params += f'{param} "{value}" ' + self.sgen_column_type_str(column_elm_list) + if self.dbname is not None: + tb_name = f'{self.dbname}.{tbname}' + else: + tb_name = tbname + if int(count) <= 1: + create_table_sql = f'create {use_name} {tb_name} ({self.column_type_str}) {tb_params};' + tdSql.execute(create_table_sql) + else: + for _ in range(count): + create_table_sql = f'create {use_name} {self.dbname}.{default_tbname_prefix}{default_tbname_index_start_num} ({self.column_type_str}) {tb_params};' + default_tbname_index_start_num += 1 + tdSql.execute(create_table_sql) + + def sinsert_rows(self, dbname=None, tbname=None, column_ele_list=None, ts_value=None, count=1, need_null=False): + """insert rows + + Args: + dbname (str, optional): Defaults to None. + tbname (str, optional): Defaults to None. + column_ele_list (list, optional): Defaults to None. + ts_value (timestamp, optional): Defaults to None. + count (int, optional): Defaults to 1. + need_null (bool, optional): Defaults to False. + """ + tdLog.info("stream inserting ...") + if dbname is not None: + self.dbname = dbname + if tbname is not None: + self.tbname = f'{self.dbname}.{tbname}' + else: + if tbname is not None: + self.tbname = tbname + + self.sgen_column_value_list(column_ele_list, need_null, ts_value) + # column_value_str = ", ".join(str(v) for v in self.column_value_list) + column_value_str = "" + for column_value in self.column_value_list: + if column_value is None: + column_value_str += 'Null, ' + elif isinstance(column_value, str) and "+" not in column_value and "-" not in column_value: + column_value_str += f'"{column_value}", ' + else: + column_value_str += f'{column_value}, ' + column_value_str = column_value_str.rstrip()[:-1] + if int(count) <= 1: + insert_sql = f'insert into {self.tbname} values ({column_value_str});' + tdSql.execute(insert_sql) + else: + for num in range(count): + ts_value = self.genTs()[0] + self.sgen_column_value_list(column_ele_list, need_null, f'{ts_value}+{num}s') + column_value_str = "" + for column_value in self.column_value_list: + if column_value is None: + column_value_str += 'Null, ' + elif isinstance(column_value, str) and "+" not in column_value: + column_value_str += f'"{column_value}", ' + else: + column_value_str += f'{column_value}, ' + column_value_str = column_value_str.rstrip()[:-1] + insert_sql = f'insert into {self.tbname} values ({column_value_str});' + tdSql.execute(insert_sql) + + def sdelete_rows(self, dbname=None, tbname=None, start_ts=None, end_ts=None, ts_key=None): + """delete rows + + Args: + dbname (str, optional): Defaults to None. + tbname (str, optional): Defaults to None. + start_ts (timestamp, optional): range start. Defaults to None. + end_ts (timestamp, optional): range end. Defaults to None. + ts_key (str, optional): timestamp column name. Defaults to None. 
+ """ + if dbname is not None: + self.dbname = dbname + if tbname is not None: + self.tbname = f'{self.dbname}.{tbname}' + else: + if tbname is not None: + self.tbname = tbname + if ts_key is None: + ts_col_name = self.default_colts_name + else: + ts_col_name = ts_key + + base_del_sql = f'delete from {self.tbname} ' + if end_ts is not None: + if ":" in start_ts and "-" in start_ts: + start_ts = f"{start_ts}" + if ":" in end_ts and "-" in end_ts: + end_ts = f"{end_ts}" + base_del_sql += f'where {ts_col_name} between {start_ts} and {end_ts};' + else: + if start_ts is not None: + if ":" in start_ts and "-" in start_ts: + start_ts = f"{start_ts}" + base_del_sql += f'where {ts_col_name} = {start_ts};' + tdSql.execute(base_del_sql) + + def check_stream_field_type(self, sql, input_function): + """confirm stream field + + Args: + sql (str): input sql + input_function (str): scalar + """ + tdSql.query(sql) + res = tdSql.queryResult + if input_function in ["acos", "asin", "atan", "cos", "log", "pow", "sin", "sqrt", "tan"]: + tdSql.checkEqual(res[1][1], "DOUBLE") + tdSql.checkEqual(res[2][1], "DOUBLE") + elif input_function in ["lower", "ltrim", "rtrim", "upper"]: + tdSql.checkEqual(res[1][1], "VARCHAR") + tdSql.checkEqual(res[2][1], "VARCHAR") + tdSql.checkEqual(res[3][1], "NCHAR") + elif input_function in ["char_length", "length"]: + tdSql.checkEqual(res[1][1], "BIGINT") + tdSql.checkEqual(res[2][1], "BIGINT") + tdSql.checkEqual(res[3][1], "BIGINT") + elif input_function in ["concat", "concat_ws"]: + tdSql.checkEqual(res[1][1], "VARCHAR") + tdSql.checkEqual(res[2][1], "NCHAR") + tdSql.checkEqual(res[3][1], "NCHAR") + tdSql.checkEqual(res[4][1], "NCHAR") + elif input_function in ["substr"]: + tdSql.checkEqual(res[1][1], "VARCHAR") + tdSql.checkEqual(res[2][1], "VARCHAR") + tdSql.checkEqual(res[3][1], "VARCHAR") + tdSql.checkEqual(res[4][1], "NCHAR") + else: + tdSql.checkEqual(res[1][1], "INT") + tdSql.checkEqual(res[2][1], "DOUBLE") + + def round_handle(self, input_list): + """round list elem + + Args: + input_list (list): input value list + + Returns: + _type_: round list + """ + tdLog.info("round rows ...") + final_list = list() + for i in input_list: + tmpl = list() + for j in i: + if type(j) != datetime and type(j) != str: + tmpl.append(round(j, 1)) + else: + tmpl.append(j) + final_list.append(tmpl) + return final_list + + def float_handle(self, input_list): + """float list elem + + Args: + input_list (list): input value list + + Returns: + _type_: float list + """ + tdLog.info("float rows ...") + final_list = list() + for i in input_list: + tmpl = list() + for j_i,j_v in enumerate(i): + if type(j_v) != datetime and j_v is not None and str(j_v).isdigit() and j_i <= 12: + tmpl.append(float(j_v)) + else: + tmpl.append(j_v) + final_list.append(tuple(tmpl)) + return final_list + + def str_ts_trans_bigint(self, str_ts): + """trans str ts to bigint + + Args: + str_ts (str): human-date + + Returns: + bigint: bigint-ts + """ + tdSql.query(f'select cast({str_ts} as bigint)') + return tdSql.queryResult[0][0] + + def cast_query_data(self, query_data): + """cast query-result for existed-stb + + Args: + query_data (list): query data list + + Returns: + list: new list after cast + """ + tdLog.info("cast query data ...") + col_type_list = self.column_type_str.split(',') + tag_type_list = self.tag_type_str.split(',') + col_tag_type_list = col_type_list + tag_type_list + nl = list() + for query_data_t in query_data: + query_data_l = list(query_data_t) + for i,v in enumerate(query_data_l): + if v is not None: + 
if " ".join(col_tag_type_list[i].strip().split(" ")[1:]) == "nchar(6)": + tdSql.query(f'select cast("{v}" as binary(6))') + else: + tdSql.query(f'select cast("{v}" as {" ".join(col_tag_type_list[i].strip().split(" ")[1:])})') + query_data_l[i] = tdSql.queryResult[0][0] + else: + query_data_l[i] = v + nl.append(tuple(query_data_l)) + return nl + + def trans_time_to_s(self, runtime): + """trans time to s + + Args: + runtime (str): 1d/1h/1m... + + Returns: + int: second + """ + if "d" in str(runtime).lower(): + d_num = re.findall("\d+\.?\d*", runtime.replace(" ", ""))[0] + s_num = float(d_num) * 24 * 60 * 60 + elif "h" in str(runtime).lower(): + h_num = re.findall("\d+\.?\d*", runtime.replace(" ", ""))[0] + s_num = float(h_num) * 60 * 60 + elif "m" in str(runtime).lower(): + m_num = re.findall("\d+\.?\d*", runtime.replace(" ", ""))[0] + s_num = float(m_num) * 60 + elif "s" in str(runtime).lower(): + s_num = re.findall("\d+\.?\d*", runtime.replace(" ", ""))[0] + else: + s_num = 60 + return int(s_num) + + def check_query_data(self, sql1, sql2, sorted=False, fill_value=None, tag_value_list=None, defined_tag_count=None, partition=True, use_exist_stb=False, subtable=None, reverse_check=False): + """confirm query result + + Args: + sql1 (str): select .... + sql2 (str): select .... + sorted (bool, optional): if sort result list. Defaults to False. + fill_value (str, optional): fill. Defaults to None. + tag_value_list (list, optional): Defaults to None. + defined_tag_count (int, optional): Defaults to None. + partition (bool, optional): Defaults to True. + use_exist_stb (bool, optional): Defaults to False. + subtable (str, optional): Defaults to None. + reverse_check (bool, optional): not equal. Defaults to False. + + Returns: + bool: False if failed + """ + tdLog.info("checking query data ...") + if tag_value_list: + dvalue = len(self.tag_type_str.split(',')) - defined_tag_count + tdSql.query(sql1) + res1 = tdSql.queryResult + tdSql.query(sql2) + res2 = self.cast_query_data(tdSql.queryResult) if tag_value_list or use_exist_stb else tdSql.queryResult + tdSql.sql = sql1 + new_list = list() + if tag_value_list: + res1 = self.float_handle(res1) + res2 = self.float_handle(res2) + for i,v in enumerate(res2): + if i < len(tag_value_list): + if partition: + new_list.append(tuple(list(v)[:-(dvalue+defined_tag_count)] + list(tag_value_list[i]) + [None]*dvalue)) + else: + new_list.append(tuple(list(v)[:-(dvalue+defined_tag_count)] + [None]*len(self.tag_type_str.split(',')))) + res2 = new_list + else: + if use_exist_stb: + res1 = self.float_handle(res1) + res2 = self.float_handle(res2) + for i,v in enumerate(res2): + new_list.append(tuple(list(v)[:-(13)] + [None]*len(self.tag_type_str.split(',')))) + res2 = new_list + + latency = 0 + if sorted: + res1.sort() + res2.sort() + if fill_value == "LINEAR": + res1 = self.round_handle(res1) + res2 = self.round_handle(res2) + if not reverse_check: + while res1 != res2: + tdLog.info("query retrying ...") + new_list = list() + tdSql.query(sql1) + res1 = tdSql.queryResult + tdSql.query(sql2) + # res2 = tdSql.queryResult + res2 = self.cast_query_data(tdSql.queryResult) if tag_value_list or use_exist_stb else tdSql.queryResult + tdSql.sql = sql1 + + if tag_value_list: + res1 = self.float_handle(res1) + res2 = self.float_handle(res2) + for i,v in enumerate(res2): + if i < len(tag_value_list): + if partition: + new_list.append(tuple(list(v)[:-(dvalue+defined_tag_count)] + list(tag_value_list[i]) + [None]*dvalue)) + else: + 
new_list.append(tuple(list(v)[:-(dvalue+defined_tag_count)] + [None]*len(self.tag_type_str.split(','))))
+                    res2 = new_list
+                else:
+                    if use_exist_stb:
+                        res1 = self.float_handle(res1)
+                        res2 = self.float_handle(res2)
+                        for i,v in enumerate(res2):
+                            new_list.append(tuple(list(v)[:-(13)] + [None]*len(self.tag_type_str.split(','))))
+                        res2 = new_list
+                if sorted or tag_value_list:
+                    res1.sort()
+                    res2.sort()
+                if fill_value == "LINEAR":
+                    res1 = self.round_handle(res1)
+                    res2 = self.round_handle(res2)
+                if latency < self.stream_timeout:
+                    latency += 0.2
+                    time.sleep(0.2)
+                else:
+                    if latency == 0:
+                        return False
+                    tdSql.checkEqual(res1, res2)
+        else:
+            while res1 == res2:
+                tdLog.info("query retrying ...")
+                new_list = list()
+                tdSql.query(sql1)
+                res1 = tdSql.queryResult
+                tdSql.query(sql2)
+                res2 = self.cast_query_data(tdSql.queryResult) if tag_value_list or use_exist_stb else tdSql.queryResult
+                tdSql.sql = sql1
+
+                if tag_value_list:
+                    res1 = self.float_handle(res1)
+                    res2 = self.float_handle(res2)
+                    for i,v in enumerate(res2):
+                        if i < len(tag_value_list):
+                            if partition:
+                                new_list.append(tuple(list(v)[:-(dvalue+defined_tag_count)] + list(tag_value_list[i]) + [None]*dvalue))
+                            else:
+                                new_list.append(tuple(list(v)[:-(dvalue+defined_tag_count)] + [None]*len(self.tag_type_str.split(','))))
+                    res2 = new_list
+                else:
+                    if use_exist_stb:
+                        res1 = self.float_handle(res1)
+                        res2 = self.float_handle(res2)
+                        for i,v in enumerate(res2):
+                            new_list.append(tuple(list(v)[:-(13)] + [None]*len(self.tag_type_str.split(','))))
+                        res2 = new_list
+                if sorted or tag_value_list:
+                    res1.sort()
+                    res2.sort()
+                if fill_value == "LINEAR":
+                    res1 = self.round_handle(res1)
+                    res2 = self.round_handle(res2)
+                if latency < self.stream_timeout:
+                    latency += 0.2
+                    time.sleep(0.2)
+                else:
+                    if latency == 0:
+                        return False
+                    tdSql.checkNotEqual(res1, res2)
+
+    def check_stream_res(self, sql, expected_res, max_delay):
+        """wait until the row count returned by sql matches expected_res
+
+        Args:
+            sql (str): select ...
+            expected_res (int): expected row count
+            max_delay (int): max_delay value
+
+        Returns:
+            bool: False if the check timed out
+        """
+        tdSql.query(sql)
+        latency = 0
+
+        while tdSql.queryRows != expected_res:
+            tdSql.query(sql)
+            if latency < self.stream_timeout:
+                latency += 0.2
+                time.sleep(0.2)
+            else:
+                if max_delay is not None:
+                    if latency == 0:
+                        return False
+                tdSql.checkEqual(tdSql.queryRows, expected_res)
+
+    def check_stream(self, sql1, sql2, expected_count, max_delay=None):
+        """confirm both the stream row count and the stream data
+
+        Args:
+            sql1 (str): select ...
+            sql2 (str): select ...
+            expected_count (int): expected row count
+            max_delay (int, optional): max_delay value. Defaults to None.
+        """
+        self.check_stream_res(sql1, expected_count, max_delay)
+        self.check_query_data(sql1, sql2)
+
+    def cal_watermark_window_close_session_endts(self, start_ts, watermark=None, session=None):
+        """calculate the end timestamp which closes a session window
+
+        Args:
+            start_ts (epoch time): self.date_time
+            watermark (int, optional): > session. Defaults to None.
+            session (int, optional): Defaults to None.
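+
+        Example (ms precision, so self.offset == 1000; hypothetical values):
+            start_ts=1655903478508, watermark=5 ->
+            1655903478508 + 5*1000 + 1 = 1655903483509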
+
+        Returns:
+            int: end timestamp at which the session window closes
+        """
+        if watermark is not None:
+            return start_ts + watermark*self.offset + 1
+        else:
+            return start_ts + session*self.offset + 1
+
+    def cal_watermark_window_close_interval_endts(self, start_ts, interval, watermark=None):
+        """calculate the end timestamp which closes an interval window
+
+        Args:
+            start_ts (epoch time): self.date_time
+            interval (int): interval length in seconds
+            watermark (int, optional): watermark in seconds. Defaults to None.
+
+        Returns:
+            int: end timestamp at which the interval window closes
+        """
+        if watermark is not None:
+            return int(start_ts/self.offset)*self.offset + (interval - (int(start_ts/self.offset))%interval)*self.offset + watermark*self.offset
+        else:
+            return int(start_ts/self.offset)*self.offset + (interval - (int(start_ts/self.offset))%interval)*self.offset
+
+    def update_delete_history_data(self, delete):
+        """update history data, then optionally delete it
+
+        Args:
+            delete (bool): also delete the updated rows
+        """
+        self.sinsert_rows(tbname=self.ctb_name, ts_value=self.record_history_ts)
+        self.sinsert_rows(tbname=self.tb_name, ts_value=self.record_history_ts)
+        if delete:
+            self.sdelete_rows(tbname=self.ctb_name, start_ts=self.time_cast(self.record_history_ts, "-"))
+            self.sdelete_rows(tbname=self.tb_name, start_ts=self.time_cast(self.record_history_ts, "-"))
+
+    def prepare_data(self, interval=None, watermark=None, session=None, state_window=None, state_window_max=127, interation=3, range_count=None, precision="ms", fill_history_value=0, ext_stb=None):
+        """prepare stream test data
+
+        Args:
+            interval (int, optional): Defaults to None.
+            watermark (int, optional): Defaults to None.
+            session (int, optional): Defaults to None.
+            state_window (str, optional): Defaults to None.
+            state_window_max (int, optional): Defaults to 127.
+            interation (int, optional): iteration count. Defaults to 3.
+            range_count (int, optional): Defaults to None.
+            precision (str, optional): Defaults to "ms".
+            fill_history_value (int, optional): Defaults to 0.
+            ext_stb (bool, optional): Defaults to None.
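+
+        Example (a minimal sketch; table names derive from self.case_name):
+            self.prepare_data(interval=10, watermark=5, fill_history_value=1)
+            cleans the environment, creates db/stable/ctable/table, then
+            back-fills history rows because fill_history_value == 1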
+ """ + self.clean_env() + self.dataDict = { + "stb_name" : f"{self.case_name}_stb", + "ctb_name" : f"{self.case_name}_ct1", + "tb_name" : f"{self.case_name}_tb1", + "ext_stb_name" : f"ext_{self.case_name}_stb", + "ext_ctb_name" : f"ext_{self.case_name}_ct1", + "ext_tb_name" : f"ext_{self.case_name}_tb1", + "interval" : interval, + "watermark": watermark, + "session": session, + "state_window": state_window, + "state_window_max": state_window_max, + "iteration": interation, + "range_count": range_count, + "start_ts": 1655903478508, + } + if range_count is not None: + self.range_count = range_count + if precision is not None: + self.precision = precision + self.set_precision_offset(self.precision) + + self.stb_name = self.dataDict["stb_name"] + self.ctb_name = self.dataDict["ctb_name"] + self.tb_name = self.dataDict["tb_name"] + self.ext_stb_name = self.dataDict["ext_stb_name"] + self.ext_ctb_name = self.dataDict["ext_ctb_name"] + self.ext_tb_name = self.dataDict["ext_tb_name"] + self.stb_stream_des_table = f'{self.stb_name}{self.des_table_suffix}' + self.ctb_stream_des_table = f'{self.ctb_name}{self.des_table_suffix}' + self.tb_stream_des_table = f'{self.tb_name}{self.des_table_suffix}' + self.ext_stb_stream_des_table = f'{self.ext_stb_name}{self.des_table_suffix}' + self.ext_ctb_stream_des_table = f'{self.ext_ctb_name}{self.des_table_suffix}' + self.ext_tb_stream_des_table = f'{self.ext_tb_name}{self.des_table_suffix}' + self.date_time = self.genTs(precision=self.precision)[0] + + self.screateDb(dbname=self.dbname, precision=self.precision) + if ext_stb: + self.screate_stable(dbname=self.dbname, stbname=self.ext_stb_stream_des_table) + self.screate_ctable(dbname=self.dbname, stbname=self.ext_stb_stream_des_table, ctbname=self.ext_ctb_stream_des_table) + self.screate_table(dbname=self.dbname, tbname=self.ext_tb_stream_des_table) + self.screate_stable(dbname=self.dbname, stbname=self.stb_name) + self.screate_ctable(dbname=self.dbname, stbname=self.stb_name, ctbname=self.ctb_name) + self.screate_table(dbname=self.dbname, tbname=self.tb_name) + if fill_history_value == 1: + for i in range(self.range_count): + ts_value = str(self.date_time)+f'-{self.default_interval*(i+1)}s' + self.sinsert_rows(tbname=self.ctb_name, ts_value=ts_value) + self.sinsert_rows(tbname=self.tb_name, ts_value=ts_value) + if i == 1: + self.record_history_ts = ts_value + +def is_json(msg): + if isinstance(msg, str): + try: + json.loads(msg) + return True + except: + return False + else: + return False + +def get_path(tool="taosd"): + selfPath = os.path.dirname(os.path.realpath(__file__)) + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + paths = [] + for root, dirs, files in os.walk(projPath): + if ((tool) in files or ("%s.exe"%tool) in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + paths.append(os.path.join(root, tool)) + break + if (len(paths) == 0): + return "" + return paths[0] + +def dict2toml(in_dict: dict, file:str): + if not isinstance(in_dict, dict): + return "" + with open(file, 'w') as f: + toml.dump(in_dict, f) + +tdCom = TDCom() diff --git a/tests/army/frame/constant.py b/tests/army/frame/constant.py new file mode 100644 index 0000000000..c4541386b4 --- /dev/null +++ b/tests/army/frame/constant.py @@ -0,0 +1,197 @@ +# -*- coding: utf-8 -*- + +# basic type +TAOS_DATA_TYPE = [ + "INT", "BIGINT", "SMALLINT", "TINYINT", "INT UNSIGNED", "BIGINT UNSIGNED", "SMALLINT 
UNSIGNED", "TINYINT UNSIGNED", + "FLOAT", "DOUBLE", + "BOOL", + "BINARY", "NCHAR", "VARCHAR", + "TIMESTAMP", + # "MEDIUMBLOB", "BLOB", # add in 3.x + # "DECIMAL", "NUMERIC", # add in 3.x + "JSON", # only for tag +] + +TAOS_NUM_TYPE = [ + "INT", "BIGINT", "SMALLINT", "TINYINT", "INT UNSIGNED", "BIGINT UNSIGNED", "SMALLINT UNSIGNED", "TINYINT UNSIGNED", "FLOAT", "DOUBLE", + # "DECIMAL", "NUMERIC", # add in 3.x +] +TAOS_CHAR_TYPE = [ + "BINARY", "NCHAR", "VARCHAR", +] +TAOS_BOOL_TYPE = ["BOOL",] +TAOS_TS_TYPE = ["TIMESTAMP",] +TAOS_BIN_TYPE = [ + "MEDIUMBLOB", "BLOB", # add in 3.x +] + +TAOS_TIME_INIT = ["b", "u", "a", "s", "m", "h", "d", "w", "n", "y"] +TAOS_PRECISION = ["ms", "us", "ns"] +PRECISION_DEFAULT = "ms" +PRECISION = PRECISION_DEFAULT + +TAOS_KEYWORDS = [ + "ABORT", "CREATE", "IGNORE", "NULL", "STAR", + "ACCOUNT", "CTIME", "IMMEDIATE", "OF", "STATE", + "ACCOUNTS", "DATABASE", "IMPORT", "OFFSET", "STATEMENT", + "ADD", "DATABASES", "IN", "OR", "STATE_WINDOW", + "AFTER", "DAYS", "INITIALLY", "ORDER", "STORAGE", + "ALL", "DBS", "INSERT", "PARTITIONS", "STREAM", + "ALTER", "DEFERRED", "INSTEAD", "PASS", "STREAMS", + "AND", "DELIMITERS", "INT", "PLUS", "STRING", + "AS", "DESC", "INTEGER", "PPS", "SYNCDB", + "ASC", "DESCRIBE", "INTERVAL", "PRECISION", "TABLE", + "ATTACH", "DETACH", "INTO", "PREV", "TABLES", + "BEFORE", "DISTINCT", "IS", "PRIVILEGE", "TAG", + "BEGIN", "DIVIDE", "ISNULL", "QTIME", "TAGS", + "BETWEEN", "DNODE", "JOIN", "QUERIES", "TBNAME", + "BIGINT", "DNODES", "KEEP", "QUERY", "TIMES", + "BINARY", "DOT", "KEY", "QUORUM", "TIMESTAMP", + "BITAND", "DOUBLE", "KILL", "RAISE", "TINYINT", + "BITNOT", "DROP", "LE", "REM", "TOPIC", + "BITOR", "EACH", "LIKE", "REPLACE", "TOPICS", + "BLOCKS", "END", "LIMIT", "REPLICA", "TRIGGER", + "BOOL", "EQ", "LINEAR", "RESET", "TSERIES", + "BY", "EXISTS", "LOCAL", "RESTRICT", "UMINUS", + "CACHE", "EXPLAIN", "LP", "ROW", "UNION", + "CACHEMODEL", "FAIL", "LSHIFT", "RP", "UNSIGNED", + "CASCADE", "FILE", "LT", "RSHIFT", "UPDATE", + "CHANGE", "FILL", "MATCH", "SCORES", "UPLUS", + "CLUSTER", "FLOAT", "MAXROWS", "SELECT", "USE", + "COLON", "FOR", "MINROWS", "SEMI", "USER", + "COLUMN", "FROM", "MINUS", "SESSION", "USERS", + "COMMA", "FSYNC", "MNODES", "SET", "USING", + "COMP", "GE", "MODIFY", "SHOW", "VALUES", + "COMPACT", "GLOB", "MODULES", "SLASH", "VARIABLE", + "CONCAT", "GRANTS", "NCHAR", "SLIDING", "VARIABLES", + "CONFLICT", "GROUP", "NE", "SLIMIT", "VGROUPS", + "CONNECTION", "GT", "NONE", "SMALLINT", "VIEW", + "CONNECTIONS", "HAVING", "NOT", "SOFFSET", "VNODES", + "CONNS", "ID", "NOTNULL", "STABLE", "WAL", + "COPY", "IF", "NOW", "STABLES", "WHERE", +] + +NUM_FUNC = [ + "ABS", "ACOS", "ASIN", "ATAN", "CEIL", "COS", "FLOOR", "LOG", "POW", "ROUND", "SIN", "SQRT", "TAN", +] + +STR_FUNC = [ + "CHAR_LENGTH", "CONCAT", "CONCAT_WS", "LENGTH", "LOWER","LTRIM", "RTRIM", "SUBSTR", "UPPER", +] + +CONVER_FUNC = ["CASR", "TO_ISO8601", "TO_JSON", "TP_UNIXTIMESTAMP"] + +SELECT_FUNC = [ + "APERCENTILE", "BOTTOM", "FIRST", "INTERP", "LAST", "MAX", "MIN", "PERCENTILE", "TAIL", "TOP", "UNIQUE", +] + +AGG_FUNC = [ + "AVG", "COUNT", "ELAPSED", "LEASTSQUARES", "MODE", "SPREAD", "STDDEV", "SUM", "HYPERLOGLOG", "HISTOGRAM", +] + +TS_FUNC = [ + "CSUM", "DERIVATIVE", "DIFF", "IRATE", "MAVG", "SAMPLE", "STATECOUNT", "STATEDURATION", "TWA" +] + +SYSINFO_FUNC = [ + "DATABASE", "CLIENT_VERSION", "SERVER_VERSION", "SERVER_STATUS", "CURRENT_USER", "USER" +] + + + +# basic data type boundary +TINYINT_MAX = 127 +TINYINT_MIN = -128 + +TINYINT_UN_MAX = 255 +TINYINT_UN_MIN = 0 + 
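+
+# The integer bounds follow two's complement; a quick self-check
+# (signed N-bit: [-2**(N-1), 2**(N-1)-1]; unsigned N-bit: [0, 2**N-1]):
+assert TINYINT_MAX == 2**7 - 1 and TINYINT_MIN == -2**7
+assert TINYINT_UN_MAX == 2**8 - 1 and TINYINT_UN_MIN == 0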
+SMALLINT_MAX = 32767 +SMALLINT_MIN = -32768 + +SMALLINT_UN_MAX = 65535 +SMALLINT_UN_MIN = 0 + +INT_MAX = 2_147_483_647 +INT_MIN = -2_147_483_648 + +INT_UN_MAX = 4_294_967_295 +INT_UN_MIN = 0 + +BIGINT_MAX = 9_223_372_036_854_775_807 +BIGINT_MIN = -9_223_372_036_854_775_808 + +BIGINT_UN_MAX = 18_446_744_073_709_551_615 +BIGINT_UN_MIN = 0 + +FLOAT_MAX = 3.40E+38 +FLOAT_MIN = -3.40E+38 + +DOUBLE_MAX = 1.7E+308 +DOUBLE_MIN = -1.7E+308 + +# schema boundary +BINARY_LENGTH_MAX = 16374 +NCAHR_LENGTH_MAX = 4093 +DBNAME_LENGTH_MAX = 64 + +STBNAME_LENGTH_MAX = 192 +STBNAME_LENGTH_MIN = 1 + +TBNAME_LENGTH_MAX = 192 +TBNAME_LENGTH_MIN = 1 + +CHILD_TBNAME_LENGTH_MAX = 192 +CHILD_TBNAME_LENGTH_MIN = 1 + +TAG_NAME_LENGTH_MAX = 64 +TAG_NAME_LENGTH_MIN = 1 + +COL_NAME_LENGTH_MAX = 64 +COL_NAME_LENGTH_MIN = 1 + +TAG_COUNT_MAX = 128 +TAG_COUNT_MIN = 1 + +COL_COUNT_MAX = 4096 +COL_COUNT_MIN = 2 + +TAG_COL_COUNT_MAX = 4096 +TAG_COL_COUNT_MIN = 3 + +MNODE_SHM_SIZE_MAX = 2_147_483_647 +MNODE_SHM_SIZE_MIN = 6_292_480 +MNODE_SHM_SIZE_DEFAULT = 6_292_480 + +VNODE_SHM_SIZE_MAX = 2_147_483_647 +VNODE_SHM_SIZE_MIN = 6_292_480 +VNODE_SHM_SIZE_DEFAULT = 31_458_304 + +# time_init +TIME_MS = 1 +TIME_US = TIME_MS/1000 +TIME_NS = TIME_US/1000 + +TIME_S = 1000 * TIME_MS +TIME_M = 60 * TIME_S +TIME_H = 60 * TIME_M +TIME_D = 24 * TIME_H +TIME_W = 7 * TIME_D +TIME_N = 30 * TIME_D +TIME_Y = 365 * TIME_D + + +# session parameters +INTERVAL_MIN = 1 * TIME_MS if PRECISION == PRECISION_DEFAULT else 1 * TIME_US + + +# streams and related agg-function +SMA_INDEX_FUNCTIONS = ["MIN", "MAX"] +ROLLUP_FUNCTIONS = ["AVG", "SUM", "MIN", "MAX", "LAST", "FIRST"] +BLOCK_FUNCTIONS = ["SUM", "MIN", "MAX"] +SMA_WATMARK_MAXDELAY_INIT = ['a', "s", "m"] +WATERMARK_MAX = 900000 +WATERMARK_MIN = 0 + +MAX_DELAY_MAX = 900000 +MAX_DELAY_MIN = 1 \ No newline at end of file diff --git a/tests/army/frame/dnodes-default.py b/tests/army/frame/dnodes-default.py new file mode 100644 index 0000000000..6809c1082c --- /dev/null +++ b/tests/army/frame/dnodes-default.py @@ -0,0 +1,502 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import os +import os.path +import subprocess +from util.log import * + + +class TDSimClient: + def __init__(self): + self.testCluster = False + + self.cfgDict = { + "numOfLogLines": "100000000", + "numOfThreadsPerCore": "2.0", + "locale": "en_US.UTF-8", + "charset": "UTF-8", + "asyncLog": "0", + "minTablesPerVnode": "4", + "maxTablesPerVnode": "1000", + "tableIncStepPerVnode": "10000", + "maxVgroupsPerDb": "1000", + "sdbDebugFlag": "143", + "rpcDebugFlag": "135", + "tmrDebugFlag": "131", + "cDebugFlag": "135", + "udebugFlag": "135", + "jnidebugFlag": "135", + "qdebugFlag": "135", + "telemetryReporting": "0", + } + def init(self, path): + self.__init__() + self.path = path + + def getLogDir(self): + self.logDir = os.path.join(self.path,"sim","psim","log") + return self.logDir + + def getCfgDir(self): + self.cfgDir = os.path.join(self.path,"sim","psim","cfg") + return self.cfgDir + + def setTestCluster(self, value): + self.testCluster = value + + def addExtraCfg(self, option, value): + self.cfgDict.update({option: value}) + + def cfg(self, option, value): + cmd = "echo %s %s >> %s" % (option, value, self.cfgPath) + if os.system(cmd) != 0: + tdLog.exit(cmd) + + def deploy(self): + self.logDir = os.path.join(self.path,"sim","psim","log") + self.cfgDir = os.path.join(self.path,"sim","psim","cfg") + self.cfgPath = os.path.join(self.path,"sim","psim","cfg","taos.cfg") + + cmd = "rm -rf " + self.logDir + if os.system(cmd) != 0: + tdLog.exit(cmd) + + cmd = "mkdir -p " + self.logDir + if os.system(cmd) != 0: + tdLog.exit(cmd) + + cmd = "rm -rf " + self.cfgDir + if os.system(cmd) != 0: + tdLog.exit(cmd) + + cmd = "mkdir -p " + self.cfgDir + if os.system(cmd) != 0: + tdLog.exit(cmd) + + cmd = "touch " + self.cfgPath + if os.system(cmd) != 0: + tdLog.exit(cmd) + + if self.testCluster: + self.cfg("masterIp", "192.168.0.1") + self.cfg("secondIp", "192.168.0.2") + self.cfg("logDir", self.logDir) + + for key, value in self.cfgDict.items(): + self.cfg(key, value) + + tdLog.debug("psim is deployed and configured by %s" % (self.cfgPath)) + + +class TDDnode: + def __init__(self, index): + self.index = index + self.running = 0 + self.deployed = 0 + self.testCluster = False + self.valgrind = 0 + + def init(self, path): + self.path = path + + def setTestCluster(self, value): + self.testCluster = value + + def setValgrind(self, value): + self.valgrind = value + + def getDataSize(self): + totalSize = 0 + + if (self.deployed == 1): + for dirpath, dirnames, filenames in os.walk(self.dataDir): + for f in filenames: + fp = os.path.join(dirpath, f) + + if not os.path.islink(fp): + totalSize = totalSize + os.path.getsize(fp) + + return totalSize + + def deploy(self): + self.logDir = os.path.join(self.path,"sim","dnode%d" % self.index, "log") + self.dataDir = os.path.join(self.path,"sim","dnode%d" % self.index, "data") + self.cfgDir = os.path.join(self.path,"sim","dnode%d" % self.index, "cfg") + self.cfgPath = os.path.join(self.path,"sim","dnode%d" % self.index, "cfg","taos.cfg") + + cmd = "rm -rf " + self.dataDir + if os.system(cmd) != 0: + tdLog.exit(cmd) + + cmd = "rm -rf " + self.logDir + if os.system(cmd) != 0: + tdLog.exit(cmd) + + cmd = "rm -rf " + self.cfgDir + if os.system(cmd) != 0: + tdLog.exit(cmd) + + cmd = 
"mkdir -p " + self.dataDir + if os.system(cmd) != 0: + tdLog.exit(cmd) + + cmd = "mkdir -p " + self.logDir + if os.system(cmd) != 0: + tdLog.exit(cmd) + + cmd = "mkdir -p " + self.cfgDir + if os.system(cmd) != 0: + tdLog.exit(cmd) + + cmd = "touch " + self.cfgPath + if os.system(cmd) != 0: + tdLog.exit(cmd) + + if self.testCluster: + self.startIP() + + if self.testCluster: + self.cfg("masterIp", "192.168.0.1") + self.cfg("secondIp", "192.168.0.2") + self.cfg("publicIp", "192.168.0.%d" % (self.index)) + self.cfg("internalIp", "192.168.0.%d" % (self.index)) + self.cfg("privateIp", "192.168.0.%d" % (self.index)) + self.cfg("dataDir", self.dataDir) + self.cfg("logDir", self.logDir) + self.cfg("numOfLogLines", "100000000") + self.cfg("mnodeEqualVnodeNum", "0") + self.cfg("walLevel", "2") + self.cfg("fsync", "1000") + self.cfg("statusInterval", "1") + self.cfg("numOfMnodes", "3") + self.cfg("numOfThreadsPerCore", "2.0") + self.cfg("monitor", "0") + self.cfg("maxVnodeConnections", "30000") + self.cfg("maxMgmtConnections", "30000") + self.cfg("maxMeterConnections", "30000") + self.cfg("maxShellConns", "30000") + self.cfg("locale", "en_US.UTF-8") + self.cfg("charset", "UTF-8") + self.cfg("asyncLog", "0") + self.cfg("anyIp", "0") + self.cfg("dDebugFlag", "135") + self.cfg("mDebugFlag", "135") + self.cfg("sdbDebugFlag", "135") + self.cfg("rpcDebugFlag", "135") + self.cfg("tmrDebugFlag", "131") + self.cfg("cDebugFlag", "135") + self.cfg("httpDebugFlag", "135") + self.cfg("monitorDebugFlag", "135") + self.cfg("udebugFlag", "135") + self.cfg("jnidebugFlag", "135") + self.cfg("qdebugFlag", "135") + self.deployed = 1 + tdLog.debug( + "dnode:%d is deployed and configured by %s" % + (self.index, self.cfgPath)) + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root)-len("/build/bin")] + break + return buildPath + + def start(self): + buildPath = self.getBuildPath() + + if (buildPath == ""): + tdLog.exit("taosd not found!") + else: + tdLog.info("taosd found in %s" % buildPath) + + binPath = buildPath + "/build/bin/taosd" + + if self.deployed == 0: + tdLog.exit("dnode:%d is not deployed" % (self.index)) + + if self.valgrind == 0: + cmd = "nohup %s -c %s > /dev/null 2>&1 & " % ( + binPath, self.cfgDir) + else: + valgrindCmdline = "valgrind --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all -v --workaround-gcc296-bugs=yes" + + cmd = "nohup %s %s -c %s 2>&1 & " % ( + valgrindCmdline, binPath, self.cfgDir) + + print(cmd) + + if os.system(cmd) != 0: + tdLog.exit(cmd) + self.running = 1 + tdLog.debug("dnode:%d is running with %s " % (self.index, cmd)) + + tdLog.debug("wait 5 seconds for the dnode:%d to start." 
% (self.index)) + time.sleep(5) + + def stop(self): + if self.valgrind == 0: + toBeKilled = "taosd" + else: + toBeKilled = "valgrind.bin" + + if self.running != 0: + psCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}'" % toBeKilled + processID = subprocess.check_output( + psCmd, shell=True).decode("utf-8") + + while(processID): + killCmd = "kill -INT %s > /dev/null 2>&1" % processID + os.system(killCmd) + time.sleep(1) + processID = subprocess.check_output( + psCmd, shell=True).decode("utf-8") + for port in range(6030, 6041): + fuserCmd = "fuser -k -n tcp %d" % port + os.system(fuserCmd) + if self.valgrind: + time.sleep(2) + + self.running = 0 + tdLog.debug("dnode:%d is stopped by kill -INT" % (self.index)) + + def forcestop(self): + if self.valgrind == 0: + toBeKilled = "taosd" + else: + toBeKilled = "valgrind.bin" + + if self.running != 0: + psCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}'" % toBeKilled + processID = subprocess.check_output( + psCmd, shell=True).decode("utf-8") + + while(processID): + killCmd = "kill -KILL %s > /dev/null 2>&1" % processID + os.system(killCmd) + time.sleep(1) + processID = subprocess.check_output( + psCmd, shell=True).decode("utf-8") + for port in range(6030, 6041): + fuserCmd = "fuser -k -n tcp %d" % port + os.system(fuserCmd) + if self.valgrind: + time.sleep(2) + + self.running = 0 + tdLog.debug("dnode:%d is stopped by kill -KILL" % (self.index)) + + def startIP(self): + cmd = "sudo ifconfig lo:%d 192.168.0.%d up" % (self.index, self.index) + if os.system(cmd) != 0: + tdLog.exit(cmd) + + def stopIP(self): + cmd = "sudo ifconfig lo:%d 192.168.0.%d down" % ( + self.index, self.index) + if os.system(cmd) != 0: + tdLog.exit(cmd) + + def cfg(self, option, value): + cmd = "echo %s %s >> %s" % (option, value, self.cfgPath) + if os.system(cmd) != 0: + tdLog.exit(cmd) + + def getDnodeRootDir(self, index): + dnodeRootDir = os.path.join(self.path,"sim","psim","dnode%d" % index) + return dnodeRootDir + + def getDnodesRootDir(self): + dnodesRootDir = os.path.join(self.path,"sim","psim") + return dnodesRootDir + + +class TDDnodes: + def __init__(self): + self.dnodes = [] + self.dnodes.append(TDDnode(1)) + self.dnodes.append(TDDnode(2)) + self.dnodes.append(TDDnode(3)) + self.dnodes.append(TDDnode(4)) + self.dnodes.append(TDDnode(5)) + self.dnodes.append(TDDnode(6)) + self.dnodes.append(TDDnode(7)) + self.dnodes.append(TDDnode(8)) + self.dnodes.append(TDDnode(9)) + self.dnodes.append(TDDnode(10)) + self.simDeployed = False + + def init(self, path): + psCmd = "ps -ef|grep -w taosd| grep -v grep | awk '{print $2}'" + processID = subprocess.check_output(psCmd, shell=True).decode("utf-8") + while(processID): + killCmd = "kill -TERM %s > /dev/null 2>&1" % processID + os.system(killCmd) + time.sleep(1) + processID = subprocess.check_output( + psCmd, shell=True).decode("utf-8") + + psCmd = "ps -ef|grep -w valgrind.bin| grep -v grep | awk '{print $2}'" + processID = subprocess.check_output(psCmd, shell=True).decode("utf-8") + while(processID): + killCmd = "kill -TERM %s > /dev/null 2>&1" % processID + os.system(killCmd) + time.sleep(1) + processID = subprocess.check_output( + psCmd, shell=True).decode("utf-8") + + binPath = os.path.dirname(os.path.realpath(__file__)) + binPath = binPath + "/../../../debug/" + tdLog.debug("binPath %s" % (binPath)) + binPath = os.path.realpath(binPath) + tdLog.debug("binPath real path %s" % (binPath)) + + # cmd = "sudo cp %s/build/lib/libtaos.so /usr/local/lib/taos/" % (binPath) + # tdLog.debug(cmd) + # os.system(cmd) + + # 
cmd = "sudo cp %s/build/bin/taos /usr/local/bin/taos/" % (binPath) + # if os.system(cmd) != 0 : + # tdLog.exit(cmd) + # tdLog.debug("execute %s" % (cmd)) + + # cmd = "sudo cp %s/build/bin/taosd /usr/local/bin/taos/" % (binPath) + # if os.system(cmd) != 0 : + # tdLog.exit(cmd) + # tdLog.debug("execute %s" % (cmd)) + + if path == "": + # self.path = os.path.expanduser('~') + self.path = os.path.abspath(binPath + "../../") + else: + self.path = os.path.realpath(path) + + for i in range(len(self.dnodes)): + self.dnodes[i].init(self.path) + + self.sim = TDSimClient() + self.sim.init(self.path) + + def setTestCluster(self, value): + self.testCluster = value + + def setValgrind(self, value): + self.valgrind = value + + def deploy(self, index): + self.sim.setTestCluster(self.testCluster) + + if (self.simDeployed == False): + self.sim.deploy() + self.simDeployed = True + + self.check(index) + self.dnodes[index - 1].setTestCluster(self.testCluster) + self.dnodes[index - 1].setValgrind(self.valgrind) + self.dnodes[index - 1].deploy() + + def cfg(self, index, option, value): + self.check(index) + self.dnodes[index - 1].cfg(option, value) + + def start(self, index): + self.check(index) + self.dnodes[index - 1].start() + + def stop(self, index): + self.check(index) + self.dnodes[index - 1].stop() + + def getDataSize(self, index): + self.check(index) + return self.dnodes[index - 1].getDataSize() + + def forcestop(self, index): + self.check(index) + self.dnodes[index - 1].forcestop() + + def startIP(self, index): + self.check(index) + + if self.testCluster: + self.dnodes[index - 1].startIP() + + def stopIP(self, index): + self.check(index) + + if self.dnodes[index - 1].testCluster: + self.dnodes[index - 1].stopIP() + + def check(self, index): + if index < 1 or index > 10: + tdLog.exit("index:%d should on a scale of [1, 10]" % (index)) + + def stopAll(self): + tdLog.info("stop all dnodes") + for i in range(len(self.dnodes)): + self.dnodes[i].stop() + + psCmd = "ps -ef | grep -w taosd | grep 'root' | grep -v grep | awk '{print $2}'" + processID = subprocess.check_output(psCmd, shell=True).decode("utf-8") + if processID: + cmd = "sudo systemctl stop taosd" + os.system(cmd) + # if os.system(cmd) != 0 : + # tdLog.exit(cmd) + psCmd = "ps -ef|grep -w taosd| grep -v grep | awk '{print $2}'" + processID = subprocess.check_output(psCmd, shell=True).decode("utf-8") + while(processID): + killCmd = "kill -TERM %s > /dev/null 2>&1" % processID + os.system(killCmd) + time.sleep(1) + processID = subprocess.check_output( + psCmd, shell=True).decode("utf-8") + + psCmd = "ps -ef|grep -w valgrind.bin| grep -v grep | awk '{print $2}'" + processID = subprocess.check_output(psCmd, shell=True).decode("utf-8") + while(processID): + killCmd = "kill -TERM %s > /dev/null 2>&1" % processID + os.system(killCmd) + time.sleep(1) + processID = subprocess.check_output( + psCmd, shell=True).decode("utf-8") + + # if os.system(cmd) != 0 : + # tdLog.exit(cmd) + + def getDnodesRootDir(self): + dnodesRootDir = "%s/sim" % (self.path) + return dnodesRootDir + + def getSimCfgPath(self): + return self.sim.getCfgDir() + + def getSimLogPath(self): + return self.sim.getLogDir() + + def addSimExtraCfg(self, option, value): + self.sim.addExtraCfg(option, value) + + +tdDnodes = TDDnodes() diff --git a/tests/army/frame/dnodes-no-random-fail.py b/tests/army/frame/dnodes-no-random-fail.py new file mode 100644 index 0000000000..0e433e9664 --- /dev/null +++ b/tests/army/frame/dnodes-no-random-fail.py @@ -0,0 +1,500 @@ 
+################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import os +import os.path +import subprocess +from util.log import * + + +class TDSimClient: + def __init__(self): + self.testCluster = False + + self.cfgDict = { + "numOfLogLines": "100000000", + "numOfThreadsPerCore": "2.0", + "locale": "en_US.UTF-8", + "charset": "UTF-8", + "asyncLog": "0", + "anyIp": "0", + "sdbDebugFlag": "135", + "rpcDebugFlag": "135", + "tmrDebugFlag": "131", + "cDebugFlag": "135", + "udebugFlag": "135", + "jnidebugFlag": "135", + "qdebugFlag": "135", + "telemetryReporting": "0", + } + + def init(self, path): + self.__init__() + self.path = path + + def getLogDir(self): + self.logDir = os.path.join(self.path,"sim","psim","log") + return self.logDir + + def getCfgDir(self): + self.cfgDir = os.path.join(self.path,"sim","psim","cfg") + return self.cfgDir + + def setTestCluster(self, value): + self.testCluster = value + + def addExtraCfg(self, option, value): + self.cfgDict.update({option: value}) + + def cfg(self, option, value): + cmd = "echo %s %s >> %s" % (option, value, self.cfgPath) + if os.system(cmd) != 0: + tdLog.exit(cmd) + + def deploy(self): + self.logDir = os.path.join(self.path,"sim","psim","log") + self.cfgDir = os.path.join(self.path,"sim","psim","cfg") + self.cfgPath = os.path.join(self.path,"sim","psim","cfg","taos.cfg") + + cmd = "rm -rf " + self.logDir + if os.system(cmd) != 0: + tdLog.exit(cmd) + + cmd = "mkdir -p " + self.logDir + if os.system(cmd) != 0: + tdLog.exit(cmd) + + cmd = "rm -rf " + self.cfgDir + if os.system(cmd) != 0: + tdLog.exit(cmd) + + cmd = "mkdir -p " + self.cfgDir + if os.system(cmd) != 0: + tdLog.exit(cmd) + + cmd = "touch " + self.cfgPath + if os.system(cmd) != 0: + tdLog.exit(cmd) + + if self.testCluster: + self.cfg("masterIp", "192.168.0.1") + self.cfg("secondIp", "192.168.0.2") + self.cfg("logDir", self.logDir) + + for key, value in self.cfgDict.items(): + self.cfg(key, value) + + tdLog.debug("psim is deployed and configured by %s" % (self.cfgPath)) + + +class TDDnode: + def __init__(self, index): + self.index = index + self.running = 0 + self.deployed = 0 + self.testCluster = False + self.valgrind = 0 + + def init(self, path): + self.path = path + + def setTestCluster(self, value): + self.testCluster = value + + def setValgrind(self, value): + self.valgrind = value + + def getDataSize(self): + totalSize = 0 + + if (self.deployed == 1): + for dirpath, dirnames, filenames in os.walk(self.dataDir): + for f in filenames: + fp = os.path.join(dirpath, f) + + if not os.path.islink(fp): + totalSize = totalSize + os.path.getsize(fp) + + return totalSize + + def deploy(self): + self.logDir = os.path.join(self.path,"sim","dnode%d" % self.index, "log") + self.dataDir = os.path.join(self.path,"sim","dnode%d" % self.index, "data") + self.cfgDir = os.path.join(self.path,"sim","dnode%d" % self.index, "cfg") + self.cfgPath = os.path.join(self.path,"sim","dnode%d" % self.index, "cfg","taos.cfg") + + cmd = "rm -rf " + self.dataDir + if os.system(cmd) != 0: + tdLog.exit(cmd) + + cmd = "rm -rf " + self.logDir + if os.system(cmd) != 0: 
+ tdLog.exit(cmd) + + cmd = "rm -rf " + self.cfgDir + if os.system(cmd) != 0: + tdLog.exit(cmd) + + cmd = "mkdir -p " + self.dataDir + if os.system(cmd) != 0: + tdLog.exit(cmd) + + cmd = "mkdir -p " + self.logDir + if os.system(cmd) != 0: + tdLog.exit(cmd) + + cmd = "mkdir -p " + self.cfgDir + if os.system(cmd) != 0: + tdLog.exit(cmd) + + cmd = "touch " + self.cfgPath + if os.system(cmd) != 0: + tdLog.exit(cmd) + + if self.testCluster: + self.startIP() + + if self.testCluster: + self.cfg("masterIp", "192.168.0.1") + self.cfg("secondIp", "192.168.0.2") + self.cfg("publicIp", "192.168.0.%d" % (self.index)) + self.cfg("internalIp", "192.168.0.%d" % (self.index)) + self.cfg("privateIp", "192.168.0.%d" % (self.index)) + self.cfg("dataDir", self.dataDir) + self.cfg("logDir", self.logDir) + self.cfg("numOfLogLines", "100000000") + self.cfg("mnodeEqualVnodeNum", "0") + self.cfg("walLevel", "2") + self.cfg("fsync", "1000") + self.cfg("statusInterval", "1") + self.cfg("numOfMnodes", "3") + self.cfg("numOfThreadsPerCore", "2.0") + self.cfg("monitor", "0") + self.cfg("maxVnodeConnections", "30000") + self.cfg("maxMgmtConnections", "30000") + self.cfg("maxMeterConnections", "30000") + self.cfg("maxShellConns", "30000") + self.cfg("locale", "en_US.UTF-8") + self.cfg("charset", "UTF-8") + self.cfg("asyncLog", "0") + self.cfg("anyIp", "0") + self.cfg("dDebugFlag", "135") + self.cfg("mDebugFlag", "135") + self.cfg("sdbDebugFlag", "135") + self.cfg("rpcDebugFlag", "135") + self.cfg("tmrDebugFlag", "131") + self.cfg("cDebugFlag", "135") + self.cfg("httpDebugFlag", "135") + self.cfg("monitorDebugFlag", "135") + self.cfg("udebugFlag", "135") + self.cfg("jnidebugFlag", "135") + self.cfg("qdebugFlag", "135") + self.deployed = 1 + tdLog.debug( + "dnode:%d is deployed and configured by %s" % + (self.index, self.cfgPath)) + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root)-len("/build/bin")] + break + return buildPath + + def start(self): + buildPath = self.getBuildPath() + + if (buildPath == ""): + tdLog.exit("taosd not found!") + else: + tdLog.info("taosd found in %s" % buildPath) + + binPath = buildPath + "/build/bin/taosd" + + if self.deployed == 0: + tdLog.exit("dnode:%d is not deployed" % (self.index)) + + if self.valgrind == 0: + cmd = "nohup %s -c %s --random-file-fail-factor 0 > /dev/null 2>&1 & " % ( + binPath, self.cfgDir) + else: + valgrindCmdline = "valgrind --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all -v --workaround-gcc296-bugs=yes" + + cmd = "nohup %s %s -c %s 2>&1 & " % ( + valgrindCmdline, binPath, self.cfgDir) + + print(cmd) + + if os.system(cmd) != 0: + tdLog.exit(cmd) + self.running = 1 + tdLog.debug("dnode:%d is running with %s " % (self.index, cmd)) + + tdLog.debug("wait 5 seconds for the dnode:%d to start." 
% (self.index)) + time.sleep(5) + + def stop(self): + if self.valgrind == 0: + toBeKilled = "taosd" + else: + toBeKilled = "valgrind.bin" + + if self.running != 0: + psCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}'" % toBeKilled + processID = subprocess.check_output( + psCmd, shell=True).decode("utf-8") + + while(processID): + killCmd = "kill -INT %s > /dev/null 2>&1" % processID + os.system(killCmd) + time.sleep(1) + processID = subprocess.check_output( + psCmd, shell=True).decode("utf-8") + for port in range(6030, 6041): + fuserCmd = "fuser -k -n tcp %d" % port + os.system(fuserCmd) + if self.valgrind: + time.sleep(2) + + self.running = 0 + tdLog.debug("dnode:%d is stopped by kill -INT" % (self.index)) + + def forcestop(self): + if self.valgrind == 0: + toBeKilled = "taosd" + else: + toBeKilled = "valgrind.bin" + + if self.running != 0: + psCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}'" % toBeKilled + processID = subprocess.check_output( + psCmd, shell=True).decode("utf-8") + + while(processID): + killCmd = "kill -KILL %s > /dev/null 2>&1" % processID + os.system(killCmd) + time.sleep(1) + processID = subprocess.check_output( + psCmd, shell=True).decode("utf-8") + for port in range(6030, 6041): + fuserCmd = "fuser -k -n tcp %d" % port + os.system(fuserCmd) + if self.valgrind: + time.sleep(2) + + self.running = 0 + tdLog.debug("dnode:%d is stopped by kill -KILL" % (self.index)) + + def startIP(self): + cmd = "sudo ifconfig lo:%d 192.168.0.%d up" % (self.index, self.index) + if os.system(cmd) != 0: + tdLog.exit(cmd) + + def stopIP(self): + cmd = "sudo ifconfig lo:%d 192.168.0.%d down" % ( + self.index, self.index) + if os.system(cmd) != 0: + tdLog.exit(cmd) + + def cfg(self, option, value): + cmd = "echo %s %s >> %s" % (option, value, self.cfgPath) + if os.system(cmd) != 0: + tdLog.exit(cmd) + + def getDnodeRootDir(self, index): + dnodeRootDir = os.path.join(self.path,"sim","psim","dnode%d" % index) + return dnodeRootDir + + def getDnodesRootDir(self): + dnodesRootDir = os.path.join(self.path,"sim","psim") + return dnodesRootDir + + +class TDDnodes: + def __init__(self): + self.dnodes = [] + self.dnodes.append(TDDnode(1)) + self.dnodes.append(TDDnode(2)) + self.dnodes.append(TDDnode(3)) + self.dnodes.append(TDDnode(4)) + self.dnodes.append(TDDnode(5)) + self.dnodes.append(TDDnode(6)) + self.dnodes.append(TDDnode(7)) + self.dnodes.append(TDDnode(8)) + self.dnodes.append(TDDnode(9)) + self.dnodes.append(TDDnode(10)) + self.simDeployed = False + + def init(self, path): + psCmd = "ps -ef|grep -w taosd| grep -v grep | awk '{print $2}'" + processID = subprocess.check_output(psCmd, shell=True).decode("utf-8") + while(processID): + killCmd = "kill -TERM %s > /dev/null 2>&1" % processID + os.system(killCmd) + time.sleep(1) + processID = subprocess.check_output( + psCmd, shell=True).decode("utf-8") + + psCmd = "ps -ef|grep -w valgrind.bin| grep -v grep | awk '{print $2}'" + processID = subprocess.check_output(psCmd, shell=True).decode("utf-8") + while(processID): + killCmd = "kill -TERM %s > /dev/null 2>&1" % processID + os.system(killCmd) + time.sleep(1) + processID = subprocess.check_output( + psCmd, shell=True).decode("utf-8") + + binPath = os.path.dirname(os.path.realpath(__file__)) + binPath = binPath + "/../../../debug/" + tdLog.debug("binPath %s" % (binPath)) + binPath = os.path.realpath(binPath) + tdLog.debug("binPath real path %s" % (binPath)) + + # cmd = "sudo cp %s/build/lib/libtaos.so /usr/local/lib/taos/" % (binPath) + # tdLog.debug(cmd) + # os.system(cmd) + + # 
cmd = "sudo cp %s/build/bin/taos /usr/local/bin/taos/" % (binPath) + # if os.system(cmd) != 0 : + # tdLog.exit(cmd) + # tdLog.debug("execute %s" % (cmd)) + + # cmd = "sudo cp %s/build/bin/taosd /usr/local/bin/taos/" % (binPath) + # if os.system(cmd) != 0 : + # tdLog.exit(cmd) + # tdLog.debug("execute %s" % (cmd)) + + if path == "": + # self.path = os.path.expanduser('~') + self.path = os.path.abspath(binPath + "../../") + else: + self.path = os.path.realpath(path) + + for i in range(len(self.dnodes)): + self.dnodes[i].init(self.path) + + self.sim = TDSimClient() + self.sim.init(self.path) + + def setTestCluster(self, value): + self.testCluster = value + + def setValgrind(self, value): + self.valgrind = value + + def deploy(self, index): + self.sim.setTestCluster(self.testCluster) + + if (self.simDeployed == False): + self.sim.deploy() + self.simDeployed = True + + self.check(index) + self.dnodes[index - 1].setTestCluster(self.testCluster) + self.dnodes[index - 1].setValgrind(self.valgrind) + self.dnodes[index - 1].deploy() + + def cfg(self, index, option, value): + self.check(index) + self.dnodes[index - 1].cfg(option, value) + + def start(self, index): + self.check(index) + self.dnodes[index - 1].start() + + def stop(self, index): + self.check(index) + self.dnodes[index - 1].stop() + + def getDataSize(self, index): + self.check(index) + return self.dnodes[index - 1].getDataSize() + + def forcestop(self, index): + self.check(index) + self.dnodes[index - 1].forcestop() + + def startIP(self, index): + self.check(index) + + if self.testCluster: + self.dnodes[index - 1].startIP() + + def stopIP(self, index): + self.check(index) + + if self.dnodes[index - 1].testCluster: + self.dnodes[index - 1].stopIP() + + def check(self, index): + if index < 1 or index > 10: + tdLog.exit("index:%d should on a scale of [1, 10]" % (index)) + + def stopAll(self): + tdLog.info("stop all dnodes") + for i in range(len(self.dnodes)): + self.dnodes[i].stop() + + psCmd = "ps -ef | grep -w taosd | grep 'root' | grep -v grep | awk '{print $2}'" + processID = subprocess.check_output(psCmd, shell=True).decode("utf-8") + if processID: + cmd = "sudo systemctl stop taosd" + os.system(cmd) + # if os.system(cmd) != 0 : + # tdLog.exit(cmd) + psCmd = "ps -ef|grep -w taosd| grep -v grep | awk '{print $2}'" + processID = subprocess.check_output(psCmd, shell=True).decode("utf-8") + while(processID): + killCmd = "kill -TERM %s > /dev/null 2>&1" % processID + os.system(killCmd) + time.sleep(1) + processID = subprocess.check_output( + psCmd, shell=True).decode("utf-8") + + psCmd = "ps -ef|grep -w valgrind.bin| grep -v grep | awk '{print $2}'" + processID = subprocess.check_output(psCmd, shell=True).decode("utf-8") + while(processID): + killCmd = "kill -TERM %s > /dev/null 2>&1" % processID + os.system(killCmd) + time.sleep(1) + processID = subprocess.check_output( + psCmd, shell=True).decode("utf-8") + + # if os.system(cmd) != 0 : + # tdLog.exit(cmd) + + def getDnodesRootDir(self): + dnodesRootDir = "%s/sim" % (self.path) + return dnodesRootDir + + def getSimCfgPath(self): + return self.sim.getCfgDir() + + def getSimLogPath(self): + return self.sim.getLogDir() + + def addSimExtraCfg(self, option, value): + self.sim.addExtraCfg(option, value) + + +tdDnodes = TDDnodes() diff --git a/tests/army/frame/dnodes-random-fail.py b/tests/army/frame/dnodes-random-fail.py new file mode 100644 index 0000000000..1c527290ec --- /dev/null +++ b/tests/army/frame/dnodes-random-fail.py @@ -0,0 +1,497 @@ 
+################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import os +import os.path +import subprocess +from util.log import * + + +class TDSimClient: + def __init__(self): + self.testCluster = False + + self.cfgDict = { + "numOfLogLines": "100000000", + "locale": "en_US.UTF-8", + "charset": "UTF-8", + "asyncLog": "0", + "rpcDebugFlag": "135", + "tmrDebugFlag": "131", + "cDebugFlag": "135", + "udebugFlag": "135", + "jnidebugFlag": "135", + "qdebugFlag": "135", + "telemetryReporting": "0", + } + + def init(self, path): + self.__init__() + self.path = path + + def getLogDir(self): + self.logDir = os.path.join(self.path,"sim","psim","log") + return self.logDir + + def getCfgDir(self): + self.cfgDir = os.path.join(self.path,"sim","psim","cfg") + return self.cfgDir + + def setTestCluster(self, value): + self.testCluster = value + + def addExtraCfg(self, option, value): + self.cfgDict.update({option: value}) + + def cfg(self, option, value): + cmd = "echo %s %s >> %s" % (option, value, self.cfgPath) + if os.system(cmd) != 0: + tdLog.exit(cmd) + + def deploy(self): + self.logDir = os.path.join(self.path,"sim","psim","log") + self.cfgDir = os.path.join(self.path,"sim","psim","cfg") + self.cfgPath = os.path.join(self.path,"sim","psim","cfg","taos.cfg") + + cmd = "rm -rf " + self.logDir + if os.system(cmd) != 0: + tdLog.exit(cmd) + + cmd = "mkdir -p " + self.logDir + if os.system(cmd) != 0: + tdLog.exit(cmd) + + cmd = "rm -rf " + self.cfgDir + if os.system(cmd) != 0: + tdLog.exit(cmd) + + cmd = "mkdir -p " + self.cfgDir + if os.system(cmd) != 0: + tdLog.exit(cmd) + + cmd = "touch " + self.cfgPath + if os.system(cmd) != 0: + tdLog.exit(cmd) + + if self.testCluster: + self.cfg("masterIp", "192.168.0.1") + self.cfg("secondIp", "192.168.0.2") + self.cfg("logDir", self.logDir) + + for key, value in self.cfgDict.items(): + self.cfg(key, value) + + tdLog.debug("psim is deployed and configured by %s" % (self.cfgPath)) + + +class TDDnode: + def __init__(self, index): + self.index = index + self.running = 0 + self.deployed = 0 + self.testCluster = False + self.valgrind = 0 + + def init(self, path): + self.path = path + + def setTestCluster(self, value): + self.testCluster = value + + def setValgrind(self, value): + self.valgrind = value + + def getDataSize(self): + totalSize = 0 + + if (self.deployed == 1): + for dirpath, dirnames, filenames in os.walk(self.dataDir): + for f in filenames: + fp = os.path.join(dirpath, f) + + if not os.path.islink(fp): + totalSize = totalSize + os.path.getsize(fp) + + return totalSize + + def deploy(self): + self.logDir = os.path.join(self.path,"sim","dnode%d" % self.index, "log") + self.dataDir = os.path.join(self.path,"sim","dnode%d" % self.index, "data") + self.cfgDir = os.path.join(self.path,"sim","dnode%d" % self.index, "cfg") + self.cfgPath = os.path.join(self.path,"sim","dnode%d" % self.index, "cfg","taos.cfg") + + cmd = "rm -rf " + self.dataDir + if os.system(cmd) != 0: + tdLog.exit(cmd) + + cmd = "rm -rf " + self.logDir + if os.system(cmd) != 0: + tdLog.exit(cmd) + + cmd = "rm -rf " + self.cfgDir + if os.system(cmd) 
!= 0: + tdLog.exit(cmd) + + cmd = "mkdir -p " + self.dataDir + if os.system(cmd) != 0: + tdLog.exit(cmd) + + cmd = "mkdir -p " + self.logDir + if os.system(cmd) != 0: + tdLog.exit(cmd) + + cmd = "mkdir -p " + self.cfgDir + if os.system(cmd) != 0: + tdLog.exit(cmd) + + cmd = "touch " + self.cfgPath + if os.system(cmd) != 0: + tdLog.exit(cmd) + + if self.testCluster: + self.startIP() + + if self.testCluster: + self.cfg("masterIp", "192.168.0.1") + self.cfg("secondIp", "192.168.0.2") + self.cfg("publicIp", "192.168.0.%d" % (self.index)) + self.cfg("internalIp", "192.168.0.%d" % (self.index)) + self.cfg("privateIp", "192.168.0.%d" % (self.index)) + self.cfg("dataDir", self.dataDir) + self.cfg("logDir", self.logDir) + self.cfg("numOfLogLines", "100000000") + self.cfg("mnodeEqualVnodeNum", "0") + self.cfg("walLevel", "2") + self.cfg("fsync", "1000") + self.cfg("statusInterval", "1") + self.cfg("numOfMnodes", "3") + self.cfg("numOfThreadsPerCore", "2.0") + self.cfg("monitor", "0") + self.cfg("maxVnodeConnections", "30000") + self.cfg("maxMgmtConnections", "30000") + self.cfg("maxMeterConnections", "30000") + self.cfg("maxShellConns", "30000") + self.cfg("locale", "en_US.UTF-8") + self.cfg("charset", "UTF-8") + self.cfg("asyncLog", "0") + self.cfg("anyIp", "0") + self.cfg("dDebugFlag", "135") + self.cfg("mDebugFlag", "135") + self.cfg("sdbDebugFlag", "135") + self.cfg("rpcDebugFlag", "135") + self.cfg("tmrDebugFlag", "131") + self.cfg("cDebugFlag", "135") + self.cfg("httpDebugFlag", "135") + self.cfg("monitorDebugFlag", "135") + self.cfg("udebugFlag", "135") + self.cfg("jnidebugFlag", "135") + self.cfg("qdebugFlag", "135") + self.deployed = 1 + tdLog.debug( + "dnode:%d is deployed and configured by %s" % + (self.index, self.cfgPath)) + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root)-len("/build/bin")] + break + return buildPath + + def start(self): + buildPath = self.getBuildPath() + + if (buildPath == ""): + tdLog.exit("taosd not found!") + else: + tdLog.info("taosd found in %s" % buildPath) + + binPath = buildPath + "/build/bin/taosd" + + if self.deployed == 0: + tdLog.exit("dnode:%d is not deployed" % (self.index)) + + if self.valgrind == 0: + cmd = "nohup %s -c %s --alloc-random-fail --random-file-fail-factor 5 > /dev/null 2>&1 & " % ( + binPath, self.cfgDir) + else: + valgrindCmdline = "valgrind --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all -v --workaround-gcc296-bugs=yes" + + cmd = "nohup %s %s -c %s 2>&1 & " % ( + valgrindCmdline, binPath, self.cfgDir) + + print(cmd) + + if os.system(cmd) != 0: + tdLog.exit(cmd) + self.running = 1 + tdLog.debug("dnode:%d is running with %s " % (self.index, cmd)) + + tdLog.debug("wait 5 seconds for the dnode:%d to start." 
% (self.index)) + time.sleep(5) + + def stop(self): + if self.valgrind == 0: + toBeKilled = "taosd" + else: + toBeKilled = "valgrind.bin" + + if self.running != 0: + psCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}'" % toBeKilled + processID = subprocess.check_output( + psCmd, shell=True).decode("utf-8") + + while(processID): + killCmd = "kill -INT %s > /dev/null 2>&1" % processID + os.system(killCmd) + time.sleep(1) + processID = subprocess.check_output( + psCmd, shell=True).decode("utf-8") + for port in range(6030, 6041): + fuserCmd = "fuser -k -n tcp %d" % port + os.system(fuserCmd) + if self.valgrind: + time.sleep(2) + + self.running = 0 + tdLog.debug("dnode:%d is stopped by kill -INT" % (self.index)) + + def forcestop(self): + if self.valgrind == 0: + toBeKilled = "taosd" + else: + toBeKilled = "valgrind.bin" + + if self.running != 0: + psCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}'" % toBeKilled + processID = subprocess.check_output( + psCmd, shell=True).decode("utf-8") + + while(processID): + killCmd = "kill -KILL %s > /dev/null 2>&1" % processID + os.system(killCmd) + time.sleep(1) + processID = subprocess.check_output( + psCmd, shell=True).decode("utf-8") + for port in range(6030, 6041): + fuserCmd = "fuser -k -n tcp %d" % port + os.system(fuserCmd) + if self.valgrind: + time.sleep(2) + + self.running = 0 + tdLog.debug("dnode:%d is stopped by kill -KILL" % (self.index)) + + def startIP(self): + cmd = "sudo ifconfig lo:%d 192.168.0.%d up" % (self.index, self.index) + if os.system(cmd) != 0: + tdLog.exit(cmd) + + def stopIP(self): + cmd = "sudo ifconfig lo:%d 192.168.0.%d down" % ( + self.index, self.index) + if os.system(cmd) != 0: + tdLog.exit(cmd) + + def cfg(self, option, value): + cmd = "echo %s %s >> %s" % (option, value, self.cfgPath) + if os.system(cmd) != 0: + tdLog.exit(cmd) + + def getDnodeRootDir(self, index): + dnodeRootDir = os.path.join(self.path,"sim","psim","dnode%d" % index) + return dnodeRootDir + + def getDnodesRootDir(self): + dnodesRootDir = os.path.join(self.path,"sim","psim") + return dnodesRootDir + + +class TDDnodes: + def __init__(self): + self.dnodes = [] + self.dnodes.append(TDDnode(1)) + self.dnodes.append(TDDnode(2)) + self.dnodes.append(TDDnode(3)) + self.dnodes.append(TDDnode(4)) + self.dnodes.append(TDDnode(5)) + self.dnodes.append(TDDnode(6)) + self.dnodes.append(TDDnode(7)) + self.dnodes.append(TDDnode(8)) + self.dnodes.append(TDDnode(9)) + self.dnodes.append(TDDnode(10)) + self.simDeployed = False + + def init(self, path): + psCmd = "ps -ef|grep -w taosd| grep -v grep | awk '{print $2}'" + processID = subprocess.check_output(psCmd, shell=True).decode("utf-8") + while(processID): + killCmd = "kill -TERM %s > /dev/null 2>&1" % processID + os.system(killCmd) + time.sleep(1) + processID = subprocess.check_output( + psCmd, shell=True).decode("utf-8") + + psCmd = "ps -ef|grep -w valgrind.bin| grep -v grep | awk '{print $2}'" + processID = subprocess.check_output(psCmd, shell=True).decode("utf-8") + while(processID): + killCmd = "kill -TERM %s > /dev/null 2>&1" % processID + os.system(killCmd) + time.sleep(1) + processID = subprocess.check_output( + psCmd, shell=True).decode("utf-8") + + binPath = os.path.dirname(os.path.realpath(__file__)) + binPath = binPath + "/../../../debug/" + tdLog.debug("binPath %s" % (binPath)) + binPath = os.path.realpath(binPath) + tdLog.debug("binPath real path %s" % (binPath)) + + # cmd = "sudo cp %s/build/lib/libtaos.so /usr/local/lib/taos/" % (binPath) + # tdLog.debug(cmd) + # os.system(cmd) + + # 
cmd = "sudo cp %s/build/bin/taos /usr/local/bin/taos/" % (binPath) + # if os.system(cmd) != 0 : + # tdLog.exit(cmd) + # tdLog.debug("execute %s" % (cmd)) + + # cmd = "sudo cp %s/build/bin/taosd /usr/local/bin/taos/" % (binPath) + # if os.system(cmd) != 0 : + # tdLog.exit(cmd) + # tdLog.debug("execute %s" % (cmd)) + + if path == "": + # self.path = os.path.expanduser('~') + self.path = os.path.abspath(binPath + "../../") + else: + self.path = os.path.realpath(path) + + for i in range(len(self.dnodes)): + self.dnodes[i].init(self.path) + + self.sim = TDSimClient() + self.sim.init(self.path) + + def setTestCluster(self, value): + self.testCluster = value + + def setValgrind(self, value): + self.valgrind = value + + def deploy(self, index): + self.sim.setTestCluster(self.testCluster) + + if (self.simDeployed == False): + self.sim.deploy() + self.simDeployed = True + + self.check(index) + self.dnodes[index - 1].setTestCluster(self.testCluster) + self.dnodes[index - 1].setValgrind(self.valgrind) + self.dnodes[index - 1].deploy() + + def cfg(self, index, option, value): + self.check(index) + self.dnodes[index - 1].cfg(option, value) + + def start(self, index): + self.check(index) + self.dnodes[index - 1].start() + + def stop(self, index): + self.check(index) + self.dnodes[index - 1].stop() + + def getDataSize(self, index): + self.check(index) + return self.dnodes[index - 1].getDataSize() + + def forcestop(self, index): + self.check(index) + self.dnodes[index - 1].forcestop() + + def startIP(self, index): + self.check(index) + + if self.testCluster: + self.dnodes[index - 1].startIP() + + def stopIP(self, index): + self.check(index) + + if self.dnodes[index - 1].testCluster: + self.dnodes[index - 1].stopIP() + + def check(self, index): + if index < 1 or index > 10: + tdLog.exit("index:%d should on a scale of [1, 10]" % (index)) + + def stopAll(self): + tdLog.info("stop all dnodes") + for i in range(len(self.dnodes)): + self.dnodes[i].stop() + + psCmd = "ps -ef | grep -w taosd | grep 'root' | grep -v grep | awk '{print $2}'" + processID = subprocess.check_output(psCmd, shell=True).decode("utf-8") + if processID: + cmd = "sudo systemctl stop taosd" + os.system(cmd) + # if os.system(cmd) != 0 : + # tdLog.exit(cmd) + psCmd = "ps -ef|grep -w taosd| grep -v grep | awk '{print $2}'" + processID = subprocess.check_output(psCmd, shell=True).decode("utf-8") + while(processID): + killCmd = "kill -TERM %s > /dev/null 2>&1" % processID + os.system(killCmd) + time.sleep(1) + processID = subprocess.check_output( + psCmd, shell=True).decode("utf-8") + + psCmd = "ps -ef|grep -w valgrind.bin| grep -v grep | awk '{print $2}'" + processID = subprocess.check_output(psCmd, shell=True).decode("utf-8") + while(processID): + killCmd = "kill -TERM %s > /dev/null 2>&1" % processID + os.system(killCmd) + time.sleep(1) + processID = subprocess.check_output( + psCmd, shell=True).decode("utf-8") + + # if os.system(cmd) != 0 : + # tdLog.exit(cmd) + + def getDnodesRootDir(self): + dnodesRootDir = "%s/sim" % (self.path) + return dnodesRootDir + + def getSimCfgPath(self): + return self.sim.getCfgDir() + + def getSimLogPath(self): + return self.sim.getLogDir() + + def addSimExtraCfg(self, option, value): + self.sim.addExtraCfg(option, value) + + +tdDnodes = TDDnodes() diff --git a/tests/army/frame/dnodes.py b/tests/army/frame/dnodes.py new file mode 100644 index 0000000000..67c3d37960 --- /dev/null +++ b/tests/army/frame/dnodes.py @@ -0,0 +1,890 @@ +################################################################### +# Copyright (c) 
2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import os +import os.path +import platform +import distro +import subprocess +from time import sleep +import base64 +import json +import copy +from fabric2 import Connection +from util.log import * +from shutil import which + + +class TDSimClient: + def __init__(self, path): + self.testCluster = False + self.path = path + self.cfgDict = { + "fqdn": "localhost", + "numOfLogLines": "100000000", + "locale": "en_US.UTF-8", + "charset": "UTF-8", + "asyncLog": "0", + "rpcDebugFlag": "135", + "tmrDebugFlag": "131", + "cDebugFlag": "135", + "uDebugFlag": "135", + "jniDebugFlag": "135", + "qDebugFlag": "135", + "supportVnodes": "1024", + "enableQueryHb": "1", + "telemetryReporting": "0", + "tqDebugflag": "135", + "wDebugflag":"135", + } + + def getLogDir(self): + self.logDir = os.path.join(self.path,"sim","psim","log") + return self.logDir + + def getCfgDir(self): + self.cfgDir = os.path.join(self.path,"sim","psim","cfg") + return self.cfgDir + + def setTestCluster(self, value): + self.testCluster = value + + def addExtraCfg(self, option, value): + self.cfgDict.update({option: value}) + + def cfg(self, option, value): + cmd = "echo %s %s >> %s" % (option, value, self.cfgPath) + if os.system(cmd) != 0: + tdLog.exit(cmd) + + def deploy(self, *updatecfgDict): + self.logDir = os.path.join(self.path,"sim","psim","log") + self.cfgDir = os.path.join(self.path,"sim","psim","cfg") + self.cfgPath = os.path.join(self.path,"sim","psim","cfg","taos.cfg") + + cmd = "rm -rf " + self.logDir + if os.system(cmd) != 0: + tdLog.exit(cmd) + + # cmd = "mkdir -p " + self.logDir + # if os.system(cmd) != 0: + # tdLog.exit(cmd) + os.makedirs(self.logDir) + + cmd = "rm -rf " + self.cfgDir + if os.system(cmd) != 0: + tdLog.exit(cmd) + + # cmd = "mkdir -p " + self.cfgDir + # if os.system(cmd) != 0: + # tdLog.exit(cmd) + os.makedirs(self.cfgDir) + + cmd = "touch " + self.cfgPath + if os.system(cmd) != 0: + tdLog.exit(cmd) + + if self.testCluster: + self.cfg("masterIp", "192.168.0.1") + self.cfg("secondIp", "192.168.0.2") + self.cfg("logDir", self.logDir) + + for key, value in self.cfgDict.items(): + self.cfg(key, value) + + try: + if bool(updatecfgDict) and updatecfgDict[0] and updatecfgDict[0][0]: + clientCfg = dict (updatecfgDict[0][0].get('clientCfg')) + for key, value in clientCfg.items(): + self.cfg(key, value) + except Exception: + pass + + tdLog.debug("psim is deployed and configured by %s" % (self.cfgPath)) + + +class TDDnode: + def __init__(self, index): + self.index = index + self.running = 0 + self.deployed = 0 + self.testCluster = False + self.valgrind = 0 + self.asan = False + self.remoteIP = "" + self.cfgDict = { + "fqdn": "localhost", + "monitor": "0", + "maxShellConns": "30000", + "locale": "en_US.UTF-8", + "charset": "UTF-8", + "asyncLog": "0", + "mDebugFlag": "143", + "dDebugFlag": "143", + "vDebugFlag": "143", + "tqDebugFlag": "143", + "cDebugFlag": "143", + "stDebugFlag": "143", + "smaDebugFlag": "143", + "jniDebugFlag": "143", + "qDebugFlag": "143", + "rpcDebugFlag": "143", + "tmrDebugFlag": "131", + "uDebugFlag": "135", + "sDebugFlag": "135", + "wDebugFlag": "135", + 
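+            # (editorial note) The *DebugFlag entries above are TDengine
+            # per-module log levels; by the convention used in these tests,
+            # 131 keeps only errors/warnings, 135 adds debug output, and 143
+            # adds trace output (an assumption from common usage, not
+            # verified against the server source here).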
"numOfLogLines": "100000000", + "statusInterval": "1", + "enableQueryHb": "1", + "supportVnodes": "1024", + "telemetryReporting": "0" + } + + def init(self, path, remoteIP = ""): + self.path = path + self.remoteIP = remoteIP + if (not self.remoteIP == ""): + try: + self.config = eval(self.remoteIP) + self.remote_conn = Connection(host=self.config["host"], port=self.config["port"], user=self.config["user"], connect_kwargs={'password':self.config["password"]}) + except Exception as r: + print(r) + + def setTestCluster(self, value): + self.testCluster = value + + def setValgrind(self, value): + self.valgrind = value + + def setAsan(self, value): + self.asan = value + if value: + selfPath = os.path.dirname(os.path.realpath(__file__)) + if ("community" in selfPath): + self.execPath = os.path.abspath(self.path + "/community/tests/script/sh/exec.sh") + else: + self.execPath = os.path.abspath(self.path + "/tests/script/sh/exec.sh") + + def getDataSize(self): + totalSize = 0 + + if (self.deployed == 1): + for dirpath, dirnames, filenames in os.walk(self.dataDir): + for f in filenames: + fp = os.path.join(dirpath, f) + + if not os.path.islink(fp): + totalSize = totalSize + os.path.getsize(fp) + + return totalSize + + def addExtraCfg(self, option, value): + self.cfgDict.update({option: value}) + + def remoteExec(self, updateCfgDict, execCmd): + valgrindStr = '' + if (self.valgrind==1): + valgrindStr = '-g' + remoteCfgDict = copy.deepcopy(updateCfgDict) + if ("logDir" in remoteCfgDict): + del remoteCfgDict["logDir"] + if ("dataDir" in remoteCfgDict): + del remoteCfgDict["dataDir"] + if ("cfgDir" in remoteCfgDict): + del remoteCfgDict["cfgDir"] + remoteCfgDictStr = base64.b64encode(json.dumps(remoteCfgDict).encode()).decode() + execCmdStr = base64.b64encode(execCmd.encode()).decode() + with self.remote_conn.cd((self.config["path"]+sys.path[0].replace(self.path, '')).replace('\\','/')): + self.remote_conn.run("python3 ./test.py %s -d %s -e %s"%(valgrindStr,remoteCfgDictStr,execCmdStr)) + + def deploy(self, *updatecfgDict): + self.logDir = os.path.join(self.path,"sim","dnode%d" % self.index, "log") + self.dataDir = os.path.join(self.path,"sim","dnode%d" % self.index, "data") + self.cfgDir = os.path.join(self.path,"sim","dnode%d" % self.index, "cfg") + self.cfgPath = os.path.join(self.path,"sim","dnode%d" % self.index, "cfg","taos.cfg") + + cmd = "rm -rf " + self.dataDir + if os.system(cmd) != 0: + tdLog.exit(cmd) + + cmd = "rm -rf " + self.logDir + if os.system(cmd) != 0: + tdLog.exit(cmd) + + cmd = "rm -rf " + self.cfgDir + if os.system(cmd) != 0: + tdLog.exit(cmd) + + # cmd = "mkdir -p " + self.dataDir + # if os.system(cmd) != 0: + # tdLog.exit(cmd) + os.makedirs(self.dataDir) + + # cmd = "mkdir -p " + self.logDir + # if os.system(cmd) != 0: + # tdLog.exit(cmd) + os.makedirs(self.logDir) + + # cmd = "mkdir -p " + self.cfgDir + # if os.system(cmd) != 0: + # tdLog.exit(cmd) + os.makedirs(self.cfgDir) + + cmd = "touch " + self.cfgPath + if os.system(cmd) != 0: + tdLog.exit(cmd) + + if self.testCluster: + self.startIP() + + if self.testCluster: + self.cfg("masterIp", "192.168.0.1") + self.cfg("secondIp", "192.168.0.2") + self.cfg("publicIp", "192.168.0.%d" % (self.index)) + self.cfg("internalIp", "192.168.0.%d" % (self.index)) + self.cfg("privateIp", "192.168.0.%d" % (self.index)) + self.cfgDict["dataDir"] = self.dataDir + self.cfgDict["logDir"] = self.logDir + # self.cfg("dataDir",self.dataDir) + # self.cfg("logDir",self.logDir) + # print(updatecfgDict) + isFirstDir = 1 + if bool(updatecfgDict) and 
updatecfgDict[0] and updatecfgDict[0][0]: + for key, value in updatecfgDict[0][0].items(): + if key == "clientCfg" and self.remoteIP == "" and not platform.system().lower() == 'windows': + continue + if value == 'dataDir': + if isFirstDir: + self.cfgDict.pop('dataDir') + self.cfg(value, key) + isFirstDir = 0 + else: + self.cfg(value, key) + else: + self.addExtraCfg(key, value) + if (self.remoteIP == ""): + for key, value in self.cfgDict.items(): + self.cfg(key, value) + else: + self.remoteExec(self.cfgDict, "tdDnodes.deploy(%d,updateCfgDict)"%self.index) + + self.deployed = 1 + tdLog.debug( + "dnode:%d is deployed and configured by %s" % + (self.index, self.cfgPath)) + + def getPath(self, tool="taosd"): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + paths = [] + for root, dirs, files in os.walk(projPath): + if ((tool) in files or ("%s.exe"%tool) in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + paths.append(os.path.join(root, tool)) + break + if (len(paths) == 0): + return "" + return paths[0] + + def starttaosd(self): + binPath = self.getPath() + + if (binPath == ""): + tdLog.exit("taosd not found!") + else: + tdLog.info("taosd found: %s" % binPath) + + if self.deployed == 0: + tdLog.exit("dnode:%d is not deployed" % (self.index)) + + if self.valgrind == 0: + if platform.system().lower() == 'windows': + cmd = "mintty -h never %s -c %s" % ( + binPath, self.cfgDir) + else: + if self.asan: + asanDir = "%s/sim/asan/dnode%d.asan" % ( + self.path, self.index) + cmd = "nohup %s -c %s > /dev/null 2> %s & " % ( + binPath, self.cfgDir, asanDir) + else: + cmd = "nohup %s -c %s > /dev/null 2>&1 & " % ( + binPath, self.cfgDir) + else: + valgrindCmdline = "valgrind --log-file=\"%s/../log/valgrind.log\" --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all -v --workaround-gcc296-bugs=yes"%self.cfgDir + + if platform.system().lower() == 'windows': + cmd = "mintty -h never %s %s -c %s" % ( + valgrindCmdline, binPath, self.cfgDir) + else: + cmd = "nohup %s %s -c %s 2>&1 & " % ( + valgrindCmdline, binPath, self.cfgDir) + + print(cmd) + + if (not self.remoteIP == ""): + self.remoteExec(self.cfgDict, "tdDnodes.dnodes[%d].deployed=1\ntdDnodes.dnodes[%d].logDir=\"%%s/sim/dnode%%d/log\"%%(tdDnodes.dnodes[%d].path,%d)\ntdDnodes.dnodes[%d].cfgDir=\"%%s/sim/dnode%%d/cfg\"%%(tdDnodes.dnodes[%d].path,%d)\ntdDnodes.start(%d)"%(self.index-1,self.index-1,self.index-1,self.index,self.index-1,self.index-1,self.index,self.index)) + self.running = 1 + else: + if os.system(cmd) != 0: + tdLog.exit(cmd) + self.running = 1 + tdLog.debug("dnode:%d is running with %s " % (self.index, cmd)) + if self.valgrind == 0: + time.sleep(0.1) + key1 = 'from offline to online' + bkey1 = bytes(key1, encoding="utf8") + key2= 'TDengine initialized successfully' + bkey2 = bytes(key2, encoding="utf8") + logFile = self.logDir + "/taosdlog.0" + i = 0 + # while not os.path.exists(logFile): + # sleep(0.1) + # i += 1 + # if i > 10: + # break + # tailCmdStr = 'tail -f ' + # if platform.system().lower() == 'windows': + # tailCmdStr = 'tail -n +0 -f ' + # popen = subprocess.Popen( + # tailCmdStr + logFile, + # stdout=subprocess.PIPE, + # stderr=subprocess.PIPE, + # shell=True) + # pid = popen.pid + # # print('Popen.pid:' + str(pid)) + # timeout = time.time() + 60 * 2 + # while True: + # line = 
popen.stdout.readline().strip() + # print(line) + # if bkey1 in line: + # popen.kill() + # break + # elif bkey2 in line: + # popen.kill() + # break + # if time.time() > timeout: + # print(time.time(),timeout) + # tdLog.exit('wait too long for taosd start') + tdLog.debug("the dnode:%d has been started." % (self.index)) + else: + tdLog.debug( + "wait 10 seconds for the dnode:%d to start." % + (self.index)) + time.sleep(10) + + def start(self): + binPath = self.getPath() + + if (binPath == ""): + tdLog.exit("taosd not found!") + else: + tdLog.info("taosd found: %s" % binPath) + + if self.deployed == 0: + tdLog.exit("dnode:%d is not deployed" % (self.index)) + + if self.valgrind == 0: + if platform.system().lower() == 'windows': + cmd = "mintty -h never %s -c %s" % ( + binPath, self.cfgDir) + else: + if self.asan: + asanDir = "%s/sim/asan/dnode%d.asan" % ( + self.path, self.index) + cmd = "nohup %s -c %s > /dev/null 2> %s & " % ( + binPath, self.cfgDir, asanDir) + else: + cmd = "nohup %s -c %s > /dev/null 2>&1 & " % ( + binPath, self.cfgDir) + else: + valgrindCmdline = "valgrind --log-file=\"%s/../log/valgrind.log\" --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all -v --workaround-gcc296-bugs=yes"%self.cfgDir + + if platform.system().lower() == 'windows': + cmd = "mintty -h never %s %s -c %s" % ( + valgrindCmdline, binPath, self.cfgDir) + else: + cmd = "nohup %s %s -c %s 2>&1 & " % ( + valgrindCmdline, binPath, self.cfgDir) + + print(cmd) + + if (not self.remoteIP == ""): + self.remoteExec(self.cfgDict, "tdDnodes.dnodes[%d].deployed=1\ntdDnodes.dnodes[%d].logDir=\"%%s/sim/dnode%%d/log\"%%(tdDnodes.dnodes[%d].path,%d)\ntdDnodes.dnodes[%d].cfgDir=\"%%s/sim/dnode%%d/cfg\"%%(tdDnodes.dnodes[%d].path,%d)\ntdDnodes.start(%d)"%(self.index-1,self.index-1,self.index-1,self.index,self.index-1,self.index-1,self.index,self.index)) + self.running = 1 + else: + os.system("rm -rf %s/taosdlog.0"%self.logDir) + if os.system(cmd) != 0: + tdLog.exit(cmd) + self.running = 1 + tdLog.debug("dnode:%d is running with %s " % (self.index, cmd)) + if self.valgrind == 0: + time.sleep(0.1) + key = 'from offline to online' + bkey = bytes(key, encoding="utf8") + logFile = self.logDir + "/taosdlog.0" + i = 0 + while not os.path.exists(logFile): + sleep(0.1) + i += 1 + if i > 50: + break + with open(logFile) as f: + timeout = time.time() + 10 * 2 + while True: + line = f.readline().encode('utf-8') + if bkey in line: + break + if time.time() > timeout: + tdLog.exit('wait too long for taosd start') + tdLog.debug("the dnode:%d has been started." % (self.index)) + else: + tdLog.debug( + "wait 10 seconds for the dnode:%d to start." 
% + (self.index)) + time.sleep(10) + + def startWithoutSleep(self): + binPath = self.getPath() + + if (binPath == ""): + tdLog.exit("taosd not found!") + else: + tdLog.info("taosd found: %s" % binPath) + + if self.deployed == 0: + tdLog.exit("dnode:%d is not deployed" % (self.index)) + + if self.valgrind == 0: + if platform.system().lower() == 'windows': + cmd = "mintty -h never %s -c %s" % (binPath, self.cfgDir) + else: + if self.asan: + asanDir = "%s/sim/asan/dnode%d.asan" % ( + self.path, self.index) + cmd = "nohup %s -c %s > /dev/null 2> %s & " % ( + binPath, self.cfgDir, asanDir) + else: + cmd = "nohup %s -c %s > /dev/null 2>&1 & " % ( + binPath, self.cfgDir) + else: + valgrindCmdline = "valgrind --log-file=\"%s/../log/valgrind.log\" --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all -v --workaround-gcc296-bugs=yes"%self.cfgDir + if platform.system().lower() == 'windows': + cmd = "mintty -h never %s %s -c %s" % ( + valgrindCmdline, binPath, self.cfgDir) + else: + cmd = "nohup %s %s -c %s 2>&1 & " % ( + valgrindCmdline, binPath, self.cfgDir) + print(cmd) + + if (self.remoteIP == ""): + if os.system(cmd) != 0: + tdLog.exit(cmd) + else: + self.remoteExec(self.cfgDict, "tdDnodes.dnodes[%d].deployed=1\ntdDnodes.dnodes[%d].logDir=\"%%s/sim/dnode%%d/log\"%%(tdDnodes.dnodes[%d].path,%d)\ntdDnodes.dnodes[%d].cfgDir=\"%%s/sim/dnode%%d/cfg\"%%(tdDnodes.dnodes[%d].path,%d)\ntdDnodes.startWithoutSleep(%d)"%(self.index-1,self.index-1,self.index-1,self.index,self.index-1,self.index-1,self.index,self.index)) + + self.running = 1 + tdLog.debug("dnode:%d is running with %s " % (self.index, cmd)) + + def stop(self): + if self.asan: + stopCmd = "%s -s stop -n dnode%d" % (self.execPath, self.index) + tdLog.info("execute script: " + stopCmd) + os.system(stopCmd) + return + + if (not self.remoteIP == ""): + self.remoteExec(self.cfgDict, "tdDnodes.dnodes[%d].running=1\ntdDnodes.dnodes[%d].stop()"%(self.index-1,self.index-1)) + tdLog.info("stop dnode%d"%self.index) + return + if self.valgrind == 0: + toBeKilled = "taosd" + else: + toBeKilled = "valgrind.bin" + + if self.running != 0: + psCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}' | xargs" % toBeKilled + processID = subprocess.check_output( + psCmd, shell=True).decode("utf-8").strip() + + onlyKillOnceWindows = 0 + while(processID): + if not platform.system().lower() == 'windows' or (onlyKillOnceWindows == 0 and platform.system().lower() == 'windows'): + killCmd = "kill -INT %s > /dev/null 2>&1" % processID + if platform.system().lower() == 'windows': + killCmd = "kill -INT %s > nul 2>&1" % processID + os.system(killCmd) + onlyKillOnceWindows = 1 + time.sleep(1) + processID = subprocess.check_output( + psCmd, shell=True).decode("utf-8").strip() + if not platform.system().lower() == 'windows': + for port in range(6030, 6041): + fuserCmd = "fuser -k -n tcp %d > /dev/null" % port + os.system(fuserCmd) + if self.valgrind: + time.sleep(2) + + self.running = 0 + tdLog.debug("dnode:%d is stopped by kill -INT" % (self.index)) + + + def stoptaosd(self): + tdLog.debug("start to stop taosd on dnode: %d "% (self.index)) + # print(self.asan,self.running,self.remoteIP,self.valgrind) + if self.asan: + stopCmd = "%s -s stop -n dnode%d" % (self.execPath, self.index) + tdLog.info("execute script: " + stopCmd) + os.system(stopCmd) + return + + if (not self.remoteIP == ""): + self.remoteExec(self.cfgDict, "tdDnodes.dnodes[%d].running=1\ntdDnodes.dnodes[%d].stop()"%(self.index-1,self.index-1)) + tdLog.info("stop 
dnode%d"%self.index) + return + if self.valgrind == 0: + toBeKilled = "taosd" + else: + toBeKilled = "valgrind.bin" + + if self.running != 0: + if platform.system().lower() == 'windows': + psCmd = "for /f %%a in ('wmic process where \"name='taosd.exe' and CommandLine like '%%dnode%d%%'\" get processId ^| xargs echo ^| awk ^'{print $2}^' ^&^& echo aa') do @(ps | grep %%a | awk '{print $1}' | xargs)" % (self.index) + else: + psCmd = "ps -ef|grep -w %s| grep dnode%d|grep -v grep | awk '{print $2}' | xargs" % (toBeKilled,self.index) + processID = subprocess.check_output( + psCmd, shell=True).decode("utf-8").strip() + + onlyKillOnceWindows = 0 + while(processID): + if not platform.system().lower() == 'windows' or (onlyKillOnceWindows == 0 and platform.system().lower() == 'windows'): + killCmd = "kill -INT %s > /dev/null 2>&1" % processID + if platform.system().lower() == 'windows': + killCmd = "kill -INT %s > nul 2>&1" % processID + os.system(killCmd) + onlyKillOnceWindows = 1 + time.sleep(1) + processID = subprocess.check_output( + psCmd, shell=True).decode("utf-8").strip() + if self.valgrind: + time.sleep(2) + + self.running = 0 + tdLog.debug("dnode:%d is stopped by kill -INT" % (self.index)) + + def forcestop(self): + if self.asan: + stopCmd = "%s -s stop -n dnode%d -x SIGKILL" + \ + (self.execPath, self.index) + tdLog.info("execute script: " + stopCmd) + os.system(stopCmd) + return + + if (not self.remoteIP == ""): + self.remoteExec(self.cfgDict, "tdDnodes.dnodes[%d].running=1\ntdDnodes.dnodes[%d].forcestop()"%(self.index-1,self.index-1)) + return + if self.valgrind == 0: + toBeKilled = "taosd" + else: + toBeKilled = "valgrind.bin" + + if self.running != 0: + psCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}' | xargs" % toBeKilled + processID = subprocess.check_output( + psCmd, shell=True).decode("utf-8").strip() + + onlyKillOnceWindows = 0 + while(processID): + if not platform.system().lower() == 'windows' or (onlyKillOnceWindows == 0 and platform.system().lower() == 'windows'): + killCmd = "kill -KILL %s > /dev/null 2>&1" % processID + os.system(killCmd) + onlyKillOnceWindows = 1 + time.sleep(1) + processID = subprocess.check_output( + psCmd, shell=True).decode("utf-8").strip() + for port in range(6030, 6041): + fuserCmd = "fuser -k -n tcp %d" % port + os.system(fuserCmd) + if self.valgrind: + time.sleep(2) + + self.running = 0 + tdLog.debug("dnode:%d is stopped by kill -KILL" % (self.index)) + + def startIP(self): + cmd = "sudo ifconfig lo:%d 192.168.0.%d up" % (self.index, self.index) + if os.system(cmd) != 0: + tdLog.exit(cmd) + + def stopIP(self): + cmd = "sudo ifconfig lo:%d 192.168.0.%d down" % ( + self.index, self.index) + if os.system(cmd) != 0: + tdLog.exit(cmd) + + def cfg(self, option, value): + cmd = "echo %s %s >> %s" % (option, value, self.cfgPath) + if os.system(cmd) != 0: + tdLog.exit(cmd) + + def getDnodeRootDir(self, index): + dnodeRootDir = os.path.join(self.path,"sim","psim","dnode%d" % index) + return dnodeRootDir + + def getDnodesRootDir(self): + dnodesRootDir = os.path.join(self.path,"sim","psim") + return dnodesRootDir + + +class TDDnodes: + def __init__(self): + self.dnodes = [] + self.dnodes.append(TDDnode(1)) + self.dnodes.append(TDDnode(2)) + self.dnodes.append(TDDnode(3)) + self.dnodes.append(TDDnode(4)) + self.dnodes.append(TDDnode(5)) + self.dnodes.append(TDDnode(6)) + self.dnodes.append(TDDnode(7)) + self.dnodes.append(TDDnode(8)) + self.dnodes.append(TDDnode(9)) + self.dnodes.append(TDDnode(10)) + self.simDeployed = False + self.testCluster = 
False + self.valgrind = 0 + self.asan = False + self.killValgrind = 0 + + def init(self, path, remoteIP = ""): + binPath = self.dnodes[0].getPath() + "/../../../" + # tdLog.debug("binPath %s" % (binPath)) + binPath = os.path.realpath(binPath) + # tdLog.debug("binPath real path %s" % (binPath)) + + if path == "": + self.path = os.path.abspath(binPath + "../../") + else: + self.path = os.path.realpath(path) + + for i in range(len(self.dnodes)): + self.dnodes[i].init(self.path, remoteIP) + self.sim = TDSimClient(self.path) + + def setTestCluster(self, value): + self.testCluster = value + + def setValgrind(self, value): + self.valgrind = value + + def setAsan(self, value): + self.asan = value + if value: + selfPath = os.path.dirname(os.path.realpath(__file__)) + if ("community" in selfPath): + self.stopDnodesPath = os.path.abspath(self.path + "/community/tests/script/sh/stop_dnodes.sh") + self.stopDnodesSigintPath = os.path.abspath(self.path + "/community/tests/script/sh/sigint_stop_dnodes.sh") + else: + self.stopDnodesPath = os.path.abspath(self.path + "/tests/script/sh/stop_dnodes.sh") + self.stopDnodesSigintPath = os.path.abspath(self.path + "/tests/script/sh/sigint_stop_dnodes.sh") + tdLog.info("run in address sanitizer mode") + + def setKillValgrind(self, value): + self.killValgrind = value + + def deploy(self, index, *updatecfgDict): + self.sim.setTestCluster(self.testCluster) + + if (self.simDeployed == False): + self.sim.deploy(updatecfgDict) + self.simDeployed = True + + self.check(index) + self.dnodes[index - 1].setTestCluster(self.testCluster) + self.dnodes[index - 1].setValgrind(self.valgrind) + self.dnodes[index - 1].setAsan(self.asan) + self.dnodes[index - 1].deploy(updatecfgDict) + + def cfg(self, index, option, value): + self.check(index) + self.dnodes[index - 1].cfg(option, value) + + def starttaosd(self, index): + self.check(index) + self.dnodes[index - 1].starttaosd() + + def stoptaosd(self, index): + self.check(index) + self.dnodes[index - 1].stoptaosd() + + def start(self, index): + self.check(index) + self.dnodes[index - 1].start() + + def startWithoutSleep(self, index): + self.check(index) + self.dnodes[index - 1].startWithoutSleep() + + def stop(self, index): + self.check(index) + self.dnodes[index - 1].stop() + + def getDataSize(self, index): + self.check(index) + return self.dnodes[index - 1].getDataSize() + + def forcestop(self, index): + self.check(index) + self.dnodes[index - 1].forcestop() + + def startIP(self, index): + self.check(index) + + if self.testCluster: + self.dnodes[index - 1].startIP() + + def stopIP(self, index): + self.check(index) + + if self.dnodes[index - 1].testCluster: + self.dnodes[index - 1].stopIP() + + def check(self, index): + if index < 1 or index > 10: + tdLog.exit("index:%d should on a scale of [1, 10]" % (index)) + + def StopAllSigint(self): + tdLog.info("stop all dnodes sigint, asan:%d" % self.asan) + if self.asan: + tdLog.info("execute script: %s" % self.stopDnodesSigintPath) + os.system(self.stopDnodesSigintPath) + tdLog.info("execute finished") + return + + def killProcesser(self, processerName): + if platform.system().lower() == 'windows': + killCmd = ("wmic process where name=\"%s.exe\" call terminate > NUL 2>&1" % processerName) + psCmd = ("wmic process where name=\"%s.exe\" | findstr \"%s.exe\"" % (processerName, processerName)) + else: + killCmd = ( + "ps -ef|grep -w %s| grep -v grep | awk '{print $2}' | xargs kill -TERM > /dev/null 2>&1" + % processerName + ) + psCmd = ("ps -ef|grep -w %s| grep -v grep | awk '{print $2}'" % 
processerName)
+
+        processID = ""
+
+        try:
+            processID = subprocess.check_output(psCmd, shell=True)
+            while processID:
+                os.system(killCmd)
+                time.sleep(1)
+                try:
+                    processID = subprocess.check_output(psCmd, shell=True)
+                except Exception as err:
+                    processID = ""
+                    tdLog.debug(f'**** kill pid warn: {err}')
+        except Exception as err:
+            processID = ""
+            tdLog.debug(f'**** find pid warn: {err}')
+
+
+    def stopAll(self):
+        tdLog.info("stop all dnodes, asan:%d" % self.asan)
+        if platform.system().lower() != 'windows':
+            distro_id = distro.id()
+        else:
+            distro_id = "not alpine"
+        if self.asan and distro_id != "alpine":
+            tdLog.info("execute script: %s" % self.stopDnodesPath)
+            os.system(self.stopDnodesPath)
+            tdLog.info("execute finished")
+            return
+
+        if (not self.dnodes[0].remoteIP == ""):
+            self.dnodes[0].remoteExec(self.dnodes[0].cfgDict, "for i in range(len(tdDnodes.dnodes)):\n tdDnodes.dnodes[i].running=1\ntdDnodes.stopAll()")
+            return
+        for i in range(len(self.dnodes)):
+            self.dnodes[i].stop()
+
+        if (distro_id == "alpine"):
+            psCmd = "ps -ef | grep -w taosd | grep 'root' | grep -v grep| grep -v defunct | awk '{print $2}' | xargs"
+            processID = subprocess.check_output(psCmd, shell=True).decode("utf-8").strip()
+            while(processID):
+                print(processID)
+                killCmd = "kill -9 %s > /dev/null 2>&1" % processID
+                os.system(killCmd)
+                time.sleep(1)
+                processID = subprocess.check_output(
+                    psCmd, shell=True).decode("utf-8").strip()
+        elif platform.system().lower() == 'windows':
+            self.killProcesser("taosd")
+            self.killProcesser("tmq_sim")
+            self.killProcesser("taosBenchmark")
+        else:
+            psCmd = "ps -ef | grep -w taosd | grep 'root' | grep -v grep| grep -v defunct | awk '{print $2}' | xargs"
+            processID = subprocess.check_output(psCmd, shell=True).decode("utf-8").strip()
+            if processID:
+                cmd = "sudo systemctl stop taosd"
+                os.system(cmd)
+                # if os.system(cmd) != 0 :
+                #   tdLog.exit(cmd)
+            psCmd = "ps -ef|grep -w taosd| grep -v grep| grep -v defunct | awk '{print $2}' | xargs"
+            processID = subprocess.check_output(psCmd, shell=True).decode("utf-8").strip()
+            while(processID):
+                killCmd = "kill -9 %s > /dev/null 2>&1" % processID
+                os.system(killCmd)
+                time.sleep(1)
+                processID = subprocess.check_output(
+                    psCmd, shell=True).decode("utf-8").strip()
+        if self.killValgrind == 1:
+            psCmd = "ps -ef|grep -w valgrind.bin| grep -v grep | awk '{print $2}' | xargs"
+            processID = subprocess.check_output(psCmd, shell=True).decode("utf-8").strip()
+            while(processID):
+                if platform.system().lower() == 'windows':
+                    killCmd = "kill -TERM %s > nul 2>&1" % processID
+                else:
+                    killCmd = "kill -TERM %s > /dev/null 2>&1" % processID
+                os.system(killCmd)
+                time.sleep(1)
+                processID = subprocess.check_output(
+                    psCmd, shell=True).decode("utf-8").strip()
+
+        # if os.system(cmd) != 0 :
+        #   tdLog.exit(cmd)
+
+    def getDnodesRootDir(self):
+        dnodesRootDir = "%s/sim" % (self.path)
+        return dnodesRootDir
+
+    def getSimCfgPath(self):
+        return self.sim.getCfgDir()
+
+    def getSimLogPath(self):
+        return self.sim.getLogDir()
+
+    def addSimExtraCfg(self, option, value):
+        self.sim.addExtraCfg(option, value)
+
+    def getAsan(self):
+        return self.asan
+
+tdDnodes = TDDnodes()
\ No newline at end of file
diff --git a/tests/army/frame/gettime.py b/tests/army/frame/gettime.py
new file mode 100644
index 0000000000..d4a5e18dc9
--- /dev/null
+++ b/tests/army/frame/gettime.py
@@ -0,0 +1,65 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
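+#
+# (editorial usage sketch for the GetTime helper defined below; it converts
+# "YYYY-MM-DD HH:MM:SS[.ffffff[nnn]]" strings to epoch integers using the
+# local timezone, per the mktime-based implementation; the import path is
+# assumed from this patch layout)
+#
+#     from frame.gettime import GetTime
+#     g = GetTime()
+#     g.get_ms_timestamp("2022-01-01 00:00:00.500")        # epoch milliseconds
+#     g.get_us_timestamp("2022-01-01 00:00:00.500001")     # epoch microseconds
+#     g.get_ns_timestamp("2022-01-01 00:00:00.500001999")  # epoch nanoseconds
+#     g.time_transform(["2022-01-01 00:00:00.500"], "ms")  # list in, list out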
+# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import time +from datetime import datetime + +class GetTime: + + def get_ms_timestamp(self,ts_str): + _ts_str = ts_str + if "+" in _ts_str: + timestamp = datetime.fromisoformat(_ts_str) + return int((timestamp-datetime.fromtimestamp(0,timestamp.tzinfo)).total_seconds())*1000+int(timestamp.microsecond / 1000) + if " " in ts_str: + p = ts_str.split(" ")[1] + if len(p) > 15 : + _ts_str = ts_str[:-3] + if ':' in _ts_str and '.' in _ts_str: + timestamp = datetime.strptime(_ts_str, "%Y-%m-%d %H:%M:%S.%f") + date_time = int(int(time.mktime(timestamp.timetuple()))*1000 + timestamp.microsecond/1000) + elif ':' in _ts_str and '.' not in _ts_str: + timestamp = datetime.strptime(_ts_str, "%Y-%m-%d %H:%M:%S") + date_time = int(int(time.mktime(timestamp.timetuple()))*1000 + timestamp.microsecond/1000) + else: + timestamp = datetime.strptime(_ts_str, "%Y-%m-%d") + date_time = int(int(time.mktime(timestamp.timetuple()))*1000 + timestamp.microsecond/1000) + return date_time + def get_us_timestamp(self,ts_str): + _ts = self.get_ms_timestamp(ts_str) * 1000 + if " " in ts_str: + p = ts_str.split(" ")[1] + if len(p) > 12: + us_ts = p[12:15] + _ts += int(us_ts) + return _ts + def get_ns_timestamp(self,ts_str): + _ts = self.get_us_timestamp(ts_str) *1000 + if " " in ts_str: + p = ts_str.split(" ")[1] + if len(p) > 15: + us_ts = p[15:] + _ts += int(us_ts) + return _ts + def time_transform(self,ts_str,precision): + date_time = [] + if precision == 'ms': + for i in ts_str: + date_time.append(self.get_ms_timestamp(i)) + elif precision == 'us': + for i in ts_str: + date_time.append(self.get_us_timestamp(i)) + elif precision == 'ns': + for i in ts_str: + date_time.append(self.get_ns_timestamp(i)) + return date_time \ No newline at end of file diff --git a/tests/army/frame/log.py b/tests/army/frame/log.py new file mode 100644 index 0000000000..000c907ea4 --- /dev/null +++ b/tests/army/frame/log.py @@ -0,0 +1,49 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
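+#
+# (editorial usage note for the TDLog logger defined in this file: tdLog at
+#  the bottom is the module-level singleton imported across the frame)
+#
+#     tdLog.info("message")        # timestamped plain output
+#     tdLog.debug("message")       # cyan
+#     tdLog.notice("message")      # yellow
+#     tdLog.exit("fatal message")  # red, then sys.exit(1)
+#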
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import os
+import time
+import datetime
+
+# distutils was deprecated and removed in Python 3.12; a plain alias keeps
+# the historical printf() name used by success() below
+printf = print
+
+
+class TDLog:
+    def __init__(self):
+        self.path = ""
+
+    def info(self, info):
+        print("%s %s\n" % (datetime.datetime.now(), info))
+
+    def sleep(self, sec):
+        print("%s sleep %d seconds" % (datetime.datetime.now(), sec))
+        time.sleep(sec)
+
+    def debug(self, err):
+        print("\033[1;36m%s %s\033[0m" % (datetime.datetime.now(), err))
+
+    def success(self, info):
+        printf("\033[1;32m%s %s\033[0m" % (datetime.datetime.now(), info))
+
+    def notice(self, err):
+        print("\033[1;33m%s %s\033[0m" % (datetime.datetime.now(), err))
+
+    def exit(self, err):
+        print("\033[1;31m%s %s\033[0m" % (datetime.datetime.now(), err))
+        sys.exit(1)
+
+    def printNoPrefix(self, info):
+        print("\033[1;36m%s\033[0m" % (info))
+
+
+tdLog = TDLog()
diff --git a/tests/army/frame/pathFinding.py b/tests/army/frame/pathFinding.py
new file mode 100644
index 0000000000..9dee5142ce
--- /dev/null
+++ b/tests/army/frame/pathFinding.py
@@ -0,0 +1,83 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+
+import os
+from util.log import *
+
+
+class TDFindPath:
+    """This class is for finding paths within TDengine
+    """
+    def __init__(self):
+        self.file = ""
+
+    def init(self, file):
+        """Set the reference file used as the starting point for path lookups.
+
+        Args:
+            file (str): the file location you want to start the query. 
Generally using __file__ + """ + self.file = file + + def getTaosdemoPath(self): + """for finding the path of directory containing taosdemo + + Returns: + str: the path to directory containing taosdemo + """ + selfPath = os.path.dirname(os.path.realpath(self.file)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root)-len("/build/bin")] + break + if (buildPath == ""): + tdLog.exit("taosd not found!") + else: + tdLog.info(f"taosd found in {buildPath}") + return buildPath + "/build/bin/" + + def getTDenginePath(self): + """for finding the root path of TDengine + + Returns: + str: the root path of TDengine + """ + selfPath = os.path.dirname(os.path.realpath(self.file)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + print(projPath) + for root, dirs, files in os.walk(projPath): + if ("sim" in dirs): + print(root) + rootRealPath = os.path.realpath(root) + if (rootRealPath == ""): + tdLog.exit("TDengine not found!") + else: + tdLog.info(f"TDengine found in {rootRealPath}") + return rootRealPath + +tdFindPath = TDFindPath() \ No newline at end of file diff --git a/tests/army/frame/sql.py b/tests/army/frame/sql.py new file mode 100644 index 0000000000..c05df0a852 --- /dev/null +++ b/tests/army/frame/sql.py @@ -0,0 +1,655 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
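+#
+# (editorial usage sketch for the TDSql wrapper defined below; it assumes a
+#  cursor from the taos Python connector and the usual module-level tdSql
+#  singleton, which is not shown in this hunk)
+#
+#     tdSql.init(conn.cursor())
+#     tdSql.prepare("db")                  # reset query cache, create+use db
+#     tdSql.query("select * from stb")     # retries up to queryTimes on error
+#     tdSql.checkRows(100)                 # exits the run on mismatch
+#     tdSql.checkData(0, 1, 23.5)
+#     tdSql.error("select * from no_such_tb")  # passes only if the SQL fails
+#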
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import os +import time +import datetime +import inspect +import traceback +import psutil +import shutil +import pandas as pd +from util.log import * +from util.constant import * + +# from datetime import timezone +import time + +def _parse_ns_timestamp(timestr): + dt_obj = datetime.datetime.strptime(timestr[:len(timestr)-3], "%Y-%m-%d %H:%M:%S.%f") + tz = int(int((dt_obj-datetime.datetime.fromtimestamp(0,dt_obj.tzinfo)).total_seconds())*1e9) + int(dt_obj.microsecond * 1000) + int(timestr[-3:]) + return tz + + +def _parse_datetime(timestr): + try: + return datetime.datetime.strptime(timestr, '%Y-%m-%d %H:%M:%S.%f') + except ValueError: + pass + try: + return datetime.datetime.strptime(timestr, '%Y-%m-%d %H:%M:%S') + except ValueError: + pass + +class TDSql: + def __init__(self): + self.queryRows = 0 + self.queryCols = 0 + self.affectedRows = 0 + + def init(self, cursor, log=False): + self.cursor = cursor + + if (log): + caller = inspect.getframeinfo(inspect.stack()[1][0]) + self.cursor.log(caller.filename + ".sql") + + def close(self): + self.cursor.close() + + def prepare(self, dbname="db", drop=True, **kwargs): + tdLog.info(f"prepare database:{dbname}") + s = 'reset query cache' + try: + self.cursor.execute(s) + except: + tdLog.notice("'reset query cache' is not supported") + if drop: + s = f'drop database if exists {dbname}' + self.cursor.execute(s) + s = f'create database {dbname}' + for k, v in kwargs.items(): + s += f" {k} {v}" + if "duration" not in kwargs: + s += " duration 300" + self.cursor.execute(s) + s = f'use {dbname}' + self.cursor.execute(s) + time.sleep(2) + + def error(self, sql, expectedErrno = None, expectErrInfo = None): + caller = inspect.getframeinfo(inspect.stack()[1][0]) + expectErrNotOccured = True + + try: + self.cursor.execute(sql) + except BaseException as e: + expectErrNotOccured = False + self.errno = e.errno + error_info = repr(e) + self.error_info = ','.join(error_info[error_info.index('(')+1:-1].split(",")[:-1]).replace("'","") + # self.error_info = (','.join(error_info.split(",")[:-1]).split("(",1)[1:][0]).replace("'","") + if expectErrNotOccured: + tdLog.exit("%s(%d) failed: sql:%s, expect error not occured" % (caller.filename, caller.lineno, sql)) + else: + self.queryRows = 0 + self.queryCols = 0 + self.queryResult = None + + if expectedErrno != None: + if expectedErrno == self.errno: + tdLog.info("sql:%s, expected errno %s occured" % (sql, expectedErrno)) + else: + tdLog.exit("%s(%d) failed: sql:%s, errno %s occured, but not expected errno %s" % (caller.filename, caller.lineno, sql, self.errno, expectedErrno)) + else: + tdLog.info("sql:%s, expect error occured" % (sql)) + + if expectErrInfo != None: + if expectErrInfo == self.error_info or expectErrInfo in self.error_info: + tdLog.info("sql:%s, expected expectErrInfo %s occured" % (sql, expectErrInfo)) + else: + tdLog.exit("%s(%d) failed: sql:%s, expectErrInfo %s occured, but not expected errno %s" % (caller.filename, caller.lineno, sql, self.error_info, expectErrInfo)) + else: + tdLog.info("sql:%s, expect error occured" % (sql)) + + return self.error_info + + def query(self, sql, row_tag=None, queryTimes=10, count_expected_res=None): + self.sql = sql + i=1 + while i <= queryTimes: + try: + 
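+                # execute and cache the full result set; any exception is
+                # retried (after a 1s pause) up to queryTimes before the last
+                # error is re-raised to the caller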
self.cursor.execute(sql) + self.queryResult = self.cursor.fetchall() + self.queryRows = len(self.queryResult) + self.queryCols = len(self.cursor.description) + + if count_expected_res is not None: + counter = 0 + while count_expected_res != self.queryResult[0][0]: + self.cursor.execute(sql) + self.queryResult = self.cursor.fetchall() + if counter < queryTimes: + counter += 0.5 + time.sleep(0.5) + else: + return False + if row_tag: + return self.queryResult + return self.queryRows + except Exception as e: + tdLog.notice("Try to query again, query times: %d "%i) + if i == queryTimes: + caller = inspect.getframeinfo(inspect.stack()[1][0]) + args = (caller.filename, caller.lineno, sql, repr(e)) + tdLog.notice("%s(%d) failed: sql:%s, %s" % args) + raise Exception(repr(e)) + i+=1 + time.sleep(1) + pass + + + def is_err_sql(self, sql): + err_flag = True + try: + self.cursor.execute(sql) + except BaseException: + err_flag = False + + return False if err_flag else True + + def getVariable(self, search_attr): + ''' + get variable of search_attr access "show variables" + ''' + try: + sql = 'show variables' + param_list = self.query(sql, row_tag=True) + for param in param_list: + if param[0] == search_attr: + return param[1], param_list + except Exception as e: + caller = inspect.getframeinfo(inspect.stack()[1][0]) + args = (caller.filename, caller.lineno, sql, repr(e)) + tdLog.notice("%s(%d) failed: sql:%s, %s" % args) + raise Exception(repr(e)) + + def getColNameList(self, sql, col_tag=None): + self.sql = sql + try: + col_name_list = [] + col_type_list = [] + self.cursor.execute(sql) + for query_col in self.cursor.description: + col_name_list.append(query_col[0]) + col_type_list.append(query_col[1]) + except Exception as e: + caller = inspect.getframeinfo(inspect.stack()[1][0]) + args = (caller.filename, caller.lineno, sql, repr(e)) + tdLog.notice("%s(%d) failed: sql:%s, %s" % args) + raise Exception(repr(e)) + if col_tag: + return col_name_list, col_type_list + return col_name_list + + def waitedQuery(self, sql, expectRows, timeout): + tdLog.info("sql: %s, try to retrieve %d rows in %d seconds" % (sql, expectRows, timeout)) + self.sql = sql + try: + for i in range(timeout): + self.cursor.execute(sql) + self.queryResult = self.cursor.fetchall() + self.queryRows = len(self.queryResult) + self.queryCols = len(self.cursor.description) + tdLog.info("sql: %s, try to retrieve %d rows,get %d rows" % (sql, expectRows, self.queryRows)) + if self.queryRows >= expectRows: + return (self.queryRows, i) + time.sleep(1) + except Exception as e: + caller = inspect.getframeinfo(inspect.stack()[1][0]) + args = (caller.filename, caller.lineno, sql, repr(e)) + tdLog.notice("%s(%d) failed: sql:%s, %s" % args) + raise Exception(repr(e)) + return (self.queryRows, timeout) + + def getRows(self): + return self.queryRows + + def checkRows(self, expectRows): + if self.queryRows == expectRows: + tdLog.info("sql:%s, queryRows:%d == expect:%d" % (self.sql, self.queryRows, expectRows)) + return True + else: + caller = inspect.getframeinfo(inspect.stack()[1][0]) + args = (caller.filename, caller.lineno, self.sql, self.queryRows, expectRows) + tdLog.exit("%s(%d) failed: sql:%s, queryRows:%d != expect:%d" % args) + + def checkRows_range(self, excepte_row_list): + if self.queryRows in excepte_row_list: + tdLog.info(f"sql:{self.sql}, queryRows:{self.queryRows} in expect:{excepte_row_list}") + return True + else: + caller = inspect.getframeinfo(inspect.stack()[1][0]) + tdLog.exit(f"{caller.filename}({caller.lineno}) failed: 
+ def checkRows_range(self, expect_row_list):
+ if self.queryRows in expect_row_list:
+ tdLog.info(f"sql:{self.sql}, queryRows:{self.queryRows} in expect:{expect_row_list}")
+ return True
+ else:
+ caller = inspect.getframeinfo(inspect.stack()[1][0])
+ tdLog.exit(f"{caller.filename}({caller.lineno}) failed: sql:{self.sql}, queryRows:{self.queryRows} not in expect:{expect_row_list}")
+
+ def checkCols(self, expectCols):
+ if self.queryCols == expectCols:
+ tdLog.info("sql:%s, queryCols:%d == expect:%d" % (self.sql, self.queryCols, expectCols))
+ else:
+ caller = inspect.getframeinfo(inspect.stack()[1][0])
+ args = (caller.filename, caller.lineno, self.sql, self.queryCols, expectCols)
+ tdLog.exit("%s(%d) failed: sql:%s, queryCols:%d != expect:%d" % args)
+
+ def checkRowCol(self, row, col):
+ caller = inspect.getframeinfo(inspect.stack()[2][0])
+ if row < 0:
+ args = (caller.filename, caller.lineno, self.sql, row)
+ tdLog.exit("%s(%d) failed: sql:%s, row:%d is smaller than zero" % args)
+ if col < 0:
+ args = (caller.filename, caller.lineno, self.sql, col)
+ tdLog.exit("%s(%d) failed: sql:%s, col:%d is smaller than zero" % args)
+ if row > self.queryRows:
+ args = (caller.filename, caller.lineno, self.sql, row, self.queryRows)
+ tdLog.exit("%s(%d) failed: sql:%s, row:%d is larger than queryRows:%d" % args)
+ if col > self.queryCols:
+ args = (caller.filename, caller.lineno, self.sql, col, self.queryCols)
+ tdLog.exit("%s(%d) failed: sql:%s, col:%d is larger than queryCols:%d" % args)
+
+ def checkDataType(self, row, col, dataType):
+ self.checkRowCol(row, col)
+ return self.cursor.istype(col, dataType)
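+ # Editor's note on checkData() below: when the expected value is an int and the
+ # column is a TIMESTAMP, the precision is inferred from the digit count of the
+ # literal, e.g. 1600000000000 (13 digits) is compared as ms,
+ # 1600000000000000 (16 digits) as us, and 1600000000000000000 (19 digits) as ns.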
+ def checkData(self, row, col, data, show = False):
+ if row >= self.queryRows:
+ caller = inspect.getframeinfo(inspect.stack()[1][0])
+ args = (caller.filename, caller.lineno, self.sql, row+1, self.queryRows)
+ tdLog.exit("%s(%d) failed: sql:%s, row:%d is larger than queryRows:%d" % args)
+ if col >= self.queryCols:
+ caller = inspect.getframeinfo(inspect.stack()[1][0])
+ args = (caller.filename, caller.lineno, self.sql, col+1, self.queryCols)
+ tdLog.exit("%s(%d) failed: sql:%s, col:%d is larger than queryCols:%d" % args)
+
+ self.checkRowCol(row, col)
+
+ if self.queryResult[row][col] != data:
+ if self.cursor.istype(col, "TIMESTAMP"):
+ # assume the caller wants to check a nanosecond timestamp when a longer string is passed
+ if isinstance(data, str):
+ if len(data) >= 28:
+ if self.queryResult[row][col] == _parse_ns_timestamp(data):
+ if show:
+ tdLog.info("check successful")
+ else:
+ caller = inspect.getframeinfo(inspect.stack()[1][0])
+ args = (caller.filename, caller.lineno, self.sql, row, col, self.queryResult[row][col], data)
+ tdLog.exit("%s(%d) failed: sql:%s row:%d col:%d data:%s != expect:%s" % args)
+ else:
+ if self.queryResult[row][col].astimezone(datetime.timezone.utc) == _parse_datetime(data).astimezone(datetime.timezone.utc):
+ if show:
+ tdLog.info("check successful")
+ else:
+ caller = inspect.getframeinfo(inspect.stack()[1][0])
+ args = (caller.filename, caller.lineno, self.sql, row, col, self.queryResult[row][col], data)
+ tdLog.exit("%s(%d) failed: sql:%s row:%d col:%d data:%s != expect:%s" % args)
+ return
+ elif isinstance(data, int):
+ if len(str(data)) == 16:
+ precision = 'us'
+ elif len(str(data)) == 13:
+ precision = 'ms'
+ elif len(str(data)) == 19:
+ precision = 'ns'
+ else:
+ caller = inspect.getframeinfo(inspect.stack()[1][0])
+ args = (caller.filename, caller.lineno, self.sql, row, col, self.queryResult[row][col], data)
+ tdLog.exit("%s(%d) failed: sql:%s row:%d col:%d data:%s != expect:%s" % args)
+ return
+ success = False
+ if precision == 'ms':
+ dt_obj = self.queryResult[row][col]
+ ts = int(int((dt_obj-datetime.datetime.fromtimestamp(0,dt_obj.tzinfo)).total_seconds())*1000) + int(dt_obj.microsecond/1000)
+ if ts == data:
+ success = True
+ elif precision == 'us':
+ dt_obj = self.queryResult[row][col]
+ ts = int(int((dt_obj-datetime.datetime.fromtimestamp(0,dt_obj.tzinfo)).total_seconds())*1e6) + int(dt_obj.microsecond)
+ if ts == data:
+ success = True
+ elif precision == 'ns':
+ if data == self.queryResult[row][col]:
+ success = True
+ if success:
+ if show:
+ tdLog.info("check successful")
+ else:
+ caller = inspect.getframeinfo(inspect.stack()[1][0])
+ args = (caller.filename, caller.lineno, self.sql, row, col, self.queryResult[row][col], data)
+ tdLog.exit("%s(%d) failed: sql:%s row:%d col:%d data:%s != expect:%s" % args)
+ return
+ elif isinstance(data, datetime.datetime):
+ delt_data = data - datetime.datetime.fromtimestamp(0, data.tzinfo)
+ delt_result = self.queryResult[row][col] - datetime.datetime.fromtimestamp(0, self.queryResult[row][col].tzinfo)
+ if delt_data == delt_result:
+ if show:
+ tdLog.info("check successful")
+ else:
+ caller = inspect.getframeinfo(inspect.stack()[1][0])
+ args = (caller.filename, caller.lineno, self.sql, row, col, self.queryResult[row][col], data)
+ tdLog.exit("%s(%d) failed: sql:%s row:%d col:%d data:%s != expect:%s" % args)
+ return
+ else:
+ caller = inspect.getframeinfo(inspect.stack()[1][0])
+ args = (caller.filename, caller.lineno, self.sql, row, col, self.queryResult[row][col], data)
+ tdLog.exit("%s(%d) failed: sql:%s row:%d col:%d data:%s != expect:%s" % args)
+
+ if str(self.queryResult[row][col]) == str(data):
+ if show:
+ tdLog.info("check successful")
+ return
+ elif isinstance(data, float):
+ if abs(data) >= 1 and abs((self.queryResult[row][col] - data) / data) <= 0.000001:
+ if show:
+ tdLog.info("check successful")
+ elif abs(data) < 1 and abs(self.queryResult[row][col] - data) <= 0.000001:
+ if show:
+ tdLog.info("check successful")
+ else:
+ caller = inspect.getframeinfo(inspect.stack()[1][0])
+ args = (caller.filename, caller.lineno, self.sql, row, col, self.queryResult[row][col], data)
+ tdLog.exit("%s(%d) failed: sql:%s row:%d col:%d data:%s != expect:%s" % args)
+ return
+ else:
+ caller = inspect.getframeinfo(inspect.stack()[1][0])
+ args = (caller.filename, caller.lineno, self.sql, row, col, self.queryResult[row][col], data)
+ tdLog.exit("%s(%d) failed: sql:%s row:%d col:%d data:%s != expect:%s" % args)
+
+ if show:
+ tdLog.info("check successful")
+
+ # return True/False instead of exiting; no log output
+ def checkRowColNoExit(self, row, col):
+ if row < 0 or col < 0:
+ return False
+ if row > self.queryRows:
+ return False
+ if col > self.queryCols:
+ return False
+ return True
+ # return True/False instead of exiting; no log output
+ def checkDataNoExit(self, row, col, data):
+ if self.checkRowColNoExit(row, col) == False:
+ return False
+ if self.queryResult[row][col] != data:
+ if self.cursor.istype(col, "TIMESTAMP"):
+ # assume the caller wants to check a nanosecond timestamp when a longer string is passed
+ if isinstance(data, str) and len(data) >= 28:
+ if pd.to_datetime(self.queryResult[row][col]) == pd.to_datetime(data):
+ return True
+ else:
+ if self.queryResult[row][col] == _parse_datetime(data):
+ return True
+ return False
+
+ if str(self.queryResult[row][col]) == str(data):
+ return True
+ elif isinstance(data, float):
+ if abs(data) >= 1 and abs((self.queryResult[row][col] - data) / data) <= 0.000001:
+ return True
+ elif abs(data) < 1 and abs(self.queryResult[row][col] - data) <= 0.000001:
+ return True
+ else:
+ return False
+ else:
+ return False
+
+ return True
+
+ # execute sql in a loop, sleeping waitTime between tries; stop as soon as checkData passes
+ def checkDataLoop(self, row, col, data, sql, loopCount, waitTime):
+ # loop until checkDataNoExit() returns True, then let checkData() log the result
+ for i in range(loopCount):
+ self.query(sql)
+ if self.checkDataNoExit(row, col, data):
+ self.checkData(row, col, data)
+ return
+ time.sleep(waitTime)
+
+ # last check
+ self.query(sql)
+ self.checkData(row, col, data)
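+ # Illustrative usage (editor's sketch): poll until a value converges, e.g. wait
+ # up to 10 x 0.5s for row 0, col 1 of a hypothetical query to become 3:
+ #
+ # tdSql.checkDataLoop(0, 1, 3, "select count(*), last(c1) from db.tb", 10, 0.5)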
+ def getData(self, row, col):
+ self.checkRowCol(row, col)
+ return self.queryResult[row][col]
+
+ def getResult(self, sql):
+ self.sql = sql
+ try:
+ self.cursor.execute(sql)
+ self.queryResult = self.cursor.fetchall()
+ except Exception as e:
+ caller = inspect.getframeinfo(inspect.stack()[1][0])
+ args = (caller.filename, caller.lineno, sql, repr(e))
+ tdLog.notice("%s(%d) failed: sql:%s, %s" % args)
+ raise Exception(repr(e))
+ return self.queryResult
+
+ def executeTimes(self, sql, times):
+ for i in range(times):
+ try:
+ return self.cursor.execute(sql)
+ except BaseException:
+ time.sleep(1)
+ continue
+
+ def execute(self, sql, queryTimes=30, show=False):
+ self.sql = sql
+ if show:
+ tdLog.info(sql)
+ i = 1
+ while i <= queryTimes:
+ try:
+ self.affectedRows = self.cursor.execute(sql)
+ return self.affectedRows
+ except Exception as e:
+ tdLog.notice("Try to execute sql again, query times: %d " % i)
+ if i == queryTimes:
+ caller = inspect.getframeinfo(inspect.stack()[1][0])
+ args = (caller.filename, caller.lineno, sql, repr(e))
+ tdLog.notice("%s(%d) failed: sql:%s, %s" % args)
+ raise Exception(repr(e))
+ i += 1
+ time.sleep(1)
+
+ def checkAffectedRows(self, expectAffectedRows):
+ if self.affectedRows != expectAffectedRows:
+ caller = inspect.getframeinfo(inspect.stack()[1][0])
+ args = (caller.filename, caller.lineno, self.sql, self.affectedRows, expectAffectedRows)
+ tdLog.exit("%s(%d) failed: sql:%s, affectedRows:%d != expect:%d" % args)
+
+ tdLog.info("sql:%s, affectedRows:%d == expect:%d" % (self.sql, self.affectedRows, expectAffectedRows))
+
+ def checkColNameList(self, col_name_list, expect_col_name_list):
+ if col_name_list == expect_col_name_list:
+ tdLog.info("sql:%s, col_name_list:%s == expect_col_name_list:%s" % (self.sql, col_name_list, expect_col_name_list))
+ else:
+ caller = inspect.getframeinfo(inspect.stack()[1][0])
+ args = (caller.filename, caller.lineno, self.sql, col_name_list, expect_col_name_list)
+ tdLog.exit("%s(%d) failed: sql:%s, col_name_list:%s != expect_col_name_list:%s" % args)
+
+ def __check_equal(self, elm, expect_elm):
+ if elm == expect_elm:
+ return True
+ if type(elm) in (list, tuple) and type(expect_elm) in (list, tuple):
+ if len(elm) != len(expect_elm):
+ return False
+ if len(elm) == 0:
+ return True
+ for i in range(len(elm)):
+ flag = self.__check_equal(elm[i], expect_elm[i])
+ if not flag:
+ return False
+ return True
+ return False
+
+ def checkEqual(self, elm, expect_elm):
+ if elm == expect_elm:
+ tdLog.info("sql:%s, elm:%s == expect_elm:%s" % (self.sql, elm, expect_elm))
+ return
+ if self.__check_equal(elm, expect_elm):
+ tdLog.info("sql:%s, elm:%s == expect_elm:%s" % (self.sql, elm, expect_elm))
+ return
+
+ caller = inspect.getframeinfo(inspect.stack()[1][0])
+ args = (caller.filename, caller.lineno, self.sql, elm, expect_elm)
+ raise Exception("%s(%d) failed: sql:%s, elm:%s != expect_elm:%s" % args)
+
+ def checkNotEqual(self, elm, expect_elm):
+ if elm != expect_elm:
+ tdLog.info("sql:%s, elm:%s != expect_elm:%s" % (self.sql, elm, expect_elm))
+ else:
+ caller = inspect.getframeinfo(inspect.stack()[1][0])
+ args = (caller.filename, caller.lineno, self.sql, elm, expect_elm)
+ raise Exception("%s(%d) failed: sql:%s, elm:%s == expect_elm:%s" % args)
+
+ def get_times(self, time_str, precision="ms"):
+ caller = inspect.getframeinfo(inspect.stack()[1][0])
+ if time_str[-1] not in TAOS_TIME_INIT:
+ tdLog.exit(f"{caller.filename}({caller.lineno}) failed: {time_str} is not a standard taos time unit")
+ if precision not in TAOS_PRECISION:
+ tdLog.exit(f"{caller.filename}({caller.lineno}) failed: {precision} is not a standard taos time precision")
+
+ if time_str[-1] == TAOS_TIME_INIT[0]:
+ times = int(time_str[:-1]) * TIME_NS
+ if time_str[-1] == TAOS_TIME_INIT[1]:
+ times = int(time_str[:-1]) * TIME_US
+ if time_str[-1] == TAOS_TIME_INIT[2]:
+ times = int(time_str[:-1]) * TIME_MS
+ if time_str[-1] == TAOS_TIME_INIT[3]:
+ times = int(time_str[:-1]) * TIME_S
+ if time_str[-1] == TAOS_TIME_INIT[4]:
+ times = int(time_str[:-1]) * TIME_M
+ if time_str[-1] == TAOS_TIME_INIT[5]:
+ times = int(time_str[:-1]) * TIME_H
+ if time_str[-1] == TAOS_TIME_INIT[6]:
+ times = int(time_str[:-1]) * TIME_D
+ if time_str[-1] == TAOS_TIME_INIT[7]:
+ times = int(time_str[:-1]) * TIME_W
+ if time_str[-1] == TAOS_TIME_INIT[8]:
+ times = int(time_str[:-1]) * TIME_N
+ if time_str[-1] == TAOS_TIME_INIT[9]:
+ times = int(time_str[:-1]) * TIME_Y
+
+ if precision == "ms":
+ return int(times)
+ elif precision == "us":
+ return int(times*1000)
+ elif precision == "ns":
+ return int(times*1000*1000)
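+ # Worked example for get_times() (editor's sketch, assuming util.constant
+ # defines TIME_M = 60 * 1000 ms): get_times("10m") -> 10 * TIME_M = 600000 (ms),
+ # and get_times("10m", "us") -> 600000 * 1000 = 600000000.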
+ def get_type(self, col):
+ if self.cursor.istype(col, "BOOL"):
+ return "BOOL"
+ if self.cursor.istype(col, "INT"):
+ return "INT"
+ if self.cursor.istype(col, "BIGINT"):
+ return "BIGINT"
+ if self.cursor.istype(col, "TINYINT"):
+ return "TINYINT"
+ if self.cursor.istype(col, "SMALLINT"):
+ return "SMALLINT"
+ if self.cursor.istype(col, "FLOAT"):
+ return "FLOAT"
+ if self.cursor.istype(col, "DOUBLE"):
+ return "DOUBLE"
+ if self.cursor.istype(col, "BINARY"):
+ return "BINARY"
+ if self.cursor.istype(col, "NCHAR"):
+ return "NCHAR"
+ if self.cursor.istype(col, "TIMESTAMP"):
+ return "TIMESTAMP"
+ if self.cursor.istype(col, "JSON"):
+ return "JSON"
+ if self.cursor.istype(col, "TINYINT UNSIGNED"):
+ return "TINYINT UNSIGNED"
+ if self.cursor.istype(col, "SMALLINT UNSIGNED"):
+ return "SMALLINT UNSIGNED"
+ if self.cursor.istype(col, "INT UNSIGNED"):
+ return "INT UNSIGNED"
+ if self.cursor.istype(col, "BIGINT UNSIGNED"):
+ return "BIGINT UNSIGNED"
+
+ def taosdStatus(self, state):
+ tdLog.sleep(5)
+ pstate = 0
+ for i in range(30):
+ pstate = 0
+ pl = psutil.pids()
+ for pid in pl:
+ try:
+ if psutil.Process(pid).name() == 'taosd':
+ tdLog.info('taosd is already running')
+ pstate = 1
+ break
+ except psutil.NoSuchProcess:
+ pass
+ if pstate == state:
+ break
+ if state or pstate:
+ tdLog.sleep(1)
+ continue
+ pstate = 0
+ break
+
+ args = (pstate, state)
+ if pstate == state:
+ tdLog.info("taosd state is %d == expect:%d" % args)
+ else:
+ tdLog.exit("taosd state is %d != expect:%d" % args)
+
+ def haveFile(self, dir, state):
+ if os.path.exists(dir) and os.path.isdir(dir):
+ if not os.listdir(dir):
+ if state:
+ tdLog.exit("dir: %s is empty, expect: not empty" % dir)
+ else:
+ tdLog.info("dir: %s is empty, expect: empty" % dir)
+ else:
+ if state:
+ tdLog.info("dir: %s is not empty, expect: not empty" % dir)
+ else:
+ tdLog.exit("dir: %s is not empty, expect: empty" % dir)
+ else:
+ tdLog.exit("dir: %s doesn't exist" % dir)
+
+ def createDir(self, dir):
+ if os.path.exists(dir):
+ shutil.rmtree(dir)
+ tdLog.info("dir: %s is removed" % dir)
+ os.makedirs(dir, 0o755)
+ tdLog.info("dir: %s is created" % dir)
+
+tdSql = TDSql()
diff --git a/tests/army/frame/sqlset.py b/tests/army/frame/sqlset.py
new file mode 100644
index 0000000000..3c1b6cd7f7
--- /dev/null
+++ b/tests/army/frame/sqlset.py
@@ -0,0 +1,70 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+from util.sql import tdSql
+
+class TDSetSql:
+ def init(self, conn, logSql):
+ self.stbname = 'stb'
+
+ def set_create_normaltable_sql(self, ntbname='ntb',
+ column_dict={'ts':'timestamp','col1':'tinyint','col2':'smallint','col3':'int','col4':'bigint','col5': 'int unsigned','col6': 'tinyint unsigned','col7': 'smallint unsigned',
+ 'col8': 'int unsigned','col9': 'bigint unsigned','col10': 'float','col11': 'double','col12': 'bool','col13': 'binary(20)','col14': 'nchar(20)'}):
+ column_sql = ''
+ for k, v in column_dict.items():
+ column_sql += f"{k} {v},"
+ create_ntb_sql = f'create table {ntbname} ({column_sql[:-1]})'
+ return create_ntb_sql
+
+ def set_create_stable_sql(self, stbname='stb',
+ column_dict={'ts':'timestamp','col1':'tinyint','col2':'smallint','col3':'int','col4':'bigint','col5': 'int unsigned','col6': 'tinyint unsigned','col7': 'smallint unsigned',
+ 'col8': 'int unsigned','col9': 'bigint unsigned','col10': 'float','col11': 'double','col12': 'bool','col13': 'binary(20)','col14': 'nchar(20)'},
+ tag_dict={'ts_tag':'timestamp','t1':'tinyint','t2':'smallint','t3':'int','t4':'bigint','t5': 'int unsigned','t6': 'tinyint unsigned','t7': 'smallint unsigned',
+ 't8': 'int unsigned','t9': 'bigint unsigned','t10': 'float','t11': 'double','t12': 'bool','t13': 'binary(20)','t14': 'nchar(20)'}):
+ column_sql = ''
+ tag_sql = ''
+ for k,v in column_dict.items():
+ column_sql += f"{k} {v},"
+ for k,v in tag_dict.items():
+ tag_sql += f"{k} {v},"
+ create_stb_sql = f'create table {stbname} ({column_sql[:-1]}) tags({tag_sql[:-1]})'
+ return create_stb_sql
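+ # Illustrative usage (editor's sketch): build and execute the default
+ # super-table DDL; the name 'stb1' is hypothetical:
+ #
+ # setsql = TDSetSql()
+ # tdSql.execute(setsql.set_create_stable_sql('stb1'))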
+ def set_insertsql(self, column_dict, tbname, binary_str=None, nchar_str=None):
+ sql = ''
+ for k, v in column_dict.items():
+ if v.lower() == 'timestamp' or v.lower() == 'tinyint' or v.lower() == 'smallint' or v.lower() == 'int' or v.lower() == 'bigint' or \
+ v.lower() == 'tinyint unsigned' or v.lower() == 'smallint unsigned' or v.lower() == 'int unsigned' or v.lower() == 'bigint unsigned' or v.lower() == 'bool':
+ sql += '%d,'
+ elif v.lower() == 'float' or v.lower() == 'double':
+ sql += '%f,'
+ elif 'binary' in v.lower():
+ sql += f'"{binary_str}%d",'
+ elif 'nchar' in v.lower():
+ sql += f'"{nchar_str}%d",'
+ return f'insert into {tbname} values({sql[:-1]})'
+
+ def insert_values(self, column_dict, i, insert_sql, insert_list, ts):
+ for k, v in column_dict.items():
+ if v.lower() in ['tinyint', 'smallint', 'int', 'bigint', 'tinyint unsigned', 'smallint unsigned', 'int unsigned', 'bigint unsigned'] or \
+ 'binary' in v.lower() or 'nchar' in v.lower():
+ insert_list.append(0 + i)
+ elif v.lower() == 'float' or v.lower() == 'double':
+ insert_list.append(0.1 + i)
+ elif v.lower() == 'bool':
+ insert_list.append(i % 2)
+ elif v.lower() == 'timestamp':
+ insert_list.append(ts + i)
+ tdSql.execute(insert_sql % tuple(insert_list))
\ No newline at end of file
diff --git a/tests/army/frame/sub.py b/tests/army/frame/sub.py
new file mode 100644
index 0000000000..664d830b86
--- /dev/null
+++ b/tests/army/frame/sub.py
@@ -0,0 +1,44 @@
+###################################################################
+# Copyright (c) 2020 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import os
+import time
+import datetime
+from util.log import *
+
+class TDSub:
+ def __init__(self):
+ self.consumedRows = 0
+ self.consumedCols = 0
+
+ def init(self, sub):
+ self.sub = sub
+
+ def close(self, keepProgress):
+ self.sub.close(keepProgress)
+
+ def consume(self):
+ self.result = self.sub.consume()
+ self.result.fetch_all()
+ self.consumedRows = self.result.row_count
+ self.consumedCols = self.result.field_count
+ return self.consumedRows
+
+ def checkRows(self, expectRows):
+ if self.consumedRows != expectRows:
+ tdLog.exit("consumed rows:%d != expect:%d" % (self.consumedRows, expectRows))
+ tdLog.info("consumed rows:%d == expect:%d" % (self.consumedRows, expectRows))
+
+
+tdSub = TDSub()
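+# Illustrative usage (editor's sketch): wrap a subscription object obtained from
+# the connector, then consume and verify; the subscribe() call is hypothetical:
+#
+# tdSub.init(conn.subscribe(True, "topic1", "select * from db.tb;", 1000))
+# tdSub.consume()
+# tdSub.checkRows(100)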
"influxdb" : { + "enable" : True + }, + "statsd" : { + "enable" : True + }, + "collectd" : { + "enable" : True + }, + "opentsdb_telnet" : { + "enable" : True + }, + "node_exporter" : { + "enable" : True + }, + "prometheus" : { + "enable" : True + }, + } + # TODO: add taosadapter env: + # 1. init cfg.toml.dict :OK + # 2. dump dict to toml : OK + # 3. update cfg.toml.dict :OK + # 4. check adapter exists : OK + # 5. deploy adapter cfg : OK + # 6. adapter start : OK + # 7. adapter stop + + def init(self, path, remoteIP=""): + self.path = path + self.remoteIP = remoteIP + binPath = get_path() + "/../../../" + binPath = os.path.realpath(binPath) + + if path == "": + self.path = os.path.abspath(binPath + "../../") + else: + self.path = os.path.realpath(path) + + if self.remoteIP: + try: + self.config = eval(remoteIP) + self.remote_conn = Connection(host=self.config["host"], port=self.config["port"], user=self.config["user"], connect_kwargs={'password':self.config["password"]}) + except Exception as e: + tdLog.notice(e) + + def update_cfg(self, update_dict :dict): + if not isinstance(update_dict, dict): + return + if "log" in update_dict and "path" in update_dict["log"]: + del update_dict["log"]["path"] + for key, value in update_dict.items(): + if key in ["cors", "pool", "ssl", "log", "monitor", "opentsdb", "influxdb", "statsd", "collectd", "opentsdb_telnet", "node_exporter", "prometheus"]: + if isinstance(value, dict): + for k, v in value.items(): + self.taosadapter_cfg_dict[key][k] = v + else: + self.taosadapter_cfg_dict[key] = value + + def check_adapter(self): + if get_path(tool="taosadapter"): + return False + else: + return True + + def remote_exec(self, updateCfgDict, execCmd): + remoteCfgDict = copy.deepcopy(updateCfgDict) + if "log" in remoteCfgDict and "path" in remoteCfgDict["log"]: + del remoteCfgDict["log"]["path"] + + remoteCfgDictStr = base64.b64encode(toml.dumps(remoteCfgDict).encode()).decode() + execCmdStr = base64.b64encode(execCmd.encode()).decode() + with self.remote_conn.cd((self.config["path"]+sys.path[0].replace(self.path, '')).replace('\\','/')): + self.remote_conn.run(f"python3 ./test.py -D {remoteCfgDictStr} -e {execCmdStr}" ) + + def cfg(self, option, value): + cmd = f"echo {option} = {value} >> {self.cfg_path}" + if os.system(cmd) != 0: + tdLog.exit(cmd) + + def deploy(self, *update_cfg_dict): + self.log_dir = os.path.join(self.path,"sim","dnode1","log") + self.cfg_dir = os.path.join(self.path,"sim","dnode1","cfg") + self.cfg_path = os.path.join(self.cfg_dir,"taosadapter.toml") + + cmd = f"touch {self.cfg_path}" + if os.system(cmd) != 0: + tdLog.exit(cmd) + + self.taosadapter_cfg_dict["log"]["path"] = self.log_dir + if bool(update_cfg_dict): + self.update_cfg(update_dict=update_cfg_dict) + + if (self.remoteIP == ""): + dict2toml(self.taosadapter_cfg_dict, self.cfg_path) + else: + self.remote_exec(self.taosadapter_cfg_dict, "tAdapter.deploy(update_cfg_dict)") + + self.deployed = 1 + + tdLog.debug(f"taosadapter is deployed and configured by {self.cfg_path}") + + def start(self): + bin_path = get_path(tool="taosadapter") + + if (bin_path == ""): + tdLog.exit("taosadapter not found!") + else: + tdLog.info(f"taosadapter found: {bin_path}") + + if platform.system().lower() == 'windows': + cmd = f"mintty -h never {bin_path} -c {self.cfg_path}" + else: + cmd = f"nohup {bin_path} -c {self.cfg_path} > /dev/null & " + + if self.remoteIP: + self.remote_exec(self.taosadapter_cfg_dict, 
f"tAdapter.deployed=1\ntAdapter.log_dir={self.log_dir}\ntAdapter.cfg_dir={self.cfg_dir}\ntAdapter.start()") + self.running = 1 + else: + os.system(f"rm -rf {self.log_dir}{os.sep}taosadapter*") + if os.system(cmd) != 0: + tdLog.exit(cmd) + self.running = 1 + tdLog.debug(f"taosadapter is running with {cmd} " ) + + time.sleep(0.1) + + taosadapter_port = self.taosadapter_cfg_dict["port"] + for i in range(5): + ip = 'localhost' + if self.remoteIP != "": + ip = self.remoteIP + url = f'http://{ip}:{taosadapter_port}/-/ping' + try: + r = requests.get(url) + if r.status_code == 200: + tdLog.info(f"the taosadapter has been started, using port:{taosadapter_port}") + break + except Exception: + tdLog.info(f"the taosadapter do not started!!!") + time.sleep(1) + + def start_taosadapter(self): + """ + use this method, must deploy taosadapter + """ + bin_path = get_path(tool="taosadapter") + + if (bin_path == ""): + tdLog.exit("taosadapter not found!") + else: + tdLog.info(f"taosadapter found: {bin_path}") + + if self.deployed == 0: + tdLog.exit("taosadapter is not deployed") + + if platform.system().lower() == 'windows': + cmd = f"mintty -h never {bin_path} -c {self.cfg_dir}" + else: + cmd = f"nohup {bin_path} -c {self.cfg_path} > /dev/null & " + + if self.remoteIP: + self.remote_exec(self.taosadapter_cfg_dict, f"tAdapter.deployed=1\ntAdapter.log_dir={self.log_dir}\ntAdapter.cfg_dir={self.cfg_dir}\ntAdapter.start()") + self.running = 1 + else: + if os.system(cmd) != 0: + tdLog.exit(cmd) + self.running = 1 + tdLog.debug(f"taosadapter is running with {cmd} " ) + + time.sleep(0.1) + + def stop(self, force_kill=False): + signal = "-9" if force_kill else "-15" + if self.remoteIP: + self.remote_exec(self.taosadapter_cfg_dict, "tAdapter.running=1\ntAdapter.stop()") + tdLog.info("stop taosadapter") + return + toBeKilled = "taosadapter" + if platform.system().lower() == 'windows': + psCmd = f"ps -ef|grep -w {toBeKilled}| grep -v grep | awk '{{print $2}}'" + processID = subprocess.check_output(psCmd, shell=True).decode("utf-8").strip() + while(processID): + killCmd = "kill %s %s > nul 2>&1" % (signal, processID) + os.system(killCmd) + time.sleep(1) + processID = subprocess.check_output(psCmd, shell=True).decode("utf-8").strip() + self.running = 0 + tdLog.debug(f"taosadapter is stopped by kill {signal}") + + else: + if self.running != 0: + psCmd = f"ps -ef|grep -w {toBeKilled}| grep -v grep | awk '{{print $2}}'" + processID = subprocess.check_output(psCmd, shell=True).decode("utf-8").strip() + while(processID): + killCmd = "kill %s %s > /dev/null 2>&1" % (signal, processID) + os.system(killCmd) + time.sleep(1) + processID = subprocess.check_output(psCmd, shell=True).decode("utf-8").strip() + port = 6041 + fuserCmd = f"fuser -k -n tcp {port} > /dev/null" + os.system(fuserCmd) + self.running = 0 + tdLog.debug(f"taosadapter is stopped by kill {signal}") + + + +tAdapter = TAdapter() diff --git a/tests/army/frame/taosdemoCfg.py b/tests/army/frame/taosdemoCfg.py new file mode 100644 index 0000000000..f708d303de --- /dev/null +++ b/tests/army/frame/taosdemoCfg.py @@ -0,0 +1,465 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
diff --git a/tests/army/frame/taosdemoCfg.py b/tests/army/frame/taosdemoCfg.py
new file mode 100644
index 0000000000..f708d303de
--- /dev/null
+++ b/tests/army/frame/taosdemoCfg.py
@@ -0,0 +1,465 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import os
+import time
+import datetime
+import inspect
+import psutil
+import shutil
+import json
+from util.log import *
+from multiprocessing import cpu_count
+
+
+# TODO: fully test the function. Handle exceptions.
+# Handle json format not accepted by taosdemo
+
+### How to use TaosdemoCfg:
+# Before you start:
+# Make sure you understand how taosdemo's JSON file is structured. Because the Python
+# objects used here do not nest dictionaries inside dictionaries, the config is split
+# into separate parts. Please make sure you understand which dictionary represents
+# which part of which type of file; this module will reassemble the parts when
+# creating the JSON file.
+#
+# Basic use example
+# step 1: use self.append_sql_stb() to append the insert/query/subscribe dictionary into the module;
+# you can append many insert/query/subscribe dictionaries, but pay attention to taosdemo's limits
+# step 2: use the alter functions to alter the specific config
+# step 3: use the generation functions to generate the files
+#
+# step 1 and step 2 can be replaced by using the import functions
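+# A minimal sketch of the three steps above (editor's illustration; the database
+# name and output path are hypothetical):
+#
+# taosdemoCfg.append_sql_stb('insert_stbs', taosdemoCfg.get_template('insert_stbs'))
+# taosdemoCfg.alter_db('name', 'testdb')
+# cfg_file = taosdemoCfg.generate_insert_cfg('/tmp', 'case1')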
"childtable_offset": 0, + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 32766, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT", "count": 1}], + "tags": [{"type": "BIGINT", "count": 1}] + } + + self.tb_query_sql = [] + self.tb_query_sql_template = { + "sql": "select last_row(*) from stb_0 ", + "result": "temp/query_res0.txt" + } + + self.stb_query_sql = [] + self.stb_query_sql_template = { + "sql": "select last_row(ts) from xxxx", + "result": "temp/query_res2.txt" + } + + self.tb_sub_sql = [] + self.tb_sub_sql_template = { + "sql": "select * from stb_0 ;", + "result": "temp/subscribe_res0.txt" + } + + self.stb_sub_sql = [] + self.stb_sub_sql_template = { + "sql": "select * from xxxx where ts > '2021-02-25 11:35:00.000' ;", + "result": "temp/subscribe_res1.txt" + } + + # The following functions are import functions for different dicts and lists + # except import_sql, all other import functions will a dict and overwrite the origional dict + # dict_in: the dict used to overwrite the target + def import_insert_cfg(self, dict_in): + self.insert_cfg = dict_in + + def import_db(self, dict_in): + self.db = dict_in + + def import_stbs(self, dict_in): + self.stbs = dict_in + + def import_query_cfg(self, dict_in): + self.query_cfg = dict_in + + def import_table_query(self, dict_in): + self.table_query = dict_in + + def import_stable_query(self, dict_in): + self.stable_query = dict_in + + def import_sub_cfg(self, dict_in): + self.sub_cfg = dict_in + + def import_table_sub(self, dict_in): + self.table_sub = dict_in + + def import_stable_sub(self, dict_in): + self.stable_sub = dict_in + + def import_sql(self, Sql_in, mode): + """used for importing the sql later used + + Args: + Sql_in (dict): the imported sql dict + mode (str): the sql storing location within TDTaosdemoCfg + format: 'fileType_tableType' + fileType: query, sub + tableType: table, stable + """ + if mode == 'query_table': + self.tb_query_sql = Sql_in + elif mode == 'query_stable': + self.stb_query_sql = Sql_in + elif mode == 'sub_table': + self.tb_sub_sql = Sql_in + elif mode == 'sub_stable': + self.stb_sub_sql = Sql_in + # import functions end + + # The following functions are alter functions for different dicts + # Args: + # key: the key that is going to be modified + # value: the value of the key that is going to be modified + # if key = 'databases' | "specified_table_query" | "super_table_query"|"sqls" + # value will not be used + + def alter_insert_cfg(self, key, value): + + if key == 'databases': + self.insert_cfg[key] = [ + { + 'dbinfo': self.db, + 'super_tables': self.stbs + } + ] + else: + self.insert_cfg[key] = value + + def alter_db(self, key, value): + self.db[key] = value + + def alter_query_tb(self, key, value): + if key == "sqls": + self.table_query[key] = self.tb_query_sql + else: + self.table_query[key] = value + + def alter_query_stb(self, key, value): + if key == "sqls": + self.stable_query[key] = self.stb_query_sql + else: + self.stable_query[key] = value + + def alter_query_cfg(self, key, value): + if key == "specified_table_query": + self.query_cfg["specified_table_query"] = self.table_query + elif key == "super_table_query": + self.query_cfg["super_table_query"] = self.stable_query + else: + self.query_cfg[key] = value + + def alter_sub_cfg(self, key, value): + if key == "specified_table_query": + 
self.sub_cfg["specified_table_query"] = self.table_sub + elif key == "super_table_query": + self.sub_cfg["super_table_query"] = self.stable_sub + else: + self.sub_cfg[key] = value + + def alter_sub_stb(self, key, value): + if key == "sqls": + self.stable_sub[key] = self.stb_sub_sql + else: + self.stable_sub[key] = value + + def alter_sub_tb(self, key, value): + if key == "sqls": + self.table_sub[key] = self.tb_sub_sql + else: + self.table_sub[key] = value + # alter function ends + + # the following functions are for handling the sql lists + def append_sql_stb(self, target, value): + """for appending sql dict into specific sql list + + Args: + target (str): the target append list + format: 'fileType_tableType' + fileType: query, sub + tableType: table, stable + unique: 'insert_stbs' + value (dict): the sql dict going to be appended + """ + if target == 'insert_stbs': + self.stbs.append(value) + elif target == 'query_table': + self.tb_query_sql.append(value) + elif target == 'query_stable': + self.stb_query_sql.append(value) + elif target == 'sub_table': + self.tb_sub_sql.append(value) + elif target == 'sub_stable': + self.stb_sub_sql.append(value) + + def pop_sql_stb(self, target, index): + """for poping a sql dict from specific sql list + + Args: + target (str): the target append list + format: 'fileType_tableType' + fileType: query, sub + tableType: table, stable + unique: 'insert_stbs' + index (int): the sql dict that is going to be popped + """ + if target == 'insert_stbs': + self.stbs.pop(index) + elif target == 'query_table': + self.tb_query_sql.pop(index) + elif target == 'query_stable': + self.stb_query_sql.pop(index) + elif target == 'sub_table': + self.tb_sub_sql.pop(index) + elif target == 'sub_stable': + self.stb_sub_sql.pop(index) + # sql list modification function end + + # The following functions are get functions for different dicts + def get_db(self): + return self.db + + def get_stb(self): + return self.stbs + + def get_insert_cfg(self): + return self.insert_cfg + + def get_query_cfg(self): + return self.query_cfg + + def get_tb_query(self): + return self.table_query + + def get_stb_query(self): + return self.stable_query + + def get_sub_cfg(self): + return self.sub_cfg + + def get_tb_sub(self): + return self.table_sub + + def get_stb_sub(self): + return self.stable_sub + + def get_sql(self, target): + """general get function for all sql lists + + Args: + target (str): the sql list want to get + format: 'fileType_tableType' + fileType: query, sub + tableType: table, stable + unique: 'insert_stbs' + """ + if target == 'query_table': + return self.tb_query_sql + elif target == 'query_stable': + return self.stb_query_sql + elif target == 'sub_table': + return self.tb_sub_sql + elif target == 'sub_stable': + return self.stb_sub_sql + + def get_template(self, target): + """general get function for the default sql template + + Args: + target (str): the sql list want to get + format: 'fileType_tableType' + fileType: query, sub + tableType: table, stable + unique: 'insert_stbs' + """ + if target == 'insert_stbs': + return self.stb_template + elif target == 'query_table': + return self.tb_query_sql_template + elif target == 'query_stable': + return self.stb_query_sql_template + elif target == 'sub_table': + return self.tb_sub_sql_template + elif target == 'sub_stable': + return self.stb_sub_sql_template + else: + print(f'did not find {target}') + + # the folloing are the file generation functions + """defalut document: + generator functio for generating taosdemo json file + will 
+ # the following are the file generation functions
+ """default docstring:
+ generator function for generating a taosdemo json file;
+ will assemble the dicts and dump the final json
+
+ Args:
+ pathName (str): the directory where the json file will be written
+ fileName (str): the name suffix of the json file
+ Returns:
+ str: [pathName]/[filetype]_[fileName].json
+ """
+
+ def generate_insert_cfg(self, pathName, fileName):
+ cfgFileName = f'{pathName}/insert_{fileName}.json'
+ self.alter_insert_cfg('databases', None)
+ with open(cfgFileName, 'w') as file:
+ json.dump(self.insert_cfg, file)
+ return cfgFileName
+
+ def generate_query_cfg(self, pathName, fileName):
+ cfgFileName = f'{pathName}/query_{fileName}.json'
+ self.alter_query_tb('sqls', None)
+ self.alter_query_stb('sqls', None)
+ self.alter_query_cfg('specified_table_query', None)
+ self.alter_query_cfg('super_table_query', None)
+ with open(cfgFileName, 'w') as file:
+ json.dump(self.query_cfg, file)
+ return cfgFileName
+
+ def generate_subscribe_cfg(self, pathName, fileName):
+ cfgFileName = f'{pathName}/subscribe_{fileName}.json'
+ self.alter_sub_tb('sqls', None)
+ self.alter_sub_stb('sqls', None)
+ self.alter_sub_cfg('specified_table_query', None)
+ self.alter_sub_cfg('super_table_query', None)
+ with open(cfgFileName, 'w') as file:
+ json.dump(self.sub_cfg, file)
+ return cfgFileName
+ # file generation functions end
+
+ def drop_cfg_file(self, fileName):
+ os.remove(f'{fileName}')
+
+
+taosdemoCfg = TDTaosdemoCfg()
diff --git a/tests/army/frame/types.py b/tests/army/frame/types.py
new file mode 100644
index 0000000000..218a477026
--- /dev/null
+++ b/tests/army/frame/types.py
@@ -0,0 +1,38 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+from enum import Enum
+
+class TDSmlProtocolType(Enum):
+ '''
+ Schemaless Protocol types
+ 0 - unknown
+ 1 - InfluxDB Line Protocol
+ 2 - OpenTSDB Telnet Protocol
+ 3 - OpenTSDB JSON Protocol
+ '''
+ UNKNOWN = 0
+ LINE = 1
+ TELNET = 2
+ JSON = 3
+
+class TDSmlTimestampType(Enum):
+ NOT_CONFIGURED = 0
+ HOUR = 1
+ MINUTE = 2
+ SECOND = 3
+ MILLI_SECOND = 4
+ MICRO_SECOND = 5
+ NANO_SECOND = 6
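+# Editor's note: these values appear to mirror the schemaless protocol and
+# timestamp constants used by the TDengine Python connector (e.g.
+# TDSmlProtocolType.LINE.value == 1 for the InfluxDB line protocol), but that
+# mapping is an assumption, not verified here.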