diff --git a/tests/pytest/functions/function_arithmetic.py b/tests/pytest/functions/function_arithmetic.py
new file mode 100644
index 0000000000..a2249bab88
--- /dev/null
+++ b/tests/pytest/functions/function_arithmetic.py
@@ -0,0 +1,71 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import taos
+from util.log import *
+from util.cases import *
+from util.sql import *
+import numpy as np
+
+
+class TDTestCase:
+    def init(self, conn, logSql):
+        tdLog.debug("start to execute %s" % __file__)
+        tdSql.init(conn.cursor())
+
+        self.rowNum = 10
+        self.ts = 1537146000000
+
+    def run(self):
+        tdSql.prepare()
+
+        tdSql.execute('''create table test(ts timestamp, col1 int, col2 int) tags(loc nchar(20))''')
+        tdSql.execute("create table test1 using test tags('beijing')")
+        tdSql.execute("create table test2 using test tags('shanghai')")
+        for i in range(self.rowNum):
+            tdSql.execute("insert into test1 values(%d, %d, %d)" % (self.ts + i, i + 1, i + 1))
+            tdSql.execute("insert into test2 values(%d, %d, %d)" % (self.ts + i, i + 1, i + 1))
+
+        # arithmetic verification
+        tdSql.query("select 0.1 + 0.1 from test")
+        tdSql.checkRows(self.rowNum * 2)
+        for i in range(self.rowNum * 2):
+            tdSql.checkData(i, 0, 0.20000000)
+
+        tdSql.query("select 4 * avg(col1) from test")
+        tdSql.checkRows(1)
+        tdSql.checkData(0, 0, 22)
+
+        tdSql.query("select 4 * sum(col1) from test")
+        tdSql.checkRows(1)
+        tdSql.checkData(0, 0, 440)
+
+        tdSql.query("select 4 * avg(col1) * sum(col2) from test")
+        tdSql.checkRows(1)
+        tdSql.checkData(0, 0, 2420)
+
+        tdSql.query("select 4 * avg(col1) * sum(col2) from test group by loc")
+        tdSql.checkRows(2)
+        tdSql.checkData(0, 0, 1210)
+        tdSql.checkData(1, 0, 1210)
+
+        tdSql.error("select avg(col1 * 2) from test group by loc")
+
+
+    def stop(self):
+        tdSql.close()
+        tdLog.success("%s successfully executed" % __file__)
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
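Note on the expected constants above: each of the two subtables is loaded with col1 = col2 = 1..10, so avg(col1) over all 20 rows is 5.5 and sum(col1) is 110. A minimal standalone sketch of the same arithmetic (plain Python, not part of the patch; names are illustrative only):

    # Reproduce the constants asserted in function_arithmetic.py.
    rows = list(range(1, 11)) * 2            # col1/col2 of test1 and test2 combined
    avg_col1 = sum(rows) / len(rows)         # 5.5
    sum_col1 = sum(rows)                     # 110
    assert 4 * avg_col1 == 22                # select 4 * avg(col1) from test
    assert 4 * sum_col1 == 440               # select 4 * sum(col1) from test
    assert 4 * avg_col1 * sum_col1 == 2420   # select 4 * avg(col1) * sum(col2) from test
    per_group = list(range(1, 11))           # rows of a single loc group
    assert 4 * (sum(per_group) / 10) * sum(per_group) == 1210   # group by loc case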
to %d" % (tablesPerThread * threadID, tablesPerThread * (threadID + 1) - 1)) for i in range(tablesPerThread): tableID = threadID * tablesPerThread name = 'beijing' if tableID % 2 == 0 else 'shanghai' - data = "create table test.%s%d using test.meters tags(%d, '%s')" % (self.tableNamePerfix, tableID + i, tableID + i, name) + data = "create table %s.%s%d using %s.meters tags(%d, '%s')" % (self.dbname, self.tableNamePerfix, tableID + i, self.dbname, tableID + i, name) requests.post(self.url, data, headers = self.header) def insertData(self, threadID): @@ -43,17 +46,42 @@ class RestfulInsert: tableID = i + threadID * tablesPerThread start = self.ts for j in range(int(self.recordsPerTable / self.batchSize)): - data = "insert into test.%s%d values" % (self.tableNamePerfix, tableID) + data = "insert into %s.%s%d values" % (self.dbname, self.tableNamePerfix, tableID) + values = [] for k in range(self.batchSize): - data += "(%d, %d, %d, %d)" % (start + j * self.batchSize + k, random.randint(1, 100), random.randint(1, 100), random.randint(1, 100)) + data += "(%d, %d, %d, %d)" % (start + j * self.batchSize + k, random.randint(1, 100), random.randint(1, 100), random.randint(1, 100)) + requests.post(self.url, data, headers = self.header) + + def insertUnlimitedData(self, threadID): + print("thread %d started" % threadID) + tablesPerThread = int (self.numOfTables / self.numOfThreads) + while True: + i = 0 + start = self.ts + + for i in range(tablesPerThread): + tableID = i + threadID * tablesPerThread + + data = "insert into %s.%s%d values" % (self.dbname, self.tableNamePerfix, tableID) + values = [] + for k in range(self.batchSize): + values.append("(%d, %d, %d, %d)" % (start + j * self.batchSize + k, random.randint(1, 100), random.randint(1, 100), random.randint(1, 100))) + + if(self.outOfOrder == False): + for k in range(len(values)): + data += values[k] + else: + random.shuffle(values) + for k in range(len(values)): + data += values[k] requests.post(self.url, data, headers = self.header) def run(self): - data = "drop database if exists test" + data = "drop database if exists %s" % self.dbname requests.post(self.url, data, headers = self.header) - data = "create database test" + data = "create database %s" % self.dbname requests.post(self.url, data, headers = self.header) - data = "create table test.meters(ts timestamp, f1 int, f2 int, f3 int) tags(id int, loc nchar(20))" + data = "create table %s.meters(ts timestamp, f1 int, f2 int, f3 int) tags(id int, loc nchar(20))" % self.dbname requests.post(self.url, data, headers = self.header) threads = [] @@ -70,7 +98,10 @@ class RestfulInsert: threads = [] startTime = time.time() for i in range(self.numOfThreads): - thread = threading.Thread(target=self.insertData, args=(i,)) + if(self.recordsPerTable != -1): + thread = threading.Thread(target=self.insertData, args=(i,)) + else: + thread = threading.Thread(target=self.insertUnlimitedData, args=(i,)) thread.start() threads.append(thread) @@ -78,6 +109,62 @@ class RestfulInsert: threads[i].join() print("inserting %d records takes %d seconds" % (self.numOfTables * self.recordsPerTable, (time.time() - startTime))) -ri = RestfulInsert() -ri.init() +parser = argparse.ArgumentParser() +parser.add_argument( + '-H', + '--host-name', + action='store', + default='127.0.0.1', + type=str, + help='host name to be connected (default: 127.0.0.1)') +parser.add_argument( + '-d', + '--db-name', + action='store', + default='test', + type=str, + help='Database name to be created (default: test)') +parser.add_argument( + '-t', 
+    '--number-of-threads',
+    action='store',
+    default=10,
+    type=int,
+    help='Number of threads to create tables and insert data (default: 10)')
+parser.add_argument(
+    '-T',
+    '--number-of-tables',
+    action='store',
+    default=1000,
+    type=int,
+    help='Number of tables to be created (default: 1000)')
+parser.add_argument(
+    '-r',
+    '--number-of-records',
+    action='store',
+    default=1000,
+    type=int,
+    help='Number of records to be created for each table (default: 1000, -1 for unlimited records)')
+parser.add_argument(
+    '-s',
+    '--batch-size',
+    action='store',
+    default=1000,
+    type=int,
+    help='Number of records inserted per request (default: 1000)')
+parser.add_argument(
+    '-p',
+    '--table-name-prefix',
+    action='store',
+    default='t',
+    type=str,
+    help='Prefix used for generated table names (default: t)')
+parser.add_argument(
+    '-o',
+    '--out-of-order',
+    action='store_true',
+    help='Insert test data with out-of-order (shuffled) timestamps (default: False)')
+
+args = parser.parse_args()
+ri = RestfulInsert(args.host_name, args.db_name, args.number_of_threads, args.number_of_tables, args.number_of_records, args.batch_size, args.table_name_prefix, args.out_of_order)
 ri.run()
\ No newline at end of file
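The argparse block above replaces the previously hard-coded settings, so the script can now be driven from the command line; passing -r -1 makes run() start insertUnlimitedData threads, and -o shuffles each batch before it is posted. A small self-contained sketch of what one out-of-order batch payload looks like (the host, database, table name, and batch size here are made-up illustration values, not part of the patch):

    # Hypothetical invocation of the updated script:
    #   python3 restfulInsert.py -H 192.168.0.10 -d stress -t 8 -T 2000 -r -1 -s 500 -o
    #
    # How one batch is assembled when --out-of-order is set (mirrors insertUnlimitedData):
    import random

    start, batch_size = 1500000000000, 5
    values = ["(%d, %d, %d, %d)" % (start + k, random.randint(1, 100),
                                    random.randint(1, 100), random.randint(1, 100))
              for k in range(batch_size)]
    random.shuffle(values)                                  # skipped when inserting in order
    payload = "insert into stress.t0 values" + "".join(values)
    print(payload)                                          # body POSTed to http://<host>:6041/rest/sql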