From c2fcab0a0d95f15bc736db0cf8bbb76658a44c80 Mon Sep 17 00:00:00 2001 From: tomchon Date: Mon, 7 Jun 2021 20:44:07 +0800 Subject: [PATCH 01/37] [TD-4312]: add testcase of nested query with interval function --- .../pytest/query/nestedQuery/insertData.json | 62 ++++++++++++++ .../query/nestedQuery/nestedQueryJson.py | 81 +++++++++++++++++++ .../pytest/query/nestedQuery/queryInterval.py | 76 +++++++++++++++++ 3 files changed, 219 insertions(+) create mode 100644 tests/pytest/query/nestedQuery/insertData.json create mode 100644 tests/pytest/query/nestedQuery/nestedQueryJson.py create mode 100644 tests/pytest/query/nestedQuery/queryInterval.py diff --git a/tests/pytest/query/nestedQuery/insertData.json b/tests/pytest/query/nestedQuery/insertData.json new file mode 100644 index 0000000000..d4ef8dbe97 --- /dev/null +++ b/tests/pytest/query/nestedQuery/insertData.json @@ -0,0 +1,62 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file":"./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 10, + "num_of_records_per_req": 1000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ms", + "keep": 365, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 1, + "childtable_prefix": "stb0_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 100000, + "childtable_limit": -1, + "childtable_offset": 0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1, + "timestamp_step": 1000, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./tools/taosdemoAllTest/sample.csv", + "tags_file": "", + "columns": [{"type": "INT", "count":1}, {"type": "BINARY", "len": 16, "count":1}, {"type": "BOOL"}], + "tags": [{"type": "TINYINT", "count":1}, {"type": "BINARY", "len": 16, "count":1}] + }] + }] +} diff --git a/tests/pytest/query/nestedQuery/nestedQueryJson.py b/tests/pytest/query/nestedQuery/nestedQueryJson.py new file mode 100644 index 0000000000..36a231a916 --- /dev/null +++ b/tests/pytest/query/nestedQuery/nestedQueryJson.py @@ -0,0 +1,81 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import os +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root)-len("/build/bin")] + break + return buildPath + + def run(self): + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosd not found!") + else: + tdLog.info("taosd found in %s" % buildPath) + binPath = buildPath+ "/build/bin/" + + # insert: create one or mutiple tables per sql and insert multiple rows per sql + os.system("%staosdemo -f query/nestedQuery/insertData.json -y " % binPath) + tdSql.execute("use db") + tdSql.query("select count (tbname) from stb0") + tdSql.checkData(0, 0, 1000) + tdSql.query("select count (tbname) from stb1") + tdSql.checkData(0, 0, 1000) + tdSql.query("select count(*) from stb00_0") + tdSql.checkData(0, 0, 100) + tdSql.query("select count(*) from stb0") + tdSql.checkData(0, 0, 100000) + tdSql.query("select count(*) from stb01_1") + tdSql.checkData(0, 0, 200) + tdSql.query("select count(*) from stb1") + tdSql.checkData(0, 0, 200000) + + + + testcaseFilename = os.path.split(__file__)[-1] + os.system("rm -rf ./insert_res.txt") + os.system("rm -rf query/nestedQuery/%s.sql" % testcaseFilename ) + + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/query/nestedQuery/queryInterval.py b/tests/pytest/query/nestedQuery/queryInterval.py new file mode 100644 index 0000000000..fc94cdf4eb --- /dev/null +++ b/tests/pytest/query/nestedQuery/queryInterval.py @@ -0,0 +1,76 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import taos +from util.log import tdLog +from util.cases import tdCases +from util.sql import tdSql +from util.dnodes import tdDnodes +import random + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + self.ts1 = 1593548685000 + self.ts2 = 1593548785000 + + + def run(self): + # tdSql.execute("drop database db ") + tdSql.prepare() + tdSql.execute("create table st (ts timestamp, num int, value int , t_instance int) tags (loc nchar(30))") + number = 20 + for n in range(number): + dt= n*300000 # collecting'frequency is 10s + args1=(self.ts1+dt,n,100+n,10+n) + args2=(self.ts2+dt,n,120+n,15+n) + tdSql.execute("insert into t0 using st tags('beijing') values(%d, %d, %d, %d)" % args1) + tdSql.execute("insert into t1 using st tags('shanghai') values(%d, %d, %d, %d)" % args2) + + + tdSql.query("select avg(value) from st interval(10m)") + print(tdSql.queryResult) + tdSql.checkRows(11) + tdSql.checkData(0, 0, "2020-07-01 04:20:00") + tdSql.query("select avg_val from(select avg(value) as avg_val from st where loc='beijing' interval(10m));") + # tdSql.query("select avg(avg_val) from(select avg(value) as avg_val from st where loc='beijing' interval(10m));") + print(tdSql.queryResult) + tdSql.checkData(0, 0, 109.5) + + + # tdSql.query("select avg(voltage) from st interval(1n, 15d)") + + # tdSql.query("select avg(voltage) from st interval(1n, 15d) group by loc") + + # tdDnodes.stop(1) + # tdDnodes.start(1) + # tdSql.query("select last(*) from t interval(1s)") + + + # tdSql.query("select first(ts),twa(c) from tb interval(14a)") + # tdSql.checkRows(6) + + # tdSql.query("select twa(c) from tb group by c") + # tdSql.checkRows(4) + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) From 3f940428cb8e9f6dd7b9ca69536d7ed907964c04 Mon Sep 17 00:00:00 2001 From: tomchon Date: Wed, 9 Jun 2021 16:25:49 +0800 Subject: [PATCH 02/37] modify nestedquery testcase --- .../pytest/query/nestedQuery/queryInterval.py | 67 +++++++++++++++---- 1 file changed, 53 insertions(+), 14 deletions(-) diff --git a/tests/pytest/query/nestedQuery/queryInterval.py b/tests/pytest/query/nestedQuery/queryInterval.py index fc94cdf4eb..a7c06a18f9 100644 --- a/tests/pytest/query/nestedQuery/queryInterval.py +++ b/tests/pytest/query/nestedQuery/queryInterval.py @@ -32,25 +32,64 @@ class TDTestCase: # tdSql.execute("drop database db ") tdSql.prepare() tdSql.execute("create table st (ts timestamp, num int, value int , t_instance int) tags (loc nchar(30))") - number = 20 - for n in range(number): - dt= n*300000 # collecting'frequency is 10s - args1=(self.ts1+dt,n,100+n,10+n) - args2=(self.ts2+dt,n,120+n,15+n) - tdSql.execute("insert into t0 using st tags('beijing') values(%d, %d, %d, %d)" % args1) - tdSql.execute("insert into t1 using st tags('shanghai') values(%d, %d, %d, %d)" % args2) + node = 5 + number = 10 + for n in range(node): + for m in range(number): + dt= m*300000+n*60000 # collecting'frequency is 10s + args1=(n,n,self.ts1+dt,n,100+2*m+2*n,10+m+n) + # args2=(n,self.ts2+dt,n,120+n,15+n) + tdSql.execute("insert into t%d using st 
tags('beijing%d') values(%d, %d, %d, %d)" % args1) + # tdSql.execute("insert into t1 using st tags('shanghai') values(%d, %d, %d, %d)" % args2) - + # interval function tdSql.query("select avg(value) from st interval(10m)") print(tdSql.queryResult) - tdSql.checkRows(11) + tdSql.checkRows(6) tdSql.checkData(0, 0, "2020-07-01 04:20:00") - tdSql.query("select avg_val from(select avg(value) as avg_val from st where loc='beijing' interval(10m));") - # tdSql.query("select avg(avg_val) from(select avg(value) as avg_val from st where loc='beijing' interval(10m));") - print(tdSql.queryResult) - tdSql.checkData(0, 0, 109.5) - + tdSql.checkData(1, 1, 107.4) + + # subquery with interval + tdSql.query("select avg(avg_val) from(select avg(value) as avg_val from st where loc='beijing0' interval(10m));") + tdSql.checkData(0, 0, 109.0) + + # subquery with interval and select two Column in parent query + # tdSql.query("select ts,avg(avg_val) from(select avg(value) as avg_val from st where loc='beijing0' interval(10m));") + + # subquery with interval and sliding + tdSql.query("select avg(value) as avg_val from st where loc='beijing0' interval(8m) sliding(30s) limit 1;") + tdSql.checkData(0, 0, "2020-07-01 04:17:00") + tdSql.checkData(0, 1, 107.4) + tdSql.query("select avg(avg_val) from(select avg(value) as avg_val from st where loc='beijing1' interval(8m) sliding(30s));") + tdSql.checkData(0, 0, 111) + # subquery with interval and offset + tdSql.query("select avg(value) as avg_val from st where loc='beijing0' interval(5m,1m);") + tdSql.checkData(0, 0, "2020-07-01 04:21:00") + tdSql.query("select avg(avg_val) from(select avg(value) as avg_val from st where loc='beijing0' interval(5m,1m));") + tdSql.checkData(0, 0, 111) + + # subquery with interval,sliding and group by ; parent query with interval + tdSql.query("select avg(value) as avg_val from st where loc='beijing0' interval(8m) sliding(1m) group by loc limit 1 offset 52 ;") + tdSql.checkData(0, 0, "2020-07-01 05:09:00") + tdSql.checkData(0, 1, 118) + tdSql.query("select avg(avg_val) from(select avg(value) as avg_val from st where loc!='beijing0' interval(8m) sliding(1m) group by loc )interval(5m);") + tdSql.checkData(0, 0, 111) + + # # subquery and parent query with interval and sliding + # tdSql.query("select avg(avg_val) from(select avg(value) as avg_val from st where loc='beijing1' interval(8m) sliding(30s)) interval(5m) sliding(1s);") + # tdSql.checkData(0, 0, 111) + + # subquery and parent query with top and bottom + tdSql.query("select top(avg_val,2) from(select avg(value) as avg_val,num from st where loc!='beijing0' group by num);") + tdSql.checkData(0, 0, 111) + tdSql.query("select bottom(avg_val,2) from(select avg(value) as avg_val,num from st where loc!='beijing0' group by num);") + tdSql.checkData(0, 0, 111) + + + + + # tdSql.query("select avg(voltage) from st interval(1n, 15d)") # tdSql.query("select avg(voltage) from st interval(1n, 15d) group by loc") From 2c83206d657dd94ddd16b160575e0bd8018d3d87 Mon Sep 17 00:00:00 2001 From: tomchon Date: Wed, 9 Jun 2021 19:26:17 +0800 Subject: [PATCH 03/37] [TD-4314]: add testcase of nested query with top/bottom function --- .../pytest/query/nestedQuery/queryInterval.py | 23 ++++++++++--------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/tests/pytest/query/nestedQuery/queryInterval.py b/tests/pytest/query/nestedQuery/queryInterval.py index a7c06a18f9..e346b26c60 100644 --- a/tests/pytest/query/nestedQuery/queryInterval.py +++ b/tests/pytest/query/nestedQuery/queryInterval.py @@ -44,7 
+44,7 @@ class TDTestCase: # interval function tdSql.query("select avg(value) from st interval(10m)") - print(tdSql.queryResult) + # print(tdSql.queryResult) tdSql.checkRows(6) tdSql.checkData(0, 0, "2020-07-01 04:20:00") tdSql.checkData(1, 1, 107.4) @@ -54,37 +54,38 @@ class TDTestCase: tdSql.checkData(0, 0, 109.0) # subquery with interval and select two Column in parent query - # tdSql.query("select ts,avg(avg_val) from(select avg(value) as avg_val from st where loc='beijing0' interval(10m));") + tdSql.error("select ts,avg(avg_val) from(select avg(value) as avg_val from st where loc='beijing0' interval(10m));") # subquery with interval and sliding tdSql.query("select avg(value) as avg_val from st where loc='beijing0' interval(8m) sliding(30s) limit 1;") tdSql.checkData(0, 0, "2020-07-01 04:17:00") - tdSql.checkData(0, 1, 107.4) + tdSql.checkData(0, 1, 100) tdSql.query("select avg(avg_val) from(select avg(value) as avg_val from st where loc='beijing1' interval(8m) sliding(30s));") tdSql.checkData(0, 0, 111) # subquery with interval and offset tdSql.query("select avg(value) as avg_val from st where loc='beijing0' interval(5m,1m);") tdSql.checkData(0, 0, "2020-07-01 04:21:00") - tdSql.query("select avg(avg_val) from(select avg(value) as avg_val from st where loc='beijing0' interval(5m,1m));") - tdSql.checkData(0, 0, 111) + tdSql.checkData(0, 1, 100) + tdSql.query("select avg(avg_val) from(select avg(value) as avg_val from st where loc='beijing0' interval(5m,1m) group by loc);") + tdSql.checkData(0, 0, 109) # subquery with interval,sliding and group by ; parent query with interval tdSql.query("select avg(value) as avg_val from st where loc='beijing0' interval(8m) sliding(1m) group by loc limit 1 offset 52 ;") tdSql.checkData(0, 0, "2020-07-01 05:09:00") tdSql.checkData(0, 1, 118) - tdSql.query("select avg(avg_val) from(select avg(value) as avg_val from st where loc!='beijing0' interval(8m) sliding(1m) group by loc )interval(5m);") - tdSql.checkData(0, 0, 111) + tdSql.query("select avg(avg_val) as ncst from(select avg(value) as avg_val from st where loc!='beijing0' interval(8m) sliding(1m) group by loc ) interval(5m);") + tdSql.checkData(1, 1, 105) # # subquery and parent query with interval and sliding - # tdSql.query("select avg(avg_val) from(select avg(value) as avg_val from st where loc='beijing1' interval(8m) sliding(30s)) interval(5m) sliding(1s);") - # tdSql.checkData(0, 0, 111) + tdSql.query("select avg(avg_val) from(select avg(value) as avg_val from st where loc='beijing1' interval(8m) sliding(5m)) interval(10m) sliding(2m);") + tdSql.checkData(29, 0, "2020-07-01 05:10:00.000") # subquery and parent query with top and bottom tdSql.query("select top(avg_val,2) from(select avg(value) as avg_val,num from st where loc!='beijing0' group by num);") - tdSql.checkData(0, 0, 111) + tdSql.checkData(0, 1, 115) tdSql.query("select bottom(avg_val,2) from(select avg(value) as avg_val,num from st where loc!='beijing0' group by num);") - tdSql.checkData(0, 0, 111) + tdSql.checkData(1, 1, 111) From 176f1e018d406a720a3b05ad1ca6c3ce61ccc3fe Mon Sep 17 00:00:00 2001 From: tomchon Date: Thu, 10 Jun 2021 15:51:55 +0800 Subject: [PATCH 04/37] [TD-4314]: add testcase of nested query with top/bottom function --- .../pytest/query/nestedQuery/queryInterval.py | 28 ++++++------------- 1 file changed, 8 insertions(+), 20 deletions(-) diff --git a/tests/pytest/query/nestedQuery/queryInterval.py b/tests/pytest/query/nestedQuery/queryInterval.py index e346b26c60..3ddf751b7f 100644 --- 
a/tests/pytest/query/nestedQuery/queryInterval.py +++ b/tests/pytest/query/nestedQuery/queryInterval.py @@ -12,6 +12,7 @@ # -*- coding: utf-8 -*- import sys +import os import taos from util.log import tdLog from util.cases import tdCases @@ -84,28 +85,15 @@ class TDTestCase: # subquery and parent query with top and bottom tdSql.query("select top(avg_val,2) from(select avg(value) as avg_val,num from st where loc!='beijing0' group by num);") tdSql.checkData(0, 1, 115) - tdSql.query("select bottom(avg_val,2) from(select avg(value) as avg_val,num from st where loc!='beijing0' group by num);") - tdSql.checkData(1, 1, 111) + tdSql.query("select bottom(avg_val,3) from(select avg(value) as avg_val,num from st where loc!='beijing0' group by num);") + tdSql.checkData(0, 1, 125) + # + tdSql.query("select top(avg_val,2) from(select avg(value) as avg_val from st where loc='beijing1' interval(8m) sliding(3m));") - - - - # tdSql.query("select avg(voltage) from st interval(1n, 15d)") - - # tdSql.query("select avg(voltage) from st interval(1n, 15d) group by loc") - - # tdDnodes.stop(1) - # tdDnodes.start(1) - # tdSql.query("select last(*) from t interval(1s)") - - - # tdSql.query("select first(ts),twa(c) from tb interval(14a)") - # tdSql.checkRows(6) - - # tdSql.query("select twa(c) from tb group by c") - # tdSql.checkRows(4) - + testcaseFilename = os.path.split(__file__)[-1] + os.system("rm -rf ./insert_res.txt") + os.system("rm -rf wal/%s.sql" % testcaseFilename ) def stop(self): tdSql.close() From c6d6ba3fd1da2e0d92e66417d023ae0dfd60a427 Mon Sep 17 00:00:00 2001 From: tomchon Date: Wed, 16 Jun 2021 20:54:08 +0800 Subject: [PATCH 05/37] [TD-45145]: add testcase of stat_window function --- tests/pytest/query/queryStateWindow.py | 92 ++++++++++++++++++++++++++ 1 file changed, 92 insertions(+) create mode 100644 tests/pytest/query/queryStateWindow.py diff --git a/tests/pytest/query/queryStateWindow.py b/tests/pytest/query/queryStateWindow.py new file mode 100644 index 0000000000..027ece89af --- /dev/null +++ b/tests/pytest/query/queryStateWindow.py @@ -0,0 +1,92 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import taos +from util.log import * +from util.cases import * +from util.sql import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + self.rowNum = 100000 + self.ts = 1537146000000 + + def run(self): + tdSql.prepare() + + print("==============step1") + tdSql.execute( + "create table if not exists st (ts timestamp, t1 int, t2 timestamp, t3 bigint, t4 float, t5 double, t6 binary(10), t7 smallint, t8 tinyint, t9 bool, t10 nchar(10), t11 int unsigned, t12 bigint unsigned, t13 smallint unsigned, t14 tinyint unsigned ) tags(dev nchar(50), tag2 binary(16))") + tdSql.execute( + 'CREATE TABLE if not exists dev_001 using st tags("dev_01", "tag_01")') + tdSql.execute( + 'CREATE TABLE if not exists dev_002 using st tags("dev_02", "tag_02")') + + print("==============step2") + + tdSql.execute( + "INSERT INTO dev_001 VALUES('2020-05-13 10:00:00.000', 1, '2020-05-13 10:00:00.000', 10, 3.1, 3.14, 'test', -10, -126, true, '测试', 15, 10, 65534, 254)('2020-05-13 10:00:01.000', 1, '2020-05-13 10:00:01.000', 10, 3.1, 3.14, 'test', -10, -126, true, '测试', 15, 10, 65534, 253)('2020-05-13 10:00:02.000', 10, '2020-05-13 10:00:00.000', 11, 3.1, 3.14, 'test', 10, -127, true, '测试', 15, 10, 65534, 253)('2020-05-13 10:00:03.000', 1, '2020-05-13 10:00:00.000', 11, 3.1, 3.14, 'test', -10, -126, true, '测试', 14, 12, 65532, 254)") + + for i in range(self.rowNum): + tdSql.execute("insert into dev_002 (ts,t1) values(%d, %d,)" % (self.ts + i, i + 1)) + + tdSql.query("select count(ts) from dev_001 state_window(t1)") + tdSql.checkRows(3) + tdSql.checkData(0, 0, 2) + tdSql.query("select count(ts) from dev_001 state_window(t3)") + tdSql.checkRows(2) + tdSql.checkData(1, 0, 2) + tdSql.query("select count(ts) from dev_001 state_window(t7)") + tdSql.checkRows(3) + tdSql.checkData(1, 0, 1) + tdSql.query("select count(ts) from dev_001 state_window(t8)") + tdSql.checkRows(3) + tdSql.checkData(2, 0, 1) + tdSql.query("select count(ts) from dev_001 state_window(t11)") + tdSql.checkRows(2) + tdSql.checkData(0, 0, 3) + tdSql.query("select count(ts) from dev_001 state_window(t12)") + tdSql.checkRows(2) + tdSql.checkData(1, 0, 1) + tdSql.query("select count(ts) from dev_001 state_window(t13)") + tdSql.checkRows(2) + tdSql.checkData(1, 0, 1) + tdSql.query("select count(ts) from dev_001 state_window(t14)") + tdSql.checkRows(3) + tdSql.checkData(1, 0, 2) + tdSql.query("select count(ts) from dev_002 state_window(t1)") + tdSql.checkRows(100000) + + # error + tdSql.query("select count(ts) from dev_001 state_window(t7)") + tdSql.error("select count(*) from dev_001 state_window(t2)") + tdSql.error("select count(*) from st state_window(t3)") + tdSql.error("select count(*) from dev_001 state_window(t4)") + tdSql.error("select count(*) from dev_001 state_window(t5)") + tdSql.error("select count(*) from dev_001 state_window(t6)") + tdSql.error("select count(*) from dev_001 state_window(t9)") + tdSql.error("select count(*) from dev_001 state_window(t10)") + tdSql.error("select count(*) from dev_001 state_window(tag2)") + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) 
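+# Note on the expectations above (a rough sketch of the state_window semantics,
+# not an additional check): state_window(col) groups consecutive rows that share
+# the same value of col. The four dev_001 rows carry t1 = 1, 1, 10, 1, which forms
+# three windows of 2, 1 and 1 rows; that is why "count(ts) ... state_window(t1)"
+# is expected to return 3 rows with a count of 2 in the first window.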
+tdCases.addLinux(__file__, TDTestCase()) From e5f2bab3a6b9ee035967be1b5dc6c11b9fd7b109 Mon Sep 17 00:00:00 2001 From: tomchon Date: Sat, 19 Jun 2021 10:41:42 +0800 Subject: [PATCH 06/37] [TD-4509]: add testcase of irate function --- tests/pytest/functions/function_irate.py | 224 +++++++++++++++++++++++ 1 file changed, 224 insertions(+) create mode 100644 tests/pytest/functions/function_irate.py diff --git a/tests/pytest/functions/function_irate.py b/tests/pytest/functions/function_irate.py new file mode 100644 index 0000000000..15adab7651 --- /dev/null +++ b/tests/pytest/functions/function_irate.py @@ -0,0 +1,224 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import taos +from util.log import * +from util.cases import * +from util.sql import * +import numpy as np + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + + self.rowNum = 100 + self.ts = 1537146000000 + self.ts1 = 1537146000000000 + + + def run(self): + # db precison ms + tdSql.prepare() + tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double, + col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned) tags(loc nchar(20))''') + tdSql.execute("create table test1 using test tags('beijing')") + tdSql.execute("create table gtest1 (ts timestamp, col1 float)") + tdSql.execute("create table gtest2 (ts timestamp, col1 tinyint)") + tdSql.execute("create table gtest3 (ts timestamp, col1 tinyint)") + tdSql.execute("create table gtest4 (ts timestamp, col1 tinyint)") + tdSql.execute("create table gtest5 (ts timestamp, col1 tinyint)") + tdSql.execute("create table gtest6 (ts timestamp, col1 tinyint)") + tdSql.execute("create table gtest7 (ts timestamp, col1 tinyint)") + tdSql.execute("create table gtest8 (ts timestamp, col1 tinyint)") + + + for i in range(self.rowNum): + tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)" + % (self.ts + i*1000, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1)) + + tdSql.execute("insert into gtest1 values(1537146000000,0);") + tdSql.execute("insert into gtest1 values(1537146001100,1.2);") + tdSql.execute("insert into gtest2 values(1537146001001,1);") + tdSql.execute("insert into gtest2 values(1537146001101,2);") + tdSql.execute("insert into gtest3 values(1537146001101,2);") + tdSql.execute("insert into gtest4(ts) values(1537146001101);") + tdSql.execute("insert into gtest5 values(1537146001002,4);") + tdSql.execute("insert into gtest5 values(1537146002202,4);") + tdSql.execute("insert into gtest6 values(1537146000000,5);") + tdSql.execute("insert into gtest6 values(1537146001000,2);") + tdSql.execute("insert into gtest7 values(1537146001000,1);") + tdSql.execute("insert into gtest7 values(1537146008000,2);") + tdSql.execute("insert into gtest7 values(1537146009000,6);") + tdSql.execute("insert into gtest7 
values(1537146012000,3);") + tdSql.execute("insert into gtest7 values(1537146015000,3);") + tdSql.execute("insert into gtest7 values(1537146017000,1);") + tdSql.execute("insert into gtest7 values(1537146019000,3);") + tdSql.execute("insert into gtest8 values(1537146000002,4);") + tdSql.execute("insert into gtest8 values(1537146002202,4);") + + # irate verifacation + tdSql.query("select irate(col1) from test1;") + tdSql.checkData(0, 0, 1) + tdSql.query("select irate(col1) from test1 interval(10s);") + tdSql.checkData(0, 1, 1) + tdSql.query("select irate(col1) from test1;") + tdSql.checkData(0, 0, 1) + tdSql.query("select irate(col2) from test1;") + tdSql.checkData(0, 0, 1) + tdSql.query("select irate(col3) from test1;") + tdSql.checkData(0, 0, 1) + tdSql.query("select irate(col4) from test1;") + tdSql.checkData(0, 0, 1) + tdSql.query("select irate(col5) from test1;") + tdSql.checkData(0, 0, 1) + tdSql.query("select irate(col6) from test1;") + tdSql.checkData(0, 0, 1) + tdSql.query("select irate(col11) from test1;") + tdSql.checkData(0, 0, 1) + tdSql.query("select irate(col12) from test1;") + tdSql.checkData(0, 0, 1) + tdSql.query("select irate(col13) from test1;") + tdSql.checkData(0, 0, 1) + tdSql.query("select irate(col14) from test1;") + tdSql.checkData(0, 0, 1) + tdSql.query("select irate(col2) from test1;") + tdSql.checkData(0, 0, 1) + tdSql.query("select irate(col2) from test1;") + tdSql.checkData(0, 0, 1) + + tdSql.query("select irate(col1) from gtest1;") + tdSql.checkData(0, 0, 1.2/1.1) + tdSql.query("select irate(col1) from gtest2;") + tdSql.checkData(0, 0, 10) + tdSql.query("select irate(col1) from gtest3;") + tdSql.checkData(0, 0, 0) + tdSql.query("select irate(col1) from gtest4;") + tdSql.checkRows(0) + tdSql.query("select irate(col1) from gtest5;") + tdSql.checkData(0, 0, 0) + tdSql.query("select irate(col1) from gtest6;") + tdSql.checkData(0, 0, 2) + tdSql.query("select irate(col1) from gtest7;") + tdSql.checkData(0, 0, 1) + tdSql.query("select irate(col1) from gtest7 interval(5s) order by ts asc;") + tdSql.checkData(1, 1, 4) + tdSql.checkData(2, 1, 0) + tdSql.checkData(3, 1, 1) + tdSql.query("select irate(col1) from gtest7 interval(5s) order by ts desc ;") + tdSql.checkData(1, 1, 0) + tdSql.checkData(2, 1, 4) + tdSql.checkData(3, 1, 0) + + #error + tdSql.error("select irate(col1) from test") + tdSql.error("select irate(ts) from test1") + tdSql.error("select irate(col7) from test1") + tdSql.error("select irate(col8) from test1") + tdSql.error("select irate(col9) from test1") + tdSql.error("select irate(loc) from test1") + + # use db1 precision us + tdSql.execute("create database db1 precision 'us' keep 3650 UPDATE 1") + tdSql.execute("use db1 ") + tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double, + col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned) tags(loc nchar(20))''') + tdSql.execute("create table test1 using test tags('beijing')") + tdSql.execute("create table gtest1 (ts timestamp, col1 float)") + tdSql.execute("create table gtest2 (ts timestamp, col1 tinyint)") + tdSql.execute("create table gtest3 (ts timestamp, col1 tinyint)") + tdSql.execute("create table gtest4 (ts timestamp, col1 tinyint)") + tdSql.execute("create table gtest5 (ts timestamp, col1 tinyint)") + tdSql.execute("create table gtest6 (ts timestamp, col1 tinyint)") + tdSql.execute("create table gtest7 (ts timestamp, col1 tinyint)") + 
tdSql.execute("create table gtest8 (ts timestamp, col1 tinyint)") + tdSql.execute("create table gtest9 (ts timestamp, col1 tinyint)") + + for i in range(self.rowNum): + tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)" + % (self.ts1 + i*1000000, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1)) + + tdSql.execute("insert into gtest1 values(1537146000000000,0);") + tdSql.execute("insert into gtest1 values(1537146001100000,1.2);") + tdSql.execute("insert into gtest2 values(1537146001001000,1);") + tdSql.execute("insert into gtest2 values(1537146001101000,2);") + tdSql.execute("insert into gtest3 values(1537146001101000,2);") + tdSql.execute("insert into gtest4(ts) values(1537146001101000);") + tdSql.execute("insert into gtest5 values(1537146001002000,4);") + tdSql.execute("insert into gtest5 values(1537146002202000,4);") + tdSql.execute("insert into gtest6 values(1537146000000000,5);") + tdSql.execute("insert into gtest6 values(1537146001000000,2);") + tdSql.execute("insert into gtest7 values(1537146001000000,1);") + tdSql.execute("insert into gtest7 values(1537146008000000,2);") + tdSql.execute("insert into gtest7 values(1537146009000000,6);") + tdSql.execute("insert into gtest7 values(1537146012000000,3);") + tdSql.execute("insert into gtest7 values(1537146015000000,3);") + tdSql.execute("insert into gtest7 values(1537146017000000,1);") + tdSql.execute("insert into gtest7 values(1537146019000000,3);") + tdSql.execute("insert into gtest8 values(1537146000002000,3);") + tdSql.execute("insert into gtest8 values(1537146001003000,4);") + tdSql.execute("insert into gtest9 values(1537146000000000,4);") + tdSql.execute("insert into gtest9 values(1537146000000001,5);") + + + # irate verifacation + tdSql.query("select irate(col1) from test1;") + tdSql.checkData(0, 0, 1) + tdSql.query("select irate(col1) from test1 interval(10s);") + tdSql.checkData(0, 1, 1) + tdSql.query("select irate(col1) from test1;") + tdSql.checkData(0, 0, 1) + tdSql.query("select irate(col1) from gtest1;") + tdSql.checkData(0, 0, 1.2/1.1) + tdSql.query("select irate(col1) from gtest2;") + tdSql.checkData(0, 0, 10) + tdSql.query("select irate(col1) from gtest3;") + tdSql.checkData(0, 0, 0) + tdSql.query("select irate(col1) from gtest4;") + tdSql.checkRows(0) + tdSql.query("select irate(col1) from gtest5;") + tdSql.checkData(0, 0, 0) + tdSql.query("select irate(col1) from gtest6;") + tdSql.checkData(0, 0, 2) + tdSql.query("select irate(col1) from gtest7;") + tdSql.checkData(0, 0, 1) + tdSql.query("select irate(col1) from gtest7 interval(5s) order by ts asc;") + tdSql.checkData(1, 1, 4) + tdSql.checkData(2, 1, 0) + tdSql.checkData(3, 1, 1) + tdSql.query("select irate(col1) from gtest7 interval(5s) order by ts desc ;") + tdSql.checkData(1, 1, 0) + tdSql.checkData(2, 1, 4) + tdSql.checkData(3, 1, 0) + tdSql.query("select irate(col1) from gtest8;") + tdSql.checkData(0, 0, 1/1.001) + tdSql.query("select irate(col1) from gtest9;") + tdSql.checkData(0, 0, 1000000) + + #error + # tdSql.error("select irate(col1) from test") + tdSql.error("select irate(ts) from test1") + tdSql.error("select irate(col7) from test1") + tdSql.error("select irate(col8) from test1") + tdSql.error("select irate(col9) from test1") + + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) From 91c557d99de11efc5793d9ed6cfa2317524a1457 
Mon Sep 17 00:00:00 2001 From: tomchon Date: Sat, 19 Jun 2021 10:58:26 +0800 Subject: [PATCH 07/37] [TD-4509]: add testcase of irate function --- tests/pytest/functions/function_irate.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/tests/pytest/functions/function_irate.py b/tests/pytest/functions/function_irate.py index 15adab7651..2c85e1bbdd 100644 --- a/tests/pytest/functions/function_irate.py +++ b/tests/pytest/functions/function_irate.py @@ -33,8 +33,8 @@ class TDTestCase: # db precison ms tdSql.prepare() tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double, - col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned) tags(loc nchar(20))''') - tdSql.execute("create table test1 using test tags('beijing')") + col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned) tags(loc nchar(20), tag1 int)''') + tdSql.execute("create table test1 using test tags('beijing', 10)") tdSql.execute("create table gtest1 (ts timestamp, col1 float)") tdSql.execute("create table gtest2 (ts timestamp, col1 tinyint)") tdSql.execute("create table gtest3 (ts timestamp, col1 tinyint)") @@ -129,6 +129,7 @@ class TDTestCase: tdSql.error("select irate(col8) from test1") tdSql.error("select irate(col9) from test1") tdSql.error("select irate(loc) from test1") + tdSql.error("select irate(tag1) from test1") # use db1 precision us tdSql.execute("create database db1 precision 'us' keep 3650 UPDATE 1") @@ -208,11 +209,14 @@ class TDTestCase: tdSql.checkData(0, 0, 1000000) #error - # tdSql.error("select irate(col1) from test") + tdSql.error("select irate(col1) from test") tdSql.error("select irate(ts) from test1") tdSql.error("select irate(col7) from test1") tdSql.error("select irate(col8) from test1") tdSql.error("select irate(col9) from test1") + tdSql.error("select irate(loc) from test1") + tdSql.error("select irate(tag1) from test1") + From ca30708ce1e920dfc505ccef5db4dab4becb5d56 Mon Sep 17 00:00:00 2001 From: tomchon Date: Sat, 19 Jun 2021 11:17:31 +0800 Subject: [PATCH 08/37] [TD-4509]: add testcase of irate function --- tests/pytest/fulltest.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh index b9b7bbcaf6..d2d850a850 100755 --- a/tests/pytest/fulltest.sh +++ b/tests/pytest/fulltest.sh @@ -322,6 +322,7 @@ python3 ./test.py -f alter/alter_table.py python3 ./test.py -f query/queryGroupbySort.py python3 ./test.py -f functions/function_stateWindow.py python3 ./test.py -f functions/function_derivative.py +python3 ./test.py -f functions/function_irate.py python3 ./test.py -f insert/unsignedInt.py python3 ./test.py -f insert/unsignedBigint.py From 643807e6630242388f38d749e4630416322347d6 Mon Sep 17 00:00:00 2001 From: tomchon Date: Sat, 19 Jun 2021 16:25:11 +0800 Subject: [PATCH 09/37] [TD-45145]: add testcase of stat_window function --- tests/pytest/query/queryStateWindow.py | 28 ++++++++++++++++++++------ 1 file changed, 22 insertions(+), 6 deletions(-) diff --git a/tests/pytest/query/queryStateWindow.py b/tests/pytest/query/queryStateWindow.py index 027ece89af..db9eaf8672 100644 --- a/tests/pytest/query/queryStateWindow.py +++ b/tests/pytest/query/queryStateWindow.py @@ -16,6 +16,7 @@ import taos from util.log import * from util.cases import * from util.sql import * +import numpy as np class TDTestCase: @@ -30,7 
+31,7 @@ class TDTestCase: print("==============step1") tdSql.execute( - "create table if not exists st (ts timestamp, t1 int, t2 timestamp, t3 bigint, t4 float, t5 double, t6 binary(10), t7 smallint, t8 tinyint, t9 bool, t10 nchar(10), t11 int unsigned, t12 bigint unsigned, t13 smallint unsigned, t14 tinyint unsigned ) tags(dev nchar(50), tag2 binary(16))") + "create table if not exists st (ts timestamp, t1 int, t2 timestamp, t3 bigint, t4 float, t5 double, t6 binary(10), t7 smallint, t8 tinyint, t9 bool, t10 nchar(10), t11 int unsigned, t12 bigint unsigned, t13 smallint unsigned, t14 tinyint unsigned ,t15 int) tags(dev nchar(50), tag2 binary(16))") tdSql.execute( 'CREATE TABLE if not exists dev_001 using st tags("dev_01", "tag_01")') tdSql.execute( @@ -39,7 +40,7 @@ class TDTestCase: print("==============step2") tdSql.execute( - "INSERT INTO dev_001 VALUES('2020-05-13 10:00:00.000', 1, '2020-05-13 10:00:00.000', 10, 3.1, 3.14, 'test', -10, -126, true, '测试', 15, 10, 65534, 254)('2020-05-13 10:00:01.000', 1, '2020-05-13 10:00:01.000', 10, 3.1, 3.14, 'test', -10, -126, true, '测试', 15, 10, 65534, 253)('2020-05-13 10:00:02.000', 10, '2020-05-13 10:00:00.000', 11, 3.1, 3.14, 'test', 10, -127, true, '测试', 15, 10, 65534, 253)('2020-05-13 10:00:03.000', 1, '2020-05-13 10:00:00.000', 11, 3.1, 3.14, 'test', -10, -126, true, '测试', 14, 12, 65532, 254)") + "INSERT INTO dev_001 VALUES('2020-05-13 10:00:00.000', 1, '2020-05-13 10:00:00.000', 10, 3.1, 3.14, 'test', -10, -126, true, '测试', 15, 10, 65534, 254, 1)('2020-05-13 10:00:01.000', 1, '2020-05-13 10:00:01.000', 10, 3.1, 3.14, 'test', -10, -126, true, '测试', 15, 10, 65534, 253, 5)('2020-05-13 10:00:02.000', 10, '2020-05-13 10:00:00.000', 11, 3.1, 3.14, 'test', 10, -127, false, '测试', 15, 10, 65534, 253, 10)('2020-05-13 10:00:03.000', 1, '2020-05-13 10:00:00.000', 11, 3.1, 3.14, 'test', -10, -126, true, '测试', 14, 12, 65532, 254, 15)") for i in range(self.rowNum): tdSql.execute("insert into dev_002 (ts,t1) values(%d, %d,)" % (self.ts + i, i + 1)) @@ -55,7 +56,7 @@ class TDTestCase: tdSql.checkData(1, 0, 1) tdSql.query("select count(ts) from dev_001 state_window(t8)") tdSql.checkRows(3) - tdSql.checkData(2, 0, 1) + tdSql.checkData(2, 0, 1) tdSql.query("select count(ts) from dev_001 state_window(t11)") tdSql.checkRows(2) tdSql.checkData(0, 0, 3) @@ -69,16 +70,31 @@ class TDTestCase: tdSql.checkRows(3) tdSql.checkData(1, 0, 2) tdSql.query("select count(ts) from dev_002 state_window(t1)") - tdSql.checkRows(100000) + tdSql.checkRows(100000) + + # with all aggregate function + tdSql.query("select count(*),sum(t1),avg(t1),twa(t1),stddev(t15),leastsquares(t15,1,1),first(t15),last(t15),spread(t15),percentile(t15,90),t9 from dev_001 state_window(t9);") + tdSql.checkRows(3) + tdSql.checkData(0, 0, 2) + tdSql.checkData(1, 1, 10) + tdSql.checkData(0, 2, 1) + # tdSql.checkData(0, 3, 1) + tdSql.checkData(0, 4, np.std([1,5])) + # tdSql.checkData(0, 5, 1) + tdSql.checkData(0, 6, 1) + tdSql.checkData(0, 7, 5) + tdSql.checkData(0, 8, 4) + tdSql.checkData(0, 9, 4.6) + tdSql.checkData(0, 10, 'True') + + tdSql.query("select count(*),sum(t1),avg(t1),twa(t1),stddev(t15),leastsquares(t15,1,1),first(t10),spread(t15),t9 from dev_001 state_window(t9);") # error - tdSql.query("select count(ts) from dev_001 state_window(t7)") tdSql.error("select count(*) from dev_001 state_window(t2)") tdSql.error("select count(*) from st state_window(t3)") tdSql.error("select count(*) from dev_001 state_window(t4)") tdSql.error("select count(*) from dev_001 state_window(t5)") tdSql.error("select 
count(*) from dev_001 state_window(t6)") - tdSql.error("select count(*) from dev_001 state_window(t9)") tdSql.error("select count(*) from dev_001 state_window(t10)") tdSql.error("select count(*) from dev_001 state_window(tag2)") From a0be6d9288cbfe7c4c194938b5c0202f8b19866c Mon Sep 17 00:00:00 2001 From: tomchon Date: Sat, 19 Jun 2021 17:02:09 +0800 Subject: [PATCH 10/37] [TD-45145]: add testcase of stat_window function --- tests/pytest/query/queryStateWindow.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tests/pytest/query/queryStateWindow.py b/tests/pytest/query/queryStateWindow.py index db9eaf8672..251dbef658 100644 --- a/tests/pytest/query/queryStateWindow.py +++ b/tests/pytest/query/queryStateWindow.py @@ -87,7 +87,10 @@ class TDTestCase: tdSql.checkData(0, 9, 4.6) tdSql.checkData(0, 10, 'True') - tdSql.query("select count(*),sum(t1),avg(t1),twa(t1),stddev(t15),leastsquares(t15,1,1),first(t10),spread(t15),t9 from dev_001 state_window(t9);") + # with where + tdSql.query("select avg(t15),t9 from dev_001 where t9='true' state_window(t9);") + tdSql.checkData(0, 0, 7) + tdSql.checkData(0, 1, 'True') # error tdSql.error("select count(*) from dev_001 state_window(t2)") From e73ddd1e6f9db6e7e36c617f39ce7fbdf170afd1 Mon Sep 17 00:00:00 2001 From: wpan Date: Mon, 21 Jun 2021 09:23:13 +0800 Subject: [PATCH 11/37] fix bug for disk full --- src/query/src/qExecutor.c | 6 +++++- src/query/src/qTsbuf.c | 30 +++++++++++++++++++++++++----- 2 files changed, 30 insertions(+), 6 deletions(-) diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index f97a0c4a74..69242adc2a 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -6720,7 +6720,11 @@ int32_t initQInfo(SQueryTableMsg *pQueryMsg, void *tsdb, int32_t vgId, SQInfo *p if (pQueryMsg->tsLen > 0) { // open new file to save the result char *tsBlock = (char *) pQueryMsg + pQueryMsg->tsOffset; pTsBuf = tsBufCreateFromCompBlocks(tsBlock, pQueryMsg->tsNumOfBlocks, pQueryMsg->tsLen, pQueryMsg->tsOrder, vgId); - + if (pTsBuf == NULL) { + code = TSDB_CODE_QRY_NO_DISKSPACE; + goto _error; + } + tsBufResetPos(pTsBuf); bool ret = tsBufNextPos(pTsBuf); diff --git a/src/query/src/qTsbuf.c b/src/query/src/qTsbuf.c index 1f43c5b33c..f4c6e24290 100644 --- a/src/query/src/qTsbuf.c +++ b/src/query/src/qTsbuf.c @@ -2,6 +2,7 @@ #include "taoserror.h" #include "tscompression.h" #include "tutil.h" +#include "queryLog.h" static int32_t getDataStartOffset(); static void TSBufUpdateGroupInfo(STSBuf* pTSBuf, int32_t index, STSGroupBlockInfo* pBlockInfo); @@ -633,10 +634,14 @@ int32_t STSBufUpdateHeader(STSBuf* pTSBuf, STSBufFileHeader* pHeader) { int32_t r = fseek(pTSBuf->f, 0, SEEK_SET); if (r != 0) { + qError("fseek failed, errno:%d", errno); return -1; } - fwrite(pHeader, sizeof(STSBufFileHeader), 1, pTSBuf->f); + if (fwrite(pHeader, sizeof(STSBufFileHeader), 1, pTSBuf->f) != sizeof(STSBufFileHeader)) { + qError("fwrite failed, errno:%d", errno); + return -1; + } return 0; } @@ -856,9 +861,17 @@ STSBuf* tsBufCreateFromCompBlocks(const char* pData, int32_t numOfBlocks, int32_ TSBufUpdateGroupInfo(pTSBuf, pTSBuf->numOfGroups - 1, pBlockInfo); int32_t ret = fseek(pTSBuf->f, pBlockInfo->offset, SEEK_SET); - UNUSED(ret); + if (ret == -1) { + qError("fseek failed, errno:%d", errno); + tsBufDestroy(pTSBuf); + return NULL; + } size_t sz = fwrite((void*)pData, 1, len, pTSBuf->f); - UNUSED(sz); + if (sz != len) { + qError("fwrite failed, errno:%d", errno); + tsBufDestroy(pTSBuf); + return NULL; + } pTSBuf->fileSize += len; 
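   /* The remaining steps, rewriting the STSBufFileHeader and fsync()ing the buffer,
    * can also fail on a full disk; their return values are checked below and the
    * whole STSBuf is torn down on error instead of being returned half-written. */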
pTSBuf->tsOrder = order; @@ -866,9 +879,16 @@ STSBuf* tsBufCreateFromCompBlocks(const char* pData, int32_t numOfBlocks, int32_ STSBufFileHeader header = { .magic = TS_COMP_FILE_MAGIC, .numOfGroup = pTSBuf->numOfGroups, .tsOrder = pTSBuf->tsOrder}; - STSBufUpdateHeader(pTSBuf, &header); + if (STSBufUpdateHeader(pTSBuf, &header) < 0) { + tsBufDestroy(pTSBuf); + return NULL; + } - fsync(fileno(pTSBuf->f)); + if (fsync(fileno(pTSBuf->f)) == -1) { + qError("fsync failed, errno:%d", errno); + tsBufDestroy(pTSBuf); + return NULL; + } return pTSBuf; } From 7b2a1265309f677ccc76052b67a99e1435abf4f4 Mon Sep 17 00:00:00 2001 From: wpan Date: Mon, 21 Jun 2021 09:41:22 +0800 Subject: [PATCH 12/37] fix bug --- src/query/src/qTsbuf.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/src/query/src/qTsbuf.c b/src/query/src/qTsbuf.c index f4c6e24290..81ad0af736 100644 --- a/src/query/src/qTsbuf.c +++ b/src/query/src/qTsbuf.c @@ -637,9 +637,10 @@ int32_t STSBufUpdateHeader(STSBuf* pTSBuf, STSBufFileHeader* pHeader) { qError("fseek failed, errno:%d", errno); return -1; } - - if (fwrite(pHeader, sizeof(STSBufFileHeader), 1, pTSBuf->f) != sizeof(STSBufFileHeader)) { - qError("fwrite failed, errno:%d", errno); + + size_t ws = fwrite(pHeader, sizeof(STSBufFileHeader), 1, pTSBuf->f); + if (ws != sizeof(STSBufFileHeader)) { + qError("ts update header fwrite failed, size:%d, expected size:%d", (int32_t)ws, (int32_t)sizeof(STSBufFileHeader)); return -1; } return 0; @@ -868,7 +869,7 @@ STSBuf* tsBufCreateFromCompBlocks(const char* pData, int32_t numOfBlocks, int32_ } size_t sz = fwrite((void*)pData, 1, len, pTSBuf->f); if (sz != len) { - qError("fwrite failed, errno:%d", errno); + qError("ts data fwrite failed, write size:%d, expected size:%d", (int32_t)sz, len); tsBufDestroy(pTSBuf); return NULL; } From 1a458d66db33dd3cdaee074fa18bda07d1a9a19d Mon Sep 17 00:00:00 2001 From: wpan Date: Mon, 21 Jun 2021 09:45:26 +0800 Subject: [PATCH 13/37] fix bug --- src/query/src/qTsbuf.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/query/src/qTsbuf.c b/src/query/src/qTsbuf.c index 81ad0af736..9c04c7c929 100644 --- a/src/query/src/qTsbuf.c +++ b/src/query/src/qTsbuf.c @@ -639,7 +639,7 @@ int32_t STSBufUpdateHeader(STSBuf* pTSBuf, STSBufFileHeader* pHeader) { } size_t ws = fwrite(pHeader, sizeof(STSBufFileHeader), 1, pTSBuf->f); - if (ws != sizeof(STSBufFileHeader)) { + if (ws != 1) { qError("ts update header fwrite failed, size:%d, expected size:%d", (int32_t)ws, (int32_t)sizeof(STSBufFileHeader)); return -1; } From 8ae2c04c36739473dbd71a5b41f7ce68ddb35bb3 Mon Sep 17 00:00:00 2001 From: wpan Date: Tue, 22 Jun 2021 14:57:58 +0800 Subject: [PATCH 14/37] fix bug --- src/client/src/tscSub.c | 1 + 1 file changed, 1 insertion(+) diff --git a/src/client/src/tscSub.c b/src/client/src/tscSub.c index e4f67ba0de..421a6ce93a 100644 --- a/src/client/src/tscSub.c +++ b/src/client/src/tscSub.c @@ -509,6 +509,7 @@ TAOS_RES *taos_consume(TAOS_SUB *tsub) { } pSub->pSql = pSql; pSql->pSubscription = pSub; + pSub->lastSyncTime = 0; // no table list now, force to update it tscDebug("begin table synchronization"); From 612b6c84521e3fc0c762d7446657e1e3cfed7496 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Tue, 22 Jun 2021 16:33:00 +0800 Subject: [PATCH 15/37] [TD-4805]: fix startup os config display --- src/dnode/src/dnodeMain.c | 2 ++ src/os/inc/osSysinfo.h | 1 + src/os/src/darwin/darwinSysInfo.c | 11 ++++++++--- src/os/src/detail/osSysinfo.c | 11 ++++++++--- src/os/src/windows/wSysinfo.c | 7 ++++++- 5 
files changed, 25 insertions(+), 7 deletions(-) diff --git a/src/dnode/src/dnodeMain.c b/src/dnode/src/dnodeMain.c index 410e6bb188..22ce6c995a 100644 --- a/src/dnode/src/dnodeMain.c +++ b/src/dnode/src/dnodeMain.c @@ -253,6 +253,8 @@ static int32_t dnodeInitStorage() { dnodeCheckDataDirOpenned(tsDnodeDir); + taosGetDisk(); + taosPrintDiskInfo(); dInfo("dnode storage is initialized at %s", tsDnodeDir); return 0; } diff --git a/src/os/inc/osSysinfo.h b/src/os/inc/osSysinfo.h index 895b5dd499..1be2e94dc5 100644 --- a/src/os/inc/osSysinfo.h +++ b/src/os/inc/osSysinfo.h @@ -36,6 +36,7 @@ bool taosGetCpuUsage(float *sysCpuUsage, float *procCpuUsage) ; bool taosGetProcMemory(float *memoryUsedMB) ; bool taosGetSysMemory(float *memoryUsedMB); void taosPrintOsInfo(); +void taosPrintDiskInfo(); int taosSystem(const char * cmd) ; void taosKillSystem(); bool taosGetSystemUid(char *uid); diff --git a/src/os/src/darwin/darwinSysInfo.c b/src/os/src/darwin/darwinSysInfo.c index 6e70043779..3c80ee4260 100644 --- a/src/os/src/darwin/darwinSysInfo.c +++ b/src/os/src/darwin/darwinSysInfo.c @@ -137,9 +137,6 @@ void taosPrintOsInfo() { // uInfo(" os openMax: %" PRId64, tsOpenMax); // uInfo(" os streamMax: %" PRId64, tsStreamMax); uInfo(" os numOfCores: %d", tsNumOfCores); - uInfo(" os totalDisk: %f(GB)", tsTotalDataDirGB); - uInfo(" os usedDisk: %f(GB)", tsUsedDataDirGB); - uInfo(" os availDisk: %f(GB)", tsAvailDataDirGB); uInfo(" os totalMemory: %d(MB)", tsTotalMemoryMB); struct utsname buf; @@ -155,6 +152,14 @@ void taosPrintOsInfo() { uInfo("=================================="); } +void taosPrintDiskInfo() { + uInfo("=================================="); + uInfo(" os totalDisk: %f(GB)", tsTotalDataDirGB); + uInfo(" os usedDisk: %f(GB)", tsUsedDataDirGB); + uInfo(" os availDisk: %f(GB)", tsAvailDataDirGB); + uInfo("=================================="); +} + void taosKillSystem() { uError("function taosKillSystem, exit!"); exit(0); diff --git a/src/os/src/detail/osSysinfo.c b/src/os/src/detail/osSysinfo.c index bbda08aa25..a2a560b296 100644 --- a/src/os/src/detail/osSysinfo.c +++ b/src/os/src/detail/osSysinfo.c @@ -506,9 +506,6 @@ void taosPrintOsInfo() { uInfo(" os openMax: %" PRId64, tsOpenMax); uInfo(" os streamMax: %" PRId64, tsStreamMax); uInfo(" os numOfCores: %d", tsNumOfCores); - uInfo(" os totalDisk: %f(GB)", tsTotalDataDirGB); - uInfo(" os usedDisk: %f(GB)", tsUsedDataDirGB); - uInfo(" os availDisk: %f(GB)", tsAvailDataDirGB); uInfo(" os totalMemory: %d(MB)", tsTotalMemoryMB); struct utsname buf; @@ -523,6 +520,14 @@ void taosPrintOsInfo() { uInfo(" os machine: %s", buf.machine); } +void taosPrintDiskInfo() { + uInfo("=================================="); + uInfo(" os totalDisk: %f(GB)", tsTotalDataDirGB); + uInfo(" os usedDisk: %f(GB)", tsUsedDataDirGB); + uInfo(" os availDisk: %f(GB)", tsAvailDataDirGB); + uInfo("=================================="); +} + void taosKillSystem() { // SIGINT uInfo("taosd will shut down soon"); diff --git a/src/os/src/windows/wSysinfo.c b/src/os/src/windows/wSysinfo.c index 8a81e3079a..72793a1049 100644 --- a/src/os/src/windows/wSysinfo.c +++ b/src/os/src/windows/wSysinfo.c @@ -205,10 +205,15 @@ void taosGetSystemInfo() { void taosPrintOsInfo() { uInfo(" os numOfCores: %d", tsNumOfCores); + uInfo(" os totalMemory: %d(MB)", tsTotalMemoryMB); + uInfo("=================================="); +} + +void taosPrintDiskInfo() { + uInfo("=================================="); uInfo(" os totalDisk: %f(GB)", tsTotalDataDirGB); uInfo(" os usedDisk: %f(GB)", tsUsedDataDirGB); uInfo(" 
os availDisk: %f(GB)", tsAvailDataDirGB); - uInfo(" os totalMemory: %d(MB)", tsTotalMemoryMB); uInfo("=================================="); } From 106bbc7e63cc047ca81e0699c7bb482312f45615 Mon Sep 17 00:00:00 2001 From: tomchon Date: Tue, 22 Jun 2021 18:42:31 +0800 Subject: [PATCH 16/37] [TD-4314]: add testcase of nested query with top/bottom function --- tests/pytest/fulltest.sh | 2 ++ tests/pytest/query/nestedQuery/queryInterval.py | 10 ++++++---- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh index c9f68972f9..9373553cba 100755 --- a/tests/pytest/fulltest.sh +++ b/tests/pytest/fulltest.sh @@ -235,6 +235,8 @@ python3 ./test.py -f query/queryTscomputWithNow.py python3 ./test.py -f query/computeErrorinWhere.py python3 ./test.py -f query/queryTsisNull.py python3 ./test.py -f query/subqueryFilter.py +python3 ./test.py -f query/nestedQuery/queryInterval.py +python3 ./test.py -f query/queryStateWindow.py #stream diff --git a/tests/pytest/query/nestedQuery/queryInterval.py b/tests/pytest/query/nestedQuery/queryInterval.py index 3ddf751b7f..11c42c463e 100644 --- a/tests/pytest/query/nestedQuery/queryInterval.py +++ b/tests/pytest/query/nestedQuery/queryInterval.py @@ -83,14 +83,16 @@ class TDTestCase: tdSql.checkData(29, 0, "2020-07-01 05:10:00.000") # subquery and parent query with top and bottom - tdSql.query("select top(avg_val,2) from(select avg(value) as avg_val,num from st where loc!='beijing0' group by num);") - tdSql.checkData(0, 1, 115) - tdSql.query("select bottom(avg_val,3) from(select avg(value) as avg_val,num from st where loc!='beijing0' group by num);") - tdSql.checkData(0, 1, 125) + tdSql.query("select top(avg_val,2) from(select avg(value) as avg_val,num from st where loc!='beijing0' group by num) order by avg_val desc;") + tdSql.checkData(0, 1, 117) + tdSql.query("select bottom(avg_val,3) from(select avg(value) as avg_val,num from st where loc!='beijing0' group by num) order by avg_val asc;") + tdSql.checkData(0, 1, 111) # tdSql.query("select top(avg_val,2) from(select avg(value) as avg_val from st where loc='beijing1' interval(8m) sliding(3m));") + tdSql.checkData(0, 1, 120) + # clear env testcaseFilename = os.path.split(__file__)[-1] os.system("rm -rf ./insert_res.txt") os.system("rm -rf wal/%s.sql" % testcaseFilename ) From f1de963dc54cf9f961c57fb20e49914e325f497b Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Tue, 22 Jun 2021 18:49:20 +0800 Subject: [PATCH 17/37] [TD-4823]: taosdemo getTableName return empty. (#6570) * [TD-4823]: taosdemo getTableName return empty. * fix typo. 
--- src/kit/taosdemo/taosdemo.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 4202a833ea..6b12e66cb9 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -6000,6 +6000,12 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) { verbosePrint("%s() LN%d: tid=%d seq=%"PRId64" tableName=%s\n", __func__, __LINE__, pThreadInfo->threadID, tableSeq, tableName); + if (0 == strlen(tableName)) { + errorPrint("[%d] %s() LN%d, getTableName return null\n", + pThreadInfo->threadID, __func__, __LINE__); + free(pThreadInfo->buffer); + return NULL; + } int64_t remainderBufLen = maxSqlLen; char *pstr = pThreadInfo->buffer; From 20abb07e3f3da32fe9ad6954f4ff50c68e8d694b Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Tue, 22 Jun 2021 20:40:09 +0800 Subject: [PATCH 18/37] fix compile errors --- src/client/tests/timeParseTest.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/client/tests/timeParseTest.cpp b/src/client/tests/timeParseTest.cpp index ba06a6b9aa..3cc6d541e0 100644 --- a/src/client/tests/timeParseTest.cpp +++ b/src/client/tests/timeParseTest.cpp @@ -98,7 +98,7 @@ TEST(testCase, parse_time) { taosParseTime(t41, &time, strlen(t41), TSDB_TIME_PRECISION_MILLI, 0); EXPECT_EQ(time, 852048000999); - int64_t k = timezone; + // int64_t k = timezone; char t42[] = "1997-1-1T0:0:0.999999999Z"; taosParseTime(t42, &time, strlen(t42), TSDB_TIME_PRECISION_MILLI, 0); EXPECT_EQ(time, 852048000999 - timezone * MILLISECOND_PER_SECOND); @@ -163,7 +163,7 @@ TEST(testCase, parse_time) { taosParseTime(t13, &time, strlen(t13), TSDB_TIME_PRECISION_MILLI, 0); EXPECT_EQ(time, -28800 * MILLISECOND_PER_SECOND); - char* t = "2021-01-08T02:11:40.000+00:00"; + char t[] = "2021-01-08T02:11:40.000+00:00"; taosParseTime(t, &time, strlen(t), TSDB_TIME_PRECISION_MILLI, 0); printf("%ld\n", time); } From 2763345e778c8807879c9a368bd81f3a8e832463 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Tue, 22 Jun 2021 21:48:01 +0800 Subject: [PATCH 19/37] [TD-4838]: taosdump binary length bug. 
(#6580) --- src/kit/taosdump/taosdump.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/src/kit/taosdump/taosdump.c b/src/kit/taosdump/taosdump.c index f80ac069a0..8525496b0e 100644 --- a/src/kit/taosdump/taosdump.c +++ b/src/kit/taosdump/taosdump.c @@ -1858,13 +1858,13 @@ int taosDumpTableData(FILE *fp, char *tbname, struct arguments *arguments, TAOS* converStringToReadable((char *)row[col], length[col], tbuf, COMMAND_SIZE); //pstr = stpcpy(pstr, tbuf); //*(pstr++) = '\''; - pstr += sprintf(pstr + curr_sqlstr_len, "\'%s\'", tbuf); + curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "\'%s\'", tbuf); break; } case TSDB_DATA_TYPE_NCHAR: { char tbuf[COMMAND_SIZE] = {0}; convertNCharToReadable((char *)row[col], length[col], tbuf, COMMAND_SIZE); - pstr += sprintf(pstr + curr_sqlstr_len, "\'%s\'", tbuf); + curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "\'%s\'", tbuf); break; } case TSDB_DATA_TYPE_TIMESTAMP: @@ -1897,7 +1897,8 @@ int taosDumpTableData(FILE *fp, char *tbname, struct arguments *arguments, TAOS* total_sqlstr_len += curr_sqlstr_len; - if ((count >= arguments->data_batch) || (sql_buf_len - total_sqlstr_len < TSDB_MAX_BYTES_PER_ROW)) { + if ((count >= arguments->data_batch) + || (sql_buf_len - total_sqlstr_len < TSDB_MAX_BYTES_PER_ROW)) { fprintf(fp, ";\n"); count = 0; } //else { @@ -1905,6 +1906,8 @@ int taosDumpTableData(FILE *fp, char *tbname, struct arguments *arguments, TAOS* //} } + printf("total_sqlstr_len: %d\n", total_sqlstr_len); + fprintf(fp, "\n"); atomic_add_fetch_64(&totalDumpOutRows, totalRows); From 6f2f18f0aac7d818d2bb89ff472520d2b52d1a07 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Wed, 23 Jun 2021 09:57:57 +0800 Subject: [PATCH 20/37] remove compile options in test script --- src/query/tests/astTest.cpp | 1 + src/query/tests/histogramTest.cpp | 2 ++ src/query/tests/patternMatchTest.cpp | 2 ++ src/query/tests/percentileTest.cpp | 2 ++ src/query/tests/resultBufferTest.cpp | 2 ++ src/query/tests/tsBufTest.cpp | 2 ++ src/query/tests/unitTest.cpp | 2 +- 7 files changed, 12 insertions(+), 1 deletion(-) diff --git a/src/query/tests/astTest.cpp b/src/query/tests/astTest.cpp index ce7b2f94a1..7bd1c0bf8e 100644 --- a/src/query/tests/astTest.cpp +++ b/src/query/tests/astTest.cpp @@ -10,6 +10,7 @@ #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wwrite-strings" +#pragma GCC diagnostic ignored "-Werror=unused-function" typedef struct ResultObj { int32_t numOfResult; diff --git a/src/query/tests/histogramTest.cpp b/src/query/tests/histogramTest.cpp index 3088d6f807..44a31f4241 100644 --- a/src/query/tests/histogramTest.cpp +++ b/src/query/tests/histogramTest.cpp @@ -5,6 +5,8 @@ #include "taos.h" #include "qHistogram.h" + +#pragma GCC diagnostic ignored "-Werror=unused-function" namespace { void doHistogramAddTest() { SHistogramInfo* pHisto = NULL; diff --git a/src/query/tests/patternMatchTest.cpp b/src/query/tests/patternMatchTest.cpp index f3e0d3e119..cd242afc84 100644 --- a/src/query/tests/patternMatchTest.cpp +++ b/src/query/tests/patternMatchTest.cpp @@ -6,6 +6,8 @@ #include "qAggMain.h" #include "tcompare.h" +#pragma GCC diagnostic ignored "-Werror=unused-function" + TEST(testCase, patternMatchTest) { SPatternCompareInfo info = PATTERN_COMPARE_INFO_INITIALIZER; diff --git a/src/query/tests/percentileTest.cpp b/src/query/tests/percentileTest.cpp index 104bfb3c06..952129c8e7 100644 --- a/src/query/tests/percentileTest.cpp +++ b/src/query/tests/percentileTest.cpp @@ -7,6 +7,8 @@ #include "qPercentile.h" +#pragma GCC diagnostic 
ignored "-Werror=unused-function" + namespace { tMemBucket *createBigIntDataBucket(int32_t start, int32_t end) { tMemBucket *pBucket = tMemBucketCreate(sizeof(int64_t), TSDB_DATA_TYPE_BIGINT, start, end); diff --git a/src/query/tests/resultBufferTest.cpp b/src/query/tests/resultBufferTest.cpp index 491d75ccb9..052e654066 100644 --- a/src/query/tests/resultBufferTest.cpp +++ b/src/query/tests/resultBufferTest.cpp @@ -6,6 +6,8 @@ #include "taos.h" #include "tsdb.h" +#pragma GCC diagnostic ignored "-Werror=unused-function" + namespace { // simple test void simpleTest() { diff --git a/src/query/tests/tsBufTest.cpp b/src/query/tests/tsBufTest.cpp index dd7f03a494..30faed1bc2 100644 --- a/src/query/tests/tsBufTest.cpp +++ b/src/query/tests/tsBufTest.cpp @@ -9,6 +9,8 @@ #include "ttoken.h" #include "tutil.h" +#pragma GCC diagnostic ignored "-Werror=unused-function" + namespace { /** * diff --git a/src/query/tests/unitTest.cpp b/src/query/tests/unitTest.cpp index d2b058cf7c..75153a76ae 100644 --- a/src/query/tests/unitTest.cpp +++ b/src/query/tests/unitTest.cpp @@ -13,7 +13,7 @@ #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wwrite-strings" - +#pragma GCC diagnostic ignored "-Werror=unused-function" namespace { int32_t testValidateName(char* name) { SStrToken token = {0}; From 1583f180d4dc570f07a33c07da744651249e0eb9 Mon Sep 17 00:00:00 2001 From: bryanchang0603 Date: Wed, 23 Jun 2021 15:09:59 +0800 Subject: [PATCH 21/37] pytest utils add taosdemoCfg.py --- tests/pytest/util/taosdemoCfg.py | 450 +++++++++++++++++++++++++++++++ 1 file changed, 450 insertions(+) create mode 100644 tests/pytest/util/taosdemoCfg.py diff --git a/tests/pytest/util/taosdemoCfg.py b/tests/pytest/util/taosdemoCfg.py new file mode 100644 index 0000000000..5071e915a5 --- /dev/null +++ b/tests/pytest/util/taosdemoCfg.py @@ -0,0 +1,450 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import os +import time +import datetime +import inspect +import psutil +import shutil +import json +from util.log import * +from multiprocessing import cpu_count + + +# TODO: fully test the function. Handle exceptions. 
+# Handle json format not accepted by taosdemo +class TDTaosdemoCfg: + def __init__(self): + self.insert_cfg = { + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": cpu_count(), + "thread_count_create_tbl": cpu_count(), + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "num_of_records_per_req": 32766, + "max_sql_len": 32766, + "databases": None + } + + self.db = { + "name": 'db', + "drop": 'yes', + "replica": 1, + "days": 10, + "cache": 16, + "blocks": 6, + "precision": "ms", + "keep": 3650, + "minRows": 100, + "maxRows": 4096, + "comp": 2, + "walLevel": 1, + "cachelast": 0, + "quorum": 1, + "fsync": 3000, + "update": 0 + } + + self.query_cfg = { + "filetype": "query", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "confirm_parameter_prompt": "no", + "databases": "db", + "query_times": 2, + "query_mode": "taosc", + "specified_table_query": None, + "super_table_query": None + } + + self.table_query = { + "query_interval": 1, + "concurrent": 3, + "sqls": None + } + + self.stable_query = { + "stblname": "stb", + "query_interval": 1, + "threads": 3, + "sqls": None + } + + self.sub_cfg = { + "filetype": "subscribe", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "databases": "db", + "confirm_parameter_prompt": "no", + "specified_table_query": None, + "super_table_query": None + } + + self.table_sub = { + "concurrent": 1, + "mode": "sync", + "interval": 10000, + "restart": "yes", + "keepProgress": "yes", + "sqls": None + } + + self.stable_sub = { + "stblname": "stb", + "threads": 1, + "mode": "sync", + "interval": 10000, + "restart": "yes", + "keepProgress": "yes", + "sqls": None + } + + self.stbs = [] + self.stb_template = { + "name": "stb", + "child_table_exists": "no", + "childtable_count": 100, + "childtable_prefix": "stb_", + "auto_create_table": "no", + "batch_create_tbl_num": 5, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 100, + "childtable_limit": 10, + "childtable_offset": 0, + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 32766, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT", "count": 1}], + "tags": [{"type": "BIGINT", "count": 1}] + } + + self.tb_query_sql = [] + self.tb_query_sql_template = { + "sql": "select last_row(*) from stb_0 ", + "result": "temp/query_res0.txt" + } + + self.stb_query_sql = [] + self.stb_query_sql_template = { + "sql": "select last_row(ts) from xxxx", + "result": "temp/query_res2.txt" + } + + self.tb_sub_sql = [] + self.tb_sub_sql_template = { + "sql": "select * from stb_0 ;", + "result": "temp/subscribe_res0.txt" + } + + self.stb_sub_sql = [] + self.stb_sub_sql_template = { + "sql": "select * from xxxx where ts > '2021-02-25 11:35:00.000' ;", + "result": "temp/subscribe_res1.txt" + } + + # The following functions are import functions for different dicts and lists + # except import_sql, all other import functions will a dict and overwrite the origional dict + # dict_in: the dict used to overwrite the target + def import_insert_cfg(self, dict_in): + self.insert_cfg = dict_in + + def import_db(self, dict_in): + self.db = dict_in + + def import_stbs(self, dict_in): + self.stbs = 
dict_in + + def import_query_cfg(self, dict_in): + self.query_cfg = dict_in + + def import_table_query(self, dict_in): + self.table_query = dict_in + + def import_stable_query(self, dict_in): + self.stable_query = dict_in + + def import_sub_cfg(self, dict_in): + self.sub_cfg = dict_in + + def import_table_sub(self, dict_in): + self.table_sub = dict_in + + def import_stable_sub(self, dict_in): + self.stable_sub = dict_in + + def import_sql(self, Sql_in, mode): + """used for importing the sql later used + + Args: + Sql_in (dict): the imported sql dict + mode (str): the sql storing location within TDTaosdemoCfg + format: 'fileType_tableType' + fileType: query, sub + tableType: table, stable + """ + if mode == 'query_table': + self.tb_query_sql = Sql_in + elif mode == 'query_stable': + self.stb_query_sql = Sql_in + elif mode == 'sub_table': + self.tb_sub_sql = Sql_in + elif mode == 'sub_stable': + self.stb_sub_sql = Sql_in + # import functions end + + # The following functions are alter functions for different dicts + # Args: + # key: the key that is going to be modified + # value: the value of the key that is going to be modified + # if key = 'databases' | "specified_table_query" | "super_table_query"|"sqls" + # value will not be used + + def alter_insert_cfg(self, key, value): + + if key == 'databases': + self.insert_cfg[key] = [ + { + 'dbinfo': self.db, + 'super_tables': self.stbs + } + ] + else: + self.insert_cfg[key] = value + + def alter_db(self, key, value): + self.db[key] = value + + def alter_query_tb(self, key, value): + if key == "sqls": + self.table_query[key] = self.tb_query_sql + else: + self.table_query[key] = value + + def alter_query_stb(self, key, value): + if key == "sqls": + self.stable_query[key] = self.stb_query_sql + else: + self.stable_query[key] = value + + def alter_query_cfg(self, key, value): + if key == "specified_table_query": + self.query_cfg["specified_table_query"] = self.table_query + elif key == "super_table_query": + self.query_cfg["super_table_query"] = self.stable_query + else: + self.table_query[key] = value + + def alter_sub_cfg(self, key, value): + if key == "specified_table_query": + self.sub_cfg["specified_table_query"] = self.table_sub + elif key == "super_table_query": + self.sub_cfg["super_table_query"] = self.stable_sub + else: + self.table_query[key] = value + + def alter_sub_stb(self, key, value): + if key == "sqls": + self.stable_sub[key] = self.stb_sub_sql + else: + self.stable_sub[key] = value + + def alter_sub_tb(self, key, value): + if key == "sqls": + self.table_sub[key] = self.tb_sub_sql + else: + self.table_sub[key] = value + # alter function ends + + # the following functions are for handling the sql lists + def append_sql_stb(self, target, value): + """for appending sql dict into specific sql list + + Args: + target (str): the target append list + format: 'fileType_tableType' + fileType: query, sub + tableType: table, stable + unique: 'insert_stbs' + value (dict): the sql dict going to be appended + """ + if target == 'insert_stbs': + self.stbs.append(value) + elif target == 'query_table': + self.tb_query_sql.append(value) + elif target == 'query_stable': + self.stb_query_sql.append(value) + elif target == 'sub_table': + self.tb_sub_sql.append(value) + elif target == 'sub_stable': + self.stb_sub_sql.append(value) + + def pop_sql_stb(self, target, index): + """for poping a sql dict from specific sql list + + Args: + target (str): the target append list + format: 'fileType_tableType' + fileType: query, sub + tableType: table, stable + 
unique: 'insert_stbs' + index (int): the sql dict that is going to be popped + """ + if target == 'insert_stbs': + self.stbs.pop(index) + elif target == 'query_table': + self.tb_query_sql.pop(index) + elif target == 'query_stable': + self.stb_query_sql.pop(index) + elif target == 'sub_table': + self.tb_sub_sql.pop(index) + elif target == 'sub_stable': + self.stb_sub_sql.pop(index) + # sql list modification function end + + # The following functions are get functions for different dicts + def get_db(self): + return self.db + + def get_stb(self): + return self.stbs + + def get_insert_cfg(self): + return self.insert_cfg + + def get_query_cfg(self): + return self.query_cfg + + def get_tb_query(self): + return self.table_query + + def get_stb_query(self): + return self.stable_query + + def get_sub_cfg(self): + return self.sub_cfg + + def get_tb_sub(self): + return self.table_sub + + def get_stb_sub(self): + return self.stable_sub + + def get_sql(self, target): + """general get function for all sql lists + + Args: + target (str): the sql list want to get + format: 'fileType_tableType' + fileType: query, sub + tableType: table, stable + unique: 'insert_stbs' + """ + if target == 'query_table': + return self.tb_query_sql + elif target == 'query_stable': + return self.stb_query_sql + elif target == 'sub_table': + return self.tb_sub_sql + elif target == 'sub_stable': + return self.stb_sub_sql + + def get_template(self, target): + """general get function for the default sql template + + Args: + target (str): the sql list want to get + format: 'fileType_tableType' + fileType: query, sub + tableType: table, stable + unique: 'insert_stbs' + """ + if target == 'insert_stbs': + return self.stb_template + elif target == 'query_table': + return self.tb_query_sql_template + elif target == 'query_stable': + return self.stb_query_sql_template + elif target == 'sub_table': + return self.tb_sub_sql_template + elif target == 'sub_stable': + return self.stb_sub_sql_template + else: + print(f'did not find {target}') + + # the folloing are the file generation functions + """defalut document: + generator functio for generating taosdemo json file + will assemble the dicts and dump the final json + + Args: + pathName (str): the directory wanting the json file to be + fileName (str): the name suffix of the json file + Returns: + str: [pathName]/[filetype]_[filName].json + """ + + def generate_insert_cfg(self, pathName, fileName): + cfgFileName = f'{pathName}/insert_{fileName}.json' + self.alter_insert_cfg('databases', None) + with open(cfgFileName, 'w') as file: + json.dump(self.insert_cfg, file) + return cfgFileName + + def generate_query_cfg(self, pathName, fileName): + cfgFileName = f'{pathName}/query_{fileName}.json' + self.alter_query_tb('sqls', None) + self.alter_query_stb('sqls', None) + self.alter_query_cfg('specified_table_query', None) + self.alter_query_cfg('super_table_query', None) + with open(cfgFileName, 'w') as file: + json.dump(self.query_cfg, file) + return cfgFileName + + def generate_subscribe_cfg(self, pathName, fileName): + cfgFileName = f'{pathName}/subscribe_{fileName}.json' + self.alter_sub_tb('sqls', None) + self.alter_sub_stb('sqls', None) + self.alter_sub_cfg('specified_table_query', None) + self.alter_sub_cfg('super_table_query', None) + with open(cfgFileName, 'w') as file: + json.dump(self.sub_cfg, file) + return cfgFileName + # file generation functions ends + + def drop_cfg_file(self, fileName): + os.remove(f'{fileName}') + + +taosdemoCfg = TDTaosdemoCfg() From 
f0091359b2e74418c78891ddaf7c26f0d4fd9263 Mon Sep 17 00:00:00 2001 From: cpwu Date: Wed, 23 Jun 2021 15:32:36 +0800 Subject: [PATCH 22/37] [TD-3825] to fix bug which make with asan caused --- src/client/src/tscServer.c | 4 +++- src/kit/taosdemo/taosdemo.c | 2 +- src/mnode/src/mnodeTable.c | 4 +++- 3 files changed, 7 insertions(+), 3 deletions(-) diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c index 8c5e99474d..fa277cd9c3 100644 --- a/src/client/src/tscServer.c +++ b/src/client/src/tscServer.c @@ -1495,7 +1495,9 @@ int tscBuildAlterTableMsg(SSqlObj *pSql, SSqlInfo *pInfo) { pMsg = (char *)pSchema; pAlterTableMsg->tagValLen = htonl(pAlterInfo->tagData.dataLen); - memcpy(pMsg, pAlterInfo->tagData.data, pAlterInfo->tagData.dataLen); + if (pAlterInfo->tagData.dataLen > 0) { + memcpy(pMsg, pAlterInfo->tagData.data, pAlterInfo->tagData.dataLen); + } pMsg += pAlterInfo->tagData.dataLen; msgLen = (int32_t)(pMsg - (char*)pAlterTableMsg); diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 33ee2a9bc2..40168e5e97 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -90,7 +90,7 @@ enum TEST_MODE { #define OPT_ABORT 1 /* –abort */ #define STRING_LEN 60000 #define MAX_PREPARED_RAND 1000000 -#define MAX_FILE_NAME_LEN 256 +#define MAX_FILE_NAME_LEN 128 #define MAX_SAMPLES_ONCE_FROM_FILE 10000 #define MAX_NUM_DATATYPE 10 diff --git a/src/mnode/src/mnodeTable.c b/src/mnode/src/mnodeTable.c index 032c6ee94b..ba4de89cde 100644 --- a/src/mnode/src/mnodeTable.c +++ b/src/mnode/src/mnodeTable.c @@ -1068,7 +1068,9 @@ static int32_t mnodeProcessCreateSuperTableMsg(SMnodeMsg *pMsg) { pStable->info.tableId = strdup(pCreate->tableName); pStable->info.type = TSDB_SUPER_TABLE; pStable->createdTime = taosGetTimestampMs(); - pStable->uid = (us << 24) + ((sdbGetVersion() & ((1ul << 16) - 1ul)) << 8) + (taosRand() & ((1ul << 8) - 1ul)); + int64_t x = (us&0x000000FFFFFFFFFF); + x = x<<24; + pStable->uid = x + ((sdbGetVersion() & ((1ul << 16) - 1ul)) << 8) + (taosRand() & ((1ul << 8) - 1ul)); pStable->sversion = 0; pStable->tversion = 0; pStable->numOfColumns = numOfColumns; From e1505aceb160b174df0b31d1110b0a1b55a0f707 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Wed, 23 Jun 2021 15:51:47 +0800 Subject: [PATCH 23/37] [TD-4874]: reset customScore to 0 --- src/mnode/src/mnodeDnode.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/mnode/src/mnodeDnode.c b/src/mnode/src/mnodeDnode.c index 70a63517ca..ce21af49c2 100644 --- a/src/mnode/src/mnodeDnode.c +++ b/src/mnode/src/mnodeDnode.c @@ -101,6 +101,8 @@ static int32_t mnodeDnodeActionInsert(SSdbRow *pRow) { pDnode->offlineReason = TAOS_DN_OFF_STATUS_NOT_RECEIVED; } + pDnode->customScore = 0; + dnodeUpdateEp(pDnode->dnodeId, pDnode->dnodeEp, pDnode->dnodeFqdn, &pDnode->dnodePort); mnodeUpdateDnodeEps(); From ce3c6d7a61ec0e4223caf2df59cfe53262cbac70 Mon Sep 17 00:00:00 2001 From: cpwu Date: Wed, 23 Jun 2021 16:58:01 +0800 Subject: [PATCH 24/37] [TD-3825] to fix bug --- tests/tsim/src/simMain.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/tsim/src/simMain.c b/tests/tsim/src/simMain.c index 6a9d96bc3b..7d74c62c7d 100644 --- a/tests/tsim/src/simMain.c +++ b/tests/tsim/src/simMain.c @@ -35,7 +35,7 @@ int32_t main(int32_t argc, char *argv[]) { for (int32_t i = 1; i < argc; ++i) { if (strcmp(argv[i], "-c") == 0 && i < argc - 1) { - tstrncpy(configDir, argv[++i], MAX_FILE_NAME_LEN); + tstrncpy(configDir, argv[++i], 128); } else if (strcmp(argv[i], "-f") == 0 && i < argc - 1) { 
strcpy(scriptFile, argv[++i]); } else if (strcmp(argv[i], "-a") == 0) { @@ -75,4 +75,4 @@ int32_t main(int32_t argc, char *argv[]) { simInfo("execute result %d", ret); return ret; -} \ No newline at end of file +} From cbc60231c549a2e40ff231110791f599c1fe4e8f Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Wed, 23 Jun 2021 21:48:47 +0800 Subject: [PATCH 25/37] Hotfix/sangshuduo/td 3973 use jemalloc for master (#6598) * [TD-3973]: add jemalloc as submodule. * [TD-3973]: add jemalloc as submodule. * [TD-3973]: use jemalloc. build works as following instructions: cmake .. -DJEMALLOC_ENABLED=true make * fix jemalloc at tag 5.2.1 * fix conflicts * make install works. * fix conflicts. * release script works. Co-authored-by: Shuduo Sang --- .gitmodules | 3 + cmake/define.inc | 5 + deps/CMakeLists.txt | 13 ++ deps/jemalloc | 1 + packaging/deb/makedeb.sh | 50 +++++- packaging/release.sh | 44 +++-- packaging/rpm/makerpm.sh | 6 +- packaging/rpm/tdengine.spec | 81 +++++++-- packaging/tools/install.sh | 309 ++++++++++++++++++-------------- packaging/tools/make_install.sh | 113 ++++++++---- packaging/tools/makepkg.sh | 51 +++++- src/dnode/CMakeLists.txt | 9 +- src/kit/shell/CMakeLists.txt | 11 +- src/kit/taosdemo/CMakeLists.txt | 13 +- src/os/inc/osMemory.h | 4 + 15 files changed, 488 insertions(+), 225 deletions(-) create mode 160000 deps/jemalloc diff --git a/.gitmodules b/.gitmodules index 0e65b02221..346f5c0069 100644 --- a/.gitmodules +++ b/.gitmodules @@ -10,3 +10,6 @@ [submodule "tests/examples/rust"] path = tests/examples/rust url = https://github.com/songtianyi/tdengine-rust-bindings.git +[submodule "deps/jemalloc"] + path = deps/jemalloc + url = https://github.com/jemalloc/jemalloc diff --git a/cmake/define.inc b/cmake/define.inc index da80baabbb..69807a8a05 100755 --- a/cmake/define.inc +++ b/cmake/define.inc @@ -59,6 +59,11 @@ IF (TD_LINUX_64) MESSAGE(STATUS "linux64 is defined") SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -gdwarf-2 -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE") ADD_DEFINITIONS(-DUSE_LIBICONV) + + IF (JEMALLOC_ENABLED) + ADD_DEFINITIONS(-DTD_JEMALLOC_ENABLED -I${CMAKE_BINARY_DIR}/build/include -L${CMAKE_BINARY_DIR}/build/lib -Wl,-rpath,${CMAKE_BINARY_DIR}/build/lib -ljemalloc) + ENDIF () + ENDIF () IF (TD_LINUX_32) diff --git a/deps/CMakeLists.txt b/deps/CMakeLists.txt index cfc17442f5..99152c6ce3 100644 --- a/deps/CMakeLists.txt +++ b/deps/CMakeLists.txt @@ -18,3 +18,16 @@ ENDIF () IF (TD_DARWIN AND TD_MQTT) ADD_SUBDIRECTORY(MQTT-C) ENDIF () + +IF (TD_LINUX_64 AND JEMALLOC_ENABLED) + MESSAGE("setup dpes/jemalloc, current source dir:" ${CMAKE_CURRENT_SOURCE_DIR}) + MESSAGE("binary dir:" ${CMAKE_BINARY_DIR}) + include(ExternalProject) + ExternalProject_Add(jemalloc + PREFIX "jemalloc" + SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/jemalloc + BUILD_IN_SOURCE 1 + CONFIGURE_COMMAND ./autogen.sh COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/ + BUILD_COMMAND ${MAKE} + ) +ENDIF () diff --git a/deps/jemalloc b/deps/jemalloc new file mode 160000 index 0000000000..ea6b3e973b --- /dev/null +++ b/deps/jemalloc @@ -0,0 +1 @@ +Subproject commit ea6b3e973b477b8061e0076bb257dbd7f3faa756 diff --git a/packaging/deb/makedeb.sh b/packaging/deb/makedeb.sh index 28be037e6c..e6ddb6d742 100755 --- a/packaging/deb/makedeb.sh +++ b/packaging/deb/makedeb.sh @@ -24,14 +24,14 @@ echo "compile_dir: ${compile_dir}" echo "pkg_dir: ${pkg_dir}" if [ -d ${pkg_dir} ]; then - rm -rf ${pkg_dir} + rm -rf ${pkg_dir} fi mkdir -p ${pkg_dir} cd ${pkg_dir} libfile="libtaos.so.${tdengine_ver}" -# create 
install dir +# create install dir install_home_path="/usr/local/taos" mkdir -p ${pkg_dir}${install_home_path} mkdir -p ${pkg_dir}${install_home_path}/bin @@ -42,7 +42,7 @@ mkdir -p ${pkg_dir}${install_home_path}/examples mkdir -p ${pkg_dir}${install_home_path}/include mkdir -p ${pkg_dir}${install_home_path}/init.d mkdir -p ${pkg_dir}${install_home_path}/script - + cp ${compile_dir}/../packaging/cfg/taos.cfg ${pkg_dir}${install_home_path}/cfg cp ${compile_dir}/../packaging/deb/taosd ${pkg_dir}${install_home_path}/init.d cp ${compile_dir}/../packaging/tools/post.sh ${pkg_dir}${install_home_path}/script @@ -54,7 +54,7 @@ cp ${compile_dir}/build/bin/taosdemo ${pkg_dir}${install_home_pat cp ${compile_dir}/build/bin/taosdump ${pkg_dir}${install_home_path}/bin cp ${compile_dir}/build/bin/taosd ${pkg_dir}${install_home_path}/bin cp ${compile_dir}/build/bin/taos ${pkg_dir}${install_home_path}/bin -cp ${compile_dir}/build/lib/${libfile} ${pkg_dir}${install_home_path}/driver +cp ${compile_dir}/build/lib/${libfile} ${pkg_dir}${install_home_path}/driver cp ${compile_dir}/../src/inc/taos.h ${pkg_dir}${install_home_path}/include cp ${compile_dir}/../src/inc/taoserror.h ${pkg_dir}${install_home_path}/include cp -r ${top_dir}/tests/examples/* ${pkg_dir}${install_home_path}/examples @@ -67,7 +67,41 @@ fi cp -r ${top_dir}/src/connector/python ${pkg_dir}${install_home_path}/connector cp -r ${top_dir}/src/connector/go ${pkg_dir}${install_home_path}/connector cp -r ${top_dir}/src/connector/nodejs ${pkg_dir}${install_home_path}/connector -cp ${compile_dir}/build/lib/taos-jdbcdriver*dist.* ${pkg_dir}${install_home_path}/connector ||: +cp ${compile_dir}/build/lib/taos-jdbcdriver*.* ${pkg_dir}${install_home_path}/connector ||: + +if [ -f ${compile_dir}/build/bin/jemalloc-config ]; then + install_user_local_path="/usr/local" + mkdir -p ${pkg_dir}${install_user_local_path}/{bin,lib,lib/pkgconfig,include/jemalloc,share/doc/jemalloc,share/man/man3} + cp ${compile_dir}/build/bin/jemalloc-config ${pkg_dir}${install_user_local_path}/bin/ + if [ -f ${compile_dir}/build/bin/jemalloc.sh ]; then + cp ${compile_dir}/build/bin/jemalloc.sh ${pkg_dir}${install_user_local_path}/bin/ + fi + if [ -f ${compile_dir}/build/bin/jeprof ]; then + cp ${compile_dir}/build/bin/jeprof ${pkg_dir}${install_user_local_path}/bin/ + fi + if [ -f ${compile_dir}/build/include/jemalloc/jemalloc.h ]; then + cp ${compile_dir}/build/include/jemalloc/jemalloc.h ${pkg_dir}${install_user_local_path}/include/jemalloc/ + fi + if [ -f ${compile_dir}/build/lib/libjemalloc.so.2 ]; then + cp ${compile_dir}/build/lib/libjemalloc.so.2 ${pkg_dir}${install_user_local_path}/lib/ + ln -sf libjemalloc.so.2 ${pkg_dir}${install_user_local_path}/lib/libjemalloc.so + fi + if [ -f ${compile_dir}/build/lib/libjemalloc.a ]; then + cp ${compile_dir}/build/lib/libjemalloc.a ${pkg_dir}${install_user_local_path}/lib/ + fi + if [ -f ${compile_dir}/build/lib/libjemalloc_pic.a ]; then + cp ${compile_dir}/build/lib/libjemalloc_pic.a ${pkg_dir}${install_user_local_path}/lib/ + fi + if [ -f ${compile_dir}/build/lib/pkgconfig/jemalloc.pc ]; then + cp ${compile_dir}/build/lib/pkgconfig/jemalloc.pc ${pkg_dir}${install_user_local_path}/lib/pkgconfig/ + fi + if [ -f ${compile_dir}/build/share/doc/jemalloc/jemalloc.html ]; then + cp ${compile_dir}/build/share/doc/jemalloc/jemalloc.html ${pkg_dir}${install_user_local_path}/share/doc/jemalloc/ + fi + if [ -f ${compile_dir}/build/share/man/man3/jemalloc.3 ]; then + cp ${compile_dir}/build/share/man/man3/jemalloc.3 
${pkg_dir}${install_user_local_path}/share/man/man3/ + fi +fi cp -r ${compile_dir}/../packaging/deb/DEBIAN ${pkg_dir}/ chmod 755 ${pkg_dir}/DEBIAN/* @@ -75,7 +109,7 @@ chmod 755 ${pkg_dir}/DEBIAN/* # modify version of control debver="Version: "$tdengine_ver sed -i "2c$debver" ${pkg_dir}/DEBIAN/control - + #get taos version, then set deb name @@ -90,7 +124,7 @@ fi if [ "$verType" == "beta" ]; then debname=${debname}-${verType}".deb" -elif [ "$verType" == "stable" ]; then +elif [ "$verType" == "stable" ]; then debname=${debname}".deb" else echo "unknow verType, nor stabel or beta" @@ -101,7 +135,7 @@ fi dpkg -b ${pkg_dir} $debname echo "make deb package success!" -cp ${pkg_dir}/*.deb ${output_dir} +cp ${pkg_dir}/*.deb ${output_dir} # clean tmep dir rm -rf ${pkg_dir} diff --git a/packaging/release.sh b/packaging/release.sh index 68f947ccab..f5027246a6 100755 --- a/packaging/release.sh +++ b/packaging/release.sh @@ -5,8 +5,8 @@ set -e #set -x -# releash.sh -v [cluster | edge] -# -c [aarch32 | aarch64 | x64 | x86 | mips64 ...] +# release.sh -v [cluster | edge] +# -c [aarch32 | aarch64 | x64 | x86 | mips64 ...] # -o [Linux | Kylin | Alpine | Raspberrypi | Darwin | Windows | Ningsi60 | Ningsi80 |...] # -V [stable | beta] # -l [full | lite] @@ -22,11 +22,12 @@ cpuType=x64 # [aarch32 | aarch64 | x64 | x86 | mips64 ...] osType=Linux # [Linux | Kylin | Alpine | Raspberrypi | Darwin | Windows | Ningsi60 | Ningsi80 |...] pagMode=full # [full | lite] soMode=dynamic # [static | dynamic] +allocator=glibc # [glibc | jemalloc] dbName=taos # [taos | power] verNumber="" verNumberComp="2.0.0.0" -while getopts "hv:V:c:o:l:s:d:n:m:" arg +while getopts "hv:V:c:o:l:s:d:a:n:m:" arg do case $arg in v) @@ -53,6 +54,10 @@ do #echo "dbName=$OPTARG" dbName=$(echo $OPTARG) ;; + a) + #echo "allocator=$OPTARG" + allocator=$(echo $OPTARG) + ;; n) #echo "verNumber=$OPTARG" verNumber=$(echo $OPTARG) @@ -71,20 +76,21 @@ do echo " -o [Linux | Kylin | Alpine | Raspberrypi | Darwin | Windows | Ningsi60 | Ningsi80 |...] " echo " -V [stable | beta] " echo " -l [full | lite] " + echo " -a [glibc | jemalloc] " echo " -s [static | dynamic] " echo " -d [taos | power] " echo " -n [version number] " echo " -m [compatible version number] " exit 0 ;; - ?) #unknow option + ?) #unknow option echo "unkonw argument" exit 1 ;; esac done -echo "verMode=${verMode} verType=${verType} cpuType=${cpuType} osType=${osType} pagMode=${pagMode} soMode=${soMode} dbName=${dbName} verNumber=${verNumber} verNumberComp=${verNumberComp}" +echo "verMode=${verMode} verType=${verType} cpuType=${cpuType} osType=${osType} pagMode=${pagMode} soMode=${soMode} dbName=${dbName} allocator=${allocator} verNumber=${verNumber} verNumberComp=${verNumberComp}" curr_dir=$(pwd) @@ -118,7 +124,7 @@ function vercomp () { echo 0 exit 0 fi - + local IFS=. local i ver1=($1) ver2=($2) @@ -164,7 +170,7 @@ if [[ "$verMode" == "cluster" ]]; then else gitinfoOfInternal=NULL fi - + cd ${curr_dir} # 2. 
cmake executable file @@ -180,12 +186,18 @@ else fi cd ${compile_dir} +if [[ "$allocator" == "jemalloc" ]]; then + allocator_macro="-DJEMALLOC_ENABLED=true" +else + allocator_macro="" +fi + # check support cpu type if [[ "$cpuType" == "x64" ]] || [[ "$cpuType" == "aarch64" ]] || [[ "$cpuType" == "aarch32" ]] || [[ "$cpuType" == "mips64" ]] ; then if [ "$verMode" != "cluster" ]; then - cmake ../ -DCPUTYPE=${cpuType} -DOSTYPE=${osType} -DSOMODE=${soMode} -DDBNAME=${dbName} -DVERTYPE=${verType} -DVERDATE="${build_time}" -DGITINFO=${gitinfo} -DGITINFOI=${gitinfoOfInternal} -DVERNUMBER=${verNumber} -DVERCOMPATIBLE=${verNumberComp} -DPAGMODE=${pagMode} + cmake ../ -DCPUTYPE=${cpuType} -DOSTYPE=${osType} -DSOMODE=${soMode} -DDBNAME=${dbName} -DVERTYPE=${verType} -DVERDATE="${build_time}" -DGITINFO=${gitinfo} -DGITINFOI=${gitinfoOfInternal} -DVERNUMBER=${verNumber} -DVERCOMPATIBLE=${verNumberComp} -DPAGMODE=${pagMode} ${allocator_macro} else - cmake ../../ -DCPUTYPE=${cpuType} -DOSTYPE=${osType} -DSOMODE=${soMode} -DDBNAME=${dbName} -DVERTYPE=${verType} -DVERDATE="${build_time}" -DGITINFO=${gitinfo} -DGITINFOI=${gitinfoOfInternal} -DVERNUMBER=${verNumber} -DVERCOMPATIBLE=${verNumberComp} + cmake ../../ -DCPUTYPE=${cpuType} -DOSTYPE=${osType} -DSOMODE=${soMode} -DDBNAME=${dbName} -DVERTYPE=${verType} -DVERDATE="${build_time}" -DGITINFO=${gitinfo} -DGITINFOI=${gitinfoOfInternal} -DVERNUMBER=${verNumber} -DVERCOMPATIBLE=${verNumberComp} ${allocator_macro} fi else echo "input cpuType=${cpuType} error!!!" @@ -199,9 +211,9 @@ cd ${curr_dir} # 3. Call the corresponding script for packaging if [ "$osType" != "Darwin" ]; then if [[ "$verMode" != "cluster" ]] && [[ "$cpuType" == "x64" ]] && [[ "$dbName" == "taos" ]]; then - ret='0' + ret='0' command -v dpkg >/dev/null 2>&1 || { ret='1'; } - if [ "$ret" -eq 0 ]; then + if [ "$ret" -eq 0 ]; then echo "====do deb package for the ubuntu system====" output_dir="${top_dir}/debs" if [ -d ${output_dir} ]; then @@ -214,9 +226,9 @@ if [ "$osType" != "Darwin" ]; then echo "==========dpkg command not exist, so not release deb package!!!" fi - ret='0' + ret='0' command -v rpmbuild >/dev/null 2>&1 || { ret='1'; } - if [ "$ret" -eq 0 ]; then + if [ "$ret" -eq 0 ]; then echo "====do rpm package for the centos system====" output_dir="${top_dir}/rpms" if [ -d ${output_dir} ]; then @@ -229,11 +241,11 @@ if [ "$osType" != "Darwin" ]; then echo "==========rpmbuild command not exist, so not release rpm package!!!" 
fi fi - + echo "====do tar.gz package for all systems====" cd ${script_dir}/tools - - if [[ "$dbName" == "taos" ]]; then + + if [[ "$dbName" == "taos" ]]; then ${csudo} ./makepkg.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${csudo} ./makeclient.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${csudo} ./makearbi.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} diff --git a/packaging/rpm/makerpm.sh b/packaging/rpm/makerpm.sh index 678e75c500..7c3272f8d0 100755 --- a/packaging/rpm/makerpm.sh +++ b/packaging/rpm/makerpm.sh @@ -1,6 +1,6 @@ #!/bin/bash # -# Generate rpm package for centos +# Generate rpm package for centos set -e # set -x @@ -60,7 +60,7 @@ ${csudo} rpmbuild --define="_version ${tdengine_ver}" --define="_topdir ${pkg_di # copy rpm package to output_dir, and modify package name, then clean temp dir #${csudo} cp -rf RPMS/* ${output_dir} -cp_rpm_package ${pkg_dir}/RPMS +cp_rpm_package ${pkg_dir}/RPMS if [ "$verMode" == "cluster" ]; then @@ -74,7 +74,7 @@ fi if [ "$verType" == "beta" ]; then rpmname=${rpmname}-${verType}".rpm" -elif [ "$verType" == "stable" ]; then +elif [ "$verType" == "stable" ]; then rpmname=${rpmname}".rpm" else echo "unknow verType, nor stabel or beta" diff --git a/packaging/rpm/tdengine.spec b/packaging/rpm/tdengine.spec index 9910e20bfe..8a870286ab 100644 --- a/packaging/rpm/tdengine.spec +++ b/packaging/rpm/tdengine.spec @@ -1,4 +1,5 @@ %define homepath /usr/local/taos +%define userlocalpath /usr/local %define cfg_install_dir /etc/taos %define __strip /bin/true @@ -12,22 +13,22 @@ URL: www.taosdata.com AutoReqProv: no #BuildRoot: %_topdir/BUILDROOT -BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root +BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root #Prefix: /usr/local/taos -#BuildRequires: -#Requires: +#BuildRequires: +#Requires: %description Big Data Platform Designed and Optimized for IoT -#"prep" Nothing needs to be done +#"prep" Nothing needs to be done #%prep #%setup -q -#%setup -T +#%setup -T -#"build" Nothing needs to be done +#"build" Nothing needs to be done #%build #%configure #make %{?_smp_mflags} @@ -75,9 +76,53 @@ fi cp -r %{_compiledir}/../src/connector/python %{buildroot}%{homepath}/connector cp -r %{_compiledir}/../src/connector/go %{buildroot}%{homepath}/connector cp -r %{_compiledir}/../src/connector/nodejs %{buildroot}%{homepath}/connector -cp %{_compiledir}/build/lib/taos-jdbcdriver*dist.* %{buildroot}%{homepath}/connector ||: +cp %{_compiledir}/build/lib/taos-jdbcdriver*.* %{buildroot}%{homepath}/connector ||: cp -r %{_compiledir}/../tests/examples/* %{buildroot}%{homepath}/examples + +if [ -f %{_compiledir}/build/bin/jemalloc-config ]; then + mkdir -p %{buildroot}%{userlocalpath}/bin + mkdir -p %{buildroot}%{userlocalpath}/lib + mkdir -p %{buildroot}%{userlocalpath}/lib/pkgconfig + mkdir -p %{buildroot}%{userlocalpath}/include + mkdir -p %{buildroot}%{userlocalpath}/include/jemalloc + mkdir -p %{buildroot}%{userlocalpath}/share + mkdir -p %{buildroot}%{userlocalpath}/share/doc + mkdir -p %{buildroot}%{userlocalpath}/share/doc/jemalloc + mkdir -p %{buildroot}%{userlocalpath}/share/man + mkdir -p %{buildroot}%{userlocalpath}/share/man/man3 + + cp %{_compiledir}/build/bin/jemalloc-config %{buildroot}%{userlocalpath}/bin/ + if [ -f %{_compiledir}/build/bin/jemalloc.sh ]; then + cp %{_compiledir}/build/bin/jemalloc.sh %{buildroot}%{userlocalpath}/bin/ + fi + if [ -f 
%{_compiledir}/build/bin/jeprof ]; then + cp %{_compiledir}/build/bin/jeprof %{buildroot}%{userlocalpath}/bin/ + fi + if [ -f %{_compiledir}/build/include/jemalloc/jemalloc.h ]; then + cp %{_compiledir}/build/include/jemalloc/jemalloc.h %{buildroot}%{userlocalpath}/include/jemalloc/ + fi + if [ -f %{_compiledir}/build/lib/libjemalloc.so.2 ]; then + cp %{_compiledir}/build/lib/libjemalloc.so.2 %{buildroot}%{userlocalpath}/lib/ + ln -sf libjemalloc.so.2 %{buildroot}%{userlocalpath}/lib/libjemalloc.so + fi + if [ -f %{_compiledir}/build/lib/libjemalloc.a ]; then + cp %{_compiledir}/build/lib/libjemalloc.a %{buildroot}%{userlocalpath}/lib/ + fi + if [ -f %{_compiledir}/build/lib/libjemalloc_pic.a ]; then + cp %{_compiledir}/build/lib/libjemalloc_pic.a %{buildroot}%{userlocalpath}/lib/ + fi + if [ -f %{_compiledir}/build/lib/pkgconfig/jemalloc.pc ]; then + cp %{_compiledir}/build/lib/pkgconfig/jemalloc.pc %{buildroot}%{userlocalpath}/lib/pkgconfig/ + fi + if [ -f %{_compiledir}/build/share/doc/jemalloc/jemalloc.html ]; then + cp %{_compiledir}/build/share/doc/jemalloc/jemalloc.html %{buildroot}%{userlocalpath}/share/doc/jemalloc/ + fi + if [ -f %{_compiledir}/build/share/man/man3/jemalloc.3 ]; then + cp %{_compiledir}/build/share/man/man3/jemalloc.3 %{buildroot}%{userlocalpath}/share/man/man3/ + fi +fi + #Scripts executed before installation %pre csudo="" @@ -103,7 +148,7 @@ fi # if taos.cfg already softlink, remove it if [ -f %{cfg_install_dir}/taos.cfg ]; then ${csudo} rm -f %{homepath}/cfg/taos.cfg || : -fi +fi # there can not libtaos.so*, otherwise ln -s error ${csudo} rm -f %{homepath}/driver/libtaos* || : @@ -116,18 +161,18 @@ if command -v sudo > /dev/null; then fi cd %{homepath}/script ${csudo} ./post.sh - + # Scripts executed before uninstall %preun csudo="" if command -v sudo > /dev/null; then csudo="sudo" fi -# only remove package to call preun.sh, not but update(2) +# only remove package to call preun.sh, not but update(2) if [ $1 -eq 0 ];then #cd %{homepath}/script #${csudo} ./preun.sh - + if [ -f %{homepath}/script/preun.sh ]; then cd %{homepath}/script ${csudo} ./preun.sh @@ -135,7 +180,7 @@ if [ $1 -eq 0 ];then bin_link_dir="/usr/bin" lib_link_dir="/usr/lib" inc_link_dir="/usr/include" - + data_link_dir="/usr/local/taos/data" log_link_dir="/usr/local/taos/log" cfg_link_dir="/usr/local/taos/cfg" @@ -149,20 +194,20 @@ if [ $1 -eq 0 ];then ${csudo} rm -f ${inc_link_dir}/taos.h || : ${csudo} rm -f ${inc_link_dir}/taoserror.h || : ${csudo} rm -f ${lib_link_dir}/libtaos.* || : - + ${csudo} rm -f ${log_link_dir} || : ${csudo} rm -f ${data_link_dir} || : - + pid=$(ps -ef | grep "taosd" | grep -v "grep" | awk '{print $2}') if [ -n "$pid" ]; then ${csudo} kill -9 $pid || : - fi - fi + fi + fi fi - + # Scripts executed after uninstall %postun - + # clean build dir %clean csudo="" diff --git a/packaging/tools/install.sh b/packaging/tools/install.sh index 178a248cfe..325ac81053 100755 --- a/packaging/tools/install.sh +++ b/packaging/tools/install.sh @@ -59,11 +59,11 @@ initd_mod=0 service_mod=2 if pidof systemd &> /dev/null; then service_mod=0 -elif $(which service &> /dev/null); then +elif $(which service &> /dev/null); then service_mod=1 - service_config_dir="/etc/init.d" + service_config_dir="/etc/init.d" if $(which chkconfig &> /dev/null); then - initd_mod=1 + initd_mod=1 elif $(which insserv &> /dev/null); then initd_mod=2 elif $(which update-rc.d &> /dev/null); then @@ -71,7 +71,7 @@ elif $(which service &> /dev/null); then else service_mod=2 fi -else +else service_mod=2 fi @@ -103,7 
+103,7 @@ elif echo $osinfo | grep -qwi "fedora" ; then os_type=2 else echo " osinfo: ${osinfo}" - echo " This is an officially unverified linux system," + echo " This is an officially unverified linux system," echo " if there are any problems with the installation and operation, " echo " please feel free to contact taosdata.com for support." os_type=1 @@ -138,7 +138,7 @@ do echo "Usage: `basename $0` -v [server | client] -e [yes | no]" exit 0 ;; - ?) #unknow option + ?) #unknow option echo "unkonw argument" exit 1 ;; @@ -157,9 +157,9 @@ function kill_process() { function install_main_path() { #create install main dir and all sub dir ${csudo} rm -rf ${install_main_dir} || : - ${csudo} mkdir -p ${install_main_dir} + ${csudo} mkdir -p ${install_main_dir} ${csudo} mkdir -p ${install_main_dir}/cfg - ${csudo} mkdir -p ${install_main_dir}/bin + ${csudo} mkdir -p ${install_main_dir}/bin ${csudo} mkdir -p ${install_main_dir}/connector ${csudo} mkdir -p ${install_main_dir}/driver ${csudo} mkdir -p ${install_main_dir}/examples @@ -168,10 +168,10 @@ function install_main_path() { if [ "$verMode" == "cluster" ]; then ${csudo} mkdir -p ${nginx_dir} fi - + if [[ -e ${script_dir}/email ]]; then - ${csudo} cp ${script_dir}/email ${install_main_dir}/ ||: - fi + ${csudo} cp ${script_dir}/email ${install_main_dir}/ ||: + fi } function install_bin() { @@ -207,29 +207,75 @@ function install_lib() { ${csudo} rm -f ${lib_link_dir}/libtaos.* || : ${csudo} rm -f ${lib64_link_dir}/libtaos.* || : #${csudo} rm -rf ${v15_java_app_dir} || : - ${csudo} cp -rf ${script_dir}/driver/* ${install_main_dir}/driver && ${csudo} chmod 777 ${install_main_dir}/driver/* - + ${csudo} cp -rf ${script_dir}/driver/* ${install_main_dir}/driver && ${csudo} chmod 777 ${install_main_dir}/driver/* + ${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.so.1 ${csudo} ln -s ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so - + if [[ -d ${lib64_link_dir} && ! 
-e ${lib64_link_dir}/libtaos.so ]]; then ${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib64_link_dir}/libtaos.so.1 || : ${csudo} ln -s ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || : fi - - #if [ "$verMode" == "cluster" ]; then + + #if [ "$verMode" == "cluster" ]; then # # Compatible with version 1.5 # ${csudo} mkdir -p ${v15_java_app_dir} # ${csudo} ln -s ${install_main_dir}/connector/taos-jdbcdriver-1.0.2-dist.jar ${v15_java_app_dir}/JDBCDriver-1.0.2-dist.jar # ${csudo} chmod 777 ${v15_java_app_dir} || : #fi - + ${csudo} ldconfig } +function install_jemalloc() { + jemalloc_dir=${script_dir}/jemalloc + + if [ -d ${jemalloc_dir} ]; then + ${csudo} /usr/bin/install -c -d /usr/local/bin + + if [ -f ${jemalloc_dir}/bin/jemalloc-config ]; then + ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jemalloc-config /usr/local/bin + fi + if [ -f ${jemalloc_dir}/bin/jemalloc.sh ]; then + ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jemalloc.sh /usr/local/bin + fi + if [ -f ${jemalloc_dir}/bin/jeprof ]; then + ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/bin/jeprof /usr/local/bin + fi + if [ -f ${jemalloc_dir}/include/jemalloc/jemalloc.h ]; then + ${csudo} /usr/bin/install -c -d /usr/local/include/jemalloc + ${csudo} /usr/bin/install -c -m 644 ${jemalloc_dir}/include/jemalloc/jemalloc.h /usr/local/include/jemalloc + fi + if [ -f ${jemalloc_dir}/lib/libjemalloc.so.2 ]; then + ${csudo} /usr/bin/install -c -d /usr/local/lib + ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.so.2 /usr/local/lib + ${csudo} ln -sf libjemalloc.so.2 /usr/local/lib/libjemalloc.so + ${csudo} /usr/bin/install -c -d /usr/local/lib + if [ -f ${jemalloc_dir}/lib/libjemalloc.a ]; then + ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc.a /usr/local/lib + fi + if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then + ${csudo} /usr/bin/install -c -m 755 ${jemalloc_dir}/lib/libjemalloc_pic.a /usr/local/lib + fi + if [ -f ${jemalloc_dir}/lib/libjemalloc_pic.a ]; then + ${csudo} /usr/bin/install -c -d /usr/local/lib/pkgconfig + ${csudo} /usr/bin/install -c -m 644 ${jemalloc_dir}/lib/pkgconfig/jemalloc.pc /usr/local/lib/pkgconfig + fi + fi + if [ -f ${jemalloc_dir}/share/doc/jemalloc/jemalloc.html ]; then + ${csudo} /usr/bin/install -c -d /usr/local/share/doc/jemalloc + ${csudo} /usr/bin/install -c -m 644 ${jemalloc_dir}/share/doc/jemalloc/jemalloc.html /usr/local/share/doc/jemalloc + fi + if [ -f ${jemalloc_dir}/share/man/man3/jemalloc.3 ]; then + ${csudo} /usr/bin/install -c -d /usr/local/share/man/man3 + ${csudo} /usr/bin/install -c -m 644 ${jemalloc_dir}/share/man/man3/jemalloc.3 /usr/local/share/man/man3 + fi + fi +} + function install_header() { ${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taoserror.h || : - ${csudo} cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo} chmod 644 ${install_main_dir}/include/* + ${csudo} cp -f ${script_dir}/inc/* ${install_main_dir}/include && ${csudo} chmod 644 ${install_main_dir}/include/* ${csudo} ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h ${csudo} ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h } @@ -246,13 +292,13 @@ function add_newHostname_to_hosts() { if [[ "$s" == "$localIp" ]]; then return fi - done + done ${csudo} echo "127.0.0.1 $1" >> /etc/hosts ||: } function set_hostname() { echo -e -n "${GREEN}Please enter one hostname(must not be 'localhost')${NC}:" - read newHostname + read newHostname while true; do if [[ ! 
-z "$newHostname" && "$newHostname" != "localhost" ]]; then break @@ -266,25 +312,25 @@ function set_hostname() { if [[ $retval != 0 ]]; then echo echo "set hostname fail!" - return + return fi #echo -e -n "$(hostnamectl status --static)" #echo -e -n "$(hostnamectl status --transient)" #echo -e -n "$(hostnamectl status --pretty)" - + #ubuntu/centos /etc/hostname if [[ -e /etc/hostname ]]; then ${csudo} echo $newHostname > /etc/hostname ||: fi - + #debian: #HOSTNAME=yourname if [[ -e /etc/sysconfig/network ]]; then ${csudo} sed -i -r "s/#*\s*(HOSTNAME=\s*).*/\1$newHostname/" /etc/sysconfig/network ||: fi ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$newHostname/" ${cfg_install_dir}/taos.cfg - serverFqdn=$newHostname - + serverFqdn=$newHostname + if [[ -e /etc/hosts ]]; then add_newHostname_to_hosts $newHostname fi @@ -302,7 +348,7 @@ function is_correct_ipaddr() { return 0 fi done - + return 1 } @@ -316,13 +362,13 @@ function set_ipAsFqdn() { echo echo -e -n "${GREEN}Unable to get local ip, use 127.0.0.1${NC}" localFqdn="127.0.0.1" - # Write the local FQDN to configuration file - ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$localFqdn/" ${cfg_install_dir}/taos.cfg + # Write the local FQDN to configuration file + ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$localFqdn/" ${cfg_install_dir}/taos.cfg serverFqdn=$localFqdn echo return - fi - + fi + echo -e -n "${GREEN}Please choose an IP from local IP list${NC}:" echo echo -e -n "${GREEN}$iplist${NC}" @@ -331,15 +377,15 @@ function set_ipAsFqdn() { echo -e -n "${GREEN}Notes: if IP is used as the node name, data can NOT be migrated to other machine directly${NC}:" read localFqdn while true; do - if [ ! -z "$localFqdn" ]; then + if [ ! -z "$localFqdn" ]; then # Check if correct ip address is_correct_ipaddr $localFqdn retval=`echo $?` if [[ $retval != 0 ]]; then read -p "Please choose an IP from local IP list:" localFqdn else - # Write the local FQDN to configuration file - ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$localFqdn/" ${cfg_install_dir}/taos.cfg + # Write the local FQDN to configuration file + ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$localFqdn/" ${cfg_install_dir}/taos.cfg serverFqdn=$localFqdn break fi @@ -354,59 +400,59 @@ function local_fqdn_check() { echo echo -e -n "System hostname is: ${GREEN}$serverFqdn${NC}" echo - if [[ "$serverFqdn" == "" ]] || [[ "$serverFqdn" == "localhost" ]]; then + if [[ "$serverFqdn" == "" ]] || [[ "$serverFqdn" == "localhost" ]]; then echo -e -n "${GREEN}It is strongly recommended to configure a hostname for this machine ${NC}" echo - + while true do - read -r -p "Set hostname now? [Y/n] " input - if [ ! -n "$input" ]; then - set_hostname - break - else - case $input in - [yY][eE][sS]|[yY]) - set_hostname - break - ;; - - [nN][oO]|[nN]) - set_ipAsFqdn - break - ;; - - *) - echo "Invalid input..." - ;; - esac - fi + read -r -p "Set hostname now? [Y/n] " input + if [ ! -n "$input" ]; then + set_hostname + break + else + case $input in + [yY][eE][sS]|[yY]) + set_hostname + break + ;; + + [nN][oO]|[nN]) + set_ipAsFqdn + break + ;; + + *) + echo "Invalid input..." + ;; + esac + fi done fi } function install_config() { #${csudo} rm -f ${install_main_dir}/cfg/taos.cfg || : - + if [ ! 
-f ${cfg_install_dir}/taos.cfg ]; then ${csudo} mkdir -p ${cfg_install_dir} [ -f ${script_dir}/cfg/taos.cfg ] && ${csudo} cp ${script_dir}/cfg/taos.cfg ${cfg_install_dir} ${csudo} chmod 644 ${cfg_install_dir}/* - fi - + fi + ${csudo} cp -f ${script_dir}/cfg/taos.cfg ${install_main_dir}/cfg/taos.cfg.org ${csudo} ln -s ${cfg_install_dir}/taos.cfg ${install_main_dir}/cfg [ ! -z $1 ] && return 0 || : # only install client - + if ((${update_flag}==1)); then return 0 fi - + if [ "$interactiveFqdn" == "no" ]; then return 0 - fi - + fi + local_fqdn_check #FQDN_FORMAT="(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)" @@ -424,8 +470,8 @@ function install_config() { if [ ! -z "$firstEp" ]; then # check the format of the firstEp #if [[ $firstEp == $FQDN_PATTERN ]]; then - # Write the first FQDN to configuration file - ${csudo} sed -i -r "s/#*\s*(firstEp\s*).*/\1$firstEp/" ${cfg_install_dir}/taos.cfg + # Write the first FQDN to configuration file + ${csudo} sed -i -r "s/#*\s*(firstEp\s*).*/\1$firstEp/" ${cfg_install_dir}/taos.cfg break #else # read -p "Please enter the correct FQDN:port: " firstEp @@ -433,9 +479,9 @@ function install_config() { else break fi - done + done - # user email + # user email #EMAIL_PATTERN='^[A-Za-z0-9\u4e00-\u9fa5]+@[a-zA-Z0-9_-]+(\.[a-zA-Z0-9_-]+)+$' #EMAIL_PATTERN='^[\w-]+(\.[\w-]+)*@[\w-]+(\.[\w-]+)+$' #EMAIL_PATTERN="^[\w-]+(\.[\w-]+)*@[\w-]+(\.[\w-]+)+$" @@ -446,31 +492,31 @@ function install_config() { if [ ! -z "$emailAddr" ]; then # check the format of the emailAddr #if [[ "$emailAddr" =~ $EMAIL_PATTERN ]]; then - # Write the email address to temp file - email_file="${install_main_dir}/email" + # Write the email address to temp file + email_file="${install_main_dir}/email" ${csudo} bash -c "echo $emailAddr > ${email_file}" - break + break #else - # read -p "Please enter the correct email address: " emailAddr + # read -p "Please enter the correct email address: " emailAddr #fi else break fi - done + done } function install_log() { ${csudo} rm -rf ${log_dir} || : ${csudo} mkdir -p ${log_dir} && ${csudo} chmod 777 ${log_dir} - + ${csudo} ln -s ${log_dir} ${install_main_dir}/log } function install_data() { ${csudo} mkdir -p ${data_dir} - - ${csudo} ln -s ${data_dir} ${install_main_dir}/data + + ${csudo} ln -s ${data_dir} ${install_main_dir}/data } function install_connector() { @@ -485,26 +531,26 @@ function install_examples() { function clean_service_on_sysvinit() { #restart_config_str="taos:2345:respawn:${service_config_dir}/taosd start" - #${csudo} sed -i "\|${restart_config_str}|d" /etc/inittab || : - + #${csudo} sed -i "\|${restart_config_str}|d" /etc/inittab || : + if pidof taosd &> /dev/null; then ${csudo} service taosd stop || : fi - + if pidof tarbitrator &> /dev/null; then ${csudo} service tarbitratord stop || : fi if ((${initd_mod}==1)); then - if [ -e ${service_config_dir}/taosd ]; then + if [ -e ${service_config_dir}/taosd ]; then ${csudo} chkconfig --del taosd || : fi - if [ -e ${service_config_dir}/tarbitratord ]; then + if [ -e ${service_config_dir}/tarbitratord ]; then ${csudo} chkconfig --del tarbitratord || : fi elif ((${initd_mod}==2)); then - if [ -e ${service_config_dir}/taosd ]; then + if [ -e ${service_config_dir}/taosd ]; then ${csudo} insserv -r taosd || : fi if [ -e ${service_config_dir}/tarbitratord ]; then @@ -518,10 +564,10 @@ function clean_service_on_sysvinit() { ${csudo} update-rc.d -f tarbitratord remove || : fi fi - + ${csudo} rm -f ${service_config_dir}/taosd || : ${csudo} rm -f ${service_config_dir}/tarbitratord || : - + if $(which init &> 
/dev/null); then ${csudo} init q || : fi @@ -544,10 +590,10 @@ function install_service_on_sysvinit() { ${csudo} cp -f ${script_dir}/init.d/tarbitratord.rpm ${install_main_dir}/init.d/tarbitratord ${csudo} cp ${script_dir}/init.d/tarbitratord.rpm ${service_config_dir}/tarbitratord && ${csudo} chmod a+x ${service_config_dir}/tarbitratord fi - + #restart_config_str="taos:2345:respawn:${service_config_dir}/taosd start" #${csudo} grep -q -F "$restart_config_str" /etc/inittab || ${csudo} bash -c "echo '${restart_config_str}' >> /etc/inittab" - + if ((${initd_mod}==1)); then ${csudo} chkconfig --add taosd || : ${csudo} chkconfig --level 2345 taosd on || : @@ -572,7 +618,7 @@ function clean_service_on_systemd() { fi ${csudo} systemctl disable taosd &> /dev/null || echo &> /dev/null ${csudo} rm -f ${taosd_service_config} - + tarbitratord_service_config="${service_config_dir}/tarbitratord.service" if systemctl is-active --quiet tarbitratord; then echo "tarbitrator is running, stopping it..." @@ -580,7 +626,7 @@ function clean_service_on_systemd() { fi ${csudo} systemctl disable tarbitratord &> /dev/null || echo &> /dev/null ${csudo} rm -f ${tarbitratord_service_config} - + if [ "$verMode" == "cluster" ]; then nginx_service_config="${service_config_dir}/nginxd.service" if systemctl is-active --quiet nginxd; then @@ -588,8 +634,8 @@ function clean_service_on_systemd() { ${csudo} systemctl stop nginxd &> /dev/null || echo &> /dev/null fi ${csudo} systemctl disable nginxd &> /dev/null || echo &> /dev/null - ${csudo} rm -f ${nginx_service_config} - fi + ${csudo} rm -f ${nginx_service_config} + fi } # taos:2345:respawn:/etc/init.d/taosd start @@ -621,7 +667,7 @@ function install_service_on_systemd() { ${csudo} bash -c "echo '[Install]' >> ${taosd_service_config}" ${csudo} bash -c "echo 'WantedBy=multi-user.target' >> ${taosd_service_config}" ${csudo} systemctl enable taosd - + tarbitratord_service_config="${service_config_dir}/tarbitratord.service" ${csudo} bash -c "echo '[Unit]' >> ${tarbitratord_service_config}" ${csudo} bash -c "echo 'Description=TDengine arbitrator service' >> ${tarbitratord_service_config}" @@ -643,9 +689,9 @@ function install_service_on_systemd() { ${csudo} bash -c "echo >> ${tarbitratord_service_config}" ${csudo} bash -c "echo '[Install]' >> ${tarbitratord_service_config}" ${csudo} bash -c "echo 'WantedBy=multi-user.target' >> ${tarbitratord_service_config}" - #${csudo} systemctl enable tarbitratord - - if [ "$verMode" == "cluster" ]; then + #${csudo} systemctl enable tarbitratord + + if [ "$verMode" == "cluster" ]; then nginx_service_config="${service_config_dir}/nginxd.service" ${csudo} bash -c "echo '[Unit]' >> ${nginx_service_config}" ${csudo} bash -c "echo 'Description=Nginx For TDengine Service' >> ${nginx_service_config}" @@ -674,7 +720,7 @@ function install_service_on_systemd() { ${csudo} systemctl enable nginxd fi ${csudo} systemctl start nginxd - fi + fi } function install_service() { @@ -757,7 +803,7 @@ function update_TDengine() { fi sleep 1 fi - + if [ "$verMode" == "cluster" ]; then if pidof nginx &> /dev/null; then if ((${service_mod}==0)); then @@ -770,12 +816,13 @@ function update_TDengine() { sleep 1 fi fi - + install_main_path install_log install_header install_lib + install_jemalloc if [ "$pagMode" != "lite" ]; then install_connector fi @@ -783,10 +830,10 @@ function update_TDengine() { if [ -z $1 ]; then install_bin install_service - install_config - + install_config + openresty_work=false - if [ "$verMode" == "cluster" ]; then + if [ "$verMode" == "cluster" 
]; then # Check if openresty is installed # Check if nginx is installed successfully if type curl &> /dev/null; then @@ -797,7 +844,7 @@ function update_TDengine() { echo -e "\033[44;31;5mNginx for TDengine does not work! Please try again!\033[0m" fi fi - fi + fi #echo #echo -e "\033[44;32;1mTDengine is updated successfully!${NC}" @@ -816,7 +863,7 @@ function update_TDengine() { else echo -e "${GREEN_DARK}To access TDengine ${NC}: use ${GREEN_UNDERLINE}taos -h $serverFqdn${NC} in shell${NC}" fi - + echo echo -e "\033[44;32;1mTDengine is updated successfully!${NC}" else @@ -839,14 +886,14 @@ function install_TDengine() { tar -zxf taos.tar.gz echo -e "${GREEN}Start to install TDengine...${NC}" - - install_main_path - + + install_main_path + if [ -z $1 ]; then install_data - fi - - install_log + fi + + install_log install_header install_lib if [ "$pagMode" != "lite" ]; then @@ -871,8 +918,8 @@ function install_TDengine() { fi fi fi - - install_config + + install_config # Ask if to start the service #echo @@ -885,36 +932,36 @@ function install_TDengine() { echo -e "${GREEN_DARK}To start TDengine ${NC}: ${csudo} service taosd start${NC}" else echo -e "${GREEN_DARK}To start TDengine ${NC}: taosd${NC}" - fi + fi #if [ ${openresty_work} = 'true' ]; then # echo -e "${GREEN_DARK}To access TDengine ${NC}: use ${GREEN_UNDERLINE}taos${NC} in shell OR from ${GREEN_UNDERLINE}http://127.0.0.1:${nginx_port}${NC}" #else # echo -e "${GREEN_DARK}To access TDengine ${NC}: use ${GREEN_UNDERLINE}taos${NC} in shell${NC}" #fi - + if [ ! -z "$firstEp" ]; then - tmpFqdn=${firstEp%%:*} - substr=":" - if [[ $firstEp =~ $substr ]];then - tmpPort=${firstEp#*:} - else - tmpPort="" - fi - if [[ "$tmpPort" != "" ]];then - echo -e "${GREEN_DARK}To access TDengine ${NC}: taos -h $tmpFqdn -P $tmpPort${GREEN_DARK} to login into cluster, then${NC}" - else - echo -e "${GREEN_DARK}To access TDengine ${NC}: taos -h $tmpFqdn${GREEN_DARK} to login into cluster, then${NC}" - fi - echo -e "${GREEN_DARK}execute ${NC}: create dnode 'newDnodeFQDN:port'; ${GREEN_DARK}to add this new node${NC}" - echo + tmpFqdn=${firstEp%%:*} + substr=":" + if [[ $firstEp =~ $substr ]];then + tmpPort=${firstEp#*:} + else + tmpPort="" + fi + if [[ "$tmpPort" != "" ]];then + echo -e "${GREEN_DARK}To access TDengine ${NC}: taos -h $tmpFqdn -P $tmpPort${GREEN_DARK} to login into cluster, then${NC}" + else + echo -e "${GREEN_DARK}To access TDengine ${NC}: taos -h $tmpFqdn${GREEN_DARK} to login into cluster, then${NC}" + fi + echo -e "${GREEN_DARK}execute ${NC}: create dnode 'newDnodeFQDN:port'; ${GREEN_DARK}to add this new node${NC}" + echo elif [ ! -z "$serverFqdn" ]; then - echo -e "${GREEN_DARK}To access TDengine ${NC}: taos -h $serverFqdn${GREEN_DARK} to login into TDengine server${NC}" - echo + echo -e "${GREEN_DARK}To access TDengine ${NC}: taos -h $serverFqdn${GREEN_DARK} to login into TDengine server${NC}" + echo fi - + echo -e "\033[44;32;1mTDengine is installed successfully!${NC}" - echo + echo else # Only install client install_bin install_config @@ -945,6 +992,6 @@ elif [ "$verType" == "client" ]; then else install_TDengine client fi -else - echo "please input correct verType" +else + echo "please input correct verType" fi diff --git a/packaging/tools/make_install.sh b/packaging/tools/make_install.sh index d6ace0a063..0c755d9f72 100755 --- a/packaging/tools/make_install.sh +++ b/packaging/tools/make_install.sh @@ -1,12 +1,12 @@ #!/bin/bash # -# This file is used to install TAOS time-series database on linux systems. 
The operating system +# This file is used to install TAOS time-series database on linux systems. The operating system # is required to use systemd to manage services at boot set -e # set -x -# -----------------------Variables definition--------------------- +# -----------------------Variables definition source_dir=$1 binary_dir=$2 osType=$3 @@ -71,9 +71,9 @@ if [ "$osType" != "Darwin" ]; then service_mod=0 elif $(which service &> /dev/null); then service_mod=1 - service_config_dir="/etc/init.d" + service_config_dir="/etc/init.d" if $(which chkconfig &> /dev/null); then - initd_mod=1 + initd_mod=1 elif $(which insserv &> /dev/null); then initd_mod=2 elif $(which update-rc.d &> /dev/null); then @@ -123,9 +123,9 @@ function kill_taosd() { function install_main_path() { #create install main dir and all sub dir ${csudo} rm -rf ${install_main_dir} || : - ${csudo} mkdir -p ${install_main_dir} + ${csudo} mkdir -p ${install_main_dir} ${csudo} mkdir -p ${install_main_dir}/cfg - ${csudo} mkdir -p ${install_main_dir}/bin + ${csudo} mkdir -p ${install_main_dir}/bin ${csudo} mkdir -p ${install_main_dir}/connector ${csudo} mkdir -p ${install_main_dir}/driver ${csudo} mkdir -p ${install_main_dir}/examples @@ -176,6 +176,49 @@ function install_bin() { [ -x ${install_main_dir}/bin/remove_client.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove_client.sh ${bin_link_dir}/rmtaos || : fi } +function install_jemalloc() { + if [ "$osType" != "Darwin" ]; then + /usr/bin/install -c -d /usr/local/bin + + if [ -f ${binary_dir}/build/bin/jemalloc-config ]; then + /usr/bin/install -c -m 755 ${binary_dir}/build/bin/jemalloc-config /usr/local/bin + fi + if [ -f ${binary_dir}/build/bin/jemalloc.sh ]; then + /usr/bin/install -c -m 755 ${binary_dir}/build/bin/jemalloc.sh /usr/local/bin + fi + if [ -f ${binary_dir}/build/bin/jeprof ]; then + /usr/bin/install -c -m 755 ${binary_dir}/build/bin/jeprof /usr/local/bin + fi + if [ -f ${binary_dir}/build/include/jemalloc/jemalloc.h ]; then + /usr/bin/install -c -d /usr/local/include/jemalloc + /usr/bin/install -c -m 644 ${binary_dir}/build/include/jemalloc/jemalloc.h /usr/local/include/jemalloc + fi + if [ -f ${binary_dir}/build/lib/libjemalloc.so.2 ]; then + /usr/bin/install -c -d /usr/local/lib + /usr/bin/install -c -m 755 ${binary_dir}/build/lib/libjemalloc.so.2 /usr/local/lib + ln -sf libjemalloc.so.2 /usr/local/lib/libjemalloc.so + /usr/bin/install -c -d /usr/local/lib + if [ -f ${binary_dir}/build/lib/libjemalloc.a ]; then + /usr/bin/install -c -m 755 ${binary_dir}/build/lib/libjemalloc.a /usr/local/lib + fi + if [ -f ${binary_dir}/build/lib/libjemalloc_pic.a ]; then + /usr/bin/install -c -m 755 ${binary_dir}/build/lib/libjemalloc_pic.a /usr/local/lib + fi + if [ -f ${binary_dir}/build/lib/pkgconfig/jemalloc.pc ]; then + /usr/bin/install -c -d /usr/local/lib/pkgconfig + /usr/bin/install -c -m 644 ${binary_dir}/build/lib/pkgconfig/jemalloc.pc /usr/local/lib/pkgconfig + fi + fi + if [ -f ${binary_dir}/build/share/doc/jemalloc/jemalloc.html ]; then + /usr/bin/install -c -d /usr/local/share/doc/jemalloc + /usr/bin/install -c -m 644 ${binary_dir}/build/share/doc/jemalloc/jemalloc.html /usr/local/share/doc/jemalloc + fi + if [ -f ${binary_dir}/build/share/man/man3/jemalloc.3 ]; then + /usr/bin/install -c -d /usr/local/share/man/man3 + /usr/bin/install -c -m 644 ${binary_dir}/build/share/man/man3/jemalloc.3 /usr/local/share/man/man3 + fi + fi +} function install_lib() { # Remove links @@ -183,12 +226,12 @@ function install_lib() { if [ "$osType" != "Darwin" ]; then ${csudo} rm 
-f ${lib64_link_dir}/libtaos.* || : fi - + if [ "$osType" != "Darwin" ]; then ${csudo} cp ${binary_dir}/build/lib/libtaos.so.${verNumber} ${install_main_dir}/driver && ${csudo} chmod 777 ${install_main_dir}/driver/* ${csudo} ln -sf ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.so.1 ${csudo} ln -sf ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so - + if [ -d "${lib64_link_dir}" ]; then ${csudo} ln -sf ${install_main_dir}/driver/libtaos.* ${lib64_link_dir}/libtaos.so.1 ${csudo} ln -sf ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so @@ -198,7 +241,9 @@ function install_lib() { ${csudo} ln -sf ${install_main_dir}/driver/libtaos.1.dylib ${lib_link_dir}/libtaos.1.dylib ${csudo} ln -sf ${lib_link_dir}/libtaos.1.dylib ${lib_link_dir}/libtaos.dylib fi - + + install_jemalloc + if [ "$osType" != "Darwin" ]; then ${csudo} ldconfig fi @@ -206,26 +251,26 @@ function install_lib() { function install_header() { - ${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taoserror.h || : - ${csudo} cp -f ${source_dir}/src/inc/taos.h ${source_dir}/src/inc/taoserror.h ${install_main_dir}/include && ${csudo} chmod 644 ${install_main_dir}/include/* + ${csudo} rm -f ${inc_link_dir}/taos.h ${inc_link_dir}/taoserror.h || : + ${csudo} cp -f ${source_dir}/src/inc/taos.h ${source_dir}/src/inc/taoserror.h ${install_main_dir}/include && ${csudo} chmod 644 ${install_main_dir}/include/* ${csudo} ln -s ${install_main_dir}/include/taos.h ${inc_link_dir}/taos.h ${csudo} ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h } function install_config() { #${csudo} rm -f ${install_main_dir}/cfg/taos.cfg || : - - if [ ! -f ${cfg_install_dir}/taos.cfg ]; then + + if [ ! -f ${cfg_install_dir}/taos.cfg ]; then ${csudo} mkdir -p ${cfg_install_dir} [ -f ${script_dir}/../cfg/taos.cfg ] && ${csudo} cp ${script_dir}/../cfg/taos.cfg ${cfg_install_dir} ${csudo} chmod 644 ${cfg_install_dir}/* - fi - + fi + ${csudo} cp -f ${script_dir}/../cfg/taos.cfg ${install_main_dir}/cfg/taos.cfg.org - ${csudo} ln -s ${cfg_install_dir}/taos.cfg ${install_main_dir}/cfg + ${csudo} ln -s ${cfg_install_dir}/taos.cfg ${install_main_dir}/cfg } -function install_log() { +function install_log() { ${csudo} rm -rf ${log_dir} || : if [ "$osType" != "Darwin" ]; then @@ -239,7 +284,7 @@ function install_log() { function install_data() { ${csudo} mkdir -p ${data_dir} - ${csudo} ln -s ${data_dir} ${install_main_dir}/data + ${csudo} ln -s ${data_dir} ${install_main_dir}/data } function install_connector() { @@ -254,8 +299,8 @@ function install_connector() { echo "WARNING: go connector not found, please check if want to use it!" 
fi ${csudo} cp -rf ${source_dir}/src/connector/python ${install_main_dir}/connector - - ${csudo} cp ${binary_dir}/build/lib/*.jar ${install_main_dir}/connector &> /dev/null && ${csudo} chmod 777 ${install_main_dir}/connector/*.jar || echo &> /dev/null + + ${csudo} cp ${binary_dir}/build/lib/*.jar ${install_main_dir}/connector &> /dev/null && ${csudo} chmod 777 ${install_main_dir}/connector/*.jar || echo &> /dev/null } function install_examples() { @@ -264,8 +309,8 @@ function install_examples() { function clean_service_on_sysvinit() { #restart_config_str="taos:2345:respawn:${service_config_dir}/taosd start" - #${csudo} sed -i "\|${restart_config_str}|d" /etc/inittab || : - + #${csudo} sed -i "\|${restart_config_str}|d" /etc/inittab || : + if pidof taosd &> /dev/null; then ${csudo} service taosd stop || : fi @@ -277,9 +322,9 @@ function clean_service_on_sysvinit() { elif ((${initd_mod}==3)); then ${csudo} update-rc.d -f taosd remove || : fi - + ${csudo} rm -f ${service_config_dir}/taosd || : - + if $(which init &> /dev/null); then ${csudo} init q || : fi @@ -298,10 +343,10 @@ function install_service_on_sysvinit() { ${csudo} cp -f ${script_dir}/../rpm/taosd ${install_main_dir}/init.d ${csudo} cp ${script_dir}/../rpm/taosd ${service_config_dir} && ${csudo} chmod a+x ${service_config_dir}/taosd fi - + #restart_config_str="taos:2345:respawn:${service_config_dir}/taosd start" #${csudo} grep -q -F "$restart_config_str" /etc/inittab || ${csudo} bash -c "echo '${restart_config_str}' >> /etc/inittab" - + if ((${initd_mod}==1)); then ${csudo} chkconfig --add taosd || : ${csudo} chkconfig --level 2345 taosd on || : @@ -323,7 +368,7 @@ function clean_service_on_systemd() { ${csudo} systemctl disable taosd &> /dev/null || echo &> /dev/null ${csudo} rm -f ${taosd_service_config} -} +} # taos:2345:respawn:/etc/init.d/taosd start @@ -383,7 +428,7 @@ function update_TDengine() { sleep 1 fi fi - + install_main_path install_log @@ -431,16 +476,16 @@ function install_TDengine() { # Start to install if [ "$osType" != "Darwin" ]; then echo -e "${GREEN}Start to install TDEngine...${NC}" - else - echo -e "${GREEN}Start to install TDEngine Client ...${NC}" + else + echo -e "${GREEN}Start to install TDEngine Client ...${NC}" fi - install_main_path + install_main_path - if [ "$osType" != "Darwin" ]; then + if [ "$osType" != "Darwin" ]; then install_data fi - install_log + install_log install_header install_lib install_connector @@ -452,7 +497,7 @@ function install_TDengine() { install_service fi - install_config + install_config if [ "$osType" != "Darwin" ]; then # Ask if to start the service diff --git a/packaging/tools/makepkg.sh b/packaging/tools/makepkg.sh index e4d2d71b01..624f72278a 100755 --- a/packaging/tools/makepkg.sh +++ b/packaging/tools/makepkg.sh @@ -30,12 +30,12 @@ else install_dir="${release_dir}/TDengine-server-${version}" fi -# Directories and files. 
+# Directories and files if [ "$pagMode" == "lite" ]; then - strip ${build_dir}/bin/taosd + strip ${build_dir}/bin/taosd strip ${build_dir}/bin/taos bin_files="${build_dir}/bin/taosd ${build_dir}/bin/taos ${script_dir}/remove.sh" -else +else bin_files="${build_dir}/bin/taosd ${build_dir}/bin/taos ${build_dir}/bin/taosdump ${build_dir}/bin/taosdemo ${build_dir}/bin/tarbitrator\ ${script_dir}/remove.sh ${script_dir}/set_core.sh ${script_dir}/startPre.sh ${script_dir}/taosd-dump-cfg.gdb" fi @@ -73,10 +73,43 @@ mkdir -p ${install_dir}/init.d && cp ${init_file_rpm} ${install_dir}/init.d/taos mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_deb} ${install_dir}/init.d/tarbitratord.deb || : mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_rpm} ${install_dir}/init.d/tarbitratord.rpm || : +if [ -f ${build_dir}/bin/jemalloc-config ]; then + mkdir -p ${install_dir}/jemalloc/{bin,lib,lib/pkgconfig,include/jemalloc,share/doc/jemalloc,share/man/man3} + cp ${build_dir}/bin/jemalloc-config ${install_dir}/jemalloc/bin + if [ -f ${build_dir}/bin/jemalloc.sh ]; then + cp ${build_dir}/bin/jemalloc.sh ${install_dir}/jemalloc/bin + fi + if [ -f ${build_dir}/bin/jeprof ]; then + cp ${build_dir}/bin/jeprof ${install_dir}/jemalloc/bin + fi + if [ -f ${build_dir}/include/jemalloc/jemalloc.h ]; then + cp ${build_dir}/include/jemalloc/jemalloc.h ${install_dir}/jemalloc/include/jemalloc + fi + if [ -f ${build_dir}/lib/libjemalloc.so.2 ]; then + cp ${build_dir}/lib/libjemalloc.so.2 ${install_dir}/jemalloc/lib + ln -sf libjemalloc.so.2 ${install_dir}/jemalloc/lib/libjemalloc.so + fi + if [ -f ${build_dir}/lib/libjemalloc.a ]; then + cp ${build_dir}/lib/libjemalloc.a ${install_dir}/jemalloc/lib + fi + if [ -f ${build_dir}/lib/libjemalloc_pic.a ]; then + cp ${build_dir}/lib/libjemalloc_pic.a ${install_dir}/jemalloc/lib + fi + if [ -f ${build_dir}/lib/pkgconfig/jemalloc.pc ]; then + cp ${build_dir}/lib/pkgconfig/jemalloc.pc ${install_dir}/jemalloc/lib/pkgconfig + fi + if [ -f ${build_dir}/share/doc/jemalloc/jemalloc.html ]; then + cp ${build_dir}/share/doc/jemalloc/jemalloc.html ${install_dir}/jemalloc/share/doc/jemalloc + fi + if [ -f ${build_dir}/share/man/man3/jemalloc.3 ]; then + cp ${build_dir}/share/man/man3/jemalloc.3 ${install_dir}/jemalloc/share/man/man3 + fi +fi + if [ "$verMode" == "cluster" ]; then sed 's/verMode=edge/verMode=cluster/g' ${install_dir}/bin/remove.sh >> remove_temp.sh mv remove_temp.sh ${install_dir}/bin/remove.sh - + mkdir -p ${install_dir}/nginxd && cp -r ${nginx_dir}/* ${install_dir}/nginxd cp ${nginx_dir}/png/taos.png ${install_dir}/nginxd/admin/images/taos.png rm -rf ${install_dir}/nginxd/png @@ -132,7 +165,7 @@ if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then if [ -d ${examples_dir}/JDBC/taosdemo/target ]; then rm -rf ${examples_dir}/JDBC/taosdemo/target fi - + cp -r ${examples_dir}/JDBC ${install_dir}/examples cp -r ${examples_dir}/matlab ${install_dir}/examples cp -r ${examples_dir}/python ${install_dir}/examples @@ -142,7 +175,7 @@ if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then cp -r ${examples_dir}/C# ${install_dir}/examples fi # Copy driver -mkdir -p ${install_dir}/driver +mkdir -p ${install_dir}/driver cp ${lib_files} ${install_dir}/driver # Copy connector @@ -168,7 +201,7 @@ fi # exit 1 -cd ${release_dir} +cd ${release_dir} if [ "$verMode" == "cluster" ]; then pkg_name=${install_dir}-${osType}-${cpuType} @@ -185,8 +218,8 @@ fi if [ "$verType" == "beta" ]; then pkg_name=${pkg_name}-${verType} -elif [ "$verType" == 
"stable" ]; then - pkg_name=${pkg_name} +elif [ "$verType" == "stable" ]; then + pkg_name=${pkg_name} else echo "unknow verType, nor stabel or beta" exit 1 diff --git a/src/dnode/CMakeLists.txt b/src/dnode/CMakeLists.txt index dd18f00920..f8d8f88438 100644 --- a/src/dnode/CMakeLists.txt +++ b/src/dnode/CMakeLists.txt @@ -10,8 +10,15 @@ INCLUDE_DIRECTORIES(${TD_ENTERPRISE_DIR}/src/inc) INCLUDE_DIRECTORIES(inc) AUX_SOURCE_DIRECTORY(src SRC) +IF (TD_LINUX_64 AND JEMALLOC_ENABLED) + ADD_DEFINITIONS(-DTD_JEMALLOC_ENABLED -I${CMAKE_BINARY_DIR}/build/include -L${CMAKE_BINARY_DIR}/build/lib -Wl,-rpath,${CMAKE_BINARY_DIR}/build/lib -ljemalloc) + SET(LINK_JEMALLOC "-L${CMAKE_BINARY_DIR}/build/lib -ljemalloc") +ELSE () + SET(LINK_JEMALLOC "") +ENDIF () + ADD_EXECUTABLE(taosd ${SRC}) -TARGET_LINK_LIBRARIES(taosd mnode monitor http tsdb twal vnode cJson lz4 balance sync) +TARGET_LINK_LIBRARIES(taosd mnode monitor http tsdb twal vnode cJson lz4 balance sync ${LINK_JEMALLOC}) IF (TD_SOMODE_STATIC) TARGET_LINK_LIBRARIES(taosd taos_static) diff --git a/src/kit/shell/CMakeLists.txt b/src/kit/shell/CMakeLists.txt index d36c1e3fcc..d904945435 100644 --- a/src/kit/shell/CMakeLists.txt +++ b/src/kit/shell/CMakeLists.txt @@ -11,10 +11,17 @@ IF (TD_LINUX) LIST(REMOVE_ITEM SRC ./src/shellDarwin.c) ADD_EXECUTABLE(shell ${SRC}) +IF (TD_LINUX_64 AND JEMALLOC_ENABLED) + ADD_DEFINITIONS(-DTD_JEMALLOC_ENABLED -I${CMAKE_BINARY_DIR}/build/include -L${CMAKE_BINARY_DIR}/build/lib -Wl,-rpath,${CMAKE_BINARY_DIR}/build/lib -ljemalloc) + SET(LINK_JEMALLOC "-L${CMAKE_BINARY_DIR}/build/lib -ljemalloc") +ELSE () + SET(LINK_JEMALLOC "") +ENDIF () + IF (TD_SOMODE_STATIC) - TARGET_LINK_LIBRARIES(shell taos_static) + TARGET_LINK_LIBRARIES(shell taos_static ${LINK_JEMALLOC}) ELSE () - TARGET_LINK_LIBRARIES(shell taos) + TARGET_LINK_LIBRARIES(shell taos ${LINK_JEMALLOC}) ENDIF () SET_TARGET_PROPERTIES(shell PROPERTIES OUTPUT_NAME taos) diff --git a/src/kit/taosdemo/CMakeLists.txt b/src/kit/taosdemo/CMakeLists.txt index 5f75be0e19..091eecfe27 100644 --- a/src/kit/taosdemo/CMakeLists.txt +++ b/src/kit/taosdemo/CMakeLists.txt @@ -55,14 +55,21 @@ ENDIF () MESSAGE("TD_VERSION_NUMBER is:" ${TD_VERSION_NUMBER}) ADD_DEFINITIONS(-DTD_VERNUMBER="${TD_VERSION_NUMBER}") +IF (TD_LINUX_64 AND JEMALLOC_ENABLED) + ADD_DEFINITIONS(-DTD_JEMALLOC_ENABLED -I${CMAKE_BINARY_DIR}/build/include -L${CMAKE_BINARY_DIR}/build/lib -Wl,-rpath,${CMAKE_BINARY_DIR}/build/lib -ljemalloc) + SET(LINK_JEMALLOC "-L${CMAKE_BINARY_DIR}/build/lib -ljemalloc") +ELSE () + SET(LINK_JEMALLOC "") +ENDIF () + IF (TD_LINUX) AUX_SOURCE_DIRECTORY(. SRC) ADD_EXECUTABLE(taosdemo ${SRC}) IF (TD_SOMODE_STATIC) - TARGET_LINK_LIBRARIES(taosdemo taos_static cJson) + TARGET_LINK_LIBRARIES(taosdemo taos_static cJson ${LINK_JEMALLOC}) ELSE () - TARGET_LINK_LIBRARIES(taosdemo taos cJson) + TARGET_LINK_LIBRARIES(taosdemo taos cJson ${LINK_JEMALLOC}) ENDIF () ELSEIF (TD_WINDOWS) AUX_SOURCE_DIRECTORY(. 
SRC) @@ -71,7 +78,7 @@ ELSEIF (TD_WINDOWS) IF (TD_SOMODE_STATIC) TARGET_LINK_LIBRARIES(taosdemo taos_static cJson) ELSE () - TARGET_LINK_LIBRARIES(taosdemo taos cJson}) + TARGET_LINK_LIBRARIES(taosdemo taos cJson) ENDIF () ELSEIF (TD_DARWIN) # missing a few dependencies, such as diff --git a/src/os/inc/osMemory.h b/src/os/inc/osMemory.h index 2cf7e14d2f..12d5c5ad78 100644 --- a/src/os/inc/osMemory.h +++ b/src/os/inc/osMemory.h @@ -22,6 +22,10 @@ extern "C" { #endif +#ifdef TD_JEMALLOC_ENABLED +#include +#endif + typedef enum { TAOS_ALLOC_MODE_DEFAULT = 0, TAOS_ALLOC_MODE_RANDOM_FAIL = 1, From 592278a65613ae73e85906275b0aced70f0943c5 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Wed, 23 Jun 2021 23:38:45 +0800 Subject: [PATCH 26/37] fix compile error in test --- cmake/env.inc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/env.inc b/cmake/env.inc index 356bd61442..fa15ec6aee 100755 --- a/cmake/env.inc +++ b/cmake/env.inc @@ -39,7 +39,7 @@ SET(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} ${COMMON_C_FLAGS} ${DEBUG_FLAGS} SET(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} ${COMMON_C_FLAGS} ${RELEASE_FLAGS}") # Set c++ compiler options -SET(COMMON_CXX_FLAGS "${COMMON_FLAGS} -std=c++11") +SET(COMMON_CXX_FLAGS "${COMMON_FLAGS} -std=c++11 -Wno-unused-function") SET(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} ${COMMON_CXX_FLAGS} ${DEBUG_FLAGS}") SET(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} ${COMMON_CXX_FLAGS} ${RELEASE_FLAGS}") From 5cadc8242fd74222d6814e4a0c99dd391e09986f Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Thu, 24 Jun 2021 10:03:46 +0800 Subject: [PATCH 27/37] fix compile errors in CI --- src/query/tests/astTest.cpp | 2 +- src/query/tests/histogramTest.cpp | 4 +++- src/query/tests/patternMatchTest.cpp | 3 ++- src/query/tests/percentileTest.cpp | 3 ++- src/query/tests/resultBufferTest.cpp | 3 ++- src/query/tests/tsBufTest.cpp | 4 +++- src/query/tests/unitTest.cpp | 9 ++++++--- 7 files changed, 19 insertions(+), 9 deletions(-) diff --git a/src/query/tests/astTest.cpp b/src/query/tests/astTest.cpp index 7bd1c0bf8e..1143d00e8d 100644 --- a/src/query/tests/astTest.cpp +++ b/src/query/tests/astTest.cpp @@ -10,7 +10,7 @@ #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wwrite-strings" -#pragma GCC diagnostic ignored "-Werror=unused-function" +#pragma GCC diagnostic ignored "-Wunused-function" typedef struct ResultObj { int32_t numOfResult; diff --git a/src/query/tests/histogramTest.cpp b/src/query/tests/histogramTest.cpp index 44a31f4241..0266ecffc1 100644 --- a/src/query/tests/histogramTest.cpp +++ b/src/query/tests/histogramTest.cpp @@ -6,7 +6,9 @@ #include "taos.h" #include "qHistogram.h" -#pragma GCC diagnostic ignored "-Werror=unused-function" +#pragma GCC diagnostic ignored "-Wunused-function" +#pragma GCC diagnostic ignored "-Wunused-variable" + namespace { void doHistogramAddTest() { SHistogramInfo* pHisto = NULL; diff --git a/src/query/tests/patternMatchTest.cpp b/src/query/tests/patternMatchTest.cpp index cd242afc84..091604c65c 100644 --- a/src/query/tests/patternMatchTest.cpp +++ b/src/query/tests/patternMatchTest.cpp @@ -6,7 +6,8 @@ #include "qAggMain.h" #include "tcompare.h" -#pragma GCC diagnostic ignored "-Werror=unused-function" +#pragma GCC diagnostic ignored "-Wunused-function" +#pragma GCC diagnostic ignored "-Wunused-variable" TEST(testCase, patternMatchTest) { SPatternCompareInfo info = PATTERN_COMPARE_INFO_INITIALIZER; diff --git a/src/query/tests/percentileTest.cpp b/src/query/tests/percentileTest.cpp index 
952129c8e7..1b6951201a 100644 --- a/src/query/tests/percentileTest.cpp +++ b/src/query/tests/percentileTest.cpp @@ -7,7 +7,8 @@ #include "qPercentile.h" -#pragma GCC diagnostic ignored "-Werror=unused-function" +#pragma GCC diagnostic ignored "-Wunused-function" +#pragma GCC diagnostic ignored "-Wunused-variable" namespace { tMemBucket *createBigIntDataBucket(int32_t start, int32_t end) { diff --git a/src/query/tests/resultBufferTest.cpp b/src/query/tests/resultBufferTest.cpp index 052e654066..54ac0bf4e5 100644 --- a/src/query/tests/resultBufferTest.cpp +++ b/src/query/tests/resultBufferTest.cpp @@ -6,7 +6,8 @@ #include "taos.h" #include "tsdb.h" -#pragma GCC diagnostic ignored "-Werror=unused-function" +#pragma GCC diagnostic ignored "-Wunused-function" +#pragma GCC diagnostic ignored "-Wunused-variable" namespace { // simple test diff --git a/src/query/tests/tsBufTest.cpp b/src/query/tests/tsBufTest.cpp index 30faed1bc2..04c5a15252 100644 --- a/src/query/tests/tsBufTest.cpp +++ b/src/query/tests/tsBufTest.cpp @@ -9,7 +9,9 @@ #include "ttoken.h" #include "tutil.h" -#pragma GCC diagnostic ignored "-Werror=unused-function" +#pragma GCC diagnostic ignored "-Wunused-function" +#pragma GCC diagnostic ignored "-Wunused-variable" +#pragma GCC diagnostic ignored "-Wunused-but-set-variable" namespace { /** diff --git a/src/query/tests/unitTest.cpp b/src/query/tests/unitTest.cpp index 75153a76ae..fcfed49140 100644 --- a/src/query/tests/unitTest.cpp +++ b/src/query/tests/unitTest.cpp @@ -6,14 +6,17 @@ #include "taos.h" #include "tsdb.h" +#pragma GCC diagnostic ignored "-Wwrite-strings" +#pragma GCC diagnostic ignored "-Wunused-function" +#pragma GCC diagnostic ignored "-Wunused-variable" +#pragma GCC diagnostic ignored "-Wunused-but-set-variable" +#pragma GCC diagnostic ignored "-Wsign-compare" + #include "../../client/inc/tscUtil.h" #include "tutil.h" #include "tvariant.h" #include "ttokendef.h" -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wwrite-strings" -#pragma GCC diagnostic ignored "-Werror=unused-function" namespace { int32_t testValidateName(char* name) { SStrToken token = {0}; From 069169e7c1d2209f543f838ea54631929aac9f1c Mon Sep 17 00:00:00 2001 From: Linhe Huo Date: Thu, 24 Jun 2021 15:04:02 +0800 Subject: [PATCH 28/37] WIP: [TD-4872]: fix buffer overflow in -O3 build (#6593) * [TD-4872]: fix buffer overflow in -O3 build * [TD-4872]: fix tasodemo buffer overflow with -O3 * [TD-4872]: fix tasodump buffer overflow with -O3 --- src/kit/taosdemo/taosdemo.c | 2 +- src/kit/taosdump/taosdump.c | 8 +++++--- src/tfs/src/tfs.c | 4 +++- src/util/src/tconfig.c | 2 +- 4 files changed, 10 insertions(+), 6 deletions(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 40168e5e97..a6ba7e9493 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -5103,7 +5103,7 @@ static int32_t generateStbDataTail( } else { retLen = getRowDataFromSample( data, - remainderBufLen, + remainderBufLen < MAX_DATA_SIZE ? remainderBufLen : MAX_DATA_SIZE, startTime + superTblInfo->timeStampStep * k, superTblInfo, pSamplePos); diff --git a/src/kit/taosdump/taosdump.c b/src/kit/taosdump/taosdump.c index 165bbdf990..05c6b1efbb 100644 --- a/src/kit/taosdump/taosdump.c +++ b/src/kit/taosdump/taosdump.c @@ -29,6 +29,9 @@ #define COMMAND_SIZE 65536 //#define DEFAULT_DUMP_FILE "taosdump.sql" +// for strncpy buffer overflow +#define min(a, b) (((a) < (b)) ? 
(a) : (b)) + int converStringToReadable(char *str, int size, char *buf, int bufsize); int convertNCharToReadable(char *str, int size, char *buf, int bufsize); void taosDumpCharset(FILE *fp); @@ -1119,12 +1122,11 @@ int taosGetTableDes( TAOS_FIELD *fields = taos_fetch_fields(res); tstrncpy(tableDes->name, table, TSDB_TABLE_NAME_LEN); - while ((row = taos_fetch_row(res)) != NULL) { strncpy(tableDes->cols[count].field, (char *)row[TSDB_DESCRIBE_METRIC_FIELD_INDEX], fields[TSDB_DESCRIBE_METRIC_FIELD_INDEX].bytes); strncpy(tableDes->cols[count].type, (char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes); + min(16, fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes)); tableDes->cols[count].length = *((int *)row[TSDB_DESCRIBE_METRIC_LENGTH_INDEX]); strncpy(tableDes->cols[count].note, (char *)row[TSDB_DESCRIBE_METRIC_NOTE_INDEX], fields[TSDB_DESCRIBE_METRIC_NOTE_INDEX].bytes); @@ -1575,7 +1577,7 @@ int taosDumpDb(SDbInfo *dbInfo, struct arguments *arguments, FILE *fp, TAOS *tao tstrncpy(tableRecord.name, (char *)row[TSDB_SHOW_TABLES_NAME_INDEX], fields[TSDB_SHOW_TABLES_NAME_INDEX].bytes); tstrncpy(tableRecord.metric, (char *)row[TSDB_SHOW_TABLES_METRIC_INDEX], - fields[TSDB_SHOW_TABLES_METRIC_INDEX].bytes); + min(TSDB_TABLE_NAME_LEN, fields[TSDB_SHOW_TABLES_METRIC_INDEX].bytes)); taosWrite(fd, &tableRecord, sizeof(STableRecord)); diff --git a/src/tfs/src/tfs.c b/src/tfs/src/tfs.c index f78535b8ed..9dc68dcdfd 100644 --- a/src/tfs/src/tfs.c +++ b/src/tfs/src/tfs.c @@ -480,11 +480,13 @@ static int tfsFormatDir(char *idir, char *odir) { return -1; } - if (realpath(wep.we_wordv[0], odir) == NULL) { + char tmp[PATH_MAX] = {0}; + if (realpath(wep.we_wordv[0], tmp) == NULL) { terrno = TAOS_SYSTEM_ERROR(errno); wordfree(&wep); return -1; } + strcpy(odir, tmp); wordfree(&wep); return 0; diff --git a/src/util/src/tconfig.c b/src/util/src/tconfig.c index c4bd577602..442e83bb4f 100644 --- a/src/util/src/tconfig.c +++ b/src/util/src/tconfig.c @@ -151,7 +151,7 @@ static bool taosReadDirectoryConfig(SGlobalCfg *cfg, char *input_value) { wordfree(&full_path); - char tmp[1025] = {0}; + char tmp[PATH_MAX] = {0}; if (realpath(option, tmp) != NULL) { strcpy(option, tmp); } From 28a4d4c0308c8d84684955d979ca579c2473d8bd Mon Sep 17 00:00:00 2001 From: lichuang Date: Thu, 24 Jun 2021 16:16:33 +0800 Subject: [PATCH 29/37] [TD-1568]fix tdMergeDataCols compare key bug --- src/common/inc/tdataformat.h | 5 +++++ src/common/src/tdataformat.c | 2 +- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/src/common/inc/tdataformat.h b/src/common/inc/tdataformat.h index 8ee7329156..e3989a1deb 100644 --- a/src/common/inc/tdataformat.h +++ b/src/common/inc/tdataformat.h @@ -289,6 +289,11 @@ static FORCE_INLINE TKEY dataColsTKeyFirst(SDataCols *pCols) { } } +static FORCE_INLINE TSKEY dataColsKeyAtRow(SDataCols *pCols, int row) { + ASSERT(row < pCols->numOfRows); + return dataColsKeyAt(pCols, row); +} + static FORCE_INLINE TSKEY dataColsKeyFirst(SDataCols *pCols) { if (pCols->numOfRows) { return dataColsKeyAt(pCols, 0); diff --git a/src/common/src/tdataformat.c b/src/common/src/tdataformat.c index 7ae34d532c..94c429cfc0 100644 --- a/src/common/src/tdataformat.c +++ b/src/common/src/tdataformat.c @@ -452,7 +452,7 @@ int tdMergeDataCols(SDataCols *target, SDataCols *source, int rowsToMerge, int * SDataCols *pTarget = NULL; - if ((target->numOfRows == 0) || (dataColsKeyLast(target) < dataColsKeyFirst(source))) { // No overlap + if ((target->numOfRows == 0) || (dataColsKeyLast(target) < 
dataColsKeyAtRow(source, *pOffset))) { // No overlap ASSERT(target->numOfRows + rowsToMerge <= target->maxPoints); for (int i = 0; i < rowsToMerge; i++) { for (int j = 0; j < source->numOfCols; j++) { From e143c34c7b60335e13b03cb318c9fe3b528b1db1 Mon Sep 17 00:00:00 2001 From: tomchon Date: Thu, 24 Jun 2021 17:59:13 +0800 Subject: [PATCH 30/37] comment "nestedQuery/queryInterval.py" --- tests/pytest/fulltest.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh index 37fb3acdfa..c66ccc5477 100755 --- a/tests/pytest/fulltest.sh +++ b/tests/pytest/fulltest.sh @@ -235,7 +235,7 @@ python3 ./test.py -f query/queryTscomputWithNow.py python3 ./test.py -f query/computeErrorinWhere.py python3 ./test.py -f query/queryTsisNull.py python3 ./test.py -f query/subqueryFilter.py -python3 ./test.py -f query/nestedQuery/queryInterval.py +# python3 ./test.py -f query/nestedQuery/queryInterval.py python3 ./test.py -f query/queryStateWindow.py From eb0f1a39e7372e736cd5521247e0f27c0826a78b Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Thu, 24 Jun 2021 20:49:01 +0800 Subject: [PATCH 31/37] Hotfix/sangshuduo/td 4892 taosdemo sub fetch for develop (#6609) * [TD-4892]: taosdemo subscribe fetch result. for develop branch * fix stbname length. * restrict prefix length. * submit empty * fix minor code. --- src/kit/taosdemo/taosdemo.c | 127 ++++++++++++++++-------------------- 1 file changed, 56 insertions(+), 71 deletions(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index a6ba7e9493..751f6bc07a 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -79,10 +79,9 @@ enum TEST_MODE { #define MAX_SQL_SIZE 65536 #define BUFFER_SIZE (65536*2) -#define COND_BUF_LEN BUFFER_SIZE - 30 +#define COND_BUF_LEN (BUFFER_SIZE - 30) #define MAX_USERNAME_SIZE 64 #define MAX_PASSWORD_SIZE 64 -#define MAX_DB_NAME_SIZE 64 #define MAX_HOSTNAME_SIZE 64 #define MAX_TB_NAME_SIZE 64 #define MAX_DATA_SIZE (16*1024)+20 // max record len: 16*1024, timestamp string and ,('') need extra space @@ -90,7 +89,7 @@ enum TEST_MODE { #define OPT_ABORT 1 /* –abort */ #define STRING_LEN 60000 #define MAX_PREPARED_RAND 1000000 -#define MAX_FILE_NAME_LEN 128 +#define MAX_FILE_NAME_LEN 256 // max file name length on linux is 255. #define MAX_SAMPLES_ONCE_FROM_FILE 10000 #define MAX_NUM_DATATYPE 10 @@ -195,13 +194,6 @@ enum _describe_table_index { TSDB_MAX_DESCRIBE_METRIC }; -typedef struct { - char field[TSDB_COL_NAME_LEN + 1]; - char type[16]; - int length; - char note[128]; -} SColDes; - /* Used by main to communicate with parse_opt. 
*/ static char *g_dupstr = NULL; @@ -247,16 +239,16 @@ typedef struct SArguments_S { } SArguments; typedef struct SColumn_S { - char field[TSDB_COL_NAME_LEN + 1]; - char dataType[MAX_TB_NAME_SIZE]; + char field[TSDB_COL_NAME_LEN]; + char dataType[16]; uint32_t dataLen; char note[128]; } StrColumn; typedef struct SSuperTable_S { - char sTblName[MAX_TB_NAME_SIZE+1]; - char dataSource[MAX_TB_NAME_SIZE+1]; // rand_gen or sample - char childTblPrefix[MAX_TB_NAME_SIZE]; + char sTblName[TSDB_TABLE_NAME_LEN]; + char dataSource[MAX_TB_NAME_SIZE]; // rand_gen or sample + char childTblPrefix[TSDB_TABLE_NAME_LEN - 20]; // 20 characters reserved for seq char insertMode[MAX_TB_NAME_SIZE]; // taosc, rest uint16_t childTblExists; int64_t childTblCount; @@ -277,8 +269,8 @@ typedef struct SSuperTable_S { int64_t timeStampStep; char startTimestamp[MAX_TB_NAME_SIZE]; char sampleFormat[MAX_TB_NAME_SIZE]; // csv, json - char sampleFile[MAX_FILE_NAME_LEN+1]; - char tagsFile[MAX_FILE_NAME_LEN+1]; + char sampleFile[MAX_FILE_NAME_LEN]; + char tagsFile[MAX_FILE_NAME_LEN]; uint32_t columnCount; StrColumn columns[MAX_COLUMN_COUNT]; @@ -305,7 +297,7 @@ typedef struct SSuperTable_S { } SSuperTable; typedef struct { - char name[TSDB_DB_NAME_LEN + 1]; + char name[TSDB_DB_NAME_LEN]; char create_time[32]; int64_t ntables; int32_t vgroups; @@ -341,11 +333,11 @@ typedef struct SDbCfg_S { int cache; int blocks; int quorum; - char precision[MAX_TB_NAME_SIZE]; + char precision[8]; } SDbCfg; typedef struct SDataBase_S { - char dbName[MAX_DB_NAME_SIZE]; + char dbName[TSDB_DB_NAME_LEN]; bool drop; // 0: use exists, 1: if exists, drop then new create SDbCfg dbCfg; uint64_t superTblCount; @@ -353,14 +345,14 @@ typedef struct SDataBase_S { } SDataBase; typedef struct SDbs_S { - char cfgDir[MAX_FILE_NAME_LEN+1]; + char cfgDir[MAX_FILE_NAME_LEN]; char host[MAX_HOSTNAME_SIZE]; struct sockaddr_in serv_addr; uint16_t port; char user[MAX_USERNAME_SIZE]; char password[MAX_PASSWORD_SIZE]; - char resultFile[MAX_FILE_NAME_LEN+1]; + char resultFile[MAX_FILE_NAME_LEN]; bool use_metric; bool insert_only; bool do_aggreFunc; @@ -387,7 +379,7 @@ typedef struct SpecifiedQueryInfo_S { bool subscribeRestart; int subscribeKeepProgress; char sql[MAX_QUERY_SQL_COUNT][MAX_QUERY_SQL_LENGTH+1]; - char result[MAX_QUERY_SQL_COUNT][MAX_FILE_NAME_LEN+1]; + char result[MAX_QUERY_SQL_COUNT][MAX_FILE_NAME_LEN]; int resubAfterConsume[MAX_QUERY_SQL_COUNT]; int endAfterConsume[MAX_QUERY_SQL_COUNT]; TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT]; @@ -398,7 +390,7 @@ typedef struct SpecifiedQueryInfo_S { } SpecifiedQueryInfo; typedef struct SuperQueryInfo_S { - char sTblName[MAX_TB_NAME_SIZE+1]; + char sTblName[TSDB_TABLE_NAME_LEN]; uint64_t queryInterval; // 0: unlimit > 0 loop/s uint32_t threadCnt; uint32_t asyncMode; // 0: sync, 1: async @@ -407,10 +399,10 @@ typedef struct SuperQueryInfo_S { int subscribeKeepProgress; uint64_t queryTimes; int64_t childTblCount; - char childTblPrefix[MAX_TB_NAME_SIZE]; + char childTblPrefix[TSDB_TABLE_NAME_LEN - 20]; // 20 characters reserved for seq int sqlCount; char sql[MAX_QUERY_SQL_COUNT][MAX_QUERY_SQL_LENGTH+1]; - char result[MAX_QUERY_SQL_COUNT][MAX_FILE_NAME_LEN+1]; + char result[MAX_QUERY_SQL_COUNT][MAX_FILE_NAME_LEN]; int resubAfterConsume; int endAfterConsume; TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT]; @@ -420,13 +412,13 @@ typedef struct SuperQueryInfo_S { } SuperQueryInfo; typedef struct SQueryMetaInfo_S { - char cfgDir[MAX_FILE_NAME_LEN+1]; + char cfgDir[MAX_FILE_NAME_LEN]; char host[MAX_HOSTNAME_SIZE]; uint16_t port; struct sockaddr_in 
serv_addr; char user[MAX_USERNAME_SIZE]; char password[MAX_PASSWORD_SIZE]; - char dbName[MAX_DB_NAME_SIZE+1]; + char dbName[TSDB_DB_NAME_LEN]; char queryMode[MAX_TB_NAME_SIZE]; // taosc, rest SpecifiedQueryInfo specifiedQueryInfo; @@ -438,11 +430,11 @@ typedef struct SThreadInfo_S { TAOS * taos; TAOS_STMT *stmt; int threadID; - char db_name[MAX_DB_NAME_SIZE+1]; + char db_name[TSDB_DB_NAME_LEN]; uint32_t time_precision; char filePath[4096]; FILE *fp; - char tb_prefix[MAX_TB_NAME_SIZE]; + char tb_prefix[TSDB_TABLE_NAME_LEN]; uint64_t start_table_from; uint64_t end_table_to; int64_t ntables; @@ -608,7 +600,7 @@ SArguments g_args = { 1, // query_times 0, // interlace_rows; 30000, // num_of_RPR - (1024*1024), // max_sql_len + (1024*1024), // max_sql_len 10000, // num_of_tables 10000, // num_of_DPT 0, // abort @@ -3035,7 +3027,7 @@ static int startMultiThreadCreateChildTable( for (int64_t i = 0; i < threads; i++) { threadInfo *pThreadInfo = infos + i; pThreadInfo->threadID = i; - tstrncpy(pThreadInfo->db_name, db_name, MAX_DB_NAME_SIZE); + tstrncpy(pThreadInfo->db_name, db_name, TSDB_DB_NAME_LEN); pThreadInfo->superTblInfo = superTblInfo; verbosePrint("%s() %d db_name: %s\n", __func__, __LINE__, db_name); pThreadInfo->taos = taos_connect( @@ -3326,7 +3318,7 @@ static bool getColumnAndTagTypeFromInsertJsonFile( goto PARSE_OVER; } //tstrncpy(superTbls->columns[k].dataType, dataType->valuestring, MAX_TB_NAME_SIZE); - tstrncpy(columnCase.dataType, dataType->valuestring, MAX_TB_NAME_SIZE); + tstrncpy(columnCase.dataType, dataType->valuestring, strlen(dataType->valuestring) + 1); cJSON* dataLen = cJSON_GetObjectItem(column, "len"); if (dataLen && dataLen->type == cJSON_Number) { @@ -3341,7 +3333,7 @@ static bool getColumnAndTagTypeFromInsertJsonFile( for (int n = 0; n < count; ++n) { tstrncpy(superTbls->columns[index].dataType, - columnCase.dataType, MAX_TB_NAME_SIZE); + columnCase.dataType, strlen(columnCase.dataType) + 1); superTbls->columns[index].dataLen = columnCase.dataLen; index++; } @@ -3397,7 +3389,7 @@ static bool getColumnAndTagTypeFromInsertJsonFile( __func__, __LINE__); goto PARSE_OVER; } - tstrncpy(columnCase.dataType, dataType->valuestring, MAX_TB_NAME_SIZE); + tstrncpy(columnCase.dataType, dataType->valuestring, strlen(dataType->valuestring) + 1); cJSON* dataLen = cJSON_GetObjectItem(tag, "len"); if (dataLen && dataLen->type == cJSON_Number) { @@ -3412,7 +3404,7 @@ static bool getColumnAndTagTypeFromInsertJsonFile( for (int n = 0; n < count; ++n) { tstrncpy(superTbls->tags[index].dataType, columnCase.dataType, - MAX_TB_NAME_SIZE); + strlen(columnCase.dataType) + 1); superTbls->tags[index].dataLen = columnCase.dataLen; index++; } @@ -3635,7 +3627,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { printf("ERROR: failed to read json, db name not found\n"); goto PARSE_OVER; } - tstrncpy(g_Dbs.db[i].dbName, dbName->valuestring, MAX_DB_NAME_SIZE); + tstrncpy(g_Dbs.db[i].dbName, dbName->valuestring, TSDB_DB_NAME_LEN); cJSON *drop = cJSON_GetObjectItem(dbinfo, "drop"); if (drop && drop->type == cJSON_String && drop->valuestring != NULL) { @@ -3656,10 +3648,9 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { if (precision && precision->type == cJSON_String && precision->valuestring != NULL) { tstrncpy(g_Dbs.db[i].dbCfg.precision, precision->valuestring, - MAX_DB_NAME_SIZE); + 8); } else if (!precision) { - //tstrncpy(g_Dbs.db[i].dbCfg.precision, "ms", MAX_DB_NAME_SIZE); - memset(g_Dbs.db[i].dbCfg.precision, 0, MAX_DB_NAME_SIZE); + memset(g_Dbs.db[i].dbCfg.precision, 0, 8); } else { 
printf("ERROR: failed to read json, precision not found\n"); goto PARSE_OVER; @@ -3836,7 +3827,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { goto PARSE_OVER; } tstrncpy(g_Dbs.db[i].superTbls[j].sTblName, stbName->valuestring, - MAX_TB_NAME_SIZE); + TSDB_TABLE_NAME_LEN); cJSON *prefix = cJSON_GetObjectItem(stbInfo, "childtable_prefix"); if (!prefix || prefix->type != cJSON_String || prefix->valuestring == NULL) { @@ -3844,7 +3835,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { goto PARSE_OVER; } tstrncpy(g_Dbs.db[i].superTbls[j].childTblPrefix, prefix->valuestring, - MAX_DB_NAME_SIZE); + TSDB_TABLE_NAME_LEN - 20); cJSON *autoCreateTbl = cJSON_GetObjectItem(stbInfo, "auto_create_table"); if (autoCreateTbl @@ -3912,9 +3903,9 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { if (dataSource && dataSource->type == cJSON_String && dataSource->valuestring != NULL) { tstrncpy(g_Dbs.db[i].superTbls[j].dataSource, - dataSource->valuestring, MAX_DB_NAME_SIZE); + dataSource->valuestring, TSDB_DB_NAME_LEN); } else if (!dataSource) { - tstrncpy(g_Dbs.db[i].superTbls[j].dataSource, "rand", MAX_DB_NAME_SIZE); + tstrncpy(g_Dbs.db[i].superTbls[j].dataSource, "rand", TSDB_DB_NAME_LEN); } else { errorPrint("%s() LN%d, failed to read json, data_source not found\n", __func__, __LINE__); @@ -3972,10 +3963,10 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { cJSON *ts = cJSON_GetObjectItem(stbInfo, "start_timestamp"); if (ts && ts->type == cJSON_String && ts->valuestring != NULL) { tstrncpy(g_Dbs.db[i].superTbls[j].startTimestamp, - ts->valuestring, MAX_DB_NAME_SIZE); + ts->valuestring, TSDB_DB_NAME_LEN); } else if (!ts) { tstrncpy(g_Dbs.db[i].superTbls[j].startTimestamp, - "now", MAX_DB_NAME_SIZE); + "now", TSDB_DB_NAME_LEN); } else { printf("ERROR: failed to read json, start_timestamp not found\n"); goto PARSE_OVER; @@ -3995,9 +3986,9 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { if (sampleFormat && sampleFormat->type == cJSON_String && sampleFormat->valuestring != NULL) { tstrncpy(g_Dbs.db[i].superTbls[j].sampleFormat, - sampleFormat->valuestring, MAX_DB_NAME_SIZE); + sampleFormat->valuestring, TSDB_DB_NAME_LEN); } else if (!sampleFormat) { - tstrncpy(g_Dbs.db[i].superTbls[j].sampleFormat, "csv", MAX_DB_NAME_SIZE); + tstrncpy(g_Dbs.db[i].superTbls[j].sampleFormat, "csv", TSDB_DB_NAME_LEN); } else { printf("ERROR: failed to read json, sample_format not found\n"); goto PARSE_OVER; @@ -4242,7 +4233,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { cJSON* dbs = cJSON_GetObjectItem(root, "databases"); if (dbs && dbs->type == cJSON_String && dbs->valuestring != NULL) { - tstrncpy(g_queryInfo.dbName, dbs->valuestring, MAX_DB_NAME_SIZE); + tstrncpy(g_queryInfo.dbName, dbs->valuestring, TSDB_DB_NAME_LEN); } else if (!dbs) { printf("ERROR: failed to read json, databases not found\n"); goto PARSE_OVER; @@ -4492,7 +4483,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { if (stblname && stblname->type == cJSON_String && stblname->valuestring != NULL) { tstrncpy(g_queryInfo.superQueryInfo.sTblName, stblname->valuestring, - MAX_TB_NAME_SIZE); + TSDB_TABLE_NAME_LEN); } else { errorPrint("%s() LN%d, failed to read json, super table name input error\n", __func__, __LINE__); @@ -6417,7 +6408,7 @@ static void startMultiThreadInsertData(int threads, char* db_name, for (int i = 0; i < threads; i++) { threadInfo *pThreadInfo = infos + i; pThreadInfo->threadID = i; - tstrncpy(pThreadInfo->db_name, db_name, MAX_DB_NAME_SIZE); + tstrncpy(pThreadInfo->db_name, db_name, 
TSDB_DB_NAME_LEN); pThreadInfo->time_precision = timePrec; pThreadInfo->superTblInfo = superTblInfo; @@ -6861,7 +6852,7 @@ static void *specifiedTableQuery(void *sarg) { } } - char sqlStr[MAX_DB_NAME_SIZE + 5]; + char sqlStr[TSDB_DB_NAME_LEN + 5]; sprintf(sqlStr, "use %s", g_queryInfo.dbName); if (0 != queryDbExec(pThreadInfo->taos, sqlStr, NO_INSERT_TYPE, false)) { taos_close(pThreadInfo->taos); @@ -7337,12 +7328,6 @@ static void *superSubscribe(void *sarg) { performancePrint("st: %"PRIu64" et: %"PRIu64" delta: %"PRIu64"\n", st, et, (et - st)); if (res) { - if (g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq][0] != 0) { - sprintf(pThreadInfo->filePath, "%s-%d", - g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq], - pThreadInfo->threadID); - fetchResult(res, pThreadInfo); - } if (g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq][0] != 0) { sprintf(pThreadInfo->filePath, "%s-%d", g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq], @@ -7449,10 +7434,10 @@ static void *specifiedSubscribe(void *sarg) { sprintf(pThreadInfo->filePath, "%s-%d", g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq], pThreadInfo->threadID); - fetchResult( - g_queryInfo.specifiedQueryInfo.res[pThreadInfo->threadID], - pThreadInfo); } + fetchResult( + g_queryInfo.specifiedQueryInfo.res[pThreadInfo->threadID], + pThreadInfo); g_queryInfo.specifiedQueryInfo.consumed[pThreadInfo->threadID] ++; if ((g_queryInfo.specifiedQueryInfo.resubAfterConsume[pThreadInfo->querySeq] != -1) @@ -7689,9 +7674,9 @@ static void setParaFromArg(){ g_Dbs.dbCount = 1; g_Dbs.db[0].drop = true; - tstrncpy(g_Dbs.db[0].dbName, g_args.database, MAX_DB_NAME_SIZE); + tstrncpy(g_Dbs.db[0].dbName, g_args.database, TSDB_DB_NAME_LEN); g_Dbs.db[0].dbCfg.replica = g_args.replica; - tstrncpy(g_Dbs.db[0].dbCfg.precision, "ms", MAX_DB_NAME_SIZE); + tstrncpy(g_Dbs.db[0].dbCfg.precision, "ms", 8); tstrncpy(g_Dbs.resultFile, g_args.output_file, MAX_FILE_NAME_LEN); @@ -7713,7 +7698,7 @@ static void setParaFromArg(){ if (g_args.use_metric) { g_Dbs.db[0].superTblCount = 1; - tstrncpy(g_Dbs.db[0].superTbls[0].sTblName, "meters", MAX_TB_NAME_SIZE); + tstrncpy(g_Dbs.db[0].superTbls[0].sTblName, "meters", TSDB_TABLE_NAME_LEN); g_Dbs.db[0].superTbls[0].childTblCount = g_args.num_of_tables; g_Dbs.threadCount = g_args.num_of_threads; g_Dbs.threadCountByCreateTbl = g_args.num_of_threads; @@ -7724,7 +7709,7 @@ static void setParaFromArg(){ g_Dbs.db[0].superTbls[0].disorderRange = g_args.disorderRange; g_Dbs.db[0].superTbls[0].disorderRatio = g_args.disorderRatio; tstrncpy(g_Dbs.db[0].superTbls[0].childTblPrefix, - g_args.tb_prefix, MAX_TB_NAME_SIZE); + g_args.tb_prefix, TSDB_TABLE_NAME_LEN - 20); tstrncpy(g_Dbs.db[0].superTbls[0].dataSource, "rand", MAX_TB_NAME_SIZE); g_Dbs.db[0].superTbls[0].iface = g_args.iface; tstrncpy(g_Dbs.db[0].superTbls[0].startTimestamp, @@ -7741,7 +7726,7 @@ static void setParaFromArg(){ } tstrncpy(g_Dbs.db[0].superTbls[0].columns[i].dataType, - data_type[i], MAX_TB_NAME_SIZE); + data_type[i], strlen(data_type[i]) + 1); g_Dbs.db[0].superTbls[0].columns[i].dataLen = g_args.len_of_binary; g_Dbs.db[0].superTbls[0].columnCount++; } @@ -7752,18 +7737,18 @@ static void setParaFromArg(){ for (int i = g_Dbs.db[0].superTbls[0].columnCount; i < g_args.num_of_CPR; i++) { tstrncpy(g_Dbs.db[0].superTbls[0].columns[i].dataType, - "INT", MAX_TB_NAME_SIZE); + "INT", strlen("INT") + 1); g_Dbs.db[0].superTbls[0].columns[i].dataLen = 0; g_Dbs.db[0].superTbls[0].columnCount++; } } tstrncpy(g_Dbs.db[0].superTbls[0].tags[0].dataType, 
- "INT", MAX_TB_NAME_SIZE); + "INT", strlen("INT") + 1); g_Dbs.db[0].superTbls[0].tags[0].dataLen = 0; tstrncpy(g_Dbs.db[0].superTbls[0].tags[1].dataType, - "BINARY", MAX_TB_NAME_SIZE); + "BINARY", strlen("BINARY") + 1); g_Dbs.db[0].superTbls[0].tags[1].dataLen = g_args.len_of_binary; g_Dbs.db[0].superTbls[0].tagCount = 2; } else { @@ -7899,11 +7884,11 @@ static void queryResult() { pThreadInfo->end_table_to = g_Dbs.db[0].superTbls[0].childTblCount - 1; pThreadInfo->superTblInfo = &g_Dbs.db[0].superTbls[0]; tstrncpy(pThreadInfo->tb_prefix, - g_Dbs.db[0].superTbls[0].childTblPrefix, MAX_TB_NAME_SIZE); + g_Dbs.db[0].superTbls[0].childTblPrefix, TSDB_TABLE_NAME_LEN - 20); } else { pThreadInfo->ntables = g_args.num_of_tables; pThreadInfo->end_table_to = g_args.num_of_tables -1; - tstrncpy(pThreadInfo->tb_prefix, g_args.tb_prefix, MAX_TB_NAME_SIZE); + tstrncpy(pThreadInfo->tb_prefix, g_args.tb_prefix, TSDB_TABLE_NAME_LEN); } pThreadInfo->taos = taos_connect( From 8fc2b3d79d6e8e07871bef2380470f23b504c4ff Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Thu, 24 Jun 2021 22:04:29 +0800 Subject: [PATCH 32/37] Hotfix/sangshuduo/td 4823 taosdemo gettablename for develop (#6605) * [TD-4823]: taosdemo getTableName return empty. * fix typo. * check table name is empty in early stage. Co-authored-by: Shuduo Sang --- src/kit/taosdemo/taosdemo.c | 17 +++++++---------- 1 file changed, 7 insertions(+), 10 deletions(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 751f6bc07a..38f0ecb219 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -2493,6 +2493,13 @@ static int getChildNameOfSuperTableWithLimitAndOffset(TAOS * taos, char* pTblName = childTblName; while((row = taos_fetch_row(res)) != NULL) { int32_t* len = taos_fetch_lengths(res); + + if (0 == strlen((char *)row[0])) { + errorPrint("%s() LN%d, No.%"PRId64" table return empty name\n", + __func__, __LINE__, count); + exit(-1); + } + tstrncpy(pTblName, (char *)row[0], len[0]+1); //printf("==== sub table name: %s\n", pTblName); count++; @@ -6293,16 +6300,6 @@ static void startMultiThreadInsertData(int threads, char* db_name, } } - // read sample data from file first - if ((superTblInfo) && (0 == strncasecmp(superTblInfo->dataSource, - "sample", strlen("sample")))) { - if (0 != prepareSampleDataForSTable(superTblInfo)) { - errorPrint("%s() LN%d, prepare sample data for stable failed!\n", - __func__, __LINE__); - exit(-1); - } - } - TAOS* taos0 = taos_connect( g_Dbs.host, g_Dbs.user, g_Dbs.password, db_name, g_Dbs.port); From 53f12701f7cc55df65ea038cc0a67ad1149907cf Mon Sep 17 00:00:00 2001 From: zhaoyanggh Date: Fri, 25 Jun 2021 09:07:12 +0800 Subject: [PATCH 33/37] add test case to verify taosdump with binary and nchar data --- tests/pytest/tools/taosdumpTest.py | 2 + tests/pytest/tools/taosdumpTest2.py | 74 +++++++++++++++++++++++++++++ 2 files changed, 76 insertions(+) create mode 100644 tests/pytest/tools/taosdumpTest2.py diff --git a/tests/pytest/tools/taosdumpTest.py b/tests/pytest/tools/taosdumpTest.py index 534a477b34..8746f4ecdf 100644 --- a/tests/pytest/tools/taosdumpTest.py +++ b/tests/pytest/tools/taosdumpTest.py @@ -45,7 +45,9 @@ class TDTestCase: for i in range(100): sql += "(%d, %d, 'nchar%d')" % (currts + i, i % 100, i % 100) tdSql.execute(sql) + + os.system("rm /tmp/*.sql") os.system("taosdump --databases db -o /tmp") tdSql.execute("drop database db") diff --git a/tests/pytest/tools/taosdumpTest2.py b/tests/pytest/tools/taosdumpTest2.py new file mode 100644 index 0000000000..51a73555a8 
--- /dev/null +++ b/tests/pytest/tools/taosdumpTest2.py @@ -0,0 +1,74 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import os +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + self.ts = 1601481600000 + self.numberOfTables = 1 + self.numberOfRecords = 15000 + + def run(self): + tdSql.prepare() + + tdSql.execute("create table st(ts timestamp, c1 timestamp, c2 int, c3 bigint, c4 float, c5 double, c6 binary(8), c7 smallint, c8 tinyint, c9 bool, c10 nchar(8)) tags(t1 int)") + tdSql.execute("create table t1 using st tags(0)") + currts = self.ts + finish = 0 + while(finish < self.numberOfRecords): + sql = "insert into t1 values" + for i in range(finish, self.numberOfRecords): + sql += "(%d, 1019774612, 29931, 1442173978, 165092.468750, 1128.643179, 'MOCq1pTu', 18405, 82, 0, 'g0A6S0Fu')" % (currts + i) + finish = i + 1 + if (1048576 - len(sql)) < 16384: + break + tdSql.execute(sql) + + os.system("rm /tmp/*.sql") + os.system("taosdump --databases db -o /tmp -B 32766 -L 1048576") + + tdSql.execute("drop database db") + tdSql.query("show databases") + tdSql.checkRows(0) + + os.system("taosdump -i /tmp") + + tdSql.query("show databases") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 'db') + + tdSql.execute("use db") + tdSql.query("show stables") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 'st') + + tdSql.query("select count(*) from t1") + tdSql.checkData(0, 0, self.numberOfRecords) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file From 757642507f751a080a06d93c59ded7cf396e6eb4 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Fri, 25 Jun 2021 12:11:17 +0800 Subject: [PATCH 34/37] Update README.md (#6615) --- README.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/README.md b/README.md index 0e1adcd97c..89e35f6e63 100644 --- a/README.md +++ b/README.md @@ -110,6 +110,12 @@ mkdir debug && cd debug cmake .. && cmake --build . ``` +You can use Jemalloc as memory allocator instead of glibc: +``` +apt install autoconf +cmake .. -DJEMALLOC_ENABLED=true +``` + TDengine build script can detect the host machine's architecture on X86-64, X86, arm64, arm32 and mips64 platform. 
You can also specify CPUTYPE option like aarch64 or aarch32 too if the detection result is not correct: From 2b879fb158745ddac6b44a1fa97b1df57f906276 Mon Sep 17 00:00:00 2001 From: happyguoxy Date: Fri, 25 Jun 2021 13:49:07 +0800 Subject: [PATCH 35/37] [TD4824] --- tests/pytest/insert/in_function.py | 580 ++++++++++++++++++++++++----- 1 file changed, 484 insertions(+), 96 deletions(-) diff --git a/tests/pytest/insert/in_function.py b/tests/pytest/insert/in_function.py index 263c8a78aa..3f2e1a03ca 100644 --- a/tests/pytest/insert/in_function.py +++ b/tests/pytest/insert/in_function.py @@ -18,7 +18,6 @@ from util.log import * from util.cases import * from util.sql import * - class TDTestCase: def init(self, conn, logSql): tdLog.debug("start to execute %s" % __file__) @@ -27,6 +26,7 @@ class TDTestCase: def run(self): tdSql.prepare() # test case for https://jira.taosdata.com:18080/browse/TD-4568 + # test case for https://jira.taosdata.com:18080/browse/TD-4824 tdLog.info("=============== step1,check bool and tinyint data type") @@ -137,8 +137,28 @@ class TDTestCase: tdSql.checkData(0,1,'True') tdSql.checkData(0,2,'0') + tdLog.info("=============== step1.3,multiple column and multiple tag check in function") + cmd1 = '''select * from in_stable_1 + where in_bool in (true,false) and in_tinyint in (0,127,-127) + and tin_bool in (true,false) and tin_tinyint in (0,127,-127) + order by ts desc ;''' + tdLog.info(cmd1) + tdSql.query(cmd1) + tdSql.checkData(0,1,'True') + tdSql.checkData(0,2,'0') + tdSql.checkData(0,3,'False') + tdSql.checkData(0,4,'0') + tdSql.checkData(1,1,'False') + tdSql.checkData(1,2,'127') + tdSql.checkData(1,3,'False') + tdSql.checkData(1,4,'-127') + tdSql.checkData(2,1,'True') + tdSql.checkData(2,2,'-127') + tdSql.checkData(2,3,'True') + tdSql.checkData(2,4,'127') - tdLog.info("=============== step1.3,drop normal table && create table") + + tdLog.info("=============== step1.4,drop normal table && create table") cmd1 = 'drop table if exists normal_in_bool_tinyint_1 ;' cmd2 = 'create table normal_in_bool_tinyint_1 (ts timestamp,in_bool bool,in_tinyint tinyint) ; ' tdLog.info(cmd1) @@ -147,7 +167,7 @@ class TDTestCase: tdSql.execute(cmd2) - tdLog.info("=============== step1.4,insert normal table right data and check in function") + tdLog.info("=============== step1.5,insert normal table right data and check in function") cmd1 = 'insert into normal_in_bool_tinyint_1 values(now,\'true\',\'-127\') ;' tdLog.info(cmd1) tdSql.execute(cmd1) @@ -175,6 +195,17 @@ class TDTestCase: tdSql.checkData(0,1,'True') tdSql.checkData(0,2,'0') + cmd4 = '''select * from normal_in_bool_tinyint_1 + where in_bool in (true,false) and in_tinyint in (0,127,-127) + order by ts desc ;''' + tdLog.info(cmd4) + tdSql.query(cmd4) + tdSql.checkData(0,1,'True') + tdSql.checkData(0,2,'0') + tdSql.checkData(1,1,'False') + tdSql.checkData(1,2,'127') + tdSql.checkData(2,1,'True') + tdSql.checkData(2,2,'-127') tdLog.info("=============== step2,check int、smallint and bigint data type") @@ -378,10 +409,39 @@ class TDTestCase: tdSql.query('select * from in_int_smallint_bigint_3 where in_big in (-9223372036854775807) order by ts desc') tdSql.checkData(0,1,'0') tdSql.checkData(0,2,'32767') - tdSql.checkData(0,3,'-9223372036854775807') + tdSql.checkData(0,3,'-9223372036854775807') - tdLog.info("=============== step2.3,drop normal table && create table") + tdLog.info("=============== step2.3,multiple column and multiple tag check in function") + cmd1 = '''select * from in_stable_2 + where in_int in (0,2147483647,-2147483647) and 
in_small in (0,32767,-32767) + and in_big in (0,9223372036854775807,-9223372036854775807) + and tin_int in (0,2147483647,-2147483647) and tin_small in (0,32767,-32767) + and tin_big in (0,9223372036854775807,-9223372036854775807) + order by ts desc ;''' + tdLog.info(cmd1) + tdSql.query(cmd1) + tdSql.checkData(0,1,'0') + tdSql.checkData(0,2,'32767') + tdSql.checkData(0,3,'-9223372036854775807') + tdSql.checkData(0,4,'0') + tdSql.checkData(0,5,'32767') + tdSql.checkData(0,6,'-9223372036854775807') + tdSql.checkData(1,1,'-2147483647') + tdSql.checkData(1,2,'0') + tdSql.checkData(1,3,'9223372036854775807') + tdSql.checkData(1,4,'-2147483647') + tdSql.checkData(1,5,'0') + tdSql.checkData(1,6,'9223372036854775807') + tdSql.checkData(2,1,'2147483647') + tdSql.checkData(2,2,'-32767') + tdSql.checkData(2,3,'0') + tdSql.checkData(2,4,'2147483647') + tdSql.checkData(2,5,'-32767') + tdSql.checkData(2,6,'0') + + + tdLog.info("=============== step2.4,drop normal table && create table") cmd1 = 'drop table if exists normal_int_smallint_bigint_1 ;' cmd2 = 'create table normal_int_smallint_bigint_1 (ts timestamp,in_int int,in_small smallint , in_big bigint) ; ' tdLog.info(cmd1) @@ -390,7 +450,7 @@ class TDTestCase: tdSql.execute(cmd2) - tdLog.info("=============== step2.4,insert normal table right data and check in function") + tdLog.info("=============== step2.5,insert normal table right data and check in function") cmd1 = 'insert into normal_int_smallint_bigint_1 values(now,\'2147483647\',\'-32767\',\'0\') ;' tdLog.info(cmd1) tdSql.execute(cmd1) @@ -437,7 +497,23 @@ class TDTestCase: tdSql.query('select * from normal_int_smallint_bigint_1 where in_big in (-9223372036854775807) order by ts desc') tdSql.checkData(0,1,'0') tdSql.checkData(0,2,'32767') - tdSql.checkData(0,3,'-9223372036854775807') + tdSql.checkData(0,3,'-9223372036854775807') + + cmd4 = '''select * from normal_int_smallint_bigint_1 + where in_int in (0,2147483647,-2147483647) and in_small in (0,32767,-32767) + and in_big in (0,9223372036854775807,-9223372036854775807) + order by ts desc ;''' + tdLog.info(cmd4) + tdSql.query(cmd4) + tdSql.checkData(0,1,'0') + tdSql.checkData(0,2,'32767') + tdSql.checkData(0,3,'-9223372036854775807') + tdSql.checkData(1,1,'-2147483647') + tdSql.checkData(1,2,'0') + tdSql.checkData(1,3,'9223372036854775807') + tdSql.checkData(2,1,'2147483647') + tdSql.checkData(2,2,'-32767') + tdSql.checkData(2,3,'0') tdLog.info("=============== step3,check binary and nchar data type") @@ -560,7 +636,30 @@ class TDTestCase: tdSql.checkData(0,2,'北京涛思数据科技有限公司') - tdLog.info("=============== step3.3,drop normal table && create table") + tdLog.info("=============== step3.3,multiple column and multiple tag check in function") + cmd1 = '''select * from in_stable_3 + where in_binary in (\'0\',\'TDengine\',\'TAOS\') + and in_nchar in (\'0\',\'北京涛思数据科技有限公司\',\'涛思数据TAOSdata\') + and tin_binary in (\'0\',\'TDengine\',\'taosdataTDengine\') + and tin_nchar in (\'0\',\'北京涛思数据科技有限公司\',\'北京涛思数据科技有限公司TDengine\') + order by ts desc ;''' + tdLog.info(cmd1) + tdSql.query(cmd1) + tdSql.checkData(0,1,'TDengine') + tdSql.checkData(0,2,'北京涛思数据科技有限公司') + tdSql.checkData(0,3,'taosdataTDengine') + tdSql.checkData(0,4,'北京涛思数据科技有限公司TDengine') + tdSql.checkData(1,1,'TAOS') + tdSql.checkData(1,2,'涛思数据TAOSdata') + tdSql.checkData(1,3,'TDengine') + tdSql.checkData(1,4,'北京涛思数据科技有限公司') + tdSql.checkData(2,1,'0') + tdSql.checkData(2,2,'0') + tdSql.checkData(2,3,'0') + tdSql.checkData(2,4,'0') + + + tdLog.info("=============== step3.4,drop normal table && create 
table") cmd1 = 'drop table if exists normal_in_binary_nchar_1 ;' cmd2 = 'create table normal_in_binary_nchar_1 (ts timestamp,in_binary binary(8),in_nchar nchar(12)) ; ' tdLog.info(cmd1) @@ -569,7 +668,7 @@ class TDTestCase: tdSql.execute(cmd2) - tdLog.info("=============== step3.4,insert normal table right data and check in function") + tdLog.info("=============== step3.5,insert normal table right data and check in function") cmd1 = 'insert into normal_in_binary_nchar_1 values(now,\'0\',\'0\') ;' tdLog.info(cmd1) tdSql.execute(cmd1) @@ -598,124 +697,413 @@ class TDTestCase: tdSql.checkData(0,2,'北京涛思数据科技有限公司') tdSql.query('select * from normal_in_binary_nchar_1 where in_nchar in (\'北京涛思数据科技有限公司\') order by ts desc') tdSql.checkData(0,1,'TDengine') - tdSql.checkData(0,2,'北京涛思数据科技有限公司') + tdSql.checkData(0,2,'北京涛思数据科技有限公司') - tdLog.info("=============== step4,check float and double data type,not support") + cmd4 = '''select * from normal_in_binary_nchar_1 + where in_binary in (\'0\',\'TDengine\',\'TAOS\') + and in_nchar in (\'0\',\'北京涛思数据科技有限公司\',\'涛思数据TAOSdata\') + order by ts desc ;''' + tdLog.info(cmd4) + tdSql.query(cmd4) + tdSql.checkData(0,1,'TDengine') + tdSql.checkData(0,2,'北京涛思数据科技有限公司') + tdSql.checkData(1,1,'TAOS') + tdSql.checkData(1,2,'涛思数据TAOSdata') + tdSql.checkData(2,1,'0') + tdSql.checkData(2,2,'0') + + + tdLog.info("=============== step4,check float and double data type") tdLog.info("=============== step4.1,drop table && create table") - cmd1 = 'drop table if exists in_float_double_1 ;' + cmd1 = 'drop table if exists in_ts_float_double_1 ;' + cmd2 = 'drop table if exists in_ts_float_double_2 ;' + cmd3 = 'drop table if exists in_ts_float_double_3 ;' cmd10 = 'drop table if exists in_stable_4 ;' - cmd11 = 'create stable in_stable_4(ts timestamp,in_float float,in_double double) tags (tin_float float,tin_double double) ;' - cmd12 = 'create table in_float_double_1 using in_stable_4 tags(\'666\',\'88888\') ; ' + cmd11 = 'create stable in_stable_4(ts timestamp,in_ts timestamp,in_float float,in_double double) tags (tin_ts timestamp,tin_float float,tin_double double) ;' + cmd12 = 'create table in_ts_float_double_1 using in_stable_4 tags(\'0\',\'0\',\'0\') ; ' + cmd13 = 'create table in_ts_float_double_2 using in_stable_4 tags(\'2020-01-01 08:00:00.001\',\'666\',\'-88888\') ; ' + cmd14 = 'create table in_ts_float_double_3 using in_stable_4 tags(\'2021-01-01 08:00:00.001\',\'-888.00000\',\'66666.000000000\') ; ' tdLog.info(cmd1) tdSql.execute(cmd1) + tdLog.info(cmd2) + tdSql.execute(cmd2) + tdLog.info(cmd3) + tdSql.execute(cmd3) tdLog.info(cmd10) tdSql.execute(cmd10) tdLog.info(cmd11) tdSql.execute(cmd11) tdLog.info(cmd12) tdSql.execute(cmd12) + tdLog.info(cmd13) + tdSql.execute(cmd13) + tdLog.info(cmd14) + tdSql.execute(cmd14) tdLog.info("=============== step4.2,insert stable right data and check in function") - cmd1 = 'insert into in_float_double_1 values(now,\'888\',\'66666\') ;' + cmd1 = 'insert into in_ts_float_double_1 values(now,\'0\',\'0\',\'0\') ;' tdLog.info(cmd1) - tdSql.execute(cmd1) + tdSql.execute(cmd1) + + tdSql.query('select * from in_stable_4 where in_ts in (\'0\') order by ts desc') + tdSql.checkData(0,1,'1970-01-01 08:00:00.000') + tdSql.checkData(0,2,0.00000) + tdSql.checkData(0,3,0.000000000) + tdSql.checkData(0,4,'1970-01-01 08:00:00.000') + tdSql.checkData(0,5,0.00000) + tdSql.checkData(0,6,0.000000000) + tdSql.query('select * from in_stable_4 where in_ts in (\'1970-01-01 08:00:00.000\') order by ts desc') + tdSql.checkData(0,1,'1970-01-01 08:00:00.000') + 
tdSql.checkData(0,2,0.00000) + tdSql.checkData(0,3,0.000000000) + tdSql.checkData(0,4,'1970-01-01 08:00:00.000') + tdSql.checkData(0,5,0.00000) + tdSql.checkData(0,6,0.000000000) + tdSql.query('select * from in_stable_4 where in_float in (0.00000) order by ts desc') + tdSql.checkData(0,1,'1970-01-01 08:00:00.000') + tdSql.checkData(0,2,0.00000) + tdSql.checkData(0,3,0.000000000) + tdSql.checkData(0,4,'1970-01-01 08:00:00.000') + tdSql.checkData(0,5,0.00000) + tdSql.checkData(0,6,0.000000000) + tdSql.query('select * from in_stable_4 where in_double in (0.000000000) order by ts desc') + tdSql.checkData(0,1,'1970-01-01 08:00:00.000') + tdSql.checkData(0,2,0.00000) + tdSql.checkData(0,3,0.000000000) + tdSql.checkData(0,4,'1970-01-01 08:00:00.000') + tdSql.checkData(0,5,0.00000) + tdSql.checkData(0,6,0.000000000) + + tdSql.query('select * from in_stable_4 where tin_ts in (\'0\') order by ts desc') + tdSql.checkData(0,1,'1970-01-01 08:00:00.000') + tdSql.checkData(0,2,0.00000) + tdSql.checkData(0,3,0.000000000) + tdSql.checkData(0,4,'1970-01-01 08:00:00.000') + tdSql.checkData(0,5,0.00000) + tdSql.checkData(0,6,0.000000000) + tdSql.query('select * from in_stable_4 where tin_ts in (\'1970-01-01 08:00:00.000\') order by ts desc') + tdSql.checkData(0,1,'1970-01-01 08:00:00.000') + tdSql.checkData(0,2,0.00000) + tdSql.checkData(0,3,0.000000000) + tdSql.checkData(0,4,'1970-01-01 08:00:00.000') + tdSql.checkData(0,5,0.00000) + tdSql.checkData(0,6,0.000000000) + tdSql.query('select * from in_stable_4 where tin_float in (0.00000) order by ts desc') + tdSql.checkData(0,1,'1970-01-01 08:00:00.000') + tdSql.checkData(0,2,0.00000) + tdSql.checkData(0,3,0.000000000) + tdSql.checkData(0,4,'1970-01-01 08:00:00.000') + tdSql.checkData(0,5,0.00000) + tdSql.checkData(0,6,0.000000000) + tdSql.query('select * from in_stable_4 where tin_double in (0.000000000) order by ts desc') + tdSql.checkData(0,1,'1970-01-01 08:00:00.000') + tdSql.checkData(0,2,0.00000) + tdSql.checkData(0,3,0.000000000) + tdSql.checkData(0,4,'1970-01-01 08:00:00.000') + tdSql.checkData(0,5,0.00000) + tdSql.checkData(0,6,0.000000000) + + tdSql.query('select * from in_ts_float_double_1 where in_ts in (\'0\') order by ts desc') + tdSql.checkData(0,1,'1970-01-01 08:00:00.000') + tdSql.checkData(0,2,0.00000) + tdSql.checkData(0,3,0.000000000) + tdSql.query('select * from in_ts_float_double_1 where in_ts in (\'1970-01-01 08:00:00.000\') order by ts desc') + tdSql.checkData(0,1,'1970-01-01 08:00:00.000') + tdSql.checkData(0,2,0.00000) + tdSql.checkData(0,3,0.000000000) + tdSql.query('select * from in_ts_float_double_1 where in_float in (0.00000) order by ts desc') + tdSql.checkData(0,1,'1970-01-01 08:00:00.000') + tdSql.checkData(0,2,0.00000) + tdSql.checkData(0,3,0.000000000) + tdSql.query('select * from in_ts_float_double_1 where in_double in (0.000000000) order by ts desc') + tdSql.checkData(0,1,'1970-01-01 08:00:00.000') + tdSql.checkData(0,2,0.00000) + tdSql.checkData(0,3,0.000000000) - cmd2 = 'select * from in_stable_4 where in_float in (\'888\');' + cmd2 = 'insert into in_ts_float_double_2 values(now,\'2020-01-01 08:00:00.001\',\'666\',\'-88888\') ;' tdLog.info(cmd2) - tdSql.error(cmd2) - try: - tdSql.execute(cmd2) - tdLog.exit("invalid operation: not supported filter condition") - except Exception as e: - tdLog.info(repr(e)) - tdLog.info("invalid operation: not supported filter condition") + tdSql.execute(cmd2) + + tdSql.query('select * from in_stable_4 where in_ts in (\'2020-01-01 08:00:00.001\') order by ts desc') + 
tdSql.checkData(0,1,'2020-01-01 08:00:00.001000') + tdSql.checkData(0,2,666.00000) + tdSql.checkData(0,3,-88888.000000000) + tdSql.checkData(0,4,'2020-01-01 08:00:00.001') + tdSql.checkData(0,5,666.00000) + tdSql.checkData(0,6,-88888.000000000) + tdSql.query('select * from in_stable_4 where in_ts in (\'1577836800001\') order by ts desc') + tdSql.checkData(0,1,'2020-01-01 08:00:00.001000') + tdSql.checkData(0,2,666.00000) + tdSql.checkData(0,3,-88888.000000000) + tdSql.checkData(0,4,'2020-01-01 08:00:00.001') + tdSql.checkData(0,5,666.00000) + tdSql.checkData(0,6,-88888.000000000) + tdSql.query('select * from in_stable_4 where in_float in (666.00000) order by ts desc') + tdSql.checkData(0,1,'2020-01-01 08:00:00.001000') + tdSql.checkData(0,2,666.00000) + tdSql.checkData(0,3,-88888.000000000) + tdSql.checkData(0,4,'2020-01-01 08:00:00.001') + tdSql.checkData(0,5,666.00000) + tdSql.checkData(0,6,-88888.000000000) + tdSql.query('select * from in_stable_4 where in_double in (-88888.000000000) order by ts desc') + tdSql.checkData(0,1,'2020-01-01 08:00:00.001000') + tdSql.checkData(0,2,666.00000) + tdSql.checkData(0,3,-88888.000000000) + tdSql.checkData(0,4,'2020-01-01 08:00:00.001') + tdSql.checkData(0,5,666.00000) + tdSql.checkData(0,6,-88888.000000000) + + tdSql.query('select * from in_stable_4 where tin_ts in (\'2020-01-01 08:00:00.001000\') order by ts desc') + tdSql.checkData(0,1,'2020-01-01 08:00:00.001000') + tdSql.checkData(0,2,666.00000) + tdSql.checkData(0,3,-88888.000000000) + tdSql.checkData(0,4,'2020-01-01 08:00:00.001') + tdSql.checkData(0,5,666.00000) + tdSql.checkData(0,6,-88888.000000000) + tdSql.query('select * from in_stable_4 where tin_ts in (\'1577836800001\') order by ts desc') + tdSql.checkData(0,1,'2020-01-01 08:00:00.001000') + tdSql.checkData(0,2,666.00000) + tdSql.checkData(0,3,-88888.000000000) + tdSql.checkData(0,4,'2020-01-01 08:00:00.001') + tdSql.checkData(0,5,666.00000) + tdSql.checkData(0,6,-88888.000000000) + tdSql.query('select * from in_stable_4 where tin_float in (666.00000) order by ts desc') + tdSql.checkData(0,1,'2020-01-01 08:00:00.001000') + tdSql.checkData(0,2,666.00000) + tdSql.checkData(0,3,-88888.000000000) + tdSql.checkData(0,4,'2020-01-01 08:00:00.001') + tdSql.checkData(0,5,666.00000) + tdSql.checkData(0,6,-88888.000000000) + tdSql.query('select * from in_stable_4 where tin_double in (-88888.000000000) order by ts desc') + tdSql.checkData(0,1,'2020-01-01 08:00:00.001000') + tdSql.checkData(0,2,666.00000) + tdSql.checkData(0,3,-88888.000000000) + tdSql.checkData(0,4,'2020-01-01 08:00:00.001') + tdSql.checkData(0,5,666.00000) + tdSql.checkData(0,6,-88888.000000000) + + tdSql.query('select * from in_ts_float_double_2 where in_ts in (\'1577836800001\') order by ts desc') + tdSql.checkData(0,1,'2020-01-01 08:00:00.001') + tdSql.checkData(0,2,666.00000) + tdSql.checkData(0,3,-88888.000000000) + tdSql.query('select * from in_ts_float_double_2 where in_ts in (\'2020-01-01 08:00:00.001\') order by ts desc') + tdSql.checkData(0,1,'2020-01-01 08:00:00.001') + tdSql.checkData(0,2,666.00000) + tdSql.checkData(0,3,-88888.000000000) + tdSql.query('select * from in_ts_float_double_2 where in_float in (666.00000) order by ts desc') + tdSql.checkData(0,1,'2020-01-01 08:00:00.001') + tdSql.checkData(0,2,666.00000) + tdSql.checkData(0,3,-88888.000000000) + tdSql.query('select * from in_ts_float_double_2 where in_double in (-88888.000000000) order by ts desc') + tdSql.checkData(0,1,'2020-01-01 08:00:00.001') + tdSql.checkData(0,2,666.00000) + 
tdSql.checkData(0,3,-88888.000000000) - cmd3 = 'select * from in_stable_4 where in_double in (\'66666\');' + cmd3 = 'insert into in_ts_float_double_3 values(now,\'2021-01-01 08:00:00.001\',\'-888.00000\',\'66666.000000000\') ;' tdLog.info(cmd3) - tdSql.error(cmd3) - try: - tdSql.execute(cmd3) - tdLog.exit("invalid operation: not supported filter condition") - except Exception as e: - tdLog.info(repr(e)) - tdLog.info("invalid operation: not supported filter condition") + tdSql.execute(cmd3) - cmd4 = 'select * from in_stable_4 where tin_float in (\'666\');' - tdLog.info(cmd4) - tdSql.error(cmd4) - try: - tdSql.execute(cmd4) - tdLog.exit("invalid operation: not supported filter condition") - except Exception as e: - tdLog.info(repr(e)) - tdLog.info("invalid operation: not supported filter condition") + tdSql.query('select * from in_stable_4 where in_ts in (\'2021-01-01 08:00:00.001\') order by ts desc') + tdSql.checkData(0,1,'2021-01-01 08:00:00.001000') + tdSql.checkData(0,2,-888.00000) + tdSql.checkData(0,3,66666.000000000) + tdSql.checkData(0,4,'2021-01-01 08:00:00.001') + tdSql.checkData(0,5,-888.00000) + tdSql.checkData(0,6,66666.000000000) + tdSql.query('select * from in_stable_4 where in_ts in (\'1609459200001\') order by ts desc') + tdSql.checkData(0,1,'2021-01-01 08:00:00.001000') + tdSql.checkData(0,2,-888.00000) + tdSql.checkData(0,3,66666.000000000) + tdSql.checkData(0,4,'2021-01-01 08:00:00.001') + tdSql.checkData(0,5,-888.00000) + tdSql.checkData(0,6,66666.000000000) + tdSql.query('select * from in_stable_4 where in_float in (-888.00000) order by ts desc') + tdSql.checkData(0,1,'2021-01-01 08:00:00.001000') + tdSql.checkData(0,2,-888.00000) + tdSql.checkData(0,3,66666.000000000) + tdSql.checkData(0,4,'2021-01-01 08:00:00.001') + tdSql.checkData(0,5,-888.00000) + tdSql.checkData(0,6,66666.000000000) + tdSql.query('select * from in_stable_4 where in_double in (66666.000000000) order by ts desc') + tdSql.checkData(0,1,'2021-01-01 08:00:00.001000') + tdSql.checkData(0,2,-888.00000) + tdSql.checkData(0,3,66666.000000000) + tdSql.checkData(0,4,'2021-01-01 08:00:00.001') + tdSql.checkData(0,5,-888.00000) + tdSql.checkData(0,6,66666.000000000) + + tdSql.query('select * from in_stable_4 where tin_ts in (\'2021-01-01 08:00:00.001000\') order by ts desc') + tdSql.checkData(0,1,'2021-01-01 08:00:00.001000') + tdSql.checkData(0,2,-888.00000) + tdSql.checkData(0,3,66666.000000000) + tdSql.checkData(0,4,'2021-01-01 08:00:00.001') + tdSql.checkData(0,5,-888.00000) + tdSql.checkData(0,6,66666.000000000) + tdSql.query('select * from in_stable_4 where tin_ts in (\'1609459200001\') order by ts desc') + tdSql.checkData(0,1,'2021-01-01 08:00:00.001000') + tdSql.checkData(0,2,-888.00000) + tdSql.checkData(0,3,66666.000000000) + tdSql.checkData(0,4,'2021-01-01 08:00:00.001') + tdSql.checkData(0,5,-888.00000) + tdSql.checkData(0,6,66666.000000000) + tdSql.query('select * from in_stable_4 where tin_float in (-888.00000) order by ts desc') + tdSql.checkData(0,1,'2021-01-01 08:00:00.001000') + tdSql.checkData(0,2,-888.00000) + tdSql.checkData(0,3,66666.000000000) + tdSql.checkData(0,4,'2021-01-01 08:00:00.001') + tdSql.checkData(0,5,-888.00000) + tdSql.checkData(0,6,66666.000000000) + tdSql.query('select * from in_stable_4 where tin_double in (66666.000000000) order by ts desc') + tdSql.checkData(0,1,'2021-01-01 08:00:00.001000') + tdSql.checkData(0,2,-888.00000) + tdSql.checkData(0,3,66666.000000000) + tdSql.checkData(0,4,'2021-01-01 08:00:00.001') + tdSql.checkData(0,5,-888.00000) + 
tdSql.checkData(0,6,66666.000000000) + + tdSql.query('select * from in_ts_float_double_3 where in_ts in (\'1609459200001\') order by ts desc') + tdSql.checkData(0,1,'2021-01-01 08:00:00.001') + tdSql.checkData(0,2,-888.00000) + tdSql.checkData(0,3,66666.000000000) + tdSql.query('select * from in_ts_float_double_3 where in_ts in (\'2021-01-01 08:00:00.001\') order by ts desc') + tdSql.checkData(0,1,'2021-01-01 08:00:00.001') + tdSql.checkData(0,2,-888.00000) + tdSql.checkData(0,3,66666.000000000) + tdSql.query('select * from in_ts_float_double_3 where in_float in (-888.00000) order by ts desc') + tdSql.checkData(0,1,'2021-01-01 08:00:00.001') + tdSql.checkData(0,2,-888.00000) + tdSql.checkData(0,3,66666.000000000) + tdSql.query('select * from in_ts_float_double_3 where in_double in (66666.000000000) order by ts desc') + tdSql.checkData(0,1,'2021-01-01 08:00:00.001') + tdSql.checkData(0,2,-888.00000) + tdSql.checkData(0,3,66666.000000000) + + + tdLog.info("=============== step4.3,multiple column and multiple tag check in function") + cmd1 = '''select * from in_stable_4 + where in_ts in (\'1609459200001\',\'2021-01-01 08:00:00.001\',\'1577836800001\',\'2020-01-01 08:00:00.001000\',\'0\',\'1970-01-01 08:00:00.000\') + and in_float in (0.00000,666.00000,-888.00000) + and in_double in (0.000000000,66666.000000000,-88888.000000000) + and tin_ts in (\'1609459200001\',\'2021-01-01 08:00:00.001\',\'1577836800001\',\'2020-01-01 08:00:00.001000\',\'0\',\'1970-01-01 08:00:00.000\') + and tin_float in (0.00000,666.00000,-888.00000) + and tin_double in (0.000000000,66666.000000000,-88888.000000000) + order by ts desc ;''' + tdLog.info(cmd1) + tdSql.query(cmd1) + tdSql.checkData(0,1,'2021-01-01 08:00:00.001000') + tdSql.checkData(0,2,-888.00000) + tdSql.checkData(0,3,66666.000000000) + tdSql.checkData(0,4,'2021-01-01 08:00:00.001') + tdSql.checkData(0,5,-888.00000) + tdSql.checkData(0,6,66666.000000000) + tdSql.checkData(1,1,'2020-01-01 08:00:00.001000') + tdSql.checkData(1,2,666.00000) + tdSql.checkData(1,3,-88888.000000000) + tdSql.checkData(1,4,'2020-01-01 08:00:00.001') + tdSql.checkData(1,5,666.00000) + tdSql.checkData(1,6,-88888.000000000) + tdSql.checkData(2,1,'1970-01-01 08:00:00.000') + tdSql.checkData(2,2,0.00000) + tdSql.checkData(2,3,0.000000000) + tdSql.checkData(2,4,'1970-01-01 08:00:00.000') + tdSql.checkData(2,5,0.00000) + tdSql.checkData(2,6,0.000000000) + - cmd5 = 'select * from in_stable_4 where tin_double in (\'88888\');' - tdLog.info(cmd5) - tdSql.error(cmd5) - try: - tdSql.execute(cmd5) - tdLog.exit("invalid operation: not supported filter condition") - except Exception as e: - tdLog.info(repr(e)) - tdLog.info("invalid operation: not supported filter condition") - - cmd6 = 'select * from in_float_double_1 where in_float in (\'888\');' - tdLog.info(cmd6) - tdSql.error(cmd6) - try: - tdSql.execute(cmd6) - tdLog.exit("invalid operation: not supported filter condition") - except Exception as e: - tdLog.info(repr(e)) - tdLog.info("invalid operation: not supported filter condition") - - cmd7 = 'select * from in_float_double_1 where in_double in (\'66666\');' - tdLog.info(cmd7) - tdSql.error(cmd7) - try: - tdSql.execute(cmd7) - tdLog.exit("invalid operation: not supported filter condition") - except Exception as e: - tdLog.info(repr(e)) - tdLog.info("invalid operation: not supported filter condition") - - tdLog.info("=============== step4.3,drop normal table && create table") - cmd1 = 'drop table if exists normal_in_float_double_1 ;' - cmd2 = 'create table normal_in_float_double_1 (ts 
timestamp,in_float float,in_double double) ; ' + tdLog.info("=============== step4.4,drop normal table && create table") + cmd1 = 'drop table if exists normal_in_ts_float_double_1 ;' + cmd2 = 'create table normal_in_ts_float_double_1 (ts timestamp,in_ts timestamp,in_float float,in_double double) ; ' tdLog.info(cmd1) tdSql.execute(cmd1) tdLog.info(cmd2) tdSql.execute(cmd2) - tdLog.info("=============== step4.4,insert normal table right data and check in function") - cmd1 = 'insert into normal_in_float_double_1 values(now,\'888\',\'666666\') ;' + tdLog.info("=============== step4.5,insert normal table right data and check in function") + cmd1 = 'insert into normal_in_ts_float_double_1 values(now,\'0\',\'0\',\'0\') ;' tdLog.info(cmd1) - tdSql.execute(cmd1) + tdSql.execute(cmd1) - cmd2 = 'select * from normal_in_float_double_1 where in_float in (\'888\');' - #tdLog.info(cmd2) - #tdSql.error(cmd2) - #try: - # tdSql.execute(cmd2) - # tdLog.exit("invalid operation: not supported filter condition") - #except Exception as e: - # tdLog.info(repr(e)) - # tdLog.info("invalid operation: not supported filter condition") - # - #cmd3 = 'select * from normal_in_float_double_1 where in_double in (\'66666\');' - #tdLog.info(cmd3) - #tdSql.error(cmd3) - #try: - # tdSql.execute(cmd3) - # tdLog.exit("invalid operation: not supported filter condition") - #except Exception as e: - # tdLog.info(repr(e)) - # tdLog.info("invalid operation: not supported filter condition") + tdSql.query('select * from normal_in_ts_float_double_1 where in_ts in (\'0\') order by ts desc') + tdSql.checkData(0,1,'1970-01-01 08:00:00.000') + tdSql.checkData(0,2,0.00000) + tdSql.checkData(0,3,0.000000000) + tdSql.query('select * from normal_in_ts_float_double_1 where in_ts in (\'1970-01-01 08:00:00.000\') order by ts desc') + tdSql.checkData(0,1,'1970-01-01 08:00:00.000') + tdSql.checkData(0,2,0.00000) + tdSql.checkData(0,3,0.000000000) + tdSql.query('select * from normal_in_ts_float_double_1 where in_float in (0.00000) order by ts desc') + tdSql.checkData(0,1,'1970-01-01 08:00:00.000') + tdSql.checkData(0,2,0.00000) + tdSql.checkData(0,3,0.000000000) + tdSql.query('select * from normal_in_ts_float_double_1 where in_double in (0.000000000) order by ts desc') + tdSql.checkData(0,1,'1970-01-01 08:00:00.000') + tdSql.checkData(0,2,0.00000) + tdSql.checkData(0,3,0.000000000) + + cmd2 = 'insert into normal_in_ts_float_double_1 values(now,\'2020-01-01 08:00:00.001\',\'666\',\'-88888\') ;' + tdLog.info(cmd2) + tdSql.execute(cmd2) + + tdSql.query('select * from normal_in_ts_float_double_1 where in_ts in (\'1577836800001\') order by ts desc') + tdSql.checkData(0,1,'2020-01-01 08:00:00.001') + tdSql.checkData(0,2,666.00000) + tdSql.checkData(0,3,-88888.000000000) + tdSql.query('select * from normal_in_ts_float_double_1 where in_ts in (\'2020-01-01 08:00:00.001\') order by ts desc') + tdSql.checkData(0,1,'2020-01-01 08:00:00.001') + tdSql.checkData(0,2,666.00000) + tdSql.checkData(0,3,-88888.000000000) + tdSql.query('select * from normal_in_ts_float_double_1 where in_float in (666.00000) order by ts desc') + tdSql.checkData(0,1,'2020-01-01 08:00:00.001') + tdSql.checkData(0,2,666.00000) + tdSql.checkData(0,3,-88888.000000000) + tdSql.query('select * from normal_in_ts_float_double_1 where in_double in (-88888.000000000) order by ts desc') + tdSql.checkData(0,1,'2020-01-01 08:00:00.001') + tdSql.checkData(0,2,666.00000) + tdSql.checkData(0,3,-88888.000000000) + + cmd3 = 'insert into normal_in_ts_float_double_1 values(now,\'2021-01-01 
08:00:00.001\',\'-888.00000\',\'66666.000000000\') ;' + tdLog.info(cmd3) + tdSql.execute(cmd3) + + tdSql.query('select * from normal_in_ts_float_double_1 where in_ts in (\'1609459200001\') order by ts desc') + tdSql.checkData(0,1,'2021-01-01 08:00:00.001') + tdSql.checkData(0,2,-888.00000) + tdSql.checkData(0,3,66666.000000000) + tdSql.query('select * from normal_in_ts_float_double_1 where in_ts in (\'2021-01-01 08:00:00.001\') order by ts desc') + tdSql.checkData(0,1,'2021-01-01 08:00:00.001') + tdSql.checkData(0,2,-888.00000) + tdSql.checkData(0,3,66666.000000000) + tdSql.query('select * from normal_in_ts_float_double_1 where in_float in (-888.00000) order by ts desc') + tdSql.checkData(0,1,'2021-01-01 08:00:00.001') + tdSql.checkData(0,2,-888.00000) + tdSql.checkData(0,3,66666.000000000) + tdSql.query('select * from normal_in_ts_float_double_1 where in_double in (66666.000000000) order by ts desc') + tdSql.checkData(0,1,'2021-01-01 08:00:00.001') + tdSql.checkData(0,2,-888.00000) + tdSql.checkData(0,3,66666.000000000) + + cmd4 = '''select * from normal_in_ts_float_double_1 + where in_ts in (\'1609459200001\',\'2021-01-01 08:00:00.001\',\'1577836800001\',\'2020-01-01 08:00:00.001000\',\'0\',\'1970-01-01 08:00:00.000\') + and in_double in (0.000000000,66666.000000000,-88888.000000000) + and in_float in (0.00000,666.00000,-888.00000) + order by ts desc ;''' + tdLog.info(cmd4) + tdSql.query(cmd4) + tdSql.checkData(0,1,'2021-01-01 08:00:00.001') + tdSql.checkData(0,2,-888.00000) + tdSql.checkData(0,3,66666.000000000) + tdSql.checkData(1,1,'2020-01-01 08:00:00.001') + tdSql.checkData(1,2,666.00000) + tdSql.checkData(1,3,-88888.000000000) + tdSql.checkData(2,1,'1970-01-01 08:00:00.000') + tdSql.checkData(2,2,0.00000) + tdSql.checkData(2,3,0.000000000) + + def stop(self): tdSql.close() From ee974c556393ea675bc84ca55145b9f7418e9b39 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Fri, 25 Jun 2021 14:12:37 +0800 Subject: [PATCH 36/37] Update README-CN.md (#6621) --- README-CN.md | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/README-CN.md b/README-CN.md index afb242d621..d5586c78b7 100644 --- a/README-CN.md +++ b/README-CN.md @@ -23,7 +23,7 @@ TDengine是涛思数据专为物联网、车联网、工业互联网、IT运维 TDengine是一个高效的存储、查询、分析时序大数据的平台,专为物联网、车联网、工业互联网、运维监测等优化而设计。您可以像使用关系型数据库MySQL一样来使用它,但建议您在使用前仔细阅读一遍下面的文档,特别是 [数据模型](https://www.taosdata.com/cn/documentation/architecture) 与 [数据建模](https://www.taosdata.com/cn/documentation/model)。除本文档之外,欢迎 [下载产品白皮书](https://www.taosdata.com/downloads/TDengine%20White%20Paper.pdf)。 -# 生成 +# 构建 TDengine目前2.0版服务器仅能在Linux系统上安装和运行,后续会支持Windows、macOS等系统。客户端可以在Windows或Linux上安装和运行。任何OS的应用也可以选择RESTful接口连接服务器taosd。CPU支持X64/ARM64/MIPS64/Alpha64,后续会支持ARM32、RISC-V等CPU架构。用户可根据需求选择通过[源码](https://www.taosdata.com/cn/getting-started/#通过源码安装)或者[安装包](https://www.taosdata.com/cn/getting-started/#通过安装包安装)来安装。本快速指南仅适用于通过源码安装。 @@ -107,7 +107,7 @@ Go 连接器和 Grafana 插件在其他独立仓库,如果安装它们的话 git submodule update --init --recursive ``` -## 生成 TDengine +## 构建 TDengine ### Linux 系统 @@ -116,6 +116,12 @@ mkdir debug && cd debug cmake .. && cmake --build . ``` +您可以选择使用 Jemalloc 作为内存分配器,替代默认的 glibc: +```bash +apt install autoconf +cmake .. 
-DJEMALLOC_ENABLED=true +``` + 在X86-64、X86、arm64、arm32 和 mips64 平台上,TDengine 生成脚本可以自动检测机器架构。也可以手动配置 CPUTYPE 参数来指定 CPU 类型,如 aarch64 或 aarch32 等。 aarch64: From 360d59428c4b74069c9760feedbff1f16048ba66 Mon Sep 17 00:00:00 2001 From: SunShine Chan Date: Fri, 25 Jun 2021 14:20:39 +0800 Subject: [PATCH 37/37] [TD-4902] FIX -m parameter of taosdemo with letter-initial string (#6620) * [TD-4902] FIX with 1 char * Fix prompt of -m @ taosdemo --- src/kit/taosdemo/taosdemo.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 38f0ecb219..0eac1518a7 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -982,9 +982,9 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { arguments->len_of_binary = atoi(argv[++i]); } else if (strcmp(argv[i], "-m") == 0) { if ((argc == i+1) || - (!isStringNumber(argv[i+1]))) { + (isStringNumber(argv[i+1]))) { printHelp(); - errorPrint("%s", "\n\t-m need a number following!\n"); + errorPrint("%s", "\n\t-m need a letter-initial string following!\n"); exit(EXIT_FAILURE); } arguments->tb_prefix = argv[++i];
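In practical terms, the hunk above inverts the check on `-m`: the option supplies the child-table name prefix (`tb_prefix`), so a purely numeric argument is now rejected and a letter-initial string is accepted. A rough illustration of the resulting behaviour — the prefix values below are made-up examples, and it is assumed that `isStringNumber()` classifies "123" as numeric:

```bash
# Illustrative invocations only (example prefixes, not taken from the patch)
taosdemo -m tb_    # accepted: tb_prefix is set to "tb_"
taosdemo -m 123    # rejected with: "-m need a letter-initial string following!"
```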